All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH][v5] PV extension of HVM(hybrid) in support in Xen
@ 2010-03-04  9:37 Sheng Yang
  2010-03-04 15:04 ` Tim Deegan
  0 siblings, 1 reply; 5+ messages in thread
From: Sheng Yang @ 2010-03-04  9:37 UTC (permalink / raw)
  To: Keir Fraser; +Cc: Ian Pratt, xen-devel, Ian Campbell, Tim Deegan

[-- Attachment #1: Type: Text/Plain, Size: 738 bytes --]

Hi Keir

The latest (hybrid) patchset again...

Change from v4:
1. Add support for PV clocksource on HVM. (Replace evtchn enabling with pv 
clock enabling).
2. Update the patch following Tim's comments.

Change from v3:

1. Minor polish of the patchset. Replaced several specific
is_hvm_pv_evtchn_domain() judgements.
2. Name changed...

Change from v2:
 
1. Change the name "hybrid" to "PV featured HVM", as well as flag and macro
names.
2. Merge the hypercall page with HVM.
3. Clean up VIRQ delivery mechanism, fixing the lock issue.
4. Remove the reserved region in E820 (but another issue remains: I
 can't get the location of the grant table elegantly, as I described in the
 Linux part patches)

Please review, thanks!

-- 
regards
Yang, Sheng

[-- Attachment #2: hybrid-xen.patch --]
[-- Type: text/x-patch, Size: 12165 bytes --]

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -686,7 +686,16 @@
 
     if ( is_hvm_vcpu(v) )
     {
+        unsigned long eip, cs;
+
         hvm_set_info_guest(v);
+
+        eip = c(user_regs.eip);
+        if (eip != 0) {
+            cs = eip >> 12 << 8;
+            hvm_vcpu_reset_state(v, cs, 0);
+            hvm_funcs.set_tsc_offset(v, 0);
+        }
         goto out;
     }
 
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2240,6 +2240,13 @@
     {
     case VCPUOP_register_runstate_memory_area:
     case VCPUOP_get_runstate_info:
+    /* For evtchn on HVM */
+    case VCPUOP_initialise:
+    case VCPUOP_up:
+    case VCPUOP_set_periodic_timer:
+    case VCPUOP_stop_periodic_timer:
+    case VCPUOP_set_singleshot_timer:
+    case VCPUOP_stop_singleshot_timer:
         rc = do_vcpu_op(cmd, vcpuid, arg);
         break;
     default:
@@ -2311,6 +2318,7 @@
     [ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op,
     [ __HYPERVISOR_vcpu_op ] = (hvm_hypercall_t *)hvm_vcpu_op,
     HYPERCALL(xen_version),
+    HYPERCALL(console_io),
     HYPERCALL(event_channel_op),
     HYPERCALL(sched_op),
     HYPERCALL(hvm_op)
@@ -3110,6 +3118,39 @@
         break;
     }
 
+    case HVMOP_enable_pv: {
+        struct xen_hvm_pv_type a;
+        struct domain *d;
+
+        if ( copy_from_guest(&a, arg, 1) )
+            return -EFAULT;
+
+        if ( a.domid != DOMID_SELF )
+            return -EFAULT;
+
+        rc = rcu_lock_target_domain_by_id(DOMID_SELF, &d);
+        if ( rc != 0 )
+            return rc;
+
+        rc = -EINVAL;
+        if ( !is_hvm_domain(d) )
+            goto param_fail5;
+
+        rc = xsm_hvm_param(d, op);
+        if ( rc )
+            goto param_fail5;
+
+	if (a.flags & HVM_PV_CLOCK) {
+		d->hvm_pv_enabled |= XEN_HVM_PV_CLOCK_ENABLED;
+		update_domain_wallclock_time(d);
+		/* Other vcpus are adjusted during the start-up */
+		hvm_funcs.set_tsc_offset(d->vcpu[0], 0);
+	}
+param_fail5:
+        rcu_unlock_domain(d);
+        break;
+    }
+
     default:
     {
         gdprintk(XENLOG_WARNING, "Bad HVM op %ld.\n", op);
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -26,6 +26,32 @@
 #include <asm/hvm/domain.h>
 #include <asm/hvm/support.h>
 
+/* Must be called with hvm_domain->irq_lock held */
+static void assert_irq(struct domain *d, unsigned ioapic_gsi, unsigned pic_irq)
+{
+    struct vcpu *v = d->vcpu[0];
+    int virq_pin = VIRQ_EMUL_PIN(pic_irq);
+
+    if ( v->virq_to_evtchn[virq_pin] != 0 )
+    {
+        send_guest_global_virq(d, virq_pin);
+        return;
+    }
+    vioapic_irq_positive_edge(d, ioapic_gsi);
+    vpic_irq_positive_edge(d, pic_irq);
+}
+
+/* Must be called with hvm_domain->irq_lock held */
+static void deassert_irq(struct domain *d, unsigned isa_irq)
+{
+    struct vcpu *v = d->vcpu[0];
+    int virq_pin = VIRQ_EMUL_PIN(isa_irq);
+
+    if ( v->virq_to_evtchn[virq_pin] == 0 )
+        /* Not VIRQ driven */
+        vpic_irq_negative_edge(d, isa_irq);
+}
+
 static void __hvm_pci_intx_assert(
     struct domain *d, unsigned int device, unsigned int intx)
 {
@@ -45,10 +71,7 @@
     isa_irq = hvm_irq->pci_link.route[link];
     if ( (hvm_irq->pci_link_assert_count[link]++ == 0) && isa_irq &&
          (hvm_irq->gsi_assert_count[isa_irq]++ == 0) )
-    {
-        vioapic_irq_positive_edge(d, isa_irq);
-        vpic_irq_positive_edge(d, isa_irq);
-    }
+        assert_irq(d, isa_irq, isa_irq);
 }
 
 void hvm_pci_intx_assert(
@@ -77,7 +100,7 @@
     isa_irq = hvm_irq->pci_link.route[link];
     if ( (--hvm_irq->pci_link_assert_count[link] == 0) && isa_irq &&
          (--hvm_irq->gsi_assert_count[isa_irq] == 0) )
-        vpic_irq_negative_edge(d, isa_irq);
+        deassert_irq(d, isa_irq);
 }
 
 void hvm_pci_intx_deassert(
@@ -100,10 +123,7 @@
 
     if ( !__test_and_set_bit(isa_irq, &hvm_irq->isa_irq.i) &&
          (hvm_irq->gsi_assert_count[gsi]++ == 0) )
-    {
-        vioapic_irq_positive_edge(d, gsi);
-        vpic_irq_positive_edge(d, isa_irq);
-    }
+        assert_irq(d, gsi, isa_irq);
 
     spin_unlock(&d->arch.hvm_domain.irq_lock);
 }
@@ -120,7 +140,7 @@
 
     if ( __test_and_clear_bit(isa_irq, &hvm_irq->isa_irq.i) &&
          (--hvm_irq->gsi_assert_count[gsi] == 0) )
-        vpic_irq_negative_edge(d, isa_irq);
+        deassert_irq(d, isa_irq);
 
     spin_unlock(&d->arch.hvm_domain.irq_lock);
 }
@@ -185,8 +205,8 @@
 
 void hvm_assert_evtchn_irq(struct vcpu *v)
 {
-    if ( v->vcpu_id != 0 )
-        return;
+    struct domain *d = v->domain;
+    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
 
     if ( unlikely(in_irq() || !local_irq_is_enabled()) )
     {
@@ -194,7 +214,10 @@
         return;
     }
 
-    hvm_set_callback_irq_level(v);
+    if (hvm_irq->callback_via_type == HVMIRQ_callback_vector)
+        vcpu_kick(v);
+    else
+        hvm_set_callback_irq_level(v);
 }
 
 void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq)
@@ -251,7 +274,7 @@
 
     via_type = (uint8_t)(via >> 56) + 1;
     if ( ((via_type == HVMIRQ_callback_gsi) && (via == 0)) ||
-         (via_type > HVMIRQ_callback_pci_intx) )
+         (via_type > HVMIRQ_callback_vector) )
         via_type = HVMIRQ_callback_none;
 
     spin_lock(&d->arch.hvm_domain.irq_lock);
@@ -297,6 +320,9 @@
         if ( hvm_irq->callback_via_asserted )
              __hvm_pci_intx_assert(d, pdev, pintx);
         break;
+    case HVMIRQ_callback_vector:
+        hvm_irq->callback_via.vector = (uint8_t)via;
+        break;
     default:
         break;
     }
@@ -312,6 +338,10 @@
     case HVMIRQ_callback_pci_intx:
         printk("PCI INTx Dev 0x%02x Int%c\n", pdev, 'A' + pintx);
         break;
+    case HVMIRQ_callback_vector:
+        printk("Set HVMIRQ_callback_vector to %u\n",
+               hvm_irq->callback_via.vector);
+        break;
     default:
         printk("None\n");
         break;
@@ -322,6 +352,10 @@
 {
     struct hvm_domain *plat = &v->domain->arch.hvm_domain;
     int vector;
+
+    if (plat->irq.callback_via_type == HVMIRQ_callback_vector &&
+            vcpu_info(v, evtchn_upcall_pending))
+        return hvm_intack_vector(plat->irq.callback_via.vector);
 
     if ( unlikely(v->nmi_pending) )
         return hvm_intack_nmi;
@@ -363,6 +397,8 @@
     case hvm_intsrc_lapic:
         if ( !vlapic_ack_pending_irq(v, intack.vector) )
             intack = hvm_intack_none;
+        break;
+    case hvm_intsrc_vector:
         break;
     default:
         intack = hvm_intack_none;
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -289,6 +289,10 @@
         return X86EMUL_RETRY;
 
     hvm_vcpu_reset_state(v, trampoline_vector << 8, 0);
+
+    /* Adjust TSC for PV clock */
+    if (is_hvm_pv_clock_enabled_domain(v->domain))
+	hvm_funcs.set_tsc_offset(v, 0);
 
     vcpu_unpause(v);
 
diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -164,7 +164,8 @@
     {
         HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0);
         vmx_inject_extint(intack.vector);
-        pt_intr_post(v, intack);
+        if (intack.source != hvm_intsrc_vector)
+             pt_intr_post(v, intack);
     }
 
     /* Is there another IRQ to queue up behind this one? */
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -686,6 +686,7 @@
     struct domain *d = current->domain;
     /* Optionally shift out of the way of Viridian architectural leaves. */
     uint32_t base = is_viridian_domain(d) ? 0x40000100 : 0x40000000;
+    unsigned int tmp_eax, tmp_ebx, tmp_ecx, tmp_edx;
 
     idx -= base;
     if ( idx > 3 ) 
@@ -716,6 +717,14 @@
         *edx = 0;          /* Features 2 */
         if ( !is_hvm_vcpu(current) )
             *ecx |= XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD;
+
+        /* Check if additional feature specified, e.g. Hybrid */
+        if ( !is_viridian_domain(d) ) {
+            domain_cpuid(d, 0x40000002, 0,
+                         &tmp_eax, &tmp_ebx, &tmp_ecx, &tmp_edx);
+            if (tmp_edx != 0)
+                *edx = tmp_edx & XEN_CPUID_FEAT2_MASK;
+        }
         break;
 
     case 3:
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -33,7 +33,8 @@
     hvm_intsrc_pic,
     hvm_intsrc_lapic,
     hvm_intsrc_nmi,
-    hvm_intsrc_mce
+    hvm_intsrc_mce,
+    hvm_intsrc_vector,
 };
 struct hvm_intack {
     uint8_t source; /* enum hvm_intsrc */
@@ -44,6 +45,7 @@
 #define hvm_intack_lapic(vec) ( (struct hvm_intack) { hvm_intsrc_lapic, vec } )
 #define hvm_intack_nmi        ( (struct hvm_intack) { hvm_intsrc_nmi,   2 } )
 #define hvm_intack_mce        ( (struct hvm_intack) { hvm_intsrc_mce,   18 } )
+#define hvm_intack_vector(vec)( (struct hvm_intack) { hvm_intsrc_vector, vec } )
 enum hvm_intblk {
     hvm_intblk_none,      /* not blocked (deliverable) */
     hvm_intblk_shadow,    /* MOV-SS or STI shadow */
diff --git a/xen/include/asm-x86/hvm/irq.h b/xen/include/asm-x86/hvm/irq.h
--- a/xen/include/asm-x86/hvm/irq.h
+++ b/xen/include/asm-x86/hvm/irq.h
@@ -54,12 +54,14 @@
         enum {
             HVMIRQ_callback_none,
             HVMIRQ_callback_gsi,
-            HVMIRQ_callback_pci_intx
+            HVMIRQ_callback_pci_intx,
+            HVMIRQ_callback_vector,
         } callback_via_type;
     };
     union {
         uint32_t gsi;
         struct { uint8_t dev, intx; } pci;
+        uint32_t vector;
     } callback_via;
 
     /* Number of INTx wires asserting each PCI-ISA link. */
diff --git a/xen/include/public/arch-x86/cpuid.h b/xen/include/public/arch-x86/cpuid.h
--- a/xen/include/public/arch-x86/cpuid.h
+++ b/xen/include/public/arch-x86/cpuid.h
@@ -65,4 +65,11 @@
 #define _XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD 0
 #define XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD  (1u<<0)
 
+/* Mask unsupported CPUID specified by user */
+#define XEN_CPUID_FEAT2_MASK 0x3ul
+#define _XEN_CPUID_FEAT2_HVM_PV 0
+#define XEN_CPUID_FEAT2_HVM_PV (1u<<0)
+#define _XEN_CPUID_FEAT2_HVM_PV_EVTCHN 1
+#define XEN_CPUID_FEAT2_HVM_PV_EVTCHN (1u<<1)
+
 #endif /* __XEN_PUBLIC_ARCH_X86_CPUID_H__ */
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -127,6 +127,13 @@
 typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t);
 
+#define HVMOP_enable_pv 9
+struct xen_hvm_pv_type {
+    domid_t domid;
+    uint32_t flags;
+#define HVM_PV_CLOCK  (1ull<<0)
+#define HVM_PV_EVTCHN (1ull<<1)
+};
 
 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
 
diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h
--- a/xen/include/public/xen.h
+++ b/xen/include/public/xen.h
@@ -158,7 +158,12 @@
 #define VIRQ_ARCH_6    22
 #define VIRQ_ARCH_7    23
 
-#define NR_VIRQS       24
+#define VIRQ_EMUL_PIN_START 24
+#define VIRQ_EMUL_PIN_END 39
+#define VIRQ_EMUL_PIN_NUM 16
+#define VIRQ_EMUL_PIN(x) (VIRQ_EMUL_PIN_START + x)
+
+#define NR_VIRQS       40
 
 /*
  * HYPERVISOR_mmu_update(reqs, count, pdone, foreigndom)
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -311,6 +311,9 @@
 
     /* Memory paging support */
     struct mem_event_domain mem_event;
+
+#define XEN_HVM_PV_CLOCK_ENABLED   (1u << 0)
+    uint64_t hvm_pv_enabled;
 };
 
 struct domain_setup_info
@@ -597,6 +600,9 @@
 #define is_hvm_vcpu(v)   (is_hvm_domain(v->domain))
 #define need_iommu(d)    ((d)->need_iommu)
 
+#define is_hvm_pv_clock_enabled_domain(d) \
+                         ((d)->hvm_pv_enabled & XEN_HVM_PV_CLOCK_ENABLED)
+
 void set_vcpu_migration_delay(unsigned int delay);
 unsigned int get_vcpu_migration_delay(void);
 

[-- Attachment #3: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH][v5] PV extension of HVM(hybrid) in support in Xen
  2010-03-04  9:37 [PATCH][v5] PV extension of HVM(hybrid) in support in Xen Sheng Yang
@ 2010-03-04 15:04 ` Tim Deegan
  2010-03-05  2:01   ` Sheng Yang
  0 siblings, 1 reply; 5+ messages in thread
From: Tim Deegan @ 2010-03-04 15:04 UTC (permalink / raw)
  To: Sheng Yang; +Cc: Ian Campbell, Ian Pratt, xen-devel, Keir Fraser

Hi,

Given that we now have a similar feature from Stefano I'd like to see
them resolved into a single patchset. 

At 09:37 +0000 on 04 Mar (1267695434), Sheng Yang wrote:
> +	if (a.flags & HVM_PV_CLOCK) {
> +		d->hvm_pv_enabled |= XEN_HVM_PV_CLOCK_ENABLED;
> +		update_domain_wallclock_time(d);
> +		/* Other vcpus are adjusted during the start-up */
> +		hvm_funcs.set_tsc_offset(d->vcpu[0], 0);
> +	}

This still makes no sense to me.  Either it should affect the calling
vcpu (i.e., current) or it should affect all online vcpus.

Also, whatever the semantics, they should be documented in comments in
the header file.

Tim.

-- 
Tim Deegan <Tim.Deegan@citrix.com>
Principal Software Engineer, XenServer Engineering
Citrix Systems UK Ltd.  (Company #02937203, SL9 0BG)

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH][v5] PV extension of HVM(hybrid) in support in Xen
  2010-03-04 15:04 ` Tim Deegan
@ 2010-03-05  2:01   ` Sheng Yang
  2010-03-05 10:40     ` Ian Campbell
  0 siblings, 1 reply; 5+ messages in thread
From: Sheng Yang @ 2010-03-05  2:01 UTC (permalink / raw)
  To: Tim Deegan; +Cc: Ian Campbell, Ian Pratt, xen-devel, Keir Fraser

On Thursday 04 March 2010 23:04:31 Tim Deegan wrote:
> Hi,
> 
> Given that we now have a similar feature from Stefano I'd like to see
> them resolved into a single patchset.

I think Stefano's patch focused on pv_ops dom0? We are focused on upstream 
Linux as domU...

I think we can get dom0 approach step by step. 
> 
> At 09:37 +0000 on 04 Mar (1267695434), Sheng Yang wrote:
> > +	if (a.flags & HVM_PV_CLOCK) {
> > +		d->hvm_pv_enabled |= XEN_HVM_PV_CLOCK_ENABLED;
> > +		update_domain_wallclock_time(d);
> > +		/* Other vcpus are adjusted during the start-up */
> > +		hvm_funcs.set_tsc_offset(d->vcpu[0], 0);
> > +	}
> 
> This still makes no sense to me.  Either it should affect the calling
> vcpu (i.e., current) or it should affect all online vcpus.

Yeah... I would make it affect the calling vcpu.
> 
> Also, whatever the semantics, they should be documented in comments in
> the header file.

Sure.

-- 
regards
Yang, Sheng

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH][v5] PV extension of HVM(hybrid) in support in Xen
  2010-03-05  2:01   ` Sheng Yang
@ 2010-03-05 10:40     ` Ian Campbell
  2010-03-05 11:19       ` Stefano Stabellini
  0 siblings, 1 reply; 5+ messages in thread
From: Ian Campbell @ 2010-03-05 10:40 UTC (permalink / raw)
  To: Sheng Yang; +Cc: Tim Deegan, Ian Pratt, xen-devel, Keir Fraser

On Fri, 2010-03-05 at 02:01 +0000, Sheng Yang wrote:
> On Thursday 04 March 2010 23:04:31 Tim Deegan wrote:
> > Hi,
> > 
> > Given that we now have a similar feature from Stefano I'd like to see
> > them resolved into a single patchset.
> 
> I think Stefano's patch focused on pv_ops dom0? We are focused on upstream 
> Linux as domU...

Stefano's patches are based on the latest Xen branches which support
dom0 but his work is focused on domU. That is because there is a lot of
similarity between the needs of dom0 and the needs of PVonHVM event
channels so it makes sense to leverage one for the other.

> I think we can get dom0 approach step by step. 

I think we'd be better served in extracting the bits of the domain0
patchset which support/enable Stefano's patches and pushing that stuff
upstream as a complete series. Otherwise we basically need to
reimplement domain 0 stuff on top of / alongside your patchset.

Ian.

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: Re: [PATCH][v5] PV extension of HVM(hybrid) in support in Xen
  2010-03-05 10:40     ` Ian Campbell
@ 2010-03-05 11:19       ` Stefano Stabellini
  0 siblings, 0 replies; 5+ messages in thread
From: Stefano Stabellini @ 2010-03-05 11:19 UTC (permalink / raw)
  To: Ian Campbell; +Cc: Tim Deegan, Ian Pratt, xen-devel, Keir Fraser, Sheng Yang

On Fri, 5 Mar 2010, Ian Campbell wrote:
> On Fri, 2010-03-05 at 02:01 +0000, Sheng Yang wrote:
> > On Thursday 04 March 2010 23:04:31 Tim Deegan wrote:
> > > Hi,
> > > 
> > > Given that we now have a similar feature from Stefano I'd like to see
> > > them resolved into a single patchset.
> > 
> > I think Stefano's patch focused on pv_ops dom0? We are focused on upstream 
> > Linux as domU...
> 
> Stefano's patches are based on the latest Xen branches which support
> dom0 but his work is focused on domU. That is because there is a lot of
> similarity between the needs of dom0 and the needs of PVonHVM event
> channels so it makes sense to leverage one for the other.
> 
> > I think we can get dom0 approach step by step. 
> 
> I think we'd be better served in extracting the bits of the domain0
> patchset which support/enable Stefano's patches and pushing that stuff
> upstream as a complete series. Otherwise we basically need to
> reimplement domain 0 stuff on top of / alongside your patchset.
> 

Indeed.

If you look at the fourth patch of my series you'll find that the only
thing needed is the xen_setup_pirqs mechanism, that has been recently
refactored to address the comments from the kernel community, so it
shouldn't be difficult to upstream.

Besides upstreaming something now that you know has to be changed soon
is certainly not going to help Xen or its users.

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2010-03-05 11:19 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2010-03-04  9:37 [PATCH][v5] PV extension of HVM(hybrid) in support in Xen Sheng Yang
2010-03-04 15:04 ` Tim Deegan
2010-03-05  2:01   ` Sheng Yang
2010-03-05 10:40     ` Ian Campbell
2010-03-05 11:19       ` Stefano Stabellini

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.