* [PATCH] KVM: VMX: fix interrupt lost when enable virtual interrupt delivery
@ 2013-02-26 12:34 Yang Zhang
  2013-02-26 12:41 ` Gleb Natapov
From: Yang Zhang @ 2013-02-26 12:34 UTC (permalink / raw)
  To: kvm; +Cc: gleb, mtosatti, xiantao.zhang, Yang Zhang

From: Yang Zhang <yang.z.zhang@Intel.com>

On platforms that support the virtual interrupt delivery feature,
hardware clears vIRR automatically while the target vcpu is running,
so software must not modify vIRR while the target vcpu is running.
This patch records the virtual interrupt in posted_irr when delivering
a virtual interrupt to the guest, and then syncs posted_irr into vIRR
in the target vcpu's context.

The patch to enable Posted Interrupt has similar logic. Since it is
still under discussion, this part is split out from the Posted
Interrupt patch to fix the virtual interrupt delivery issue.
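
For reference, the intended two-phase flow can be modeled in isolation
as follows. This is only a standalone simplification (no locking, no
APICv check, userspace C rather than kernel code); the field names just
mirror the ones added by the patch below.

/*
 * Simplified model of the two-phase delivery: the delivery side only
 * touches posted_irr, and the target vcpu folds posted_irr into vIRR.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct model_lapic {
	uint32_t virr[8];            /* vIRR: 256 bits, owned by "hardware" */
	uint32_t posted_irr[8];      /* software staging area               */
	uint8_t  posted_irr_changed; /* one bit per 32-vector chunk         */
};

/* Delivery path: record the vector in posted_irr, not in vIRR. */
static void model_accept_irq(struct model_lapic *apic, int vector)
{
	apic->posted_irr[vector >> 5] |= 1u << (vector & 31);
	apic->posted_irr_changed |= 1u << (vector >> 5);
}

/* Target vcpu context: fold posted_irr into vIRR, clear the staging bits. */
static void model_update_irr(struct model_lapic *apic)
{
	int i;

	for (i = 0; i < 8; i++) {
		if (!(apic->posted_irr_changed & (1u << i)))
			continue;
		apic->virr[i] |= apic->posted_irr[i];
		apic->posted_irr[i] = 0;
	}
	apic->posted_irr_changed = 0;
}

int main(void)
{
	struct model_lapic apic;

	memset(&apic, 0, sizeof(apic));
	model_accept_irq(&apic, 0x31);  /* delivered from another context */
	model_update_irr(&apic);        /* synced in the target vcpu      */
	printf("vIRR word 1 = 0x%x\n", (unsigned)apic.virr[1]); /* prints 0x20000 */
	return 0;
}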

Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
---
 arch/x86/kvm/lapic.c |   39 ++++++++++++++++++++++++++++++++++++++-
 arch/x86/kvm/lapic.h |    3 +++
 arch/x86/kvm/vmx.c   |    3 ++-
 3 files changed, 43 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 02b51dd..98bc37e 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -357,6 +357,11 @@ static u8 count_vectors(void *bitmap)
 	return count;
 }
 
+static inline bool apic_test_irr(int vec, struct kvm_lapic *apic)
+{
+	return apic_test_vector(vec, apic->regs + APIC_IRR);
+}
+
 static inline int apic_test_and_set_irr(int vec, struct kvm_lapic *apic)
 {
 	apic->irr_pending = true;
@@ -368,6 +373,26 @@ static inline int apic_search_irr(struct kvm_lapic *apic)
 	return find_highest_vector(apic->regs + APIC_IRR);
 }
 
+static inline void kvm_apic_update_irr(struct kvm_lapic *apic)
+{
+	int index;
+
+	if (!kvm_x86_ops->vm_has_apicv(apic->vcpu->kvm))
+		return;
+
+	if (apic->posted_irr_changed) {
+		spin_lock(&apic->posted_irr_lock);
+		for_each_set_bit(index,
+			(unsigned long *)(&apic->posted_irr_changed), 8) {
+			*((u32 *)(apic->regs + APIC_IRR + index * 0x10)) |=
+						apic->posted_irr[index];
+			apic->posted_irr[index] = 0;
+		}
+		apic->posted_irr_changed = 0;
+		spin_unlock(&apic->posted_irr_lock);
+	}
+}
+
 static inline int apic_find_highest_irr(struct kvm_lapic *apic)
 {
 	int result;
@@ -379,6 +404,7 @@ static inline int apic_find_highest_irr(struct kvm_lapic *apic)
 	if (!apic->irr_pending)
 		return -1;
 
+	kvm_apic_update_irr(apic);
 	result = apic_search_irr(apic);
 	ASSERT(result == -1 || result >= 16);
 
@@ -700,7 +726,17 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 		} else
 			apic_clear_vector(vector, apic->regs + APIC_TMR);
 
-		result = !apic_test_and_set_irr(vector, apic);
+		if (kvm_x86_ops->vm_has_apicv(vcpu->kvm)) {
+			spin_lock(&apic->posted_irr_lock);
+			if (!apic_test_irr(vector, apic))
+				result = !test_and_set_bit(vector,
+					(unsigned long *)apic->posted_irr);
+			if (result)
+				apic->posted_irr_changed |= 1 << (vector >> 5);
+			spin_unlock(&apic->posted_irr_lock);
+		} else {
+			result = !apic_test_and_set_irr(vector, apic);
+		}
 		trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
 					  trig_mode, vector, !result);
 		if (!result) {
@@ -1567,6 +1603,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
 	static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
 	kvm_lapic_reset(vcpu);
 	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
+	spin_lock_init(&apic->posted_irr_lock);
 
 	return 0;
 nomem_free_apic:
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 1676d34..38bebc8 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -20,6 +20,9 @@ struct kvm_lapic {
 	u32 divide_count;
 	struct kvm_vcpu *vcpu;
 	bool irr_pending;
+	u32 posted_irr[8];
+	u8 posted_irr_changed;
+	spinlock_t posted_irr_lock;
 	/* Number of bits set in ISR. */
 	s16 isr_count;
 	/* The highest vector set in ISR; if -1 - invalid, must scan ISR. */
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c1b3041..c8e6036 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -84,7 +84,8 @@ module_param(vmm_exclusive, bool, S_IRUGO);
 static bool __read_mostly fasteoi = 1;
 module_param(fasteoi, bool, S_IRUGO);
 
-static bool __read_mostly enable_apicv_reg_vid;
+static bool __read_mostly enable_apicv_reg_vid = 1;
+module_param(enable_apicv_reg_vid, bool, S_IRUGO);
 
 /*
  * If nested=1, nested virtualization is supported, i.e., guests may use
-- 
1.7.1



* Re: [PATCH] KVM: VMX: fix interrupt lost when enable virtual interrupt delivery
  2013-02-26 12:34 [PATCH] KVM: VMX: fix interrupt lost when enable virtual interrupt delivery Yang Zhang
@ 2013-02-26 12:41 ` Gleb Natapov
  2013-02-26 12:48   ` Zhang, Yang Z
From: Gleb Natapov @ 2013-02-26 12:41 UTC (permalink / raw)
  To: Yang Zhang; +Cc: kvm, mtosatti, xiantao.zhang

On Tue, Feb 26, 2013 at 08:34:16PM +0800, Yang Zhang wrote:
> From: Yang Zhang <yang.z.zhang@Intel.com>
> 
> On platforms that support the virtual interrupt delivery feature,
> hardware clears vIRR automatically while the target vcpu is running,
> so software must not modify vIRR while the target vcpu is running.
> This patch records the virtual interrupt in posted_irr when delivering
> a virtual interrupt to the guest, and then syncs posted_irr into vIRR
> in the target vcpu's context.
> 
> The patch to enable Posted Interrupt has similar logic. Since it is
> still under discussion, this part is split out from the Posted
> Interrupt patch to fix the virtual interrupt delivery issue.
> 
What is the point of having this over the posted interrupt patches,
considering that the PI patches do this and more?

--
			Gleb.


* RE: [PATCH] KVM: VMX: fix interrupt lost when enable virtual interrupt delivery
  2013-02-26 12:41 ` Gleb Natapov
@ 2013-02-26 12:48   ` Zhang, Yang Z
  2013-02-26 12:54     ` Gleb Natapov
From: Zhang, Yang Z @ 2013-02-26 12:48 UTC (permalink / raw)
  To: Gleb Natapov; +Cc: kvm, mtosatti, Zhang, Xiantao

Gleb Natapov wrote on 2013-02-26:
> On Tue, Feb 26, 2013 at 08:34:16PM +0800, Yang Zhang wrote:
>> From: Yang Zhang <yang.z.zhang@Intel.com>
>> 
>> On platforms that support the virtual interrupt delivery feature,
>> hardware clears vIRR automatically while the target vcpu is running,
>> so software must not modify vIRR while the target vcpu is running.
>> This patch records the virtual interrupt in posted_irr when delivering
>> a virtual interrupt to the guest, and then syncs posted_irr into vIRR
>> in the target vcpu's context.
>> 
>> The patch to enable Posted Interrupt has similar logic. Since it is
>> still under discussion, this part is split out from the Posted
>> Interrupt patch to fix the virtual interrupt delivery issue.
>> 
> What is the point of having this over the posted interrupt patches,
> considering that the PI patches do this and more?
We are not doing duplicated work. We just split this part out of the posted interrupt patches and sent it separately to fix the current issue with virtual interrupt delivery, so that we can benefit from the virtual interrupt delivery feature now.

Best regards,
Yang



* Re: [PATCH] KVM: VMX: fix interrupt lost when enable virtual interrupt delivery
  2013-02-26 12:48   ` Zhang, Yang Z
@ 2013-02-26 12:54     ` Gleb Natapov
From: Gleb Natapov @ 2013-02-26 12:54 UTC (permalink / raw)
  To: Zhang, Yang Z; +Cc: kvm, mtosatti, Zhang, Xiantao

On Tue, Feb 26, 2013 at 12:48:15PM +0000, Zhang, Yang Z wrote:
> Gleb Natapov wrote on 2013-02-26:
> > On Tue, Feb 26, 2013 at 08:34:16PM +0800, Yang Zhang wrote:
> >> From: Yang Zhang <yang.z.zhang@Intel.com>
> >> 
> >> On platforms that support the virtual interrupt delivery feature,
> >> hardware clears vIRR automatically while the target vcpu is running,
> >> so software must not modify vIRR while the target vcpu is running.
> >> This patch records the virtual interrupt in posted_irr when delivering
> >> a virtual interrupt to the guest, and then syncs posted_irr into vIRR
> >> in the target vcpu's context.
> >> 
> >> The patch to enable Posted Interrupt has similar logic. Since it is
> >> still under discussion, this part is split out from the Posted
> >> Interrupt patch to fix the virtual interrupt delivery issue.
> >> 
> > What is the point of having this over the posted interrupt patches,
> > considering that the PI patches do this and more?
> We are not doing duplicated work. We just split this part out of the posted interrupt patches and sent it separately to fix the current issue with virtual interrupt delivery, so that we can benefit from the virtual interrupt delivery feature now.
> 
It is duplicated work, because the PI patches will replace half of the
code in this patch. This patch also has the same lock that I want to
avoid in the PI patches anyway, so it is not good as is. We have until
3.10 for PI to make it in, so there is no need to waste energy on
half-solutions.
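
To make that concrete, one lock-free shape for the sync (illustration
only, not the PI patches themselves, and the field names are just
illustrative) is to let the delivery side do nothing but set bits with
atomic ops, and have the vcpu side consume whole 32-bit words with
xchg(), so neither side needs a spinlock:

static void accept_irq_lockless(struct kvm_lapic *apic, int vector)
{
	/* delivery context: publish the vector, nothing else */
	set_bit(vector, (unsigned long *)apic->posted_irr);
}

static void sync_posted_irr(struct kvm_lapic *apic)
{
	int i;
	u32 pending;

	/* target vcpu context: grab-and-clear each word atomically */
	for (i = 0; i < 8; i++) {
		pending = xchg(&apic->posted_irr[i], 0);
		if (pending)
			*((u32 *)(apic->regs + APIC_IRR + i * 0x10)) |= pending;
	}
}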

--
			Gleb.

