All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH stub] kvm: caching API for interrupts
@ 2012-07-29 20:00 Michael S. Tsirkin
  2012-07-29 20:03 ` Paolo Bonzini
                   ` (2 more replies)
  0 siblings, 3 replies; 6+ messages in thread
From: Michael S. Tsirkin @ 2012-07-29 20:00 UTC (permalink / raw)
  To: avi, kvm

I've been looking at adding caching for IRQs so that we don't need to
scan all VCPUs on each interrupt.  One issue I had a problem with, is
how the cache structure can be used from both a thread (to fill out the
cache) and interrupt (to actually send if cache is valid).

For now just added a lock field in the cache so we don't need to worry
about this, and with such a lock in place we don't have to worry about
RCU as cache can be invalidated simply under this lock.

For now this just declares the structure and updates the APIs
so it's not intended to be applied, but just to give you
the idea.

Comments, flames welcome.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

--

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 96c158a..3384b39 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -214,11 +214,28 @@ static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memsl
 	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
 }
 
+struct kvm_irq_cache {
+	spinlock_t lock;
+	bool valid;
+	struct kvm_vcpu *dest;
+	/* For now we only cache lapic irqs */
+	struct kvm_lapic_irq irq;
+	/* Protected by kvm->irq_cache_lock */
+	struct list_head list;
+};
+
+int kvm_set_irq_and_cache(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+			  struct kvm_irq_cache *cache);
+int kvm_set_irq_cached(struct kvm *kvm, struct kvm_irq_cache *cache);
+void kvm_irq_cache_init(struct kvm *kvm, struct kvm_irq_cache *cache);
+void kvm_irq_cache_cleanup(struct kvm_irq_cache *cache);
+
 struct kvm_kernel_irq_routing_entry {
 	u32 gsi;
 	u32 type;
 	int (*set)(struct kvm_kernel_irq_routing_entry *e,
-		   struct kvm *kvm, int irq_source_id, int level);
+		   struct kvm *kvm, int irq_source_id, int level,
+		   struct kvm_irq_cache *cache);
 	union {
 		struct {
 			unsigned irqchip;
@@ -304,6 +321,8 @@ struct kvm {
 	struct kvm_irq_routing_table __rcu *irq_routing;
 	struct hlist_head mask_notifier_list;
 	struct hlist_head irq_ack_notifier_list;
+	struct mutex irq_cache_lock;
+	struct list_head irq_cache_list;
 #endif
 
 #ifdef KVM_ARCH_WANT_MMU_NOTIFIER
@@ -617,7 +636,8 @@ void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
 #endif
 int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
-		int irq_source_id, int level);
+		int irq_source_id, int level,
+		struct kvm_irq_cache *cache);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
 				   struct kvm_irq_ack_notifier *kian);
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 7d7e2aa..9622076 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -138,7 +138,7 @@ irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
 		irq = rcu_dereference(irqfd->irq_entry);
 		/* An event has been signaled, inject an interrupt */
 		if (irq)
-			kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
+			kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, NULL);
 		else
 			schedule_work(&irqfd->inject);
 		rcu_read_unlock();
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 5afb431..d92f3d3 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -46,7 +46,8 @@ static inline int kvm_irq_line_state(unsigned long *irq_state,
 }
 
 static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
-			   struct kvm *kvm, int irq_source_id, int level)
+			   struct kvm *kvm, int irq_source_id, int level,
+			   struct kvm_irq_cache *cache)
 {
 #ifdef CONFIG_X86
 	struct kvm_pic *pic = pic_irqchip(kvm);
@@ -59,7 +60,8 @@ static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
 }
 
 static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
-			      struct kvm *kvm, int irq_source_id, int level)
+			      struct kvm *kvm, int irq_source_id, int level,
+			      struct kvm_irq_cache *cache)
 {
 	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
 	level = kvm_irq_line_state(&ioapic->irq_states[e->irqchip.pin],
@@ -115,7 +117,8 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
 }
 
 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
-		struct kvm *kvm, int irq_source_id, int level)
+		struct kvm *kvm, int irq_source_id, int level,
+		struct kvm_irq_cache *cache)
 {
 	struct kvm_lapic_irq irq;
 
@@ -149,7 +152,7 @@ int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
 	route.msi.address_hi = msi->address_hi;
 	route.msi.data = msi->data;
 
-	return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
+	return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, NULL);
 }
 
 /*
@@ -180,7 +183,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
 
 	while(i--) {
 		int r;
-		r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level);
+		r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level, NULL);
 		if (r < 0)
 			continue;
 

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [PATCH stub] kvm: caching API for interrupts
  2012-07-29 20:00 [PATCH stub] kvm: caching API for interrupts Michael S. Tsirkin
@ 2012-07-29 20:03 ` Paolo Bonzini
  2012-07-29 20:29   ` Michael S. Tsirkin
  2012-07-30  7:09 ` Gleb Natapov
  2012-07-30  8:34 ` Avi Kivity
  2 siblings, 1 reply; 6+ messages in thread
From: Paolo Bonzini @ 2012-07-29 20:03 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: avi, kvm

Il 29/07/2012 22:00, Michael S. Tsirkin ha scritto:
> I've been looking at adding caching for IRQs so that we don't need to
> scan all VCPUs on each interrupt.  One issue I had a problem with, is
> how the cache structure can be used from both a thread (to fill out the
> cache) and interrupt (to actually send if cache is valid).
> 
> For now just added a lock field in the cache so we don't need to worry
> about this, and with such a lock in place we don't have to worry about
> RCU as cache can be invalidated simply under this lock.

seqlock?

Paolo


^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH stub] kvm: caching API for interrupts
  2012-07-29 20:03 ` Paolo Bonzini
@ 2012-07-29 20:29   ` Michael S. Tsirkin
  2012-07-30  6:57     ` Paolo Bonzini
  0 siblings, 1 reply; 6+ messages in thread
From: Michael S. Tsirkin @ 2012-07-29 20:29 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: avi, kvm

On Sun, Jul 29, 2012 at 10:03:45PM +0200, Paolo Bonzini wrote:
> Il 29/07/2012 22:00, Michael S. Tsirkin ha scritto:
> > I've been looking at adding caching for IRQs so that we don't need to
> > scan all VCPUs on each interrupt.  One issue I had a problem with, is
> > how the cache structure can be used from both a thread (to fill out the
> > cache) and interrupt (to actually send if cache is valid).
> > 
> > For now just added a lock field in the cache so we don't need to worry
> > about this, and with such a lock in place we don't have to worry about
> > RCU as cache can be invalidated simply under this lock.
> 
> seqlock?
> 
> Paolo

AFAIK seqlock only works if uses of stale data have no side-effects,
this is not the case here. We could use an rwlock I think.

-- 
MST

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH stub] kvm: caching API for interrupts
  2012-07-29 20:29   ` Michael S. Tsirkin
@ 2012-07-30  6:57     ` Paolo Bonzini
  0 siblings, 0 replies; 6+ messages in thread
From: Paolo Bonzini @ 2012-07-30  6:57 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: avi, kvm

Il 29/07/2012 22:29, Michael S. Tsirkin ha scritto:
> On Sun, Jul 29, 2012 at 10:03:45PM +0200, Paolo Bonzini wrote:
>> Il 29/07/2012 22:00, Michael S. Tsirkin ha scritto:
>>> I've been looking at adding caching for IRQs so that we don't need to
>>> scan all VCPUs on each interrupt.  One issue I had a problem with, is
>>> how the cache structure can be used from both a thread (to fill out the
>>> cache) and interrupt (to actually send if cache is valid).
>>>
>>> For now just added a lock field in the cache so we don't need to worry
>>> about this, and with such a lock in place we don't have to worry about
>>> RCU as cache can be invalidated simply under this lock.
>>
>> seqlock?
> 
> AFAIK seqlock only works if uses of stale data have no side-effects,
> this is not the case here. We could use an rwlock I think.

If you can inject an interrupt using stale data (I think so, that would
be a race in the guest between setting the route and getting the
interrupt) you can use seqlock to get a consistent copy of the entry:

  struct kvm_irq_cache ent, *list;

  rcu_read_lock();
  ...
  do {
    seq = read_seqcount_begin(&list->cnt);
    ent = *list;
  } while (!read_seqcount_retry(&list->cnt, seq));
  rcu_read_unlock();

Paolo

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH stub] kvm: caching API for interrupts
  2012-07-29 20:00 [PATCH stub] kvm: caching API for interrupts Michael S. Tsirkin
  2012-07-29 20:03 ` Paolo Bonzini
@ 2012-07-30  7:09 ` Gleb Natapov
  2012-07-30  8:34 ` Avi Kivity
  2 siblings, 0 replies; 6+ messages in thread
From: Gleb Natapov @ 2012-07-30  7:09 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: avi, kvm

On Sun, Jul 29, 2012 at 11:00:58PM +0300, Michael S. Tsirkin wrote:
> I've been looking at adding caching for IRQs so that we don't need to
> scan all VCPUs on each interrupt.  One issue I had a problem with, is
> how the cache structure can be used from both a thread (to fill out the
> cache) and interrupt (to actually send if cache is valid).
I do not get this. Cache should be filled out at the time of use.

> 
> For now just added a lock field in the cache so we don't need to worry
> about this, and with such a lock in place we don't have to worry about
> RCU as cache can be invalidated simply under this lock.
Cache should be invalidated at the time of use if irq routing changed.

> 
> For now this just declares the structure and updates the APIs
> so it's not intended to be applied, but just to give you
> the idea.
> 
> Comments, flames welcome.
> 
There is not enough meat to discuss here, I am afraid.

> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
> 
> --
> 
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 96c158a..3384b39 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -214,11 +214,28 @@ static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memsl
>  	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
>  }
>  
> +struct kvm_irq_cache {
> +	spinlock_t lock;
> +	bool valid;
> +	struct kvm_vcpu *dest;
> +	/* For now we only cache lapic irqs */
> +	struct kvm_lapic_irq irq;
> +	/* Protected by kvm->irq_cache_lock */
> +	struct list_head list;
Why are you storing them in the list? This should be embedded structure.

> +};
> +
> +int kvm_set_irq_and_cache(struct kvm *kvm, int irq_source_id, u32 irq, int level,
> +			  struct kvm_irq_cache *cache);
> +int kvm_set_irq_cached(struct kvm *kvm, struct kvm_irq_cache *cache);
> +void kvm_irq_cache_init(struct kvm *kvm, struct kvm_irq_cache *cache);
> +void kvm_irq_cache_cleanup(struct kvm_irq_cache *cache);
> +
>  struct kvm_kernel_irq_routing_entry {
>  	u32 gsi;
>  	u32 type;
>  	int (*set)(struct kvm_kernel_irq_routing_entry *e,
> -		   struct kvm *kvm, int irq_source_id, int level);
> +		   struct kvm *kvm, int irq_source_id, int level,
> +		   struct kvm_irq_cache *cache);
>  	union {
>  		struct {
>  			unsigned irqchip;
> @@ -304,6 +321,8 @@ struct kvm {
>  	struct kvm_irq_routing_table __rcu *irq_routing;
>  	struct hlist_head mask_notifier_list;
>  	struct hlist_head irq_ack_notifier_list;
> +	struct mutex irq_cache_lock;
> +	struct list_head irq_cache_list;
>  #endif
>  
>  #ifdef KVM_ARCH_WANT_MMU_NOTIFIER
> @@ -617,7 +636,8 @@ void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
>  #endif
>  int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
>  int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
> -		int irq_source_id, int level);
> +		int irq_source_id, int level,
> +		struct kvm_irq_cache *cache);
>  void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
>  void kvm_register_irq_ack_notifier(struct kvm *kvm,
>  				   struct kvm_irq_ack_notifier *kian);
> diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
> index 7d7e2aa..9622076 100644
> --- a/virt/kvm/eventfd.c
> +++ b/virt/kvm/eventfd.c
> @@ -138,7 +138,7 @@ irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
>  		irq = rcu_dereference(irqfd->irq_entry);
>  		/* An event has been signaled, inject an interrupt */
>  		if (irq)
> -			kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
> +			kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, NULL);
>  		else
>  			schedule_work(&irqfd->inject);
>  		rcu_read_unlock();
> diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
> index 5afb431..d92f3d3 100644
> --- a/virt/kvm/irq_comm.c
> +++ b/virt/kvm/irq_comm.c
> @@ -46,7 +46,8 @@ static inline int kvm_irq_line_state(unsigned long *irq_state,
>  }
>  
>  static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
> -			   struct kvm *kvm, int irq_source_id, int level)
> +			   struct kvm *kvm, int irq_source_id, int level,
> +			   struct kvm_irq_cache *cache)
>  {
>  #ifdef CONFIG_X86
>  	struct kvm_pic *pic = pic_irqchip(kvm);
> @@ -59,7 +60,8 @@ static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
>  }
>  
>  static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
> -			      struct kvm *kvm, int irq_source_id, int level)
> +			      struct kvm *kvm, int irq_source_id, int level,
> +			      struct kvm_irq_cache *cache)
>  {
>  	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
>  	level = kvm_irq_line_state(&ioapic->irq_states[e->irqchip.pin],
> @@ -115,7 +117,8 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
>  }
>  
>  int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
> -		struct kvm *kvm, int irq_source_id, int level)
> +		struct kvm *kvm, int irq_source_id, int level,
> +		struct kvm_irq_cache *cache)
>  {
>  	struct kvm_lapic_irq irq;
>  
> @@ -149,7 +152,7 @@ int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
>  	route.msi.address_hi = msi->address_hi;
>  	route.msi.data = msi->data;
>  
> -	return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
> +	return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, NULL);
>  }
>  
>  /*
> @@ -180,7 +183,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
>  
>  	while(i--) {
>  		int r;
> -		r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level);
> +		r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level, NULL);
>  		if (r < 0)
>  			continue;
>  
> --
> To unsubscribe from this list: send the line "unsubscribe kvm" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html

--
			Gleb.

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH stub] kvm: caching API for interrupts
  2012-07-29 20:00 [PATCH stub] kvm: caching API for interrupts Michael S. Tsirkin
  2012-07-29 20:03 ` Paolo Bonzini
  2012-07-30  7:09 ` Gleb Natapov
@ 2012-07-30  8:34 ` Avi Kivity
  2 siblings, 0 replies; 6+ messages in thread
From: Avi Kivity @ 2012-07-30  8:34 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: kvm

On 07/29/2012 11:00 PM, Michael S. Tsirkin wrote:
> I've been looking at adding caching for IRQs so that we don't need to
> scan all VCPUs on each interrupt.  One issue I had a problem with, is
> how the cache structure can be used from both a thread (to fill out the
> cache) and interrupt (to actually send if cache is valid).
> 
> For now just added a lock field in the cache so we don't need to worry
> about this, and with such a lock in place we don't have to worry about
> RCU as cache can be invalidated simply under this lock.
> 
> For now this just declares the structure and updates the APIs
> so it's not intended to be applied, but just to give you
> the idea.
> 
> Comments, flames welcome.

I hope you aren't expecting any of the latter.

> 
> +struct kvm_irq_cache {
> +	spinlock_t lock;
> +	bool valid;
> +	struct kvm_vcpu *dest;
> +	/* For now we only cache lapic irqs */
> +	struct kvm_lapic_irq irq;
> +	/* Protected by kvm->irq_cache_lock */
> +	struct list_head list;
> +};
> +

Why an external structure?

Why not add something to kvm_kernel_irq_routing_entry?  It is already
protected by rcu.

The atomic context code could look like this:


  if (kire->cache.valid) {
      kvm_apic_set_irq(kire->cache.vcpu, &kire->cache.irq);
      return;
  }
  queue_work(process_irq_slowpath);

The slow path tries to fill the cache, and processes the interrupt in
any case.

-- 
error compiling committee.c: too many arguments to function



^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2012-07-30  8:34 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2012-07-29 20:00 [PATCH stub] kvm: caching API for interrupts Michael S. Tsirkin
2012-07-29 20:03 ` Paolo Bonzini
2012-07-29 20:29   ` Michael S. Tsirkin
2012-07-30  6:57     ` Paolo Bonzini
2012-07-30  7:09 ` Gleb Natapov
2012-07-30  8:34 ` Avi Kivity

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.