kvm.vger.kernel.org archive mirror
* [PATCH 0/7][v5] GSI route layer for MSI/MSI-X
@ 2009-01-08 10:45 Sheng Yang
  2009-01-08 10:45 ` [PATCH 1/7] KVM: Add a route layer to convert MSI message to GSI Sheng Yang
                   ` (6 more replies)
  0 siblings, 7 replies; 23+ messages in thread
From: Sheng Yang @ 2009-01-08 10:45 UTC (permalink / raw)
  To: Avi Kivity, Marcelo Tosatti; +Cc: kvm

Update from v4:
Addressed Marcelo's comments on the first patch, and split the bitmap
optimization out into a separate patchset, which will be posted after this
one is checked in.

--
regards
Yang, Sheng


* [PATCH 1/7] KVM: Add a route layer to convert MSI message to GSI
  2009-01-08 10:45 [PATCH 0/7][v5] GSI route layer for MSI/MSI-X Sheng Yang
@ 2009-01-08 10:45 ` Sheng Yang
  2009-01-08 14:20   ` Marcelo Tosatti
  2009-01-08 15:08   ` Avi Kivity
  2009-01-08 10:45 ` [PATCH 2/7] KVM: Using gsi route for MSI device assignment Sheng Yang
                   ` (5 subsequent siblings)
  6 siblings, 2 replies; 23+ messages in thread
From: Sheng Yang @ 2009-01-08 10:45 UTC (permalink / raw)
  To: Avi Kivity, Marcelo Tosatti; +Cc: kvm, Sheng Yang

Avi's proposal was to use a single kvm_set_irq() to handle all interrupts,
including MSI. So here it is.

struct kvm_gsi_route_entry is a mapping from a special gsi (one with
KVM_GSI_ROUTE_MASK set) to an MSI/MSI-X message address/data pair. The struct
can also be extended for other purposes.

Now we support up to 256 gsi_route_entry mappings. The gsi is allocated by the
kernel, and two ioctls are provided to userspace, which is more flexible.
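
For illustration only (this is not part of the patch), userspace is expected
to use the pair of ioctls roughly like this; the sketch assumes <linux/kvm.h>,
<string.h> and <sys/ioctl.h> are included, and vm_fd and the MSI address/data
values come from the caller:

static int request_msi_gsi(int vm_fd, __u32 addr_lo, __u32 addr_hi,
			   __u32 data, __u32 *gsi)
{
	struct kvm_gsi_route_entry_guest entry;
	struct kvm_gsi_route_guest route;
	int r;

	memset(&entry, 0, sizeof(entry));
	entry.type = KVM_GSI_ROUTE_MSI;
	entry.msi.addr_lo = addr_lo;
	entry.msi.addr_hi = addr_hi;
	entry.msi.data = data;

	route.entries_nr = 1;
	route.entries = &entry;

	/* the kernel allocates a free gsi and writes it back into entry.gsi */
	r = ioctl(vm_fd, KVM_REQUEST_GSI_ROUTE, &route);
	if (r == 0)
		*gsi = entry.gsi;
	return r;
}

Passing the same table, with the routed gsi filled in, to KVM_FREE_GSI_ROUTE
drops the mapping again.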

Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 include/linux/kvm.h      |   26 +++++++++++
 include/linux/kvm_host.h |   20 +++++++++
 virt/kvm/irq_comm.c      |   70 ++++++++++++++++++++++++++++++
 virt/kvm/kvm_main.c      |  105 ++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 221 insertions(+), 0 deletions(-)

diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 71c150f..bbefce6 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -399,6 +399,9 @@ struct kvm_trace_rec {
 #if defined(CONFIG_X86)
 #define KVM_CAP_REINJECT_CONTROL 24
 #endif
+#if defined(CONFIG_X86)
+#define KVM_CAP_GSI_ROUTE 25
+#endif
 
 /*
  * ioctls for VM fds
@@ -433,6 +436,8 @@ struct kvm_trace_rec {
 #define KVM_ASSIGN_IRQ _IOR(KVMIO, 0x70, \
 			    struct kvm_assigned_irq)
 #define KVM_REINJECT_CONTROL      _IO(KVMIO, 0x71)
+#define KVM_REQUEST_GSI_ROUTE	  _IOWR(KVMIO, 0x72, void *)
+#define KVM_FREE_GSI_ROUTE	  _IOR(KVMIO, 0x73, void *)
 
 /*
  * ioctls for vcpu fds
@@ -553,4 +558,25 @@ struct kvm_assigned_irq {
 #define KVM_DEV_IRQ_ASSIGN_MSI_ACTION	KVM_DEV_IRQ_ASSIGN_ENABLE_MSI
 #define KVM_DEV_IRQ_ASSIGN_ENABLE_MSI	(1 << 0)
 
+struct kvm_gsi_route_guest {
+	__u32 entries_nr;
+	struct kvm_gsi_route_entry_guest *entries;
+};
+
+#define KVM_GSI_ROUTE_MSI	(1 << 0)
+struct kvm_gsi_route_entry_guest {
+	__u32 gsi;
+	__u32 type;
+	__u32 flags;
+	__u32 reserved;
+	union {
+		struct {
+			__u32 addr_lo;
+			__u32 addr_hi;
+			__u32 data;
+		} msi;
+		__u32 padding[8];
+	};
+};
+
 #endif
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a8bcad0..6a00201 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -136,6 +136,9 @@ struct kvm {
 	unsigned long mmu_notifier_seq;
 	long mmu_notifier_count;
 #endif
+	struct hlist_head gsi_route_list;
+#define KVM_NR_GSI_ROUTE_ENTRIES    256
+	DECLARE_BITMAP(gsi_route_bitmap, KVM_NR_GSI_ROUTE_ENTRIES);
 };
 
 /* The guest did something we don't support. */
@@ -336,6 +339,19 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
 				      struct kvm_irq_mask_notifier *kimn);
 void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);
 
+#define KVM_GSI_ROUTE_MASK    0x1000000ull
+struct kvm_gsi_route_entry {
+	u32 gsi;
+	u32 type;
+	u32 flags;
+	u32 reserved;
+	union {
+		struct msi_msg msi;
+		u32 reserved[8];
+	};
+	struct hlist_node link;
+};
+
 void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
@@ -343,6 +359,10 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
 void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian);
 int kvm_request_irq_source_id(struct kvm *kvm);
 void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
+int kvm_update_gsi_route(struct kvm *kvm, struct kvm_gsi_route_entry *entry);
+struct kvm_gsi_route_entry *kvm_find_gsi_route_entry(struct kvm *kvm, u32 gsi);
+void kvm_free_gsi_route(struct kvm *kvm, struct kvm_gsi_route_entry *entry);
+void kvm_free_gsi_route_list(struct kvm *kvm);
 
 #ifdef CONFIG_DMAR
 int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 5162a41..64ca4cf 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -123,3 +123,73 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
 			kimn->func(kimn, mask);
 }
 
+int kvm_update_gsi_route(struct kvm *kvm, struct kvm_gsi_route_entry *entry)
+{
+	struct kvm_gsi_route_entry *found_entry, *new_entry;
+	int r, gsi;
+
+	mutex_lock(&kvm->lock);
+	/* Find whether we need an update or a new entry */
+	found_entry = kvm_find_gsi_route_entry(kvm, entry->gsi);
+	if (found_entry)
+		*found_entry = *entry;
+	else {
+		gsi = find_first_zero_bit(kvm->gsi_route_bitmap,
+					  KVM_NR_GSI_ROUTE_ENTRIES);
+		if (gsi >= KVM_NR_GSI_ROUTE_ENTRIES) {
+			r = -ENOSPC;
+			goto out;
+		}
+		new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
+		if (!new_entry) {
+			r = -ENOMEM;
+			goto out;
+		}
+		*new_entry = *entry;
+		entry->gsi = gsi | KVM_GSI_ROUTE_MASK;
+		__set_bit(gsi, kvm->gsi_route_bitmap);
+		hlist_add_head(&new_entry->link, &kvm->gsi_route_list);
+	}
+	r = 0;
+out:
+	mutex_unlock(&kvm->lock);
+	return r;
+}
+
+/* Call with kvm->lock held */
+struct kvm_gsi_route_entry *kvm_find_gsi_route_entry(struct kvm *kvm, u32 gsi)
+{
+	struct kvm_gsi_route_entry *entry;
+	struct hlist_node *n;
+
+	if (!(gsi & KVM_GSI_ROUTE_MASK))
+		return NULL;
+	hlist_for_each_entry(entry, n, &kvm->gsi_route_list, link)
+		if (entry->gsi == gsi)
+			goto out;
+	entry = NULL;
+out:
+	return entry;
+}
+
+/* Call with kvm->lock held */
+void kvm_free_gsi_route(struct kvm *kvm, struct kvm_gsi_route_entry *entry)
+{
+	if (!entry)
+		return;
+	__clear_bit(entry->gsi & ~KVM_GSI_ROUTE_MASK, kvm->gsi_route_bitmap);
+	hlist_del(&entry->link);
+	kfree(entry);
+}
+
+void kvm_free_gsi_route_list(struct kvm *kvm)
+{
+	struct kvm_gsi_route_entry *entry;
+	struct hlist_node *pos, *n;
+
+	mutex_lock(&kvm->lock);
+	hlist_for_each_entry_safe(entry, pos, n, &kvm->gsi_route_list, link)
+		kvm_free_gsi_route(kvm, entry);
+	mutex_unlock(&kvm->lock);
+}
+
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 61688a6..8aa939c 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -839,6 +839,7 @@ static struct kvm *kvm_create_vm(void)
 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
 	kvm_coalesced_mmio_init(kvm);
 #endif
+	INIT_HLIST_HEAD(&kvm->gsi_route_list);
 out:
 	return kvm;
 }
@@ -877,6 +878,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
 	struct mm_struct *mm = kvm->mm;
 
 	kvm_arch_sync_events(kvm);
+	kvm_free_gsi_route_list(kvm);
 	spin_lock(&kvm_lock);
 	list_del(&kvm->vm_list);
 	spin_unlock(&kvm_lock);
@@ -1605,6 +1607,45 @@ static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
 	return 0;
 }
 
+static int kvm_vm_ioctl_request_gsi_route(struct kvm *kvm,
+			struct kvm_gsi_route_guest *gsi_route,
+			struct kvm_gsi_route_entry_guest *guest_entries)
+{
+	struct kvm_gsi_route_entry entry;
+	int r, i;
+
+	for (i = 0; i < gsi_route->entries_nr; i++) {
+		memcpy(&entry, &guest_entries[i], sizeof(entry));
+		r = kvm_update_gsi_route(kvm, &entry);
+		if (r == 0)
+			guest_entries[i].gsi = entry.gsi;
+		else
+			break;
+	}
+	return r;
+}
+
+static int kvm_vm_ioctl_free_gsi_route(struct kvm *kvm,
+			struct kvm_gsi_route_guest *gsi_route,
+			struct kvm_gsi_route_entry_guest *guest_entries)
+{
+	struct kvm_gsi_route_entry *entry;
+	int r, i;
+
+	mutex_lock(&kvm->lock);
+	for (i = 0; i < gsi_route->entries_nr; i++) {
+		entry = kvm_find_gsi_route_entry(kvm, guest_entries[i].gsi);
+		if (!entry) {
+			r = -EINVAL;
+			goto out;
+		}
+		kvm_free_gsi_route(kvm, entry);
+	}
+out:
+	mutex_unlock(&kvm->lock);
+	return r;
+}
+
 static long kvm_vcpu_ioctl(struct file *filp,
 			   unsigned int ioctl, unsigned long arg)
 {
@@ -1803,6 +1844,7 @@ static long kvm_vm_ioctl(struct file *filp,
 {
 	struct kvm *kvm = filp->private_data;
 	void __user *argp = (void __user *)arg;
+	struct kvm_gsi_route_entry_guest *gsi_entries = NULL;
 	int r;
 
 	if (kvm->mm != current->mm)
@@ -1887,10 +1929,73 @@ static long kvm_vm_ioctl(struct file *filp,
 		break;
 	}
 #endif
+	case KVM_REQUEST_GSI_ROUTE: {
+		struct kvm_gsi_route_guest gsi_route;
+		r = -EFAULT;
+		if (copy_from_user(&gsi_route, argp, sizeof gsi_route))
+			goto out;
+		if (gsi_route.entries_nr == 0 ||
+			    gsi_route.entries_nr >= KVM_NR_GSI_ROUTE_ENTRIES) {
+			r = -EFAULT;
+			goto out;
+		}
+		gsi_entries = vmalloc(gsi_route.entries_nr *
+				      sizeof(struct kvm_gsi_route_entry_guest));
+		if (!gsi_entries) {
+			r = -ENOMEM;
+			goto out;
+		}
+		r = -EFAULT;
+		if (copy_from_user(gsi_entries,
+				   (void __user *)gsi_route.entries,
+				   gsi_route.entries_nr *
+				   sizeof(struct kvm_gsi_route_entry_guest)))
+			goto out;
+		r = kvm_vm_ioctl_request_gsi_route(kvm, &gsi_route,
+						   gsi_entries);
+		if (r)
+			goto out;
+		r = -EFAULT;
+		if (copy_to_user((void __user *)gsi_route.entries,
+				gsi_entries,
+				gsi_route.entries_nr *
+				sizeof(struct kvm_gsi_route_entry_guest)))
+			goto out;
+		r = 0;
+		break;
+	}
+	case KVM_FREE_GSI_ROUTE: {
+		struct kvm_gsi_route_guest gsi_route;
+		r = -EFAULT;
+		if (copy_from_user(&gsi_route, argp, sizeof gsi_route))
+			goto out;
+		if (gsi_route.entries_nr == 0 ||
+			    gsi_route.entries_nr >= KVM_NR_GSI_ROUTE_ENTRIES) {
+			r = -EFAULT;
+			goto out;
+		}
+		gsi_entries = vmalloc(gsi_route.entries_nr *
+				      sizeof(struct kvm_gsi_route_entry_guest));
+		if (!gsi_entries) {
+			r = -ENOMEM;
+			goto out;
+		}
+		r = -EFAULT;
+		if (copy_from_user(gsi_entries,
+				   (void __user *)gsi_route.entries,
+				   gsi_route.entries_nr *
+				   sizeof(struct kvm_gsi_route_entry_guest)))
+			goto out;
+		r = kvm_vm_ioctl_free_gsi_route(kvm, &gsi_route, gsi_entries);
+		if (r)
+			goto out;
+		break;
+	}
 	default:
 		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
 	}
 out:
+	vfree(gsi_entries);
 	return r;
 }
 
-- 
1.5.4.5



* [PATCH 2/7] KVM: Using gsi route for MSI device assignment
  2009-01-08 10:45 [PATCH 0/7][v5] GSI route layer for MSI/MSI-X Sheng Yang
  2009-01-08 10:45 ` [PATCH 1/7] KVM: Add a route layer to convert MSI message to GSI Sheng Yang
@ 2009-01-08 10:45 ` Sheng Yang
  2009-01-08 15:11   ` Avi Kivity
  2009-01-08 10:45 ` [PATCH 3/7] KVM: Improve MSI dispatch function Sheng Yang
                   ` (4 subsequent siblings)
  6 siblings, 1 reply; 23+ messages in thread
From: Sheng Yang @ 2009-01-08 10:45 UTC (permalink / raw)
  To: Avi Kivity, Marcelo Tosatti; +Cc: kvm, Sheng Yang

Convert the MSI userspace interface to use the gsi route mapping (and nobody
should be using the old interface anyway...).
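
Illustration only, not part of the patch: the expected flow is now to register
the guest MSI message with KVM_REQUEST_GSI_ROUTE first (see patch 1) and pass
the routed gsi as guest_irq; dev_id, host_irq, routed_gsi and vm_fd below are
placeholders for values the caller already has:

	struct kvm_assigned_irq assigned_irq;

	memset(&assigned_irq, 0, sizeof(assigned_irq));
	assigned_irq.assigned_dev_id = dev_id;
	assigned_irq.host_irq = host_irq;
	/* gsi returned by KVM_REQUEST_GSI_ROUTE, not a raw MSI message */
	assigned_irq.guest_irq = routed_gsi;
	assigned_irq.flags = KVM_DEV_IRQ_ASSIGN_ENABLE_MSI;

	ioctl(vm_fd, KVM_ASSIGN_IRQ, &assigned_irq);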

Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 include/linux/kvm_host.h |    1 -
 virt/kvm/kvm_main.c      |   79 ++++++++++++++++++++++++++--------------------
 2 files changed, 45 insertions(+), 35 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6a00201..eab9588 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -316,7 +316,6 @@ struct kvm_assigned_dev_kernel {
 	int host_irq;
 	bool host_irq_disabled;
 	int guest_irq;
-	struct msi_msg guest_msi;
 #define KVM_ASSIGNED_DEV_GUEST_INTX	(1 << 0)
 #define KVM_ASSIGNED_DEV_GUEST_MSI	(1 << 1)
 #define KVM_ASSIGNED_DEV_HOST_INTX	(1 << 8)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 8aa939c..3bbb59f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -92,44 +92,56 @@ static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev)
 	int vcpu_id;
 	struct kvm_vcpu *vcpu;
 	struct kvm_ioapic *ioapic = ioapic_irqchip(dev->kvm);
-	int dest_id = (dev->guest_msi.address_lo & MSI_ADDR_DEST_ID_MASK)
-			>> MSI_ADDR_DEST_ID_SHIFT;
-	int vector = (dev->guest_msi.data & MSI_DATA_VECTOR_MASK)
-			>> MSI_DATA_VECTOR_SHIFT;
-	int dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
-				(unsigned long *)&dev->guest_msi.address_lo);
-	int trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
-				(unsigned long *)&dev->guest_msi.data);
-	int delivery_mode = test_bit(MSI_DATA_DELIVERY_MODE_SHIFT,
-				(unsigned long *)&dev->guest_msi.data);
+	struct kvm_gsi_route_entry *gsi_entry;
+	int dest_id, vector, dest_mode, trig_mode, delivery_mode;
 	u32 deliver_bitmask;
 
 	BUG_ON(!ioapic);
 
-	deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
+	gsi_entry = kvm_find_gsi_route_entry(dev->kvm, dev->guest_irq);
+	if (!gsi_entry) {
+		printk(KERN_WARNING "kvm: fail to find correlated gsi entry\n");
+		return;
+	}
+
+	if (gsi_entry->type & KVM_GSI_ROUTE_MSI) {
+		dest_id = (gsi_entry->msi.address_lo & MSI_ADDR_DEST_ID_MASK)
+			>> MSI_ADDR_DEST_ID_SHIFT;
+		vector = (gsi_entry->msi.data & MSI_DATA_VECTOR_MASK)
+			>> MSI_DATA_VECTOR_SHIFT;
+		dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
+				(unsigned long *)&gsi_entry->msi.address_lo);
+		trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
+				(unsigned long *)&gsi_entry->msi.data);
+		delivery_mode = test_bit(MSI_DATA_DELIVERY_MODE_SHIFT,
+				(unsigned long *)&gsi_entry->msi.data);
+		deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
 				dest_id, dest_mode);
-	/* IOAPIC delivery mode value is the same as MSI here */
-	switch (delivery_mode) {
-	case IOAPIC_LOWEST_PRIORITY:
-		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
-				deliver_bitmask);
-		if (vcpu != NULL)
-			kvm_apic_set_irq(vcpu, vector, trig_mode);
-		else
-			printk(KERN_INFO "kvm: null lowest priority vcpu!\n");
-		break;
-	case IOAPIC_FIXED:
-		for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
-			if (!(deliver_bitmask & (1 << vcpu_id)))
-				continue;
-			deliver_bitmask &= ~(1 << vcpu_id);
-			vcpu = ioapic->kvm->vcpus[vcpu_id];
-			if (vcpu)
+		/* IOAPIC delivery mode value is the same as MSI here */
+		switch (delivery_mode) {
+		case IOAPIC_LOWEST_PRIORITY:
+			vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
+					deliver_bitmask);
+			if (vcpu != NULL)
 				kvm_apic_set_irq(vcpu, vector, trig_mode);
+			else
+				printk(KERN_INFO
+				       "kvm: null lowest priority vcpu!\n");
+			break;
+		case IOAPIC_FIXED:
+			for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
+				if (!(deliver_bitmask & (1 << vcpu_id)))
+					continue;
+				deliver_bitmask &= ~(1 << vcpu_id);
+				vcpu = ioapic->kvm->vcpus[vcpu_id];
+				if (vcpu)
+					kvm_apic_set_irq(vcpu, vector,
+							trig_mode);
+			}
+			break;
+		default:
+			break;
 		}
-		break;
-	default:
-		printk(KERN_INFO "kvm: unsupported MSI delivery mode\n");
 	}
 }
 #else
@@ -331,17 +343,16 @@ static int assigned_device_update_msi(struct kvm *kvm,
 {
 	int r;
 
+	adev->guest_irq = airq->guest_irq;
+
 	if (airq->flags & KVM_DEV_IRQ_ASSIGN_ENABLE_MSI) {
 		/* x86 don't care upper address of guest msi message addr */
 		adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_MSI;
 		adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_INTX;
-		adev->guest_msi.address_lo = airq->guest_msi.addr_lo;
-		adev->guest_msi.data = airq->guest_msi.data;
 		adev->ack_notifier.gsi = -1;
 	} else if (msi2intx) {
 		adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_INTX;
 		adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_MSI;
-		adev->guest_irq = airq->guest_irq;
 		adev->ack_notifier.gsi = airq->guest_irq;
 	} else {
 		/*
-- 
1.5.4.5



* [PATCH 3/7] KVM: Improve MSI dispatch function
  2009-01-08 10:45 [PATCH 0/7][v5] GSI route layer for MSI/MSI-X Sheng Yang
  2009-01-08 10:45 ` [PATCH 1/7] KVM: Add a route layer to convert MSI message to GSI Sheng Yang
  2009-01-08 10:45 ` [PATCH 2/7] KVM: Using gsi route for MSI device assignment Sheng Yang
@ 2009-01-08 10:45 ` Sheng Yang
  2009-01-08 10:45 ` [PATCH 4/7] KVM: Using ioapic_irqchip() macro for kvm_set_irq Sheng Yang
                   ` (3 subsequent siblings)
  6 siblings, 0 replies; 23+ messages in thread
From: Sheng Yang @ 2009-01-08 10:45 UTC (permalink / raw)
  To: Avi Kivity, Marcelo Tosatti; +Cc: kvm, Sheng Yang

Prepare to merge with kvm_set_irq().

Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 virt/kvm/kvm_main.c |    8 ++++----
 1 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 3bbb59f..fb2d639 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -87,7 +87,7 @@ static bool kvm_rebooting;
 #ifdef KVM_CAP_DEVICE_ASSIGNMENT
 
 #ifdef CONFIG_X86
-static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev)
+static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev, u32 gsi)
 {
 	int vcpu_id;
 	struct kvm_vcpu *vcpu;
@@ -98,7 +98,7 @@ static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev)
 
 	BUG_ON(!ioapic);
 
-	gsi_entry = kvm_find_gsi_route_entry(dev->kvm, dev->guest_irq);
+	gsi_entry = kvm_find_gsi_route_entry(dev->kvm, gsi);
 	if (!gsi_entry) {
 		printk(KERN_WARNING "kvm: fail to find correlated gsi entry\n");
 		return;
@@ -145,7 +145,7 @@ static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev)
 	}
 }
 #else
-static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev) {}
+static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev, u32 gsi) {}
 #endif
 
 static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
@@ -180,7 +180,7 @@ static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
 			    assigned_dev->guest_irq, 1);
 	else if (assigned_dev->irq_requested_type &
 				KVM_ASSIGNED_DEV_GUEST_MSI) {
-		assigned_device_msi_dispatch(assigned_dev);
+		assigned_device_msi_dispatch(assigned_dev, assigned_dev->guest_irq);
 		enable_irq(assigned_dev->host_irq);
 		assigned_dev->host_irq_disabled = false;
 	}
-- 
1.5.4.5



* [PATCH 4/7] KVM: Using ioapic_irqchip() macro for kvm_set_irq
  2009-01-08 10:45 [PATCH 0/7][v5] GSI route layer for MSI/MSI-X Sheng Yang
                   ` (2 preceding siblings ...)
  2009-01-08 10:45 ` [PATCH 3/7] KVM: Improve MSI dispatch function Sheng Yang
@ 2009-01-08 10:45 ` Sheng Yang
  2009-01-08 10:45 ` [PATCH 5/7] KVM: Merge MSI handling to kvm_set_irq Sheng Yang
                   ` (2 subsequent siblings)
  6 siblings, 0 replies; 23+ messages in thread
From: Sheng Yang @ 2009-01-08 10:45 UTC (permalink / raw)
  To: Avi Kivity, Marcelo Tosatti; +Cc: kvm, Sheng Yang


Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 virt/kvm/irq_comm.c |    2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 64ca4cf..f8b22a1 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -39,7 +39,7 @@ void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level)
 	 * IOAPIC.  So set the bit in both. The guest will ignore
 	 * writes to the unused one.
 	 */
-	kvm_ioapic_set_irq(kvm->arch.vioapic, irq, !!(*irq_state));
+	kvm_ioapic_set_irq(ioapic_irqchip(kvm), irq, !!(*irq_state));
 #ifdef CONFIG_X86
 	kvm_pic_set_irq(pic_irqchip(kvm), irq, !!(*irq_state));
 #endif
-- 
1.5.4.5



* [PATCH 5/7] KVM: Merge MSI handling to kvm_set_irq
  2009-01-08 10:45 [PATCH 0/7][v5] GSI route layer for MSI/MSI-X Sheng Yang
                   ` (3 preceding siblings ...)
  2009-01-08 10:45 ` [PATCH 4/7] KVM: Using ioapic_irqchip() macro for kvm_set_irq Sheng Yang
@ 2009-01-08 10:45 ` Sheng Yang
  2009-01-08 10:45 ` [PATCH 6/7] KVM: Split IOAPIC structure Sheng Yang
  2009-01-08 10:45 ` [PATCH 7/7] KVM: Unified the delivery of IOAPIC and MSI Sheng Yang
  6 siblings, 0 replies; 23+ messages in thread
From: Sheng Yang @ 2009-01-08 10:45 UTC (permalink / raw)
  To: Avi Kivity, Marcelo Tosatti; +Cc: kvm, Sheng Yang

Use kvm_set_irq() to handle all interrupt injection.

Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 include/linux/kvm_host.h |    2 +-
 virt/kvm/irq_comm.c      |   79 +++++++++++++++++++++++++++++++++++++++++++--
 virt/kvm/kvm_main.c      |   79 +++-------------------------------------------
 3 files changed, 81 insertions(+), 79 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index eab9588..bfdaab9 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -351,7 +351,7 @@ struct kvm_gsi_route_entry {
 	struct hlist_node link;
 };
 
-void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
+void kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 gsi, int level);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
 				   struct kvm_irq_ack_notifier *kian);
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index f8b22a1..e6ce472 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -24,10 +24,81 @@
 
 #include "ioapic.h"
 
+#ifdef CONFIG_X86
+#include <asm/msidef.h>
+#endif
+
+static void gsi_dispatch(struct kvm *kvm, u32 gsi)
+{
+	int vcpu_id;
+	struct kvm_vcpu *vcpu;
+	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
+	struct kvm_gsi_route_entry *gsi_entry;
+	int dest_id, vector, dest_mode, trig_mode, delivery_mode;
+	u32 deliver_bitmask;
+
+	BUG_ON(!ioapic);
+
+	gsi_entry = kvm_find_gsi_route_entry(kvm, gsi);
+	if (!gsi_entry) {
+		printk(KERN_WARNING "kvm: fail to find correlated gsi entry\n");
+		return;
+	}
+
+#ifdef CONFIG_X86
+	if (gsi_entry->type & KVM_GSI_ROUTE_MSI) {
+		dest_id = (gsi_entry->msi.address_lo & MSI_ADDR_DEST_ID_MASK)
+			>> MSI_ADDR_DEST_ID_SHIFT;
+		vector = (gsi_entry->msi.data & MSI_DATA_VECTOR_MASK)
+			>> MSI_DATA_VECTOR_SHIFT;
+		dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
+				(unsigned long *)&gsi_entry->msi.address_lo);
+		trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
+				(unsigned long *)&gsi_entry->msi.data);
+		delivery_mode = test_bit(MSI_DATA_DELIVERY_MODE_SHIFT,
+				(unsigned long *)&gsi_entry->msi.data);
+		deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
+				dest_id, dest_mode);
+		/* IOAPIC delivery mode value is the same as MSI here */
+		switch (delivery_mode) {
+		case IOAPIC_LOWEST_PRIORITY:
+			vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
+					deliver_bitmask);
+			if (vcpu != NULL)
+				kvm_apic_set_irq(vcpu, vector, trig_mode);
+			else
+				printk(KERN_INFO
+				       "kvm: null lowest priority vcpu!\n");
+			break;
+		case IOAPIC_FIXED:
+			for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
+				if (!(deliver_bitmask & (1 << vcpu_id)))
+					continue;
+				deliver_bitmask &= ~(1 << vcpu_id);
+				vcpu = ioapic->kvm->vcpus[vcpu_id];
+				if (vcpu)
+					kvm_apic_set_irq(vcpu, vector,
+							trig_mode);
+			}
+			break;
+		default:
+			break;
+		}
+	}
+#endif /* CONFIG_X86 */
+}
+
 /* This should be called with the kvm->lock mutex held */
-void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level)
+void kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 gsi, int level)
 {
-	unsigned long *irq_state = (unsigned long *)&kvm->arch.irq_states[irq];
+	unsigned long *irq_state;
+
+	if (gsi & KVM_GSI_ROUTE_MASK) {
+		gsi_dispatch(kvm, gsi);
+		return;
+	}
+
+	irq_state = (unsigned long *)&kvm->arch.irq_states[gsi];
 
 	/* Logical OR for level trig interrupt */
 	if (level)
@@ -39,9 +110,9 @@ void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level)
 	 * IOAPIC.  So set the bit in both. The guest will ignore
 	 * writes to the unused one.
 	 */
-	kvm_ioapic_set_irq(ioapic_irqchip(kvm), irq, !!(*irq_state));
+	kvm_ioapic_set_irq(ioapic_irqchip(kvm), gsi, !!(*irq_state));
 #ifdef CONFIG_X86
-	kvm_pic_set_irq(pic_irqchip(kvm), irq, !!(*irq_state));
+	kvm_pic_set_irq(pic_irqchip(kvm), gsi, !!(*irq_state));
 #endif
 }
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index fb2d639..d293714 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -47,10 +47,6 @@
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 
-#ifdef CONFIG_X86
-#include <asm/msidef.h>
-#endif
-
 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
 #include "coalesced_mmio.h"
 #endif
@@ -85,69 +81,6 @@ static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
 static bool kvm_rebooting;
 
 #ifdef KVM_CAP_DEVICE_ASSIGNMENT
-
-#ifdef CONFIG_X86
-static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev, u32 gsi)
-{
-	int vcpu_id;
-	struct kvm_vcpu *vcpu;
-	struct kvm_ioapic *ioapic = ioapic_irqchip(dev->kvm);
-	struct kvm_gsi_route_entry *gsi_entry;
-	int dest_id, vector, dest_mode, trig_mode, delivery_mode;
-	u32 deliver_bitmask;
-
-	BUG_ON(!ioapic);
-
-	gsi_entry = kvm_find_gsi_route_entry(dev->kvm, gsi);
-	if (!gsi_entry) {
-		printk(KERN_WARNING "kvm: fail to find correlated gsi entry\n");
-		return;
-	}
-
-	if (gsi_entry->type & KVM_GSI_ROUTE_MSI) {
-		dest_id = (gsi_entry->msi.address_lo & MSI_ADDR_DEST_ID_MASK)
-			>> MSI_ADDR_DEST_ID_SHIFT;
-		vector = (gsi_entry->msi.data & MSI_DATA_VECTOR_MASK)
-			>> MSI_DATA_VECTOR_SHIFT;
-		dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
-				(unsigned long *)&gsi_entry->msi.address_lo);
-		trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
-				(unsigned long *)&gsi_entry->msi.data);
-		delivery_mode = test_bit(MSI_DATA_DELIVERY_MODE_SHIFT,
-				(unsigned long *)&gsi_entry->msi.data);
-		deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
-				dest_id, dest_mode);
-		/* IOAPIC delivery mode value is the same as MSI here */
-		switch (delivery_mode) {
-		case IOAPIC_LOWEST_PRIORITY:
-			vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
-					deliver_bitmask);
-			if (vcpu != NULL)
-				kvm_apic_set_irq(vcpu, vector, trig_mode);
-			else
-				printk(KERN_INFO
-				       "kvm: null lowest priority vcpu!\n");
-			break;
-		case IOAPIC_FIXED:
-			for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
-				if (!(deliver_bitmask & (1 << vcpu_id)))
-					continue;
-				deliver_bitmask &= ~(1 << vcpu_id);
-				vcpu = ioapic->kvm->vcpus[vcpu_id];
-				if (vcpu)
-					kvm_apic_set_irq(vcpu, vector,
-							trig_mode);
-			}
-			break;
-		default:
-			break;
-		}
-	}
-}
-#else
-static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev, u32 gsi) {}
-#endif
-
 static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
 						      int assigned_dev_id)
 {
@@ -174,13 +107,11 @@ static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
 	 * finer-grained lock, update this
 	 */
 	mutex_lock(&assigned_dev->kvm->lock);
-	if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_GUEST_INTX)
-		kvm_set_irq(assigned_dev->kvm,
-			    assigned_dev->irq_source_id,
-			    assigned_dev->guest_irq, 1);
-	else if (assigned_dev->irq_requested_type &
-				KVM_ASSIGNED_DEV_GUEST_MSI) {
-		assigned_device_msi_dispatch(assigned_dev, assigned_dev->guest_irq);
+
+	kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
+			assigned_dev->guest_irq, 1);
+
+	if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_GUEST_MSI) {
 		enable_irq(assigned_dev->host_irq);
 		assigned_dev->host_irq_disabled = false;
 	}
-- 
1.5.4.5



* [PATCH 6/7] KVM: Split IOAPIC structure
  2009-01-08 10:45 [PATCH 0/7][v5] GSI route layer for MSI/MSI-X Sheng Yang
                   ` (4 preceding siblings ...)
  2009-01-08 10:45 ` [PATCH 5/7] KVM: Merge MSI handling to kvm_set_irq Sheng Yang
@ 2009-01-08 10:45 ` Sheng Yang
  2009-01-08 15:27   ` Marcelo Tosatti
  2009-01-08 10:45 ` [PATCH 7/7] KVM: Unified the delivery of IOAPIC and MSI Sheng Yang
  6 siblings, 1 reply; 23+ messages in thread
From: Sheng Yang @ 2009-01-08 10:45 UTC (permalink / raw)
  To: Avi Kivity, Marcelo Tosatti; +Cc: kvm, Sheng Yang

Prepare to reuse ioapic_redir_entry for MSI.
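
To make the intent concrete (a sketch of what patch 7 later does with it; msi
below stands for a struct msi_msg), an MSI message can then be decoded straight
into the same fields the IOAPIC delivery path already consumes:

	union kvm_ioapic_redirect_entry entry;

	entry.bits = 0;
	entry.fields.dest_id = (msi.address_lo & MSI_ADDR_DEST_ID_MASK)
				>> MSI_ADDR_DEST_ID_SHIFT;
	entry.fields.vector = (msi.data & MSI_DATA_VECTOR_MASK)
				>> MSI_DATA_VECTOR_SHIFT;
	entry.fields.dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
				(unsigned long *)&msi.address_lo);
	entry.fields.trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
				(unsigned long *)&msi.data);
	entry.fields.delivery_mode = test_bit(MSI_DATA_DELIVERY_MODE_SHIFT,
				(unsigned long *)&msi.data);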

Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 include/linux/kvm_types.h |   17 +++++++++++++++++
 virt/kvm/ioapic.c         |    6 +++---
 virt/kvm/ioapic.h         |   17 +----------------
 3 files changed, 21 insertions(+), 19 deletions(-)

diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 5f4a18c..46e3d8d 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -52,4 +52,21 @@ struct kvm_pio_request {
 	int rep;
 };
 
+union kvm_ioapic_redirect_entry {
+	u64 bits;
+	struct {
+		u8 vector;
+		u8 delivery_mode:3;
+		u8 dest_mode:1;
+		u8 delivery_status:1;
+		u8 polarity:1;
+		u8 remote_irr:1;
+		u8 trig_mode:1;
+		u8 mask:1;
+		u8 reserve:7;
+		u8 reserved[4];
+		u8 dest_id;
+	} fields;
+};
+
 #endif /* __KVM_TYPES_H__ */
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index e85a2bc..b6530e9 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -85,7 +85,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
 
 static void ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
 {
-	union ioapic_redir_entry *pent;
+	union kvm_ioapic_redirect_entry *pent;
 
 	pent = &ioapic->redirtbl[idx];
 
@@ -277,7 +277,7 @@ void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
 {
 	u32 old_irr = ioapic->irr;
 	u32 mask = 1 << irq;
-	union ioapic_redir_entry entry;
+	union kvm_ioapic_redirect_entry entry;
 
 	if (irq >= 0 && irq < IOAPIC_NUM_PINS) {
 		entry = ioapic->redirtbl[irq];
@@ -296,7 +296,7 @@ void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
 static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int gsi,
 				    int trigger_mode)
 {
-	union ioapic_redir_entry *ent;
+	union kvm_ioapic_redirect_entry *ent;
 
 	ent = &ioapic->redirtbl[gsi];
 
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index 49c9581..ee5b0bd 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -40,22 +40,7 @@ struct kvm_ioapic {
 	u32 id;
 	u32 irr;
 	u32 pad;
-	union ioapic_redir_entry {
-		u64 bits;
-		struct {
-			u8 vector;
-			u8 delivery_mode:3;
-			u8 dest_mode:1;
-			u8 delivery_status:1;
-			u8 polarity:1;
-			u8 remote_irr:1;
-			u8 trig_mode:1;
-			u8 mask:1;
-			u8 reserve:7;
-			u8 reserved[4];
-			u8 dest_id;
-		} fields;
-	} redirtbl[IOAPIC_NUM_PINS];
+	union kvm_ioapic_redirect_entry redirtbl[IOAPIC_NUM_PINS];
 	struct kvm_io_device dev;
 	struct kvm *kvm;
 	void (*ack_notifier)(void *opaque, int irq);
-- 
1.5.4.5



* [PATCH 7/7] KVM: Unified the delivery of IOAPIC and MSI
  2009-01-08 10:45 [PATCH 0/7][v5] GSI route layer for MSI/MSI-X Sheng Yang
                   ` (5 preceding siblings ...)
  2009-01-08 10:45 ` [PATCH 6/7] KVM: Split IOAPIC structure Sheng Yang
@ 2009-01-08 10:45 ` Sheng Yang
  6 siblings, 0 replies; 23+ messages in thread
From: Sheng Yang @ 2009-01-08 10:45 UTC (permalink / raw)
  To: Avi Kivity, Marcelo Tosatti; +Cc: kvm, Sheng Yang


Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 include/linux/kvm_host.h |    3 ++
 virt/kvm/ioapic.c        |   84 ++++++++++++++++----------------------------
 virt/kvm/irq_comm.c      |   86 ++++++++++++++++++++++++++++------------------
 3 files changed, 86 insertions(+), 87 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index bfdaab9..2736dbf 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -351,6 +351,9 @@ struct kvm_gsi_route_entry {
 	struct hlist_node link;
 };
 
+void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
+				   union kvm_ioapic_redirect_entry *entry,
+				   u32 *deliver_bitmask);
 void kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 gsi, int level);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index b6530e9..951df12 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -200,75 +200,53 @@ u32 kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
 
 static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 {
-	u8 dest = ioapic->redirtbl[irq].fields.dest_id;
-	u8 dest_mode = ioapic->redirtbl[irq].fields.dest_mode;
-	u8 delivery_mode = ioapic->redirtbl[irq].fields.delivery_mode;
-	u8 vector = ioapic->redirtbl[irq].fields.vector;
-	u8 trig_mode = ioapic->redirtbl[irq].fields.trig_mode;
+	union kvm_ioapic_redirect_entry entry = ioapic->redirtbl[irq];
 	u32 deliver_bitmask;
 	struct kvm_vcpu *vcpu;
 	int vcpu_id, r = 0;
 
 	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
 		     "vector=%x trig_mode=%x\n",
-		     dest, dest_mode, delivery_mode, vector, trig_mode);
+		     entry.fields.dest_id, entry.fields.dest_mode,
+		     entry.fields.delivery_mode, entry.fields.vector,
+		     entry.fields.trig_mode);
 
-	deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic, dest,
-							  dest_mode);
+	kvm_get_intr_delivery_bitmask(ioapic, &entry, &deliver_bitmask);
 	if (!deliver_bitmask) {
 		ioapic_debug("no target on destination\n");
 		return 0;
 	}
 
-	switch (delivery_mode) {
-	case IOAPIC_LOWEST_PRIORITY:
-		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
-				deliver_bitmask);
+	/* Always deliver the PIT interrupt to vcpu 0 */
 #ifdef CONFIG_X86
-		if (irq == 0)
-			vcpu = ioapic->kvm->vcpus[0];
+	if (irq == 0)
+		deliver_bitmask = 1 << 0;
 #endif
-		if (vcpu != NULL)
-			r = ioapic_inj_irq(ioapic, vcpu, vector,
-				       trig_mode, delivery_mode);
-		else
-			ioapic_debug("null lowest prio vcpu: "
-				     "mask=%x vector=%x delivery_mode=%x\n",
-				     deliver_bitmask, vector, IOAPIC_LOWEST_PRIORITY);
-		break;
-	case IOAPIC_FIXED:
-#ifdef CONFIG_X86
-		if (irq == 0)
-			deliver_bitmask = 1;
-#endif
-		for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
-			if (!(deliver_bitmask & (1 << vcpu_id)))
-				continue;
-			deliver_bitmask &= ~(1 << vcpu_id);
-			vcpu = ioapic->kvm->vcpus[vcpu_id];
-			if (vcpu) {
-				r = ioapic_inj_irq(ioapic, vcpu, vector,
-					       trig_mode, delivery_mode);
-			}
-		}
-		break;
-	case IOAPIC_NMI:
-		for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
-			if (!(deliver_bitmask & (1 << vcpu_id)))
-				continue;
-			deliver_bitmask &= ~(1 << vcpu_id);
-			vcpu = ioapic->kvm->vcpus[vcpu_id];
-			if (vcpu)
+
+	for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
+		if (!(deliver_bitmask & (1 << vcpu_id)))
+			continue;
+		deliver_bitmask &= ~(1 << vcpu_id);
+		vcpu = ioapic->kvm->vcpus[vcpu_id];
+		if (vcpu) {
+			if (entry.fields.delivery_mode ==
+					IOAPIC_LOWEST_PRIORITY ||
+			    entry.fields.delivery_mode == IOAPIC_FIXED)
+				r = ioapic_inj_irq(ioapic, vcpu,
+						   entry.fields.vector,
+						   entry.fields.trig_mode,
+						   entry.fields.delivery_mode);
+			else if (entry.fields.delivery_mode == IOAPIC_NMI)
 				ioapic_inj_nmi(vcpu);
 			else
-				ioapic_debug("NMI to vcpu %d failed\n",
-						vcpu->vcpu_id);
-		}
-		break;
-	default:
-		printk(KERN_WARNING "Unsupported delivery mode %d\n",
-		       delivery_mode);
-		break;
+				ioapic_debug("unsupported delivery mode %x!\n",
+					     entry.fields.delivery_mode);
+		} else
+			ioapic_debug("null destination vcpu: "
+				     "mask=%x vector=%x delivery_mode=%x\n",
+				     deliver_bitmask,
+				     entry.fields.vector,
+				     entry.fields.delivery_mode);
 	}
 	return r;
 }
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index e6ce472..6f9b51e 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -28,13 +28,39 @@
 #include <asm/msidef.h>
 #endif
 
+void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
+				   union kvm_ioapic_redirect_entry *entry,
+				   u32 *deliver_bitmask)
+{
+	struct kvm_vcpu *vcpu;
+
+	*deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
+				entry->fields.dest_id, entry->fields.dest_mode);
+	switch (entry->fields.delivery_mode) {
+	case IOAPIC_LOWEST_PRIORITY:
+		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm,
+				entry->fields.vector, *deliver_bitmask);
+		*deliver_bitmask = 1 << vcpu->vcpu_id;
+		break;
+	case IOAPIC_FIXED:
+	case IOAPIC_NMI:
+		break;
+	default:
+		if (printk_ratelimit())
+			printk(KERN_INFO "kvm: unsupported delivery mode %d\n",
+				entry->fields.delivery_mode);
+		*deliver_bitmask = 0;
+	}
+}
+
+
 static void gsi_dispatch(struct kvm *kvm, u32 gsi)
 {
 	int vcpu_id;
 	struct kvm_vcpu *vcpu;
 	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
 	struct kvm_gsi_route_entry *gsi_entry;
-	int dest_id, vector, dest_mode, trig_mode, delivery_mode;
+	union kvm_ioapic_redirect_entry entry;
 	u32 deliver_bitmask;
 
 	BUG_ON(!ioapic);
@@ -47,42 +73,34 @@ static void gsi_dispatch(struct kvm *kvm, u32 gsi)
 
 #ifdef CONFIG_X86
 	if (gsi_entry->type & KVM_GSI_ROUTE_MSI) {
-		dest_id = (gsi_entry->msi.address_lo & MSI_ADDR_DEST_ID_MASK)
-			>> MSI_ADDR_DEST_ID_SHIFT;
-		vector = (gsi_entry->msi.data & MSI_DATA_VECTOR_MASK)
-			>> MSI_DATA_VECTOR_SHIFT;
-		dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
+		entry.bits = 0;
+		entry.fields.dest_id = (gsi_entry->msi.address_lo &
+			MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
+		entry.fields.vector = (gsi_entry->msi.data &
+			MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
+		entry.fields.dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
 				(unsigned long *)&gsi_entry->msi.address_lo);
-		trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
+		entry.fields.trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
 				(unsigned long *)&gsi_entry->msi.data);
-		delivery_mode = test_bit(MSI_DATA_DELIVERY_MODE_SHIFT,
+		entry.fields.delivery_mode = test_bit(
+				MSI_DATA_DELIVERY_MODE_SHIFT,
 				(unsigned long *)&gsi_entry->msi.data);
-		deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
-				dest_id, dest_mode);
-		/* IOAPIC delivery mode value is the same as MSI here */
-		switch (delivery_mode) {
-		case IOAPIC_LOWEST_PRIORITY:
-			vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
-					deliver_bitmask);
-			if (vcpu != NULL)
-				kvm_apic_set_irq(vcpu, vector, trig_mode);
-			else
-				printk(KERN_INFO
-				       "kvm: null lowest priority vcpu!\n");
-			break;
-		case IOAPIC_FIXED:
-			for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
-				if (!(deliver_bitmask & (1 << vcpu_id)))
-					continue;
-				deliver_bitmask &= ~(1 << vcpu_id);
-				vcpu = ioapic->kvm->vcpus[vcpu_id];
-				if (vcpu)
-					kvm_apic_set_irq(vcpu, vector,
-							trig_mode);
-			}
-			break;
-		default:
-			break;
+
+		kvm_get_intr_delivery_bitmask(ioapic, &entry, &deliver_bitmask);
+
+		if (!deliver_bitmask) {
+			printk(KERN_WARNING
+				"kvm: no destination for MSI delivery!");
+			return;
+		}
+		for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
+			if (!(deliver_bitmask & (1 << vcpu_id)))
+				continue;
+			deliver_bitmask &= ~(1 << vcpu_id);
+			vcpu = ioapic->kvm->vcpus[vcpu_id];
+			if (vcpu)
+				kvm_apic_set_irq(vcpu, entry.fields.vector,
+						entry.fields.trig_mode);
 		}
 	}
 #endif /* CONFIG_X86 */
-- 
1.5.4.5



* Re: [PATCH 1/7] KVM: Add a route layer to convert MSI message to GSI
  2009-01-08 10:45 ` [PATCH 1/7] KVM: Add a route layer to convert MSI message to GSI Sheng Yang
@ 2009-01-08 14:20   ` Marcelo Tosatti
  2009-01-09  1:51     ` Sheng Yang
  2009-01-08 15:08   ` Avi Kivity
  1 sibling, 1 reply; 23+ messages in thread
From: Marcelo Tosatti @ 2009-01-08 14:20 UTC (permalink / raw)
  To: Sheng Yang; +Cc: Avi Kivity, kvm

On Thu, Jan 08, 2009 at 06:45:29PM +0800, Sheng Yang wrote:
>   * ioctls for VM fds
> @@ -433,6 +436,8 @@ struct kvm_trace_rec {
>  #define KVM_ASSIGN_IRQ _IOR(KVMIO, 0x70, \
>  			    struct kvm_assigned_irq)
>  #define KVM_REINJECT_CONTROL      _IO(KVMIO, 0x71)
> +#define KVM_REQUEST_GSI_ROUTE	  _IOWR(KVMIO, 0x72, void *)
> +#define KVM_FREE_GSI_ROUTE	  _IOR(KVMIO, 0x73, void *)

Oh this slipped: should be struct kvm_gsi_route_guest.

>  /*
>   * ioctls for vcpu fds
> @@ -553,4 +558,25 @@ struct kvm_assigned_irq {
>  #define KVM_DEV_IRQ_ASSIGN_MSI_ACTION	KVM_DEV_IRQ_ASSIGN_ENABLE_MSI
>  #define KVM_DEV_IRQ_ASSIGN_ENABLE_MSI	(1 << 0)
>  
> +struct kvm_gsi_route_guest {
> +	__u32 entries_nr;
> +	struct kvm_gsi_route_entry_guest *entries;
> +};

And you can use a zero sized array here.

Sorry :(


* Re: [PATCH 1/7] KVM: Add a route layer to convert MSI message to GSI
  2009-01-08 10:45 ` [PATCH 1/7] KVM: Add a route layer to convert MSI message to GSI Sheng Yang
  2009-01-08 14:20   ` Marcelo Tosatti
@ 2009-01-08 15:08   ` Avi Kivity
  2009-01-09  2:50     ` Sheng Yang
  1 sibling, 1 reply; 23+ messages in thread
From: Avi Kivity @ 2009-01-08 15:08 UTC (permalink / raw)
  To: Sheng Yang; +Cc: Marcelo Tosatti, kvm

Sheng Yang wrote:
> Avi's proposal was to use a single kvm_set_irq() to handle all interrupts,
> including MSI. So here it is.
>
> struct kvm_gsi_route_entry is a mapping from a special gsi (one with
> KVM_GSI_ROUTE_MASK set) to an MSI/MSI-X message address/data pair. The struct
> can also be extended for other purposes.
>
> Now we support up to 256 gsi_route_entry mappings. The gsi is allocated by the
> kernel, and two ioctls are provided to userspace, which is more flexible.
>
> @@ -553,4 +558,25 @@ struct kvm_assigned_irq {
>  #define KVM_DEV_IRQ_ASSIGN_MSI_ACTION	KVM_DEV_IRQ_ASSIGN_ENABLE_MSI
>  #define KVM_DEV_IRQ_ASSIGN_ENABLE_MSI	(1 << 0)
>  
> +struct kvm_gsi_route_guest {
> +	__u32 entries_nr;
>   

Need padding here otherwise offsetof(entries) will differ on 32-bit and 
64-bit kernels.

> +	struct kvm_gsi_route_entry_guest *entries;
>   

Like Marcelo says, zero sized array is better here.
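
Something like this, for example:

struct kvm_gsi_route_guest {
	__u32 entries_nr;
	__u32 padding;		/* keep the layout identical on 32/64-bit */
	struct kvm_gsi_route_entry_guest entries[0];
};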

> +};
> +
> +#define KVM_GSI_ROUTE_MSI	(1 << 0)
>   

This looks like a flag.  Shouldn't it be a type?

> +struct kvm_gsi_route_entry_guest {
>   
what does _guest mean here? almost all kvm stuff is _guest related.

> +	__u32 gsi;
> +	__u32 type;
> +	__u32 flags;
> +	__u32 reserved;
> +	union {
> +		struct {
> +			__u32 addr_lo;
> +			__u32 addr_hi;
> +			__u32 data;
> +		} msi;
> +		__u32 padding[8];
> +	};
> +};
> +
>   

Since we replace the entire table every time, how do ioapic/pic gsis work?

>  
>  /* The guest did something we don't support. */
> @@ -336,6 +339,19 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
>  				      struct kvm_irq_mask_notifier *kimn);
>  void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);
>  
> +#define KVM_GSI_ROUTE_MASK    0x1000000ull
> +struct kvm_gsi_route_entry {
> +	u32 gsi;
> +	u32 type;
> +	u32 flags;
> +	u32 reserved;
> +	union {
> +		struct msi_msg msi;
> +		u32 reserved[8];
>   

No need for reserved fields in kernel data.

> +	};
> +	struct hlist_node link;
> +};
> @@ -123,3 +123,73 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
>  			kimn->func(kimn, mask);
>  }
>  
> +int kvm_update_gsi_route(struct kvm *kvm, struct kvm_gsi_route_entry *entry)
> +{
> +	struct kvm_gsi_route_entry *found_entry, *new_entry;
> +	int r, gsi;
> +
> +	mutex_lock(&kvm->lock);
> +	/* Find whether we need a update or a new entry */
> +	found_entry = kvm_find_gsi_route_entry(kvm, entry->gsi);
> +	if (found_entry)
> +		*found_entry = *entry;
> +	else {
> +		gsi = find_first_zero_bit(kvm->gsi_route_bitmap,
> +					  KVM_NR_GSI_ROUTE_ENTRIES);
> +		if (gsi >= KVM_NR_GSI_ROUTE_ENTRIES) {
> +			r = -ENOSPC;
> +			goto out;
> +		}
> +		new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
> +		if (!new_entry) {
> +			r = -ENOMEM;
> +			goto out;
> +		}
> +		*new_entry = *entry;
> +		entry->gsi = gsi | KVM_GSI_ROUTE_MASK;
> +		__set_bit(gsi, kvm->gsi_route_bitmap);
> +		hlist_add_head(&new_entry->link, &kvm->gsi_route_list);
> +	}
> +	r = 0;
> +out:
> +	mutex_unlock(&kvm->lock);
> +	return r;
> +}
>   

Why not throw everything and set the new table?

I didn't see where you respond to the new KVM_CAP.  It looks like a good
place to return the maximum size of the table.

-- 
error compiling committee.c: too many arguments to function



* Re: [PATCH 2/7] KVM: Using gsi route for MSI device assignment
  2009-01-08 10:45 ` [PATCH 2/7] KVM: Using gsi route for MSI device assignment Sheng Yang
@ 2009-01-08 15:11   ` Avi Kivity
  0 siblings, 0 replies; 23+ messages in thread
From: Avi Kivity @ 2009-01-08 15:11 UTC (permalink / raw)
  To: Sheng Yang; +Cc: Marcelo Tosatti, kvm

Sheng Yang wrote:
> Convert the MSI userspace interface to use the gsi route mapping (and nobody
> should be using the old interface anyway...).
>   

This affects device assignment only, right?

-- 
error compiling committee.c: too many arguments to function



* Re: [PATCH 6/7] KVM: Split IOAPIC structure
  2009-01-08 10:45 ` [PATCH 6/7] KVM: Split IOAPIC structure Sheng Yang
@ 2009-01-08 15:27   ` Marcelo Tosatti
  2009-01-09  5:55     ` Sheng Yang
  0 siblings, 1 reply; 23+ messages in thread
From: Marcelo Tosatti @ 2009-01-08 15:27 UTC (permalink / raw)
  To: Sheng Yang; +Cc: Avi Kivity, kvm

On Thu, Jan 08, 2009 at 06:45:34PM +0800, Sheng Yang wrote:
> Prepare to reuse ioapic_redir_entry for MSI.

What is the disadvantage of dispatching the MSI interrupts to the vcpus
via the IOAPIC? Pin shortage I can think of, but adding more IOAPIC's is
possible (and wanted anyway for systems with insane amounts of net/block
devices).

That would avoid code duplication (might need to handle a few msi
specific bits).




* Re: [PATCH 1/7] KVM: Add a route layer to convert MSI message to GSI
  2009-01-08 14:20   ` Marcelo Tosatti
@ 2009-01-09  1:51     ` Sheng Yang
  0 siblings, 0 replies; 23+ messages in thread
From: Sheng Yang @ 2009-01-09  1:51 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: Avi Kivity, kvm

On Thursday 08 January 2009 22:20:22 Marcelo Tosatti wrote:
> On Thu, Jan 08, 2009 at 06:45:29PM +0800, Sheng Yang wrote:
> >   * ioctls for VM fds
> > @@ -433,6 +436,8 @@ struct kvm_trace_rec {
> >  #define KVM_ASSIGN_IRQ _IOR(KVMIO, 0x70, \
> >  			    struct kvm_assigned_irq)
> >  #define KVM_REINJECT_CONTROL      _IO(KVMIO, 0x71)
> > +#define KVM_REQUEST_GSI_ROUTE	  _IOWR(KVMIO, 0x72, void *)
> > +#define KVM_FREE_GSI_ROUTE	  _IOR(KVMIO, 0x73, void *)
>
> Oh this slipped: should be struct kvm_gsi_route_guest.

Oh, yeah, forgot to change it back (I originally proposed an array here...)
>
> >  /*
> >   * ioctls for vcpu fds
> > @@ -553,4 +558,25 @@ struct kvm_assigned_irq {
> >  #define KVM_DEV_IRQ_ASSIGN_MSI_ACTION	KVM_DEV_IRQ_ASSIGN_ENABLE_MSI
> >  #define KVM_DEV_IRQ_ASSIGN_ENABLE_MSI	(1 << 0)
> >
> > +struct kvm_gsi_route_guest {
> > +	__u32 entries_nr;
> > +	struct kvm_gsi_route_entry_guest *entries;
> > +};
>
> And you can use a zero sized array here.

OK.
>
> Sorry :(

Oh, that's not necessary. :)

-- 
regards
Yang, Sheng



* Re: [PATCH 1/7] KVM: Add a route layer to convert MSI message to GSI
  2009-01-08 15:08   ` Avi Kivity
@ 2009-01-09  2:50     ` Sheng Yang
  2009-01-09 18:06       ` Avi Kivity
  0 siblings, 1 reply; 23+ messages in thread
From: Sheng Yang @ 2009-01-09  2:50 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, kvm

On Thursday 08 January 2009 23:08:25 Avi Kivity wrote:
> Sheng Yang wrote:
> > Avi's proposal was to use a single kvm_set_irq() to handle all
> > interrupts, including MSI. So here it is.
> >
> > struct kvm_gsi_route_entry is a mapping from a special gsi (one with
> > KVM_GSI_ROUTE_MASK set) to an MSI/MSI-X message address/data pair. The
> > struct can also be extended for other purposes.
> >
> > Now we support up to 256 gsi_route_entry mappings. The gsi is allocated
> > by the kernel, and two ioctls are provided to userspace, which is more
> > flexible.
> >
> > @@ -553,4 +558,25 @@ struct kvm_assigned_irq {
> >  #define KVM_DEV_IRQ_ASSIGN_MSI_ACTION	KVM_DEV_IRQ_ASSIGN_ENABLE_MSI
> >  #define KVM_DEV_IRQ_ASSIGN_ENABLE_MSI	(1 << 0)
> >
> > +struct kvm_gsi_route_guest {
> > +	__u32 entries_nr;
>
> Need padding here otherwise offsetof(entries) will differ on 32-bit and
> 64-bit kernels.

OK.

>
> > +	struct kvm_gsi_route_entry_guest *entries;
>
> Like Marcelo says, zero sized array is better here.
>
> > +};
> > +
> > +#define KVM_GSI_ROUTE_MSI	(1 << 0)
>
> This looks like a flag.  Shouldn't it be a type?

Oh, custom... Would update.

> > +struct kvm_gsi_route_entry_guest {
>
> what does _guest mean here? almost all kvm stuff is _guest related.

Because I can't think of a good name... kvm_gsi_route_entry_guest? 
kvm_gsi_kernel_route_entry? What's your favorite? :)

> > +	__u32 gsi;
> > +	__u32 type;
> > +	__u32 flags;
> > +	__u32 reserved;
> > +	union {
> > +		struct {
> > +			__u32 addr_lo;
> > +			__u32 addr_hi;
> > +			__u32 data;
> > +		} msi;
> > +		__u32 padding[8];
> > +	};
> > +};
> > +
>
> Since we replace the entire table every time, how do ioapic/pic gsis work?
> >  /* The guest did something we don't support. */
> > @@ -336,6 +339,19 @@ void kvm_unregister_irq_mask_notifier(struct kvm
> > *kvm, int irq, struct kvm_irq_mask_notifier *kimn);
> >  void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);
> >
> > +#define KVM_GSI_ROUTE_MASK    0x1000000ull
> > +struct kvm_gsi_route_entry {
> > +	u32 gsi;
> > +	u32 type;
> > +	u32 flags;
> > +	u32 reserved;
> > +	union {
> > +		struct msi_msg msi;
> > +		u32 reserved[8];
>
> No need for reserved fields in kernel data.

Yeah

> > +	};
> > +	struct hlist_node link;
> > +};
> > @@ -123,3 +123,73 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, int
> > irq, bool mask) kimn->func(kimn, mask);
> >  }
> >
> > +int kvm_update_gsi_route(struct kvm *kvm, struct kvm_gsi_route_entry
> > *entry) +{
> > +	struct kvm_gsi_route_entry *found_entry, *new_entry;
> > +	int r, gsi;
> > +
> > +	mutex_lock(&kvm->lock);
> > +	/* Find whether we need a update or a new entry */
> > +	found_entry = kvm_find_gsi_route_entry(kvm, entry->gsi);
> > +	if (found_entry)
> > +		*found_entry = *entry;
> > +	else {
> > +		gsi = find_first_zero_bit(kvm->gsi_route_bitmap,
> > +					  KVM_NR_GSI_ROUTE_ENTRIES);
> > +		if (gsi >= KVM_NR_GSI_ROUTE_ENTRIES) {
> > +			r = -ENOSPC;
> > +			goto out;
> > +		}
> > +		new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
> > +		if (!new_entry) {
> > +			r = -ENOMEM;
> > +			goto out;
> > +		}
> > +		*new_entry = *entry;
> > +		entry->gsi = gsi | KVM_GSI_ROUTE_MASK;
> > +		__set_bit(gsi, kvm->gsi_route_bitmap);
> > +		hlist_add_head(&new_entry->link, &kvm->gsi_route_list);
> > +	}
> > +	r = 0;
> > +out:
> > +	mutex_unlock(&kvm->lock);
> > +	return r;
> > +}
>
> Why not throw everything and set the new table?

Userspace maintaining a big route table? Just for MSI/MSI-X it's easy, but for
the others a global one is needed, plus more maintenance functions to go with
it. In the kernel, a little more effort can achieve this, like this. So I did
it this way.

> I didn't see where you respond to the new KVM_CAP.  It looks like a good
> place to return the maximum size of the table.

I just use it as an #ifdef in userspace now, since there is no user other than
MSI/MSI-X yet. And if we keep maintaining it in the kernel, we would return the
free size instead of the maximum size...

-- 
regards
Yang, Sheng


* Re: [PATCH 6/7] KVM: Split IOAPIC structure
  2009-01-08 15:27   ` Marcelo Tosatti
@ 2009-01-09  5:55     ` Sheng Yang
  0 siblings, 0 replies; 23+ messages in thread
From: Sheng Yang @ 2009-01-09  5:55 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: Avi Kivity, kvm

On Thursday 08 January 2009 23:27:21 Marcelo Tosatti wrote:
> On Thu, Jan 08, 2009 at 06:45:34PM +0800, Sheng Yang wrote:
> > Prepare to reuse ioapic_redir_entry for MSI.
>
> What is the disadvantage of dispatching the MSI interrupts to the vcpus
> via the IOAPIC? Pin shortage I can think of, but adding more IOAPIC's is
> possible (and wanted anyway for systems with insane amounts of net/block
> devices).
>
> That would avoid code duplication (might need to handle a few msi
> specific bits).

I think for the current RH=1 implementation, independence from the IOAPIC is
acceptable...

Indicated by the "Redirection Hint Indication" (RH) bit, there are two modes in
which MSI can be delivered: either using the IOAPIC redirection table entry
(RH=0), or redirecting directly (RH=1). I implemented the latter for now, and
it's the most common way. Of course, delivery would go through the IOAPIC when
RH=0.

-- 
regards
Yang, Sheng



* Re: [PATCH 1/7] KVM: Add a route layer to convert MSI message to GSI
  2009-01-09  2:50     ` Sheng Yang
@ 2009-01-09 18:06       ` Avi Kivity
  2009-01-10 11:12         ` Sheng Yang
                           ` (2 more replies)
  0 siblings, 3 replies; 23+ messages in thread
From: Avi Kivity @ 2009-01-09 18:06 UTC (permalink / raw)
  To: Sheng Yang; +Cc: Marcelo Tosatti, kvm

Sheng Yang wrote:
>>> +struct kvm_gsi_route_entry_guest {
>>>       
>> what does _guest mean here? almost all kvm stuff is _guest related.
>>     
>
> Because I can't think of a good name... kvm_gsi_route_entry_guest? 
> kvm_gsi_kernel_route_entry? What's your favorite? :)
>
>   

kvm_gsi_route_entry?

>>
>> Since we replace the entire table every time, how do ioapic/pic gsis work?
>>     

Missed this question...

>> Why not throw everything and set the new table?
>>     
>
> Userspace maintaining a big route table? Just for MSI/MSI-X it's easy, but
> for the others a global one is needed, plus more maintenance functions to go
> with it. In the kernel, a little more effort can achieve this, like this. So
> I did it this way.
>   

Sorry, I don't understand the reply.

>> I didn't see where you respond to the new KVM_CAP.  It looks like a good
>> place to return the maximum size of the table.
>>     
>
> I just use it as an #ifdef in userspace now, since there is no user other
> than MSI/MSI-X yet. And if we keep maintaining it in the kernel, we would
> return the free size instead of the maximum size...
>   

We need to allow userspace to change pic/ioapic routing for the HPET.

There are two styles of maintaining the table:

1. add/remove ioctls

The advantage is that very little work needs to be done when something 
changes, but the code size (and bug count) doubles.

2. replace everything ioctl

Smaller code size, but slower if there are high frequency changes

I don't think we'll see high frequency interrupt routing changes; we'll 
probably have one on setup (for HPET), another when switching from ACPI 
PIC mode to ACPI APIC mode, and one for every msi initialized.
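
For concreteness, the replace-everything variant could be as small as this
(just a sketch; the struct name and ioctl number are made up):

struct kvm_gsi_routing {
	__u32 nr;	/* number of entries that follow */
	__u32 flags;
	struct kvm_gsi_route_entry_guest entries[0];
};

#define KVM_SET_GSI_ROUTING	_IOW(KVMIO, 0x74, struct kvm_gsi_routing)

Userspace always passes the complete table; the kernel drops the old one and
installs the new one.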


-- 
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.



* Re: [PATCH 1/7] KVM: Add a route layer to convert MSI message to GSI
  2009-01-09 18:06       ` Avi Kivity
@ 2009-01-10 11:12         ` Sheng Yang
  2009-01-11  9:34           ` Avi Kivity
  2009-01-10 12:28         ` Sheng Yang
  2009-01-11  9:40         ` Avi Kivity
  2 siblings, 1 reply; 23+ messages in thread
From: Sheng Yang @ 2009-01-10 11:12 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Sheng Yang, Marcelo Tosatti, kvm

On Fri, Jan 09, 2009 at 08:06:01PM +0200, Avi Kivity wrote:
> Sheng Yang wrote:
>>>> +struct kvm_gsi_route_entry_guest {
>>>>       
>>> what does _guest mean here? almost all kvm stuff is _guest related.
>>>     
>>
>> Because I can't think of a good name... kvm_gsi_route_entry_guest?  
>> kvm_gsi_kernel_route_entry? What's your favorite? :)
>>
>>   
>
> kvm_gsi_route_entry?

Which is used for the kernel one now...
>
>>>
>>> Since we replace the entire table every time, how do ioapic/pic gsis work?
>>>     
>
> Missed this question...

My comments below...
>
>>> Why not throw everything and set the new table?
>>>     
>>
>> Userspace maintaining a big route table? Just for MSI/MSI-X it's easy,
>> but for the others a global one is needed, plus more maintenance
>> functions to go with it. In the kernel, a little more effort can achieve
>> this, like this. So I did it this way.
>>   
>
> Sorry, I don't understand the reply.
>
>>> I didn't see where you respond to the new KVM_CAP.  It looks like a good
>>> place to return the maximum size of the table.
>>>     
>>
>> I just use it as an #ifdef in userspace now, since there is no user other
>> than MSI/MSI-X yet. And if we keep maintaining it in the kernel, we would
>> return the free size instead of the maximum size...
>>   
>
> We need to allow userspace to change pic/ioapic routing for the HPET.
>
> There are two styles of maintaining the table:
>
> 1. add/remove ioctls
>
> The advantage is that very little work needs to be done when something  
> changes, but the code size (and bug count) doubles.
>
> 2. replace everything ioctl
>
> Smaller code size, but slower if there are high frequency changes
>
> I don't think we'll see high frequency interrupt routing changes; we'll  
> probably have one on setup (for HPET), another when switching from ACPI  
> PIC mode to ACPI APIC mode, and one for every msi initialized.

I have come around to option 2, mixed with a bit of option 1, now. What I
meant is that the MSI part lives in the device-assignment code, while HPET
and the others live elsewhere, so userspace would have to maintain one
global table shared across those parts. I don't like a global gsi route
table in userspace, especially since we already have one in kernel space. We
have to do some maintenance work in the kernel anyway, e.g. looking up an
entry, so I think a small add-on can take care of the job.

And now I think your original idea was three different tables, for MSI, HPET
and ACPI APIC mode? That also avoids a big global table in userspace, but it
seems like a waste, and also too specific...

So how about this? One ioctl to replace everything, and another (maybe two:
transfer the entry count first, then the table; or a single one with the
maximum entry count supplied by userspace) to export the current gsi route
table? That avoids the global route table as well as reducing the
complexity.
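
As a rough illustration of that export handshake from the userspace side (the
two KVM_GET_* ioctls and their numbers are made up for the sketch; struct
kvm_gsi_route_guest is the one from patch 1/7; error paths are trimmed):

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical ioctl numbers, for this sketch only. */
#define KVM_GET_GSI_ROUTE_NR	_IOR(KVMIO, 0x93, __u32)
#define KVM_GET_GSI_ROUTE	_IOWR(KVMIO, 0x94, struct kvm_gsi_route_guest)

static int dump_gsi_routes(int vm_fd)
{
	struct kvm_gsi_route_guest table;
	__u32 i, nr = 0;

	/* Step 1: ask the kernel how many entries it currently holds. */
	if (ioctl(vm_fd, KVM_GET_GSI_ROUTE_NR, &nr) < 0)
		return -1;

	/* Step 2: fetch the table itself into a buffer of that size. */
	table.entries_nr = nr;
	table.entries = calloc(nr, sizeof(*table.entries));
	if (!table.entries)
		return -1;
	if (ioctl(vm_fd, KVM_GET_GSI_ROUTE, &table) < 0) {
		free(table.entries);
		return -1;
	}

	for (i = 0; i < nr; i++)
		printf("gsi 0x%x type 0x%x\n",
		       table.entries[i].gsi, table.entries[i].type);
	free(table.entries);
	return 0;
}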

What do you think?

-- 
regards
Yang, Sheng	|Intel Opensource Technology Center

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 1/7] KVM: Add a route layer to convert MSI message to GSI
  2009-01-09 18:06       ` Avi Kivity
  2009-01-10 11:12         ` Sheng Yang
@ 2009-01-10 12:28         ` Sheng Yang
  2009-01-11  9:38           ` Avi Kivity
  2009-01-11  9:40         ` Avi Kivity
  2 siblings, 1 reply; 23+ messages in thread
From: Sheng Yang @ 2009-01-10 12:28 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Sheng Yang, Marcelo Tosatti, kvm

On Fri, Jan 09, 2009 at 08:06:01PM +0200, Avi Kivity wrote:
> Sheng Yang wrote:
>> I just use it as an #ifdef in userspace now, since there is no user other
>> than MSI/MSI-X yet. And if we keep maintaining the table in the kernel,
>> we would return the free size instead of the maximum size...
>>   
>
> We need to allow userspace to change pic/ioapic routing for the HPET.
>
> There are two styles of maintaining the table:
>
> 1. add/remove ioctls
>
> The advantage is that very little work needs to be done when something  
> changes, but the code size (and bug count) doubles.
>
> 2. replace everything ioctl
>
> Smaller code size, but slower if there are high frequency changes
>
> I don't think we'll see high frequency interrupt routing changes; we'll  
> probably have one on setup (for HPET), another when switching from ACPI  
> PIC mode to ACPI APIC mode, and one for every msi initialized.

Hi, Avi

After reconsidering, I must say I prefer add/remove ioctls.

About the code size, I don't think it would increase much. I've rewritten
the code twice, so I think I can say the difference is small.

For the option 2 route table ioctl, we get an array from userspace and
convert it to a linked list that we keep in the kernel. That is pretty much
a must (I don't think you would prefer keeping an array in the kernel), and
it's very direct.
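
Roughly, that conversion amounts to the following (a sketch only, not the
posted code: the kvm_gsi_route_kernel_entry field names and the helper name
are assumed, and locking and limit checks are left out):

/* Sketch: copy an entry array from userspace and link it into the VM's
 * gsi_route_list (the hlist added by patch 1/7). */
static int gsi_route_set_all(struct kvm *kvm,
			     struct kvm_gsi_route_entry_guest __user *uentries,
			     u32 nr)
{
	struct kvm_gsi_route_entry_guest guest;
	struct kvm_gsi_route_kernel_entry *e;
	u32 i;

	for (i = 0; i < nr; i++) {
		if (copy_from_user(&guest, &uentries[i], sizeof(guest)))
			return -EFAULT;
		e = kzalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			return -ENOMEM;
		e->gsi  = guest.gsi;	/* field names assumed */
		e->type = guest.type;
		/* ... copy/translate the msi part of the guest entry ... */
		hlist_add_head(&e->link, &kvm->gsi_route_list);
	}
	return 0;
}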

So we have to insert/delete route entries for both. What is the real
difference between doing it one by one and doing it all at once? I don't
think the code size differs much. And option 1 is certainly clear and
direct.

Besides, option 2 seems strange. Why should we handle the table that way
when it won't result in a significant code reduction? Inserting/deleting
entries is still there, looking up entries is still there, so not much
changes. And it's not as direct as option 1, which can itself be a source of
bugs.

What do you think?

-- 
regards
Yang, Sheng	|Intel Opensource Technology Center

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 1/7] KVM: Add a route layer to convert MSI message to GSI
  2009-01-10 11:12         ` Sheng Yang
@ 2009-01-11  9:34           ` Avi Kivity
  0 siblings, 0 replies; 23+ messages in thread
From: Avi Kivity @ 2009-01-11  9:34 UTC (permalink / raw)
  To: Avi Kivity, Sheng Yang, Marcelo Tosatti, kvm

Sheng Yang wrote:
> On Fri, Jan 09, 2009 at 08:06:01PM +0200, Avi Kivity wrote:
>   
>> Sheng Yang wrote:
>>     
>>>>> +struct kvm_gsi_route_entry_guest {
>>>>>       
>>>>>           
>>>> what does _guest mean here? almost all kvm stuff is _guest related.
>>>>     
>>>>         
>>> Because I can't think of a good name... kvm_gsi_route_entry_guest?  
>>> kvm_gsi_kernel_route_entry? What's your favorite? :)
>>>
>>>   
>>>       
>> kvm_gsi_route_entry?
>>     
>
> That name is already used for the kernel-side one now...
>   

So add _kernel to that...

It's more important to keep the userspace interface clean.

>> We need to allow userspace to change pic/ioapic routing for the HPET.
>>
>> There are two styles of maintaining the table:
>>
>> 1. add/remove ioctls
>>
>> The advantage is that very little work needs to be done when something  
>> changes, but the code size (and bug count) doubles.
>>
>> 2. replace everything ioctl
>>
>> Smaller code size, but slower if there are high frequency changes
>>
>> I don't think we'll see high frequency interrupt routing changes; we'll  
>> probably have one on setup (for HPET), another when switching from ACPI  
>> PIC mode to ACPI APIC mode, and one for every msi initialized.
>>     
>
> I have come around to option 2, mixed with a bit of option 1, now. What I
> meant is that the MSI part lives in the device-assignment code, while HPET
> and the others live elsewhere, so userspace would have to maintain one
> global table shared across those parts. I don't like a global gsi route
> table in userspace, especially since we already have one in kernel space.
> We have to do some maintenance work in the kernel anyway, e.g. looking up
> an entry, so I think a small add-on can take care of the job.
>
> And now I think your original idea was three different tables, for MSI,
> HPET and ACPI APIC mode? That also avoids a big global table in userspace,
> but it seems like a waste, and also too specific...
>
> So how about this? One ioctl to replace everything, and another (maybe two:
> transfer the entry count first, then the table; or a single one with the
> maximum entry count supplied by userspace) to export the current gsi route
> table? That avoids the global route table as well as reducing the
> complexity.
>   

I still don't understand.

We already have all of the information needed in userspace.  We know 
whether HPET is in use or not, whether PIC mode or APIC mode is enabled, 
and what MSIs are enabled.  We need all this at least for live migration.

In the kernel, we'll need a function to free the table (for VM 
shutdown), so adding a function to populate a new table seems to be the 
least amount of work.
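
The teardown side would be small -- a sketch against the hlist from the
posted patch (the function name is invented; clearing gsi_route_bitmap would
belong here as well):

static void kvm_free_gsi_route_list(struct kvm *kvm)
{
	struct kvm_gsi_route_kernel_entry *e;
	struct hlist_node *node, *n;

	/* Drop every route entry, e.g. at VM shutdown or before installing
	 * a whole new table. */
	hlist_for_each_entry_safe(e, node, n, &kvm->gsi_route_list, link) {
		hlist_del(&e->link);
		kfree(e);
	}
}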

But I think we're miscommunicating, maybe I'm misunderstanding what 
you're suggesting.

-- 
error compiling committee.c: too many arguments to function


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 1/7] KVM: Add a route layer to convert MSI message to GSI
  2009-01-10 12:28         ` Sheng Yang
@ 2009-01-11  9:38           ` Avi Kivity
  2009-01-12  6:53             ` Sheng Yang
  0 siblings, 1 reply; 23+ messages in thread
From: Avi Kivity @ 2009-01-11  9:38 UTC (permalink / raw)
  To: Avi Kivity, Sheng Yang, Marcelo Tosatti, kvm

Sheng Yang wrote:
> After reconsidering, I must say I prefer add/remove ioctls.
>
> About the code size, I don't think it would increase much. I've rewritten
> the code twice, so I think I can say the difference is small.
>   

:( sorry about that.

> For the option 2 route table ioctl, we get an array from userspace and
> convert it to a linked list that we keep in the kernel. That is pretty
> much a must (I don't think you would prefer keeping an array in the
> kernel), and it's very direct.
>   

Actually, eventually we'd want an array indexed by gsi.  Each element 
would be a pointer to another array (one or two routing entries).

Certainly we don't want to iterate a list which could hold several 
hundred interrupts for a large guest.

It's okay to start with a linked list, but eventually we'll want 
something faster.
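
Such a lookup structure might look roughly like this (a sketch only, not part
of the posted patches; the slot size and all names here are invented):

/* O(1) lookup: gsi -> small per-gsi array of routing entries.  An MSI gsi
 * would have its KVM_GSI_MSG_MASK bit stripped before indexing. */
#define KVM_GSI_ROUTE_ENTRIES_PER_GSI	2

struct kvm_gsi_route_slot {
	int nr;
	struct kvm_gsi_route_kernel_entry *entry[KVM_GSI_ROUTE_ENTRIES_PER_GSI];
};

struct kvm_gsi_route_table {
	/* KVM_NR_GSI_ROUTE_ENTRIES is the 256-entry limit from patch 1/7. */
	struct kvm_gsi_route_slot *slots[KVM_NR_GSI_ROUTE_ENTRIES];
};

static struct kvm_gsi_route_kernel_entry *
gsi_route_lookup(struct kvm_gsi_route_table *tbl, u32 gsi, int idx)
{
	struct kvm_gsi_route_slot *slot;

	if (gsi >= KVM_NR_GSI_ROUTE_ENTRIES)
		return NULL;
	slot = tbl->slots[gsi];
	if (!slot || idx >= slot->nr)
		return NULL;
	return slot->entry[idx];
}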

> So we have to insert/delete route entries for both. What is the real
> difference between doing it one by one and doing it all at once? I don't
> think the code size differs much. And option 1 is certainly clear and
> direct.
>
> Besides, option 2 seems strange. Why should we handle the table that way
> when it won't result in a significant code reduction? Inserting/deleting
> entries is still there, looking up entries is still there, so not much
> changes. And it's not as direct as option 1, which can itself be a source
> of bugs.
>
> What do you think?
>   

I'm not convinced.  Please post your latest, and I will post a 
counter-proposal.

-- 
error compiling committee.c: too many arguments to function


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 1/7] KVM: Add a route layer to convert MSI message to GSI
  2009-01-09 18:06       ` Avi Kivity
  2009-01-10 11:12         ` Sheng Yang
  2009-01-10 12:28         ` Sheng Yang
@ 2009-01-11  9:40         ` Avi Kivity
  2 siblings, 0 replies; 23+ messages in thread
From: Avi Kivity @ 2009-01-11  9:40 UTC (permalink / raw)
  To: Sheng Yang; +Cc: Marcelo Tosatti, kvm

Avi Kivity wrote:
>
> 1. add/remove ioctls
>
> The advantage is that very little work needs to be done when something 
> changes, but the code size (and bug count) doubles.
>

One disadvantage of add/remove is that we cannot effect a change 
atomically.  Probably not a big deal, but something to keep in mind.

-- 
error compiling committee.c: too many arguments to function


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 1/7] KVM: Add a route layer to convert MSI message to GSI
  2009-01-11  9:38           ` Avi Kivity
@ 2009-01-12  6:53             ` Sheng Yang
  0 siblings, 0 replies; 23+ messages in thread
From: Sheng Yang @ 2009-01-12  6:53 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, kvm

On Sunday 11 January 2009 17:38:22 Avi Kivity wrote:
> Sheng Yang wrote:
> > After reconsidering, I must say I prefer add/remove ioctls.
> >
> > About the code size, I don't think it would increase much. I've rewritten
> > the code twice, so I think I can say the difference is small.
> >
> :( sorry about that.
>
> > For the option 2 route table ioctl, we get an array from userspace and
> > convert it to a linked list that we keep in the kernel. That is pretty
> > much a must (I don't think you would prefer keeping an array in the
> > kernel), and it's very direct.
>
> Actually, eventually we'd want an array indexed by gsi.  Each element
> would be a pointer to another array (one or two routing entries).
>
> Certainly we don't want to iterate a list which could hold several
> hundred interrupts for a large guest.
>
> It's okay to start with a linked list, but eventually we'll want
> something faster.

Oh, I see. What I meant here is that allocating/deallocating individual
entries could cause some memory fragmentation, but that doesn't seem
critical here.
>
> > So we have to insert/delete route entries for both. What is the real
> > difference between doing it one by one and doing it all at once? I don't
> > think the code size differs much. And option 1 is certainly clear and
> > direct.
> >
> > Besides, option 2 seems strange. Why should we handle the table that way
> > when it won't result in a significant code reduction? Inserting/deleting
> > entries is still there, looking up entries is still there, so not much
> > changes. And it's not as direct as option 1, which can itself be a
> > source of bugs.
> >
> > What do you think?
>
> I'm not convinced.  Please post your latest, and I will post a
> counter-proposal.

OK.

-- 
regards
Yang, Sheng


^ permalink raw reply	[flat|nested] 23+ messages in thread

* [PATCH 7/7] KVM: Unified the delivery of IOAPIC and MSI
  2009-01-13  9:58 [PATCH 0/7][v6] GSI route layer for MSI/MSI-X Sheng Yang
@ 2009-01-13  9:58 ` Sheng Yang
  0 siblings, 0 replies; 23+ messages in thread
From: Sheng Yang @ 2009-01-13  9:58 UTC (permalink / raw)
  To: Avi Kivity, Marcelo Tosatti; +Cc: kvm, Sheng Yang


Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 include/linux/kvm_host.h |    3 ++
 virt/kvm/ioapic.c        |   84 ++++++++++++++++----------------------------
 virt/kvm/irq_comm.c      |   87 ++++++++++++++++++++++++++++------------------
 3 files changed, 87 insertions(+), 87 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 440409f..b0d5a95 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -350,6 +350,9 @@ struct kvm_gsi_route_kernel_entry {
 	struct hlist_node link;
 };
 
+void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
+				   union kvm_ioapic_redirect_entry *entry,
+				   u32 *deliver_bitmask);
 void kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 gsi, int level);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index b6530e9..951df12 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -200,75 +200,53 @@ u32 kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
 
 static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 {
-	u8 dest = ioapic->redirtbl[irq].fields.dest_id;
-	u8 dest_mode = ioapic->redirtbl[irq].fields.dest_mode;
-	u8 delivery_mode = ioapic->redirtbl[irq].fields.delivery_mode;
-	u8 vector = ioapic->redirtbl[irq].fields.vector;
-	u8 trig_mode = ioapic->redirtbl[irq].fields.trig_mode;
+	union kvm_ioapic_redirect_entry entry = ioapic->redirtbl[irq];
 	u32 deliver_bitmask;
 	struct kvm_vcpu *vcpu;
 	int vcpu_id, r = 0;
 
 	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
 		     "vector=%x trig_mode=%x\n",
-		     dest, dest_mode, delivery_mode, vector, trig_mode);
+		     entry.fields.dest, entry.fields.dest_mode,
+		     entry.fields.delivery_mode, entry.fields.vector,
+		     entry.fields.trig_mode);
 
-	deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic, dest,
-							  dest_mode);
+	kvm_get_intr_delivery_bitmask(ioapic, &entry, &deliver_bitmask);
 	if (!deliver_bitmask) {
 		ioapic_debug("no target on destination\n");
 		return 0;
 	}
 
-	switch (delivery_mode) {
-	case IOAPIC_LOWEST_PRIORITY:
-		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
-				deliver_bitmask);
+	/* Always deliver the PIT interrupt to vcpu 0 */
 #ifdef CONFIG_X86
-		if (irq == 0)
-			vcpu = ioapic->kvm->vcpus[0];
+	if (irq == 0)
+		deliver_bitmask = 1 << 0;
 #endif
-		if (vcpu != NULL)
-			r = ioapic_inj_irq(ioapic, vcpu, vector,
-				       trig_mode, delivery_mode);
-		else
-			ioapic_debug("null lowest prio vcpu: "
-				     "mask=%x vector=%x delivery_mode=%x\n",
-				     deliver_bitmask, vector, IOAPIC_LOWEST_PRIORITY);
-		break;
-	case IOAPIC_FIXED:
-#ifdef CONFIG_X86
-		if (irq == 0)
-			deliver_bitmask = 1;
-#endif
-		for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
-			if (!(deliver_bitmask & (1 << vcpu_id)))
-				continue;
-			deliver_bitmask &= ~(1 << vcpu_id);
-			vcpu = ioapic->kvm->vcpus[vcpu_id];
-			if (vcpu) {
-				r = ioapic_inj_irq(ioapic, vcpu, vector,
-					       trig_mode, delivery_mode);
-			}
-		}
-		break;
-	case IOAPIC_NMI:
-		for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
-			if (!(deliver_bitmask & (1 << vcpu_id)))
-				continue;
-			deliver_bitmask &= ~(1 << vcpu_id);
-			vcpu = ioapic->kvm->vcpus[vcpu_id];
-			if (vcpu)
+
+	for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
+		if (!(deliver_bitmask & (1 << vcpu_id)))
+			continue;
+		deliver_bitmask &= ~(1 << vcpu_id);
+		vcpu = ioapic->kvm->vcpus[vcpu_id];
+		if (vcpu) {
+			if (entry.fields.delivery_mode ==
+					IOAPIC_LOWEST_PRIORITY ||
+			    entry.fields.delivery_mode == IOAPIC_FIXED)
+				r = ioapic_inj_irq(ioapic, vcpu,
+						   entry.fields.vector,
+						   entry.fields.trig_mode,
+						   entry.fields.delivery_mode);
+			else if (entry.fields.delivery_mode == IOAPIC_NMI)
 				ioapic_inj_nmi(vcpu);
 			else
-				ioapic_debug("NMI to vcpu %d failed\n",
-						vcpu->vcpu_id);
-		}
-		break;
-	default:
-		printk(KERN_WARNING "Unsupported delivery mode %d\n",
-		       delivery_mode);
-		break;
+				ioapic_debug("unsupported delivery mode %x!\n",
+					     entry.fields.delivery_mode);
+		} else
+			ioapic_debug("null destination vcpu: "
+				     "mask=%x vector=%x delivery_mode=%x\n",
+				     entry.fields.deliver_bitmask,
+				     entry.fields.vector,
+				     entry.fields.delivery_mode);
 	}
 	return r;
 }
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 9e6c622..6848d03 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -28,13 +28,39 @@
 #include <asm/msidef.h>
 #endif
 
+void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
+				   union kvm_ioapic_redirect_entry *entry,
+				   u32 *deliver_bitmask)
+{
+	struct kvm_vcpu *vcpu;
+
+	*deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
+				entry->fields.dest_id, entry->fields.dest_mode);
+	switch (entry->fields.delivery_mode) {
+	case IOAPIC_LOWEST_PRIORITY:
+		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm,
+				entry->fields.vector, *deliver_bitmask);
+		*deliver_bitmask = 1 << vcpu->vcpu_id;
+		break;
+	case IOAPIC_FIXED:
+	case IOAPIC_NMI:
+		break;
+	default:
+		if (printk_ratelimit())
+			printk(KERN_INFO "kvm: unsupported delivery mode %d\n",
+				entry->fields.delivery_mode);
+		*deliver_bitmask = 0;
+	}
+}
+
+
 static void gsi_dispatch(struct kvm *kvm, u32 gsi)
 {
 	int vcpu_id;
 	struct kvm_vcpu *vcpu;
 	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
 	struct kvm_gsi_route_kernel_entry *gsi_entry;
-	int dest_id, vector, dest_mode, trig_mode, delivery_mode;
+	union kvm_ioapic_redirect_entry entry;
 	u32 deliver_bitmask;
 
 	BUG_ON(!ioapic);
@@ -47,42 +73,35 @@ static void gsi_dispatch(struct kvm *kvm, u32 gsi)
 
 #ifdef CONFIG_X86
 	if (gsi_entry->type == KVM_GSI_ROUTE_TYPE_MSI) {
-		dest_id = (gsi_entry->msi.address_lo & MSI_ADDR_DEST_ID_MASK)
-			>> MSI_ADDR_DEST_ID_SHIFT;
-		vector = (gsi_entry->msi.data & MSI_DATA_VECTOR_MASK)
-			>> MSI_DATA_VECTOR_SHIFT;
-		dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
+		entry.bits = 0;
+		entry.fields.dest_id = (gsi_entry->msi.address_lo &
+			MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
+		entry.fields.vector = (gsi_entry->msi.data &
+			MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
+		entry.fields.dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
 				(unsigned long *)&gsi_entry->msi.address_lo);
-		trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
+		entry.fields.trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
 				(unsigned long *)&gsi_entry->msi.data);
-		delivery_mode = test_bit(MSI_DATA_DELIVERY_MODE_SHIFT,
+		entry.fields.delivery_mode = test_bit(
+				MSI_DATA_DELIVERY_MODE_SHIFT,
 				(unsigned long *)&gsi_entry->msi.data);
-		deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
-				dest_id, dest_mode);
-		/* IOAPIC delivery mode value is the same as MSI here */
-		switch (delivery_mode) {
-		case IOAPIC_LOWEST_PRIORITY:
-			vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
-					deliver_bitmask);
-			if (vcpu != NULL)
-				kvm_apic_set_irq(vcpu, vector, trig_mode);
-			else
-				printk(KERN_INFO
-				       "kvm: null lowest priority vcpu!\n");
-			break;
-		case IOAPIC_FIXED:
-			for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
-				if (!(deliver_bitmask & (1 << vcpu_id)))
-					continue;
-				deliver_bitmask &= ~(1 << vcpu_id);
-				vcpu = ioapic->kvm->vcpus[vcpu_id];
-				if (vcpu)
-					kvm_apic_set_irq(vcpu, vector,
-							trig_mode);
-			}
-			break;
-		default:
-			break;
+		/* TODO Deal with RH bit of MSI message address */
+
+		kvm_get_intr_delivery_bitmask(ioapic, &entry, &deliver_bitmask);
+
+		if (!deliver_bitmask) {
+			printk(KERN_WARNING
+				"kvm: no destination for MSI delivery!");
+			return;
+		}
+		for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
+			if (!(deliver_bitmask & (1 << vcpu_id)))
+				continue;
+			deliver_bitmask &= ~(1 << vcpu_id);
+			vcpu = ioapic->kvm->vcpus[vcpu_id];
+			if (vcpu)
+				kvm_apic_set_irq(vcpu, entry.fields.vector,
+						entry.fields.trig_mode);
 		}
 	}
 #endif /* CONFIG_X86 */
-- 
1.5.4.5


^ permalink raw reply related	[flat|nested] 23+ messages in thread

end of thread, other threads:[~2009-01-13  9:58 UTC | newest]

Thread overview: 23+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2009-01-08 10:45 [PATCH 0/7][v5] GSI route layer for MSI/MSI-X Sheng Yang
2009-01-08 10:45 ` [PATCH 1/7] KVM: Add a route layer to convert MSI message to GSI Sheng Yang
2009-01-08 14:20   ` Marcelo Tosatti
2009-01-09  1:51     ` Sheng Yang
2009-01-08 15:08   ` Avi Kivity
2009-01-09  2:50     ` Sheng Yang
2009-01-09 18:06       ` Avi Kivity
2009-01-10 11:12         ` Sheng Yang
2009-01-11  9:34           ` Avi Kivity
2009-01-10 12:28         ` Sheng Yang
2009-01-11  9:38           ` Avi Kivity
2009-01-12  6:53             ` Sheng Yang
2009-01-11  9:40         ` Avi Kivity
2009-01-08 10:45 ` [PATCH 2/7] KVM: Using gsi route for MSI device assignment Sheng Yang
2009-01-08 15:11   ` Avi Kivity
2009-01-08 10:45 ` [PATCH 3/7] KVM: Improve MSI dispatch function Sheng Yang
2009-01-08 10:45 ` [PATCH 4/7] KVM: Using ioapic_irqchip() macro for kvm_set_irq Sheng Yang
2009-01-08 10:45 ` [PATCH 5/7] KVM: Merge MSI handling to kvm_set_irq Sheng Yang
2009-01-08 10:45 ` [PATCH 6/7] KVM: Split IOAPIC structure Sheng Yang
2009-01-08 15:27   ` Marcelo Tosatti
2009-01-09  5:55     ` Sheng Yang
2009-01-08 10:45 ` [PATCH 7/7] KVM: Unified the delivery of IOAPIC and MSI Sheng Yang
2009-01-13  9:58 [PATCH 0/7][v6] GSI route layer for MSI/MSI-X Sheng Yang
2009-01-13  9:58 ` [PATCH 7/7] KVM: Unified the delivery of IOAPIC and MSI Sheng Yang
