kvm.vger.kernel.org archive mirror
* [PATCH 0/10][v4]GSI route layer for MSI/MSI-X
@ 2009-01-07 10:42 Sheng Yang
  2009-01-07 10:42 ` [PATCH 01/10] KVM: Add a route layer to convert MSI message to GSI Sheng Yang
                   ` (9 more replies)
  0 siblings, 10 replies; 18+ messages in thread
From: Sheng Yang @ 2009-01-07 10:42 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, kvm

Update from v3:
Addressed Avi's comments: improved struct gsi_route_entry and used a pair of
ioctls to handle all of them (including some specific interrupt routing).
Currently only MSI/MSI-X is supported.
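
For illustration, a rough sketch of how userspace might drive the pair of
ioctls introduced in patch 1 (hypothetical snippet, no error handling; vm_fd
is assumed to be an open VM file descriptor and msi_addr_lo/msi_addr_hi/
msi_data stand for the guest-programmed MSI message):

        struct kvm_gsi_route_entry_guest entry = {
                .type = KVM_GSI_ROUTE_MSI,
                .msi = { .addr_lo = msi_addr_lo,
                         .addr_hi = msi_addr_hi,
                         .data = msi_data },
        };
        struct kvm_gsi_route_guest route = {
                .entries_nr = 1,
                .entries = &entry,
        };

        /* The kernel allocates a GSI (with KVM_GSI_ROUTE_MASK set) and
         * writes it back into entry.gsi. */
        ioctl(vm_fd, KVM_REQUEST_GSI_ROUTE, &route);

        /* entry.gsi can now be used wherever a GSI is expected, e.g. as
         * the guest_irq of an assigned device (patch 2). */

        /* Later, tear the mapping down with the very same entry. */
        ioctl(vm_fd, KVM_FREE_GSI_ROUTE, &route);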

* [PATCH 01/10] KVM: Add a route layer to convert MSI message to GSI
  2009-01-07 10:42 [PATCH 0/10][v4]GSI route layer for MSI/MSI-X Sheng Yang
@ 2009-01-07 10:42 ` Sheng Yang
  2009-01-07 16:18   ` Marcelo Tosatti
  2009-01-07 10:42 ` [PATCH 02/10] KVM: Using gsi route for MSI device assignment Sheng Yang
                   ` (8 subsequent siblings)
  9 siblings, 1 reply; 18+ messages in thread
From: Sheng Yang @ 2009-01-07 10:42 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, kvm, Sheng Yang

Per Avi's suggestion, use a single kvm_set_irq() to deal with all interrupts,
including MSI. So here it is.

struct gsi_route_entry is a mapping from a special GSI (one with
KVM_GSI_ROUTE_MASK set) to an MSI/MSI-X message address/data pair. The struct
can also be extended for other purposes.

Now we support up to 256 gsi_route_entry mappings; the GSI is allocated by the
kernel, and two ioctls are provided to userspace, which is more flexible.

Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 include/linux/kvm.h      |   26 +++++++++++
 include/linux/kvm_host.h |   20 +++++++++
 virt/kvm/irq_comm.c      |   70 ++++++++++++++++++++++++++++++
 virt/kvm/kvm_main.c      |  106 ++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 222 insertions(+), 0 deletions(-)

diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 71c150f..bbefce6 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -399,6 +399,9 @@ struct kvm_trace_rec {
 #if defined(CONFIG_X86)
 #define KVM_CAP_REINJECT_CONTROL 24
 #endif
+#if defined(CONFIG_X86)
+#define KVM_CAP_GSI_ROUTE 25
+#endif
 
 /*
  * ioctls for VM fds
@@ -433,6 +436,8 @@ struct kvm_trace_rec {
 #define KVM_ASSIGN_IRQ _IOR(KVMIO, 0x70, \
 			    struct kvm_assigned_irq)
 #define KVM_REINJECT_CONTROL      _IO(KVMIO, 0x71)
+#define KVM_REQUEST_GSI_ROUTE	  _IOWR(KVMIO, 0x72, void *)
+#define KVM_FREE_GSI_ROUTE	  _IOR(KVMIO, 0x73, void *)
 
 /*
  * ioctls for vcpu fds
@@ -553,4 +558,25 @@ struct kvm_assigned_irq {
 #define KVM_DEV_IRQ_ASSIGN_MSI_ACTION	KVM_DEV_IRQ_ASSIGN_ENABLE_MSI
 #define KVM_DEV_IRQ_ASSIGN_ENABLE_MSI	(1 << 0)
 
+struct kvm_gsi_route_guest {
+	__u32 entries_nr;
+	struct kvm_gsi_route_entry_guest *entries;
+};
+
+#define KVM_GSI_ROUTE_MSI	(1 << 0)
+struct kvm_gsi_route_entry_guest {
+	__u32 gsi;
+	__u32 type;
+	__u32 flags;
+	__u32 reserved;
+	union {
+		struct {
+			__u32 addr_lo;
+			__u32 addr_hi;
+			__u32 data;
+		} msi;
+		__u32 padding[8];
+	};
+};
+
 #endif
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a8bcad0..6a00201 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -136,6 +136,9 @@ struct kvm {
 	unsigned long mmu_notifier_seq;
 	long mmu_notifier_count;
 #endif
+	struct hlist_head gsi_route_list;
+#define KVM_NR_GSI_ROUTE_ENTRIES    256
+	DECLARE_BITMAP(gsi_route_bitmap, KVM_NR_GSI_ROUTE_ENTRIES);
 };
 
 /* The guest did something we don't support. */
@@ -336,6 +339,19 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
 				      struct kvm_irq_mask_notifier *kimn);
 void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);
 
+#define KVM_GSI_ROUTE_MASK    0x1000000ull
+struct kvm_gsi_route_entry {
+	u32 gsi;
+	u32 type;
+	u32 flags;
+	u32 reserved;
+	union {
+		struct msi_msg msi;
+		u32 reserved[8];
+	};
+	struct hlist_node link;
+};
+
 void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
@@ -343,6 +359,10 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
 void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian);
 int kvm_request_irq_source_id(struct kvm *kvm);
 void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
+int kvm_update_gsi_route(struct kvm *kvm, struct kvm_gsi_route_entry *entry);
+struct kvm_gsi_route_entry *kvm_find_gsi_route_entry(struct kvm *kvm, u32 gsi);
+void kvm_free_gsi_route(struct kvm *kvm, struct kvm_gsi_route_entry *entry);
+void kvm_free_gsi_route_list(struct kvm *kvm);
 
 #ifdef CONFIG_DMAR
 int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 5162a41..7460e7f 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -123,3 +123,73 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
 			kimn->func(kimn, mask);
 }
 
+int kvm_update_gsi_route(struct kvm *kvm, struct kvm_gsi_route_entry *entry)
+{
+	struct kvm_gsi_route_entry *found_entry, *new_entry;
+	int r, gsi;
+
+	mutex_lock(&kvm->lock);
+	/* Find whether we need a update or a new entry */
+	found_entry = kvm_find_gsi_route_entry(kvm, entry->gsi);
+	if (found_entry)
+		*found_entry = *entry;
+	else {
+		gsi = find_first_zero_bit(kvm->gsi_route_bitmap,
+					  KVM_NR_GSI_ROUTE_ENTRIES);
+		if (gsi >= KVM_NR_GSI_ROUTE_ENTRIES) {
+			r = -ENOSPC;
+			goto out;
+		}
+		__set_bit(gsi, kvm->gsi_route_bitmap);
+		entry->gsi = gsi | KVM_GSI_ROUTE_MASK;
+		new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
+		if (!new_entry) {
+			r = -ENOMEM;
+			goto out;
+		}
+		*new_entry = *entry;
+		hlist_add_head(&new_entry->link, &kvm->gsi_route_list);
+	}
+	r = 0;
+out:
+	mutex_unlock(&kvm->lock);
+	return r;
+}
+
+/* Call with kvm->lock hold */
+struct kvm_gsi_route_entry *kvm_find_gsi_route_entry(struct kvm *kvm, u32 gsi)
+{
+	struct kvm_gsi_route_entry *entry;
+	struct hlist_node *n;
+
+	if (!(gsi & KVM_GSI_ROUTE_MASK))
+		return NULL;
+	hlist_for_each_entry(entry, n, &kvm->gsi_route_list, link)
+		if (entry->gsi == gsi)
+			goto out;
+	entry = NULL;
+out:
+	return entry;
+}
+
+/* Call with kvm->lock hold */
+void kvm_free_gsi_route(struct kvm *kvm, struct kvm_gsi_route_entry *entry)
+{
+	if (!entry)
+		return;
+	__clear_bit(entry->gsi & ~KVM_GSI_ROUTE_MASK, kvm->gsi_route_bitmap);
+	hlist_del(&entry->link);
+	kfree(entry);
+}
+
+void kvm_free_gsi_route_list(struct kvm *kvm)
+{
+	struct kvm_gsi_route_entry *entry;
+	struct hlist_node *pos, *n;
+
+	mutex_lock(&kvm->lock);
+	hlist_for_each_entry_safe(entry, pos, n, &kvm->gsi_route_list, link)
+		kvm_free_gsi_route(kvm, entry);
+	mutex_unlock(&kvm->lock);
+}
+
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 61688a6..bc1a27b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -839,6 +839,7 @@ static struct kvm *kvm_create_vm(void)
 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
 	kvm_coalesced_mmio_init(kvm);
 #endif
+	INIT_HLIST_HEAD(&kvm->gsi_route_list);
 out:
 	return kvm;
 }
@@ -877,6 +878,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
 	struct mm_struct *mm = kvm->mm;
 
 	kvm_arch_sync_events(kvm);
+	kvm_free_gsi_route_list(kvm);
 	spin_lock(&kvm_lock);
 	list_del(&kvm->vm_list);
 	spin_unlock(&kvm_lock);
@@ -1605,6 +1607,47 @@ static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
 	return 0;
 }
 
+static int kvm_vm_ioctl_request_gsi_route(struct kvm *kvm,
+			struct kvm_gsi_route_guest *gsi_route,
+			struct kvm_gsi_route_entry_guest *guest_entries)
+{
+	struct kvm_gsi_route_entry entry;
+	int r, i;
+
+	for (i = 0; i < gsi_route->entries_nr; i++) {
+		memcpy(&entry, &guest_entries[i], sizeof(entry));
+		r = kvm_update_gsi_route(kvm, &entry);
+		if (r == 0)
+			guest_entries[i].gsi = entry.gsi;
+		else
+			break;
+	}
+	return r;
+}
+
+static int kvm_vm_ioctl_free_gsi_route(struct kvm *kvm,
+			struct kvm_gsi_route_guest *gsi_route,
+			struct kvm_gsi_route_entry_guest *guest_entries)
+{
+	struct kvm_gsi_route_entry *entry;
+	int r, i;
+
+	mutex_lock(&kvm->lock);
+	for (i = 0; i < gsi_route->entries_nr; i++) {
+		entry = kvm_find_gsi_route_entry(kvm, guest_entries[i].gsi);
+		if (!entry ||
+		    memcmp(entry, &guest_entries[i], sizeof(*entry)) != 0) {
+			printk(KERN_WARNING "kvm: illegal gsi mapping!");
+			r = -EINVAL;
+			goto out;
+		}
+		kvm_free_gsi_route(kvm, entry);
+	}
+out:
+	mutex_unlock(&kvm->lock);
+	return r;
+}
+
 static long kvm_vcpu_ioctl(struct file *filp,
 			   unsigned int ioctl, unsigned long arg)
 {
@@ -1803,6 +1846,7 @@ static long kvm_vm_ioctl(struct file *filp,
 {
 	struct kvm *kvm = filp->private_data;
 	void __user *argp = (void __user *)arg;
+	struct kvm_gsi_route_entry_guest *gsi_entries = NULL;
 	int r;
 
 	if (kvm->mm != current->mm)
@@ -1887,10 +1931,72 @@ static long kvm_vm_ioctl(struct file *filp,
 		break;
 	}
 #endif
+	case KVM_REQUEST_GSI_ROUTE: {
+		struct kvm_gsi_route_guest gsi_route;
+		r = copy_from_user(&gsi_route, argp, sizeof gsi_route);
+		if (r)
+			goto out;
+		if (gsi_route.entries_nr == 0) {
+			r = -EFAULT;
+			goto out;
+		}
+		gsi_entries = kmalloc(gsi_route.entries_nr *
+				      sizeof(struct kvm_gsi_route_entry_guest),
+				      GFP_KERNEL);
+		if (!gsi_entries) {
+			r = -ENOMEM;
+			goto out;
+		}
+		r = copy_from_user(gsi_entries,
+				   (void __user *)gsi_route.entries,
+				   gsi_route.entries_nr *
+				   sizeof(struct kvm_gsi_route_entry_guest));
+		if (r)
+			goto out;
+		r = kvm_vm_ioctl_request_gsi_route(kvm, &gsi_route,
+						   gsi_entries);
+		if (r)
+			goto out;
+		r = copy_to_user((void __user *)gsi_route.entries,
+				gsi_entries,
+				gsi_route.entries_nr *
+				sizeof(struct kvm_gsi_route_entry_guest));
+		if (r)
+			goto out;
+		break;
+	}
+	case KVM_FREE_GSI_ROUTE: {
+		struct kvm_gsi_route_guest gsi_route;
+		r = copy_from_user(&gsi_route, argp, sizeof gsi_route);
+		if (r)
+			goto out;
+		if (gsi_route.entries_nr == 0) {
+			r = -EFAULT;
+			goto out;
+		}
+		gsi_entries = kmalloc(gsi_route.entries_nr *
+				      sizeof(struct kvm_gsi_route_entry_guest),
+				      GFP_KERNEL);
+		if (!gsi_entries) {
+			r = -ENOMEM;
+			goto out;
+		}
+		r = copy_from_user(gsi_entries,
+				   (void __user *)gsi_route.entries,
+				   gsi_route.entries_nr *
+				   sizeof(struct kvm_gsi_route_entry_guest));
+		if (r)
+			goto out;
+		r = kvm_vm_ioctl_free_gsi_route(kvm, &gsi_route, gsi_entries);
+		if (r)
+			goto out;
+		break;
+	}
 	default:
 		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
 	}
 out:
+	kfree(gsi_entries);
 	return r;
 }
 
-- 
1.5.4.5


* [PATCH 02/10] KVM: Using gsi route for MSI device assignment
  2009-01-07 10:42 [PATCH 0/10][v4]GSI route layer for MSI/MSI-X Sheng Yang
  2009-01-07 10:42 ` [PATCH 01/10] KVM: Add a route layer to convert MSI message to GSI Sheng Yang
@ 2009-01-07 10:42 ` Sheng Yang
  2009-01-07 10:42 ` [PATCH 03/10] KVM: Improve MSI dispatch function Sheng Yang
                   ` (7 subsequent siblings)
  9 siblings, 0 replies; 18+ messages in thread
From: Sheng Yang @ 2009-01-07 10:42 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, kvm, Sheng Yang

Convert the MSI userspace interface to support the gsi_msg mapping (nobody
should be using the old interface anyway...).
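
For reference, a sketch (not part of the patch) of how a guest-programmed MSI
message decodes on x86, using the asm/msidef.h names the code below relies on;
the example values are made up:

        /* address_lo = 0xfee01000: destination ID 0x01 (bits 19:12),
         * destination mode bit (bit 2) clear -> physical destination.
         * data = 0x0031: vector 0x31 (bits 7:0), delivery mode "fixed"
         * (bits 10:8 == 0), edge trigger (bit 15 clear). */
        dest_id = (msi.address_lo & MSI_ADDR_DEST_ID_MASK)
                        >> MSI_ADDR_DEST_ID_SHIFT;      /* 0x01 */
        vector = (msi.data & MSI_DATA_VECTOR_MASK)
                        >> MSI_DATA_VECTOR_SHIFT;       /* 0x31 */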

Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 include/linux/kvm_host.h |    1 -
 virt/kvm/kvm_main.c      |   79 ++++++++++++++++++++++++++--------------------
 2 files changed, 45 insertions(+), 35 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6a00201..eab9588 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -316,7 +316,6 @@ struct kvm_assigned_dev_kernel {
 	int host_irq;
 	bool host_irq_disabled;
 	int guest_irq;
-	struct msi_msg guest_msi;
 #define KVM_ASSIGNED_DEV_GUEST_INTX	(1 << 0)
 #define KVM_ASSIGNED_DEV_GUEST_MSI	(1 << 1)
 #define KVM_ASSIGNED_DEV_HOST_INTX	(1 << 8)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index bc1a27b..0a59245 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -92,44 +92,56 @@ static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev)
 	int vcpu_id;
 	struct kvm_vcpu *vcpu;
 	struct kvm_ioapic *ioapic = ioapic_irqchip(dev->kvm);
-	int dest_id = (dev->guest_msi.address_lo & MSI_ADDR_DEST_ID_MASK)
-			>> MSI_ADDR_DEST_ID_SHIFT;
-	int vector = (dev->guest_msi.data & MSI_DATA_VECTOR_MASK)
-			>> MSI_DATA_VECTOR_SHIFT;
-	int dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
-				(unsigned long *)&dev->guest_msi.address_lo);
-	int trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
-				(unsigned long *)&dev->guest_msi.data);
-	int delivery_mode = test_bit(MSI_DATA_DELIVERY_MODE_SHIFT,
-				(unsigned long *)&dev->guest_msi.data);
+	struct kvm_gsi_route_entry *gsi_entry;
+	int dest_id, vector, dest_mode, trig_mode, delivery_mode;
 	u32 deliver_bitmask;
 
 	BUG_ON(!ioapic);
 
-	deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
+	gsi_entry = kvm_find_gsi_route_entry(dev->kvm, dev->guest_irq);
+	if (!gsi_entry) {
+		printk(KERN_WARNING "kvm: fail to find correlated gsi entry\n");
+		return;
+	}
+
+	if (gsi_entry->type & KVM_GSI_ROUTE_MSI) {
+		dest_id = (gsi_entry->msi.address_lo & MSI_ADDR_DEST_ID_MASK)
+			>> MSI_ADDR_DEST_ID_SHIFT;
+		vector = (gsi_entry->msi.data & MSI_DATA_VECTOR_MASK)
+			>> MSI_DATA_VECTOR_SHIFT;
+		dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
+				(unsigned long *)&gsi_entry->msi.address_lo);
+		trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
+				(unsigned long *)&gsi_entry->msi.data);
+		delivery_mode = test_bit(MSI_DATA_DELIVERY_MODE_SHIFT,
+				(unsigned long *)&gsi_entry->msi.data);
+		deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
 				dest_id, dest_mode);
-	/* IOAPIC delivery mode value is the same as MSI here */
-	switch (delivery_mode) {
-	case IOAPIC_LOWEST_PRIORITY:
-		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
-				deliver_bitmask);
-		if (vcpu != NULL)
-			kvm_apic_set_irq(vcpu, vector, trig_mode);
-		else
-			printk(KERN_INFO "kvm: null lowest priority vcpu!\n");
-		break;
-	case IOAPIC_FIXED:
-		for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
-			if (!(deliver_bitmask & (1 << vcpu_id)))
-				continue;
-			deliver_bitmask &= ~(1 << vcpu_id);
-			vcpu = ioapic->kvm->vcpus[vcpu_id];
-			if (vcpu)
+		/* IOAPIC delivery mode value is the same as MSI here */
+		switch (delivery_mode) {
+		case IOAPIC_LOWEST_PRIORITY:
+			vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
+					deliver_bitmask);
+			if (vcpu != NULL)
 				kvm_apic_set_irq(vcpu, vector, trig_mode);
+			else
+				printk(KERN_INFO
+				       "kvm: null lowest priority vcpu!\n");
+			break;
+		case IOAPIC_FIXED:
+			for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
+				if (!(deliver_bitmask & (1 << vcpu_id)))
+					continue;
+				deliver_bitmask &= ~(1 << vcpu_id);
+				vcpu = ioapic->kvm->vcpus[vcpu_id];
+				if (vcpu)
+					kvm_apic_set_irq(vcpu, vector,
+							trig_mode);
+			}
+			break;
+		default:
+			break;
 		}
-		break;
-	default:
-		printk(KERN_INFO "kvm: unsupported MSI delivery mode\n");
 	}
 }
 #else
@@ -331,17 +343,16 @@ static int assigned_device_update_msi(struct kvm *kvm,
 {
 	int r;
 
+	adev->guest_irq = airq->guest_irq;
+
 	if (airq->flags & KVM_DEV_IRQ_ASSIGN_ENABLE_MSI) {
 		/* x86 don't care upper address of guest msi message addr */
 		adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_MSI;
 		adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_INTX;
-		adev->guest_msi.address_lo = airq->guest_msi.addr_lo;
-		adev->guest_msi.data = airq->guest_msi.data;
 		adev->ack_notifier.gsi = -1;
 	} else if (msi2intx) {
 		adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_INTX;
 		adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_MSI;
-		adev->guest_irq = airq->guest_irq;
 		adev->ack_notifier.gsi = airq->guest_irq;
 	} else {
 		/*
-- 
1.5.4.5


* [PATCH 03/10] KVM: Improve MSI dispatch function
  2009-01-07 10:42 [PATCH 0/10][v4]GSI route layer for MSI/MSI-X Sheng Yang
  2009-01-07 10:42 ` [PATCH 01/10] KVM: Add a route layer to convert MSI message to GSI Sheng Yang
  2009-01-07 10:42 ` [PATCH 02/10] KVM: Using gsi route for MSI device assignment Sheng Yang
@ 2009-01-07 10:42 ` Sheng Yang
  2009-01-07 10:42 ` [PATCH 04/10] KVM: Using ioapic_irqchip() macro for kvm_set_irq Sheng Yang
                   ` (6 subsequent siblings)
  9 siblings, 0 replies; 18+ messages in thread
From: Sheng Yang @ 2009-01-07 10:42 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, kvm, Sheng Yang

Prepare to merge with kvm_set_irq().

Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 virt/kvm/kvm_main.c |    8 ++++----
 1 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 0a59245..717e1b0 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -87,7 +87,7 @@ static bool kvm_rebooting;
 #ifdef KVM_CAP_DEVICE_ASSIGNMENT
 
 #ifdef CONFIG_X86
-static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev)
+static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev, u32 gsi)
 {
 	int vcpu_id;
 	struct kvm_vcpu *vcpu;
@@ -98,7 +98,7 @@ static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev)
 
 	BUG_ON(!ioapic);
 
-	gsi_entry = kvm_find_gsi_route_entry(dev->kvm, dev->guest_irq);
+	gsi_entry = kvm_find_gsi_route_entry(dev->kvm, gsi);
 	if (!gsi_entry) {
 		printk(KERN_WARNING "kvm: fail to find correlated gsi entry\n");
 		return;
@@ -145,7 +145,7 @@ static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev)
 	}
 }
 #else
-static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev) {}
+static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev, u32 gsi) {}
 #endif
 
 static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
@@ -180,7 +180,7 @@ static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
 			    assigned_dev->guest_irq, 1);
 	else if (assigned_dev->irq_requested_type &
 				KVM_ASSIGNED_DEV_GUEST_MSI) {
-		assigned_device_msi_dispatch(assigned_dev);
+		assigned_device_msi_dispatch(assigned_dev, assigned_dev->guest_irq);
 		enable_irq(assigned_dev->host_irq);
 		assigned_dev->host_irq_disabled = false;
 	}
-- 
1.5.4.5


* [PATCH 04/10] KVM: Using ioapic_irqchip() macro for kvm_set_irq
  2009-01-07 10:42 [PATCH 0/10][v4]GSI route layer for MSI/MSI-X Sheng Yang
                   ` (2 preceding siblings ...)
  2009-01-07 10:42 ` [PATCH 03/10] KVM: Improve MSI dispatch function Sheng Yang
@ 2009-01-07 10:42 ` Sheng Yang
  2009-01-07 10:42 ` [PATCH 05/10] KVM: Merge MSI handling to kvm_set_irq Sheng Yang
                   ` (5 subsequent siblings)
  9 siblings, 0 replies; 18+ messages in thread
From: Sheng Yang @ 2009-01-07 10:42 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, kvm, Sheng Yang


Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 virt/kvm/irq_comm.c |    2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 7460e7f..f5e2d2c 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -39,7 +39,7 @@ void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level)
 	 * IOAPIC.  So set the bit in both. The guest will ignore
 	 * writes to the unused one.
 	 */
-	kvm_ioapic_set_irq(kvm->arch.vioapic, irq, !!(*irq_state));
+	kvm_ioapic_set_irq(ioapic_irqchip(kvm), irq, !!(*irq_state));
 #ifdef CONFIG_X86
 	kvm_pic_set_irq(pic_irqchip(kvm), irq, !!(*irq_state));
 #endif
-- 
1.5.4.5


* [PATCH 05/10] KVM: Merge MSI handling to kvm_set_irq
  2009-01-07 10:42 [PATCH 0/10][v4]GSI route layer for MSI/MSI-X Sheng Yang
                   ` (3 preceding siblings ...)
  2009-01-07 10:42 ` [PATCH 04/10] KVM: Using ioapic_irqchip() macro for kvm_set_irq Sheng Yang
@ 2009-01-07 10:42 ` Sheng Yang
  2009-01-07 21:39   ` Marcelo Tosatti
  2009-01-08 13:54   ` Mike Day
  2009-01-07 10:42 ` [PATCH 06/10] KVM: Split IOAPIC structure Sheng Yang
                   ` (4 subsequent siblings)
  9 siblings, 2 replies; 18+ messages in thread
From: Sheng Yang @ 2009-01-07 10:42 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, kvm, Sheng Yang

Use kvm_set_irq() to handle all interrupt injection.
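
With this change the assigned-device interrupt work handler no longer needs to
distinguish INTx from MSI; a sketch of the resulting call pattern, for
illustration only:

        /* guest_irq is either a plain IOAPIC/PIC pin, or a routed GSI
         * (KVM_GSI_ROUTE_MASK set) returned by KVM_REQUEST_GSI_ROUTE. */
        kvm_set_irq(kvm, irq_source_id, guest_irq, 1);
        /* kvm_set_irq() checks KVM_GSI_ROUTE_MASK itself and either calls
         * gsi_dispatch() for MSI or toggles the pin state as before. */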

Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 include/linux/kvm_host.h |    2 +-
 virt/kvm/irq_comm.c      |   79 +++++++++++++++++++++++++++++++++++++++++++--
 virt/kvm/kvm_main.c      |   79 +++-------------------------------------------
 3 files changed, 81 insertions(+), 79 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index eab9588..bfdaab9 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -351,7 +351,7 @@ struct kvm_gsi_route_entry {
 	struct hlist_node link;
 };
 
-void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
+void kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 gsi, int level);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
 				   struct kvm_irq_ack_notifier *kian);
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index f5e2d2c..e9fcd23 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -24,10 +24,81 @@
 
 #include "ioapic.h"
 
+#ifdef CONFIG_X86
+#include <asm/msidef.h>
+#endif
+
+static void gsi_dispatch(struct kvm *kvm, u32 gsi)
+{
+	int vcpu_id;
+	struct kvm_vcpu *vcpu;
+	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
+	struct kvm_gsi_route_entry *gsi_entry;
+	int dest_id, vector, dest_mode, trig_mode, delivery_mode;
+	u32 deliver_bitmask;
+
+	BUG_ON(!ioapic);
+
+	gsi_entry = kvm_find_gsi_route_entry(kvm, gsi);
+	if (!gsi_entry) {
+		printk(KERN_WARNING "kvm: fail to find correlated gsi entry\n");
+		return;
+	}
+
+#ifdef CONFIG_X86
+	if (gsi_entry->type & KVM_GSI_ROUTE_MSI) {
+		dest_id = (gsi_entry->msi.address_lo & MSI_ADDR_DEST_ID_MASK)
+			>> MSI_ADDR_DEST_ID_SHIFT;
+		vector = (gsi_entry->msi.data & MSI_DATA_VECTOR_MASK)
+			>> MSI_DATA_VECTOR_SHIFT;
+		dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
+				(unsigned long *)&gsi_entry->msi.address_lo);
+		trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
+				(unsigned long *)&gsi_entry->msi.data);
+		delivery_mode = test_bit(MSI_DATA_DELIVERY_MODE_SHIFT,
+				(unsigned long *)&gsi_entry->msi.data);
+		deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
+				dest_id, dest_mode);
+		/* IOAPIC delivery mode value is the same as MSI here */
+		switch (delivery_mode) {
+		case IOAPIC_LOWEST_PRIORITY:
+			vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
+					deliver_bitmask);
+			if (vcpu != NULL)
+				kvm_apic_set_irq(vcpu, vector, trig_mode);
+			else
+				printk(KERN_INFO
+				       "kvm: null lowest priority vcpu!\n");
+			break;
+		case IOAPIC_FIXED:
+			for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
+				if (!(deliver_bitmask & (1 << vcpu_id)))
+					continue;
+				deliver_bitmask &= ~(1 << vcpu_id);
+				vcpu = ioapic->kvm->vcpus[vcpu_id];
+				if (vcpu)
+					kvm_apic_set_irq(vcpu, vector,
+							trig_mode);
+			}
+			break;
+		default:
+			break;
+		}
+	}
+#endif /* CONFIG_X86 */
+}
+
 /* This should be called with the kvm->lock mutex held */
-void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level)
+void kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 gsi, int level)
 {
-	unsigned long *irq_state = (unsigned long *)&kvm->arch.irq_states[irq];
+	unsigned long *irq_state;
+
+	if (gsi & KVM_GSI_ROUTE_MASK) {
+		gsi_dispatch(kvm, gsi);
+		return;
+	}
+
+	irq_state = (unsigned long *)&kvm->arch.irq_states[gsi];
 
 	/* Logical OR for level trig interrupt */
 	if (level)
@@ -39,9 +110,9 @@ void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level)
 	 * IOAPIC.  So set the bit in both. The guest will ignore
 	 * writes to the unused one.
 	 */
-	kvm_ioapic_set_irq(ioapic_irqchip(kvm), irq, !!(*irq_state));
+	kvm_ioapic_set_irq(ioapic_irqchip(kvm), gsi, !!(*irq_state));
 #ifdef CONFIG_X86
-	kvm_pic_set_irq(pic_irqchip(kvm), irq, !!(*irq_state));
+	kvm_pic_set_irq(pic_irqchip(kvm), gsi, !!(*irq_state));
 #endif
 }
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 717e1b0..60352cf 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -47,10 +47,6 @@
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 
-#ifdef CONFIG_X86
-#include <asm/msidef.h>
-#endif
-
 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
 #include "coalesced_mmio.h"
 #endif
@@ -85,69 +81,6 @@ static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
 static bool kvm_rebooting;
 
 #ifdef KVM_CAP_DEVICE_ASSIGNMENT
-
-#ifdef CONFIG_X86
-static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev, u32 gsi)
-{
-	int vcpu_id;
-	struct kvm_vcpu *vcpu;
-	struct kvm_ioapic *ioapic = ioapic_irqchip(dev->kvm);
-	struct kvm_gsi_route_entry *gsi_entry;
-	int dest_id, vector, dest_mode, trig_mode, delivery_mode;
-	u32 deliver_bitmask;
-
-	BUG_ON(!ioapic);
-
-	gsi_entry = kvm_find_gsi_route_entry(dev->kvm, gsi);
-	if (!gsi_entry) {
-		printk(KERN_WARNING "kvm: fail to find correlated gsi entry\n");
-		return;
-	}
-
-	if (gsi_entry->type & KVM_GSI_ROUTE_MSI) {
-		dest_id = (gsi_entry->msi.address_lo & MSI_ADDR_DEST_ID_MASK)
-			>> MSI_ADDR_DEST_ID_SHIFT;
-		vector = (gsi_entry->msi.data & MSI_DATA_VECTOR_MASK)
-			>> MSI_DATA_VECTOR_SHIFT;
-		dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
-				(unsigned long *)&gsi_entry->msi.address_lo);
-		trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
-				(unsigned long *)&gsi_entry->msi.data);
-		delivery_mode = test_bit(MSI_DATA_DELIVERY_MODE_SHIFT,
-				(unsigned long *)&gsi_entry->msi.data);
-		deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
-				dest_id, dest_mode);
-		/* IOAPIC delivery mode value is the same as MSI here */
-		switch (delivery_mode) {
-		case IOAPIC_LOWEST_PRIORITY:
-			vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
-					deliver_bitmask);
-			if (vcpu != NULL)
-				kvm_apic_set_irq(vcpu, vector, trig_mode);
-			else
-				printk(KERN_INFO
-				       "kvm: null lowest priority vcpu!\n");
-			break;
-		case IOAPIC_FIXED:
-			for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
-				if (!(deliver_bitmask & (1 << vcpu_id)))
-					continue;
-				deliver_bitmask &= ~(1 << vcpu_id);
-				vcpu = ioapic->kvm->vcpus[vcpu_id];
-				if (vcpu)
-					kvm_apic_set_irq(vcpu, vector,
-							trig_mode);
-			}
-			break;
-		default:
-			break;
-		}
-	}
-}
-#else
-static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev, u32 gsi) {}
-#endif
-
 static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
 						      int assigned_dev_id)
 {
@@ -174,13 +107,11 @@ static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
 	 * finer-grained lock, update this
 	 */
 	mutex_lock(&assigned_dev->kvm->lock);
-	if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_GUEST_INTX)
-		kvm_set_irq(assigned_dev->kvm,
-			    assigned_dev->irq_source_id,
-			    assigned_dev->guest_irq, 1);
-	else if (assigned_dev->irq_requested_type &
-				KVM_ASSIGNED_DEV_GUEST_MSI) {
-		assigned_device_msi_dispatch(assigned_dev, assigned_dev->guest_irq);
+
+	kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
+			assigned_dev->guest_irq, 1);
+
+	if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_GUEST_MSI) {
 		enable_irq(assigned_dev->host_irq);
 		assigned_dev->host_irq_disabled = false;
 	}
-- 
1.5.4.5


* [PATCH 06/10] KVM: Split IOAPIC structure
  2009-01-07 10:42 [PATCH 0/10][v4]GSI route layer for MSI/MSI-X Sheng Yang
                   ` (4 preceding siblings ...)
  2009-01-07 10:42 ` [PATCH 05/10] KVM: Merge MSI handling to kvm_set_irq Sheng Yang
@ 2009-01-07 10:42 ` Sheng Yang
  2009-01-07 10:42 ` [PATCH 07/10] KVM: Unified the delivery of IOAPIC and MSI Sheng Yang
                   ` (3 subsequent siblings)
  9 siblings, 0 replies; 18+ messages in thread
From: Sheng Yang @ 2009-01-07 10:42 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, kvm, Sheng Yang

Prepare for reusing ioapic_redir_entry for MSI.

Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 include/linux/kvm_types.h |   17 +++++++++++++++++
 virt/kvm/ioapic.c         |    6 +++---
 virt/kvm/ioapic.h         |   17 +----------------
 3 files changed, 21 insertions(+), 19 deletions(-)

diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 5f4a18c..46e3d8d 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -52,4 +52,21 @@ struct kvm_pio_request {
 	int rep;
 };
 
+union kvm_ioapic_redirect_entry {
+	u64 bits;
+	struct {
+		u8 vector;
+		u8 delivery_mode:3;
+		u8 dest_mode:1;
+		u8 delivery_status:1;
+		u8 polarity:1;
+		u8 remote_irr:1;
+		u8 trig_mode:1;
+		u8 mask:1;
+		u8 reserve:7;
+		u8 reserved[4];
+		u8 dest_id;
+	} fields;
+};
+
 #endif /* __KVM_TYPES_H__ */
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index e85a2bc..b6530e9 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -85,7 +85,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
 
 static void ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
 {
-	union ioapic_redir_entry *pent;
+	union kvm_ioapic_redirect_entry *pent;
 
 	pent = &ioapic->redirtbl[idx];
 
@@ -277,7 +277,7 @@ void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
 {
 	u32 old_irr = ioapic->irr;
 	u32 mask = 1 << irq;
-	union ioapic_redir_entry entry;
+	union kvm_ioapic_redirect_entry entry;
 
 	if (irq >= 0 && irq < IOAPIC_NUM_PINS) {
 		entry = ioapic->redirtbl[irq];
@@ -296,7 +296,7 @@ void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
 static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int gsi,
 				    int trigger_mode)
 {
-	union ioapic_redir_entry *ent;
+	union kvm_ioapic_redirect_entry *ent;
 
 	ent = &ioapic->redirtbl[gsi];
 
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index 49c9581..ee5b0bd 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -40,22 +40,7 @@ struct kvm_ioapic {
 	u32 id;
 	u32 irr;
 	u32 pad;
-	union ioapic_redir_entry {
-		u64 bits;
-		struct {
-			u8 vector;
-			u8 delivery_mode:3;
-			u8 dest_mode:1;
-			u8 delivery_status:1;
-			u8 polarity:1;
-			u8 remote_irr:1;
-			u8 trig_mode:1;
-			u8 mask:1;
-			u8 reserve:7;
-			u8 reserved[4];
-			u8 dest_id;
-		} fields;
-	} redirtbl[IOAPIC_NUM_PINS];
+	union kvm_ioapic_redirect_entry redirtbl[IOAPIC_NUM_PINS];
 	struct kvm_io_device dev;
 	struct kvm *kvm;
 	void (*ack_notifier)(void *opaque, int irq);
-- 
1.5.4.5


* [PATCH 07/10] KVM: Unified the delivery of IOAPIC and MSI
  2009-01-07 10:42 [PATCH 0/10][v4]GSI route layer for MSI/MSI-X Sheng Yang
                   ` (5 preceding siblings ...)
  2009-01-07 10:42 ` [PATCH 06/10] KVM: Split IOAPIC structure Sheng Yang
@ 2009-01-07 10:42 ` Sheng Yang
  2009-01-07 10:42 ` [PATCH 08/10] KVM: Change API of kvm_ioapic_get_delivery_bitmask Sheng Yang
                   ` (2 subsequent siblings)
  9 siblings, 0 replies; 18+ messages in thread
From: Sheng Yang @ 2009-01-07 10:42 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, kvm, Sheng Yang


Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 include/linux/kvm_host.h |    3 ++
 virt/kvm/ioapic.c        |   84 ++++++++++++++++----------------------------
 virt/kvm/irq_comm.c      |   86 ++++++++++++++++++++++++++++------------------
 3 files changed, 86 insertions(+), 87 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index bfdaab9..2736dbf 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -351,6 +351,9 @@ struct kvm_gsi_route_entry {
 	struct hlist_node link;
 };
 
+void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
+				   union kvm_ioapic_redirect_entry *entry,
+				   u32 *deliver_bitmask);
 void kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 gsi, int level);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index b6530e9..951df12 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -200,75 +200,53 @@ u32 kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
 
 static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 {
-	u8 dest = ioapic->redirtbl[irq].fields.dest_id;
-	u8 dest_mode = ioapic->redirtbl[irq].fields.dest_mode;
-	u8 delivery_mode = ioapic->redirtbl[irq].fields.delivery_mode;
-	u8 vector = ioapic->redirtbl[irq].fields.vector;
-	u8 trig_mode = ioapic->redirtbl[irq].fields.trig_mode;
+	union kvm_ioapic_redirect_entry entry = ioapic->redirtbl[irq];
 	u32 deliver_bitmask;
 	struct kvm_vcpu *vcpu;
 	int vcpu_id, r = 0;
 
 	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
 		     "vector=%x trig_mode=%x\n",
-		     dest, dest_mode, delivery_mode, vector, trig_mode);
+		     entry.fields.dest, entry.fields.dest_mode,
+		     entry.fields.delivery_mode, entry.fields.vector,
+		     entry.fields.trig_mode);
 
-	deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic, dest,
-							  dest_mode);
+	kvm_get_intr_delivery_bitmask(ioapic, &entry, &deliver_bitmask);
 	if (!deliver_bitmask) {
 		ioapic_debug("no target on destination\n");
 		return 0;
 	}
 
-	switch (delivery_mode) {
-	case IOAPIC_LOWEST_PRIORITY:
-		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
-				deliver_bitmask);
+	/* Always delivery PIT interrupt to vcpu 0 */
 #ifdef CONFIG_X86
-		if (irq == 0)
-			vcpu = ioapic->kvm->vcpus[0];
+	if (irq == 0)
+		deliver_bitmask = 1 << 0;
 #endif
-		if (vcpu != NULL)
-			r = ioapic_inj_irq(ioapic, vcpu, vector,
-				       trig_mode, delivery_mode);
-		else
-			ioapic_debug("null lowest prio vcpu: "
-				     "mask=%x vector=%x delivery_mode=%x\n",
-				     deliver_bitmask, vector, IOAPIC_LOWEST_PRIORITY);
-		break;
-	case IOAPIC_FIXED:
-#ifdef CONFIG_X86
-		if (irq == 0)
-			deliver_bitmask = 1;
-#endif
-		for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
-			if (!(deliver_bitmask & (1 << vcpu_id)))
-				continue;
-			deliver_bitmask &= ~(1 << vcpu_id);
-			vcpu = ioapic->kvm->vcpus[vcpu_id];
-			if (vcpu) {
-				r = ioapic_inj_irq(ioapic, vcpu, vector,
-					       trig_mode, delivery_mode);
-			}
-		}
-		break;
-	case IOAPIC_NMI:
-		for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
-			if (!(deliver_bitmask & (1 << vcpu_id)))
-				continue;
-			deliver_bitmask &= ~(1 << vcpu_id);
-			vcpu = ioapic->kvm->vcpus[vcpu_id];
-			if (vcpu)
+
+	for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
+		if (!(deliver_bitmask & (1 << vcpu_id)))
+			continue;
+		deliver_bitmask &= ~(1 << vcpu_id);
+		vcpu = ioapic->kvm->vcpus[vcpu_id];
+		if (vcpu) {
+			if (entry.fields.delivery_mode ==
+					IOAPIC_LOWEST_PRIORITY ||
+			    entry.fields.delivery_mode == IOAPIC_FIXED)
+				r = ioapic_inj_irq(ioapic, vcpu,
+						   entry.fields.vector,
+						   entry.fields.trig_mode,
+						   entry.fields.delivery_mode);
+			else if (entry.fields.delivery_mode == IOAPIC_NMI)
 				ioapic_inj_nmi(vcpu);
 			else
-				ioapic_debug("NMI to vcpu %d failed\n",
-						vcpu->vcpu_id);
-		}
-		break;
-	default:
-		printk(KERN_WARNING "Unsupported delivery mode %d\n",
-		       delivery_mode);
-		break;
+				ioapic_debug("unsupported delivery mode %x!\n",
+					     entry.fields.delivery_mode);
+		} else
+			ioapic_debug("null destination vcpu: "
+				     "mask=%x vector=%x delivery_mode=%x\n",
+				     entry.fields.deliver_bitmask,
+				     entry.fields.vector,
+				     entry.fields.delivery_mode);
 	}
 	return r;
 }
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index e9fcd23..89b2982 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -28,13 +28,39 @@
 #include <asm/msidef.h>
 #endif
 
+void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
+				   union kvm_ioapic_redirect_entry *entry,
+				   u32 *deliver_bitmask)
+{
+	struct kvm_vcpu *vcpu;
+
+	*deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
+				entry->fields.dest_id, entry->fields.dest_mode);
+	switch (entry->fields.delivery_mode) {
+	case IOAPIC_LOWEST_PRIORITY:
+		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm,
+				entry->fields.vector, *deliver_bitmask);
+		*deliver_bitmask = 1 << vcpu->vcpu_id;
+		break;
+	case IOAPIC_FIXED:
+	case IOAPIC_NMI:
+		break;
+	default:
+		if (printk_ratelimit())
+			printk(KERN_INFO "kvm: unsupported delivery mode %d\n",
+				entry->fields.delivery_mode);
+		*deliver_bitmask = 0;
+	}
+}
+
+
 static void gsi_dispatch(struct kvm *kvm, u32 gsi)
 {
 	int vcpu_id;
 	struct kvm_vcpu *vcpu;
 	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
 	struct kvm_gsi_route_entry *gsi_entry;
-	int dest_id, vector, dest_mode, trig_mode, delivery_mode;
+	union kvm_ioapic_redirect_entry entry;
 	u32 deliver_bitmask;
 
 	BUG_ON(!ioapic);
@@ -47,42 +73,34 @@ static void gsi_dispatch(struct kvm *kvm, u32 gsi)
 
 #ifdef CONFIG_X86
 	if (gsi_entry->type & KVM_GSI_ROUTE_MSI) {
-		dest_id = (gsi_entry->msi.address_lo & MSI_ADDR_DEST_ID_MASK)
-			>> MSI_ADDR_DEST_ID_SHIFT;
-		vector = (gsi_entry->msi.data & MSI_DATA_VECTOR_MASK)
-			>> MSI_DATA_VECTOR_SHIFT;
-		dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
+		entry.bits = 0;
+		entry.fields.dest_id = (gsi_entry->msi.address_lo &
+			MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
+		entry.fields.vector = (gsi_entry->msi.data &
+			MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
+		entry.fields.dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
 				(unsigned long *)&gsi_entry->msi.address_lo);
-		trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
+		entry.fields.trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
 				(unsigned long *)&gsi_entry->msi.data);
-		delivery_mode = test_bit(MSI_DATA_DELIVERY_MODE_SHIFT,
+		entry.fields.delivery_mode = test_bit(
+				MSI_DATA_DELIVERY_MODE_SHIFT,
 				(unsigned long *)&gsi_entry->msi.data);
-		deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
-				dest_id, dest_mode);
-		/* IOAPIC delivery mode value is the same as MSI here */
-		switch (delivery_mode) {
-		case IOAPIC_LOWEST_PRIORITY:
-			vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
-					deliver_bitmask);
-			if (vcpu != NULL)
-				kvm_apic_set_irq(vcpu, vector, trig_mode);
-			else
-				printk(KERN_INFO
-				       "kvm: null lowest priority vcpu!\n");
-			break;
-		case IOAPIC_FIXED:
-			for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
-				if (!(deliver_bitmask & (1 << vcpu_id)))
-					continue;
-				deliver_bitmask &= ~(1 << vcpu_id);
-				vcpu = ioapic->kvm->vcpus[vcpu_id];
-				if (vcpu)
-					kvm_apic_set_irq(vcpu, vector,
-							trig_mode);
-			}
-			break;
-		default:
-			break;
+
+		kvm_get_intr_delivery_bitmask(ioapic, &entry, &deliver_bitmask);
+
+		if (!deliver_bitmask) {
+			printk(KERN_WARNING
+				"kvm: no destination for MSI delivery!");
+			return;
+		}
+		for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
+			if (!(deliver_bitmask & (1 << vcpu_id)))
+				continue;
+			deliver_bitmask &= ~(1 << vcpu_id);
+			vcpu = ioapic->kvm->vcpus[vcpu_id];
+			if (vcpu)
+				kvm_apic_set_irq(vcpu, entry.fields.vector,
+						entry.fields.trig_mode);
 		}
 	}
 #endif /* CONFIG_X86 */
-- 
1.5.4.5


* [PATCH 08/10] KVM: Change API of kvm_ioapic_get_delivery_bitmask
  2009-01-07 10:42 [PATCH 0/10][v4]GSI route layer for MSI/MSI-X Sheng Yang
                   ` (6 preceding siblings ...)
  2009-01-07 10:42 ` [PATCH 07/10] KVM: Unified the delivery of IOAPIC and MSI Sheng Yang
@ 2009-01-07 10:42 ` Sheng Yang
  2009-01-07 10:42 ` [PATCH 09/10] KVM: Update intr delivery func to accept unsigned long* bitmap Sheng Yang
  2009-01-07 10:42 ` [PATCH 10/10] KVM: bit ops for deliver_bitmap Sheng Yang
  9 siblings, 0 replies; 18+ messages in thread
From: Sheng Yang @ 2009-01-07 10:42 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, kvm, Sheng Yang

In order to use it with bit ops.

Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 virt/kvm/ioapic.c   |   17 ++++++++---------
 virt/kvm/ioapic.h   |    4 ++--
 virt/kvm/irq_comm.c |    5 +++--
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 951df12..aa4e8d8 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -158,22 +158,22 @@ static void ioapic_inj_nmi(struct kvm_vcpu *vcpu)
 	kvm_vcpu_kick(vcpu);
 }
 
-u32 kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
-				    u8 dest_mode)
+void kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
+				     u8 dest_mode, u32 *mask)
 {
-	u32 mask = 0;
 	int i;
 	struct kvm *kvm = ioapic->kvm;
 	struct kvm_vcpu *vcpu;
 
 	ioapic_debug("dest %d dest_mode %d\n", dest, dest_mode);
 
+	*mask = 0;
 	if (dest_mode == 0) {	/* Physical mode. */
 		if (dest == 0xFF) {	/* Broadcast. */
 			for (i = 0; i < KVM_MAX_VCPUS; ++i)
 				if (kvm->vcpus[i] && kvm->vcpus[i]->arch.apic)
-					mask |= 1 << i;
-			return mask;
+					*mask |= 1 << i;
+			return;
 		}
 		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 			vcpu = kvm->vcpus[i];
@@ -181,7 +181,7 @@ u32 kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
 				continue;
 			if (kvm_apic_match_physical_addr(vcpu->arch.apic, dest)) {
 				if (vcpu->arch.apic)
-					mask = 1 << i;
+					*mask = 1 << i;
 				break;
 			}
 		}
@@ -192,10 +192,9 @@ u32 kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
 				continue;
 			if (vcpu->arch.apic &&
 			    kvm_apic_match_logical_addr(vcpu->arch.apic, dest))
-				mask |= 1 << vcpu->vcpu_id;
+				*mask |= 1 << vcpu->vcpu_id;
 		}
-	ioapic_debug("mask %x\n", mask);
-	return mask;
+	ioapic_debug("mask %x\n", *mask);
 }
 
 static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index ee5b0bd..e107dbb 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -70,7 +70,7 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode);
 int kvm_ioapic_init(struct kvm *kvm);
 void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
 void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
-u32 kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
-				u8 dest_mode);
+void kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
+				     u8 dest_mode, u32 *mask);
 
 #endif
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 89b2982..d97cdd6 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -34,8 +34,9 @@ void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
 {
 	struct kvm_vcpu *vcpu;
 
-	*deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
-				entry->fields.dest_id, entry->fields.dest_mode);
+	kvm_ioapic_get_delivery_bitmask(ioapic, entry->fields.dest_id,
+					entry->fields.dest_mode,
+					deliver_bitmask);
 	switch (entry->fields.delivery_mode) {
 	case IOAPIC_LOWEST_PRIORITY:
 		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm,
-- 
1.5.4.5


* [PATCH 09/10] KVM: Update intr delivery func to accept unsigned long* bitmap
  2009-01-07 10:42 [PATCH 0/10][v4]GSI route layer for MSI/MSI-X Sheng Yang
                   ` (7 preceding siblings ...)
  2009-01-07 10:42 ` [PATCH 08/10] KVM: Change API of kvm_ioapic_get_delivery_bitmask Sheng Yang
@ 2009-01-07 10:42 ` Sheng Yang
  2009-01-08  0:38   ` Marcelo Tosatti
  2009-01-07 10:42 ` [PATCH 10/10] KVM: bit ops for deliver_bitmap Sheng Yang
  9 siblings, 1 reply; 18+ messages in thread
From: Sheng Yang @ 2009-01-07 10:42 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, kvm, Sheng Yang

This would be used with bit ops, and can easily be extended if KVM_MAX_VCPUS
is increased.
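
For example (a sketch, not part of the patch), with the pointer-based API a
caller can pass a bitmap of arbitrary length declared with DECLARE_BITMAP
instead of a single unsigned long:

        DECLARE_BITMAP(deliver_bitmask, KVM_MAX_VCPUS);

        bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
        kvm_get_intr_delivery_bitmask(ioapic, &entry, deliver_bitmask);
        /* The bitmap can hold more than BITS_PER_LONG vcpus and can be
         * walked with find_first_bit()/clear_bit(), as the next patch does. */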

Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 arch/x86/kvm/lapic.c     |    8 ++++----
 include/linux/kvm_host.h |    2 +-
 virt/kvm/ioapic.c        |    4 ++--
 virt/kvm/ioapic.h        |    4 ++--
 virt/kvm/irq_comm.c      |    6 +++---
 5 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index afac68c..c1e4935 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -403,7 +403,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 }
 
 static struct kvm_lapic *kvm_apic_round_robin(struct kvm *kvm, u8 vector,
-				       unsigned long bitmap)
+				       unsigned long *bitmap)
 {
 	int last;
 	int next;
@@ -415,7 +415,7 @@ static struct kvm_lapic *kvm_apic_round_robin(struct kvm *kvm, u8 vector,
 	do {
 		if (++next == KVM_MAX_VCPUS)
 			next = 0;
-		if (kvm->vcpus[next] == NULL || !test_bit(next, &bitmap))
+		if (kvm->vcpus[next] == NULL || !test_bit(next, bitmap))
 			continue;
 		apic = kvm->vcpus[next]->arch.apic;
 		if (apic && apic_enabled(apic))
@@ -431,7 +431,7 @@ static struct kvm_lapic *kvm_apic_round_robin(struct kvm *kvm, u8 vector,
 }
 
 struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
-		unsigned long bitmap)
+		unsigned long *bitmap)
 {
 	struct kvm_lapic *apic;
 
@@ -502,7 +502,7 @@ static void apic_send_ipi(struct kvm_lapic *apic)
 	}
 
 	if (delivery_mode == APIC_DM_LOWEST) {
-		target = kvm_get_lowest_prio_vcpu(vcpu->kvm, vector, lpr_map);
+		target = kvm_get_lowest_prio_vcpu(vcpu->kvm, vector, &lpr_map);
 		if (target != NULL)
 			__apic_accept_irq(target->arch.apic, delivery_mode,
 					  vector, level, trig_mode);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 2736dbf..ed1c6bb 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -353,7 +353,7 @@ struct kvm_gsi_route_entry {
 
 void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
 				   union kvm_ioapic_redirect_entry *entry,
-				   u32 *deliver_bitmask);
+				   unsigned long *deliver_bitmask);
 void kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 gsi, int level);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index aa4e8d8..0dcb0da 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -159,7 +159,7 @@ static void ioapic_inj_nmi(struct kvm_vcpu *vcpu)
 }
 
 void kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
-				     u8 dest_mode, u32 *mask)
+				     u8 dest_mode, unsigned long *mask)
 {
 	int i;
 	struct kvm *kvm = ioapic->kvm;
@@ -200,7 +200,7 @@ void kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
 static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 {
 	union kvm_ioapic_redirect_entry entry = ioapic->redirtbl[irq];
-	u32 deliver_bitmask;
+	unsigned long deliver_bitmask;
 	struct kvm_vcpu *vcpu;
 	int vcpu_id, r = 0;
 
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index e107dbb..c418a7f 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -65,12 +65,12 @@ static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
 }
 
 struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
-				       unsigned long bitmap);
+				       unsigned long *bitmap);
 void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode);
 int kvm_ioapic_init(struct kvm *kvm);
 void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
 void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
 void kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
-				     u8 dest_mode, u32 *mask);
+				     u8 dest_mode, unsigned long *mask);
 
 #endif
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index d97cdd6..baee4b7 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -30,7 +30,7 @@
 
 void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
 				   union kvm_ioapic_redirect_entry *entry,
-				   u32 *deliver_bitmask)
+				   unsigned long *deliver_bitmask)
 {
 	struct kvm_vcpu *vcpu;
 
@@ -40,7 +40,7 @@ void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
 	switch (entry->fields.delivery_mode) {
 	case IOAPIC_LOWEST_PRIORITY:
 		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm,
-				entry->fields.vector, *deliver_bitmask);
+				entry->fields.vector, deliver_bitmask);
 		*deliver_bitmask = 1 << vcpu->vcpu_id;
 		break;
 	case IOAPIC_FIXED:
@@ -62,7 +62,7 @@ static void gsi_dispatch(struct kvm *kvm, u32 gsi)
 	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
 	struct kvm_gsi_route_entry *gsi_entry;
 	union kvm_ioapic_redirect_entry entry;
-	u32 deliver_bitmask;
+	unsigned long deliver_bitmask;
 
 	BUG_ON(!ioapic);
 
-- 
1.5.4.5


* [PATCH 10/10] KVM: bit ops for deliver_bitmap
  2009-01-07 10:42 [PATCH 0/10][v4]GSI route layer for MSI/MSI-X Sheng Yang
                   ` (8 preceding siblings ...)
  2009-01-07 10:42 ` [PATCH 09/10] KVM: Update intr delivery func to accept unsigned long* bitmap Sheng Yang
@ 2009-01-07 10:42 ` Sheng Yang
  9 siblings, 0 replies; 18+ messages in thread
From: Sheng Yang @ 2009-01-07 10:42 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, kvm, Sheng Yang

It's also convenient when we extend the number of supported vcpus in the future.

Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 arch/x86/kvm/lapic.c |    7 ++++---
 virt/kvm/ioapic.c    |   24 +++++++++++++-----------
 virt/kvm/irq_comm.c  |   17 +++++++++--------
 3 files changed, 26 insertions(+), 22 deletions(-)

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index c1e4935..359e02c 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -477,9 +477,10 @@ static void apic_send_ipi(struct kvm_lapic *apic)
 
 	struct kvm_vcpu *target;
 	struct kvm_vcpu *vcpu;
-	unsigned long lpr_map = 0;
+	DECLARE_BITMAP(lpr_map, KVM_MAX_VCPUS);
 	int i;
 
+	bitmap_zero(lpr_map, KVM_MAX_VCPUS);
 	apic_debug("icr_high 0x%x, icr_low 0x%x, "
 		   "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
 		   "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x\n",
@@ -494,7 +495,7 @@ static void apic_send_ipi(struct kvm_lapic *apic)
 		if (vcpu->arch.apic &&
 		    apic_match_dest(vcpu, apic, short_hand, dest, dest_mode)) {
 			if (delivery_mode == APIC_DM_LOWEST)
-				set_bit(vcpu->vcpu_id, &lpr_map);
+				set_bit(vcpu->vcpu_id, lpr_map);
 			else
 				__apic_accept_irq(vcpu->arch.apic, delivery_mode,
 						  vector, level, trig_mode);
@@ -502,7 +503,7 @@ static void apic_send_ipi(struct kvm_lapic *apic)
 	}
 
 	if (delivery_mode == APIC_DM_LOWEST) {
-		target = kvm_get_lowest_prio_vcpu(vcpu->kvm, vector, &lpr_map);
+		target = kvm_get_lowest_prio_vcpu(vcpu->kvm, vector, lpr_map);
 		if (target != NULL)
 			__apic_accept_irq(target->arch.apic, delivery_mode,
 					  vector, level, trig_mode);
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 0dcb0da..162cbdd 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -200,7 +200,7 @@ void kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
 static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 {
 	union kvm_ioapic_redirect_entry entry = ioapic->redirtbl[irq];
-	unsigned long deliver_bitmask;
+	DECLARE_BITMAP(deliver_bitmask, KVM_MAX_VCPUS);
 	struct kvm_vcpu *vcpu;
 	int vcpu_id, r = 0;
 
@@ -210,22 +210,24 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 		     entry.fields.delivery_mode, entry.fields.vector,
 		     entry.fields.trig_mode);
 
-	kvm_get_intr_delivery_bitmask(ioapic, &entry, &deliver_bitmask);
-	if (!deliver_bitmask) {
-		ioapic_debug("no target on destination\n");
-		return 0;
-	}
+	bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
 
 	/* Always delivery PIT interrupt to vcpu 0 */
 #ifdef CONFIG_X86
 	if (irq == 0)
-		deliver_bitmask = 1 << 0;
+		set_bit(0, deliver_bitmask);
+	else
 #endif
+		kvm_get_intr_delivery_bitmask(ioapic, &entry, deliver_bitmask);
+
+	if (find_first_bit(deliver_bitmask, KVM_MAX_VCPUS) >= KVM_MAX_VCPUS) {
+		ioapic_debug("no target on destination\n");
+		return 0;
+	}
 
-	for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
-		if (!(deliver_bitmask & (1 << vcpu_id)))
-			continue;
-		deliver_bitmask &= ~(1 << vcpu_id);
+	while ((vcpu_id = find_first_bit(deliver_bitmask, KVM_MAX_VCPUS))
+			< KVM_MAX_VCPUS) {
+		clear_bit(vcpu_id, deliver_bitmask);
 		vcpu = ioapic->kvm->vcpus[vcpu_id];
 		if (vcpu) {
 			if (entry.fields.delivery_mode ==
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index baee4b7..bce3cd5 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -41,7 +41,7 @@ void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
 	case IOAPIC_LOWEST_PRIORITY:
 		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm,
 				entry->fields.vector, deliver_bitmask);
-		*deliver_bitmask = 1 << vcpu->vcpu_id;
+		set_bit(vcpu->vcpu_id, deliver_bitmask);
 		break;
 	case IOAPIC_FIXED:
 	case IOAPIC_NMI:
@@ -62,7 +62,7 @@ static void gsi_dispatch(struct kvm *kvm, u32 gsi)
 	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
 	struct kvm_gsi_route_entry *gsi_entry;
 	union kvm_ioapic_redirect_entry entry;
-	unsigned long deliver_bitmask;
+	DECLARE_BITMAP(deliver_bitmask, KVM_MAX_VCPUS);
 
 	BUG_ON(!ioapic);
 
@@ -72,6 +72,7 @@ static void gsi_dispatch(struct kvm *kvm, u32 gsi)
 		return;
 	}
 
+	bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
 #ifdef CONFIG_X86
 	if (gsi_entry->type & KVM_GSI_ROUTE_MSI) {
 		entry.bits = 0;
@@ -87,17 +88,17 @@ static void gsi_dispatch(struct kvm *kvm, u32 gsi)
 				MSI_DATA_DELIVERY_MODE_SHIFT,
 				(unsigned long *)&gsi_entry->msi.data);
 
-		kvm_get_intr_delivery_bitmask(ioapic, &entry, &deliver_bitmask);
+		kvm_get_intr_delivery_bitmask(ioapic, &entry, deliver_bitmask);
 
-		if (!deliver_bitmask) {
+		if (find_first_bit(deliver_bitmask, KVM_MAX_VCPUS)
+				>= KVM_MAX_VCPUS) {
 			printk(KERN_WARNING
 				"kvm: no destination for MSI delivery!");
 			return;
 		}
-		for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
-			if (!(deliver_bitmask & (1 << vcpu_id)))
-				continue;
-			deliver_bitmask &= ~(1 << vcpu_id);
+		while ((vcpu_id = find_first_bit(deliver_bitmask,
+					KVM_MAX_VCPUS)) < KVM_MAX_VCPUS) {
+			clear_bit(vcpu_id, deliver_bitmask);
 			vcpu = ioapic->kvm->vcpus[vcpu_id];
 			if (vcpu)
 				kvm_apic_set_irq(vcpu, entry.fields.vector,
-- 
1.5.4.5


* Re: [PATCH 01/10] KVM: Add a route layer to convert MSI message to GSI
  2009-01-07 10:42 ` [PATCH 01/10] KVM: Add a route layer to convert MSI message to GSI Sheng Yang
@ 2009-01-07 16:18   ` Marcelo Tosatti
  2009-01-08  7:30     ` Sheng Yang
  0 siblings, 1 reply; 18+ messages in thread
From: Marcelo Tosatti @ 2009-01-07 16:18 UTC (permalink / raw)
  To: Sheng Yang; +Cc: Avi Kivity, kvm

Hi Sheng,

On Wed, Jan 07, 2009 at 06:42:37PM +0800, Sheng Yang wrote:
> Per Avi's suggestion, use a single kvm_set_irq() to deal with all interrupts,
> including MSI. So here it is.
> 
> struct gsi_route_entry is a mapping from a special GSI (one with
> KVM_GSI_ROUTE_MASK set) to an MSI/MSI-X message address/data pair. The struct
> can also be extended for other purposes.
> 
> Now we support up to 256 gsi_route_entry mappings; the GSI is allocated by the
> kernel, and two ioctls are provided to userspace, which is more flexible.
> 
> Signed-off-by: Sheng Yang <sheng@linux.intel.com>
> ---
>  include/linux/kvm.h      |   26 +++++++++++
>  include/linux/kvm_host.h |   20 +++++++++
>  virt/kvm/irq_comm.c      |   70 ++++++++++++++++++++++++++++++
>  virt/kvm/kvm_main.c      |  106 ++++++++++++++++++++++++++++++++++++++++++++++
>  4 files changed, 222 insertions(+), 0 deletions(-)
> 
> diff --git a/include/linux/kvm.h b/include/linux/kvm.h
> index 71c150f..bbefce6 100644
> --- a/include/linux/kvm.h
> +++ b/include/linux/kvm.h
> @@ -399,6 +399,9 @@ struct kvm_trace_rec {
>  #if defined(CONFIG_X86)
>  #define KVM_CAP_REINJECT_CONTROL 24
>  #endif
> +#if defined(CONFIG_X86)
> +#define KVM_CAP_GSI_ROUTE 25
> +#endif
>  
>  /*
>   * ioctls for VM fds
> @@ -433,6 +436,8 @@ struct kvm_trace_rec {
>  #define KVM_ASSIGN_IRQ _IOR(KVMIO, 0x70, \
>  			    struct kvm_assigned_irq)
>  #define KVM_REINJECT_CONTROL      _IO(KVMIO, 0x71)
> +#define KVM_REQUEST_GSI_ROUTE	  _IOWR(KVMIO, 0x72, void *)
> +#define KVM_FREE_GSI_ROUTE	  _IOR(KVMIO, 0x73, void *)
>  
>  /*
>   * ioctls for vcpu fds
> @@ -553,4 +558,25 @@ struct kvm_assigned_irq {
>  #define KVM_DEV_IRQ_ASSIGN_MSI_ACTION	KVM_DEV_IRQ_ASSIGN_ENABLE_MSI
>  #define KVM_DEV_IRQ_ASSIGN_ENABLE_MSI	(1 << 0)
>  
> +struct kvm_gsi_route_guest {
> +	__u32 entries_nr;
> +	struct kvm_gsi_route_entry_guest *entries;
> +};
> +
> +#define KVM_GSI_ROUTE_MSI	(1 << 0)
> +struct kvm_gsi_route_entry_guest {
> +	__u32 gsi;
> +	__u32 type;
> +	__u32 flags;
> +	__u32 reserved;
> +	union {
> +		struct {
> +			__u32 addr_lo;
> +			__u32 addr_hi;
> +			__u32 data;
> +		} msi;
> +		__u32 padding[8];
> +	};
> +};
> +
>  #endif
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index a8bcad0..6a00201 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -136,6 +136,9 @@ struct kvm {
>  	unsigned long mmu_notifier_seq;
>  	long mmu_notifier_count;
>  #endif
> +	struct hlist_head gsi_route_list;
> +#define KVM_NR_GSI_ROUTE_ENTRIES    256
> +	DECLARE_BITMAP(gsi_route_bitmap, KVM_NR_GSI_ROUTE_ENTRIES);
>  };
>  
>  /* The guest did something we don't support. */
> @@ -336,6 +339,19 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
>  				      struct kvm_irq_mask_notifier *kimn);
>  void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);
>  
> +#define KVM_GSI_ROUTE_MASK    0x1000000ull
> +struct kvm_gsi_route_entry {
> +	u32 gsi;
> +	u32 type;
> +	u32 flags;
> +	u32 reserved;
> +	union {
> +		struct msi_msg msi;
> +		u32 reserved[8];
> +	};
> +	struct hlist_node link;
> +};
> +
>  void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
>  void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi);
>  void kvm_register_irq_ack_notifier(struct kvm *kvm,
> @@ -343,6 +359,10 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
>  void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian);
>  int kvm_request_irq_source_id(struct kvm *kvm);
>  void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
> +int kvm_update_gsi_route(struct kvm *kvm, struct kvm_gsi_route_entry *entry);
> +struct kvm_gsi_route_entry *kvm_find_gsi_route_entry(struct kvm *kvm, u32 gsi);
> +void kvm_free_gsi_route(struct kvm *kvm, struct kvm_gsi_route_entry *entry);
> +void kvm_free_gsi_route_list(struct kvm *kvm);
>  
>  #ifdef CONFIG_DMAR
>  int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
> diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
> index 5162a41..7460e7f 100644
> --- a/virt/kvm/irq_comm.c
> +++ b/virt/kvm/irq_comm.c
> @@ -123,3 +123,73 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
>  			kimn->func(kimn, mask);
>  }
>  
> +int kvm_update_gsi_route(struct kvm *kvm, struct kvm_gsi_route_entry *entry)
> +{
> +	struct kvm_gsi_route_entry *found_entry, *new_entry;
> +	int r, gsi;
> +
> +	mutex_lock(&kvm->lock);
> +	/* Find whether we need a update or a new entry */
> +	found_entry = kvm_find_gsi_route_entry(kvm, entry->gsi);
> +	if (found_entry)
> +		*found_entry = *entry;
> +	else {

Having a kvm_find_alloc_gsi_route_entry which either returns a present
entry if found or returns a newly allocated one makes the code easier to
read for me. Then just

                entry = kvm_find_alloc_gsi_route_entry
                *entry = *new_entry;

Just style, feel free to ignore the comment.
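For illustration, a minimal sketch of what such a find-or-allocate helper could
look like, assuming the gsi_route_list/gsi_route_bitmap fields and
kvm_find_gsi_route_entry() from this patch (caller holds kvm->lock and only
copies the payload fields afterwards, not gsi or link); a sketch, not the
actual code from the series:

static struct kvm_gsi_route_entry *
kvm_find_alloc_gsi_route_entry(struct kvm *kvm, u32 gsi)
{
	struct kvm_gsi_route_entry *entry;
	int bit;

	/* Reuse an existing route for this gsi if there is one */
	entry = kvm_find_gsi_route_entry(kvm, gsi);
	if (entry)
		return entry;

	/* Otherwise allocate, reserve a gsi number and link it in */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;
	bit = find_first_zero_bit(kvm->gsi_route_bitmap,
				  KVM_NR_GSI_ROUTE_ENTRIES);
	if (bit >= KVM_NR_GSI_ROUTE_ENTRIES) {
		kfree(entry);
		return NULL;
	}
	__set_bit(bit, kvm->gsi_route_bitmap);
	entry->gsi = bit | KVM_GSI_ROUTE_MASK;
	hlist_add_head(&entry->link, &kvm->gsi_route_list);
	return entry;
}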

> +		gsi = find_first_zero_bit(kvm->gsi_route_bitmap,
> +					  KVM_NR_GSI_ROUTE_ENTRIES);
> +		if (gsi >= KVM_NR_GSI_ROUTE_ENTRIES) {
> +			r = -ENOSPC;
> +			goto out;
> +		}
> +		__set_bit(gsi, kvm->gsi_route_bitmap);
> +		entry->gsi = gsi | KVM_GSI_ROUTE_MASK;
> +		new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);

Allocate first, then set the bit in the bitmap?

> +		if (!new_entry) {
> +			r = -ENOMEM;
> +			goto out;

Because here you don't clear the state on failure. 

</NITPICK MODE> 
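Roughly, the reordering being suggested is the same code as in the patch, just
with the allocation done before the bitmap is touched, so nothing needs undoing
on -ENOMEM:

		new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
		if (!new_entry) {
			r = -ENOMEM;
			goto out;
		}
		gsi = find_first_zero_bit(kvm->gsi_route_bitmap,
					  KVM_NR_GSI_ROUTE_ENTRIES);
		if (gsi >= KVM_NR_GSI_ROUTE_ENTRIES) {
			kfree(new_entry);
			r = -ENOSPC;
			goto out;
		}
		__set_bit(gsi, kvm->gsi_route_bitmap);
		entry->gsi = gsi | KVM_GSI_ROUTE_MASK;
		*new_entry = *entry;
		hlist_add_head(&new_entry->link, &kvm->gsi_route_list);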

> +		}
> +		*new_entry = *entry;
> +		hlist_add_head(&new_entry->link, &kvm->gsi_route_list);
> +	}
> +	r = 0;
> +out:
> +	mutex_unlock(&kvm->lock);
> +	return r;
> +}
> +
>  
> +static int kvm_vm_ioctl_request_gsi_route(struct kvm *kvm,
> +			struct kvm_gsi_route_guest *gsi_route,
> +			struct kvm_gsi_route_entry_guest *guest_entries)
> +{
> +	struct kvm_gsi_route_entry entry;
> +	int r, i;
> +
> +	for (i = 0; i < gsi_route->entries_nr; i++) {
> +		memcpy(&entry, &guest_entries[i], sizeof(entry));
> +		r = kvm_update_gsi_route(kvm, &entry);
> +		if (r == 0)
> +			guest_entries[i].gsi = entry.gsi;
> +		else
> +			break;
> +	}
> +	return r;
> +}
> +
> +static int kvm_vm_ioctl_free_gsi_route(struct kvm *kvm,
> +			struct kvm_gsi_route_guest *gsi_route,
> +			struct kvm_gsi_route_entry_guest *guest_entries)
> +{
> +	struct kvm_gsi_route_entry *entry;
> +	int r, i;
> +
> +	mutex_lock(&kvm->lock);
> +	for (i = 0; i < gsi_route->entries_nr; i++) {
> +		entry = kvm_find_gsi_route_entry(kvm, guest_entries[i].gsi);
> +		if (!entry ||
> +		    memcmp(entry, &guest_entries[i], sizeof(*entry)) != 0) {
> +			printk(KERN_WARNING "kvm: illegal gsi mapping!");

Don't think you need that printk?

> +			r = -EINVAL;
> +			goto out;
> +		}
> +		kvm_free_gsi_route(kvm, entry);
> +	}
> +out:
> +	mutex_unlock(&kvm->lock);
> +	return r;
> +}
> +
>  static long kvm_vcpu_ioctl(struct file *filp,
>  			   unsigned int ioctl, unsigned long arg)
>  {
> @@ -1803,6 +1846,7 @@ static long kvm_vm_ioctl(struct file *filp,
>  {
>  	struct kvm *kvm = filp->private_data;
>  	void __user *argp = (void __user *)arg;
> +	struct kvm_gsi_route_entry_guest *gsi_entries = NULL;
>  	int r;
>  
>  	if (kvm->mm != current->mm)
> @@ -1887,10 +1931,72 @@ static long kvm_vm_ioctl(struct file *filp,
>  		break;
>  	}
>  #endif
> +	case KVM_REQUEST_GSI_ROUTE: {
> +		struct kvm_gsi_route_guest gsi_route;

                r = -EFAULT;
                if (copy_from_user...)
                        goto out;

                etc...
    
To conform with the rest of the code in the function?
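Spelled out, the convention is roughly this (error value chosen up front,
copy_from_user()'s nonzero return treated as a plain failure); just a sketch of
the style, not the final code:

		r = -EFAULT;
		if (copy_from_user(&gsi_route, argp, sizeof gsi_route))
			goto out;
		r = -EINVAL;	/* arguably a better fit than -EFAULT here */
		if (gsi_route.entries_nr == 0)
			goto out;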

> +		r = copy_from_user(&gsi_route, argp, sizeof gsi_route);
> +		if (r)
> +			goto out;
> +		if (gsi_route.entries_nr == 0) {
> +			r = -EFAULT;
> +			goto out;
> +		}

Should check for a max of KVM_NR_GSI_ROUTE_ENTRIES?

> +		gsi_entries = kmalloc(gsi_route.entries_nr *
> +				      sizeof(struct kvm_gsi_route_entry_guest),
> +				      GFP_KERNEL);

And use vmalloc/vfree instead.

> +		if (!gsi_entries) {
> +			r = -ENOMEM;
> +			goto out;
> +		}
> +		r = copy_from_user(gsi_entries,
> +				   (void __user *)gsi_route.entries,
> +				   gsi_route.entries_nr *
> +				   sizeof(struct kvm_gsi_route_entry_guest));
> +		if (r)
> +			goto out;
> +		r = kvm_vm_ioctl_request_gsi_route(kvm, &gsi_route,
> +						   gsi_entries);
> +		if (r)
> +			goto out;
> +		r = copy_to_user((void __user *)gsi_route.entries,
> +				gsi_entries,
> +				gsi_route.entries_nr *
> +				sizeof(struct kvm_gsi_route_entry_guest));
> +		if (r)
> +			goto out;
> +		break;
> +	}
> +	case KVM_FREE_GSI_ROUTE: {
> +		struct kvm_gsi_route_guest gsi_route;
> +		r = copy_from_user(&gsi_route, argp, sizeof gsi_route);
> +		if (r)
> +			goto out;
> +		if (gsi_route.entries_nr == 0) {
> +			r = -EFAULT;
> +			goto out;
> +		}

Check KVM_NR_GSI_ROUTE_ENTRIES and vmalloc.
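Concretely, both ioctl paths could be bounded and switched to vmalloc along
these lines (a sketch only; vmalloc/vfree come from linux/vmalloc.h):

		r = -EINVAL;
		if (gsi_route.entries_nr == 0 ||
		    gsi_route.entries_nr > KVM_NR_GSI_ROUTE_ENTRIES)
			goto out;
		r = -ENOMEM;
		gsi_entries = vmalloc(gsi_route.entries_nr *
				      sizeof(struct kvm_gsi_route_entry_guest));
		if (!gsi_entries)
			goto out;
		/* ... copy_from_user() and the ioctl helper as in the patch,
		 * with vfree(gsi_entries) instead of kfree() on the way out */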


^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [PATCH 05/10] KVM: Merge MSI handling to kvm_set_irq
  2009-01-07 10:42 ` [PATCH 05/10] KVM: Merge MSI handling to kvm_set_irq Sheng Yang
@ 2009-01-07 21:39   ` Marcelo Tosatti
  2009-01-08  9:24     ` Sheng Yang
  2009-01-08 13:54   ` Mike Day
  1 sibling, 1 reply; 18+ messages in thread
From: Marcelo Tosatti @ 2009-01-07 21:39 UTC (permalink / raw)
  To: Sheng Yang; +Cc: Avi Kivity, kvm

On Wed, Jan 07, 2009 at 06:42:41PM +0800, Sheng Yang wrote:
> Using kvm_set_irq to handle all interrupt injection.
> 
> Signed-off-by: Sheng Yang <sheng@linux.intel.com>
> ---
>  include/linux/kvm_host.h |    2 +-
>  virt/kvm/irq_comm.c      |   79 +++++++++++++++++++++++++++++++++++++++++++--
>  virt/kvm/kvm_main.c      |   79 +++-------------------------------------------
>  3 files changed, 81 insertions(+), 79 deletions(-)
> 
> +static void gsi_dispatch(struct kvm *kvm, u32 gsi)
> +{
> +	int vcpu_id;
> +	struct kvm_vcpu *vcpu;
> +	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
> +	struct kvm_gsi_route_entry *gsi_entry;
> +	int dest_id, vector, dest_mode, trig_mode, delivery_mode;
> +	u32 deliver_bitmask;
> +
> +	BUG_ON(!ioapic);
> +
> +	gsi_entry = kvm_find_gsi_route_entry(kvm, gsi);
> +	if (!gsi_entry) {
> +		printk(KERN_WARNING "kvm: fail to find correlated gsi entry\n");
> +		return;
> +	}
> +
> +#ifdef CONFIG_X86
> +	if (gsi_entry->type & KVM_GSI_ROUTE_MSI) {
> +		dest_id = (gsi_entry->msi.address_lo & MSI_ADDR_DEST_ID_MASK)
> +			>> MSI_ADDR_DEST_ID_SHIFT;
> +		vector = (gsi_entry->msi.data & MSI_DATA_VECTOR_MASK)
> +			>> MSI_DATA_VECTOR_SHIFT;
> +		dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
> +				(unsigned long *)&gsi_entry->msi.address_lo);
> +		trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
> +				(unsigned long *)&gsi_entry->msi.data);
> +		delivery_mode = test_bit(MSI_DATA_DELIVERY_MODE_SHIFT,
> +				(unsigned long *)&gsi_entry->msi.data);
> +		deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
> +				dest_id, dest_mode);
> +		/* IOAPIC delivery mode value is the same as MSI here */
> +		switch (delivery_mode) {

Sheng, 

This code seems to ignore the RH bit (MSI_ADDR_REDIRECTION_SHIFT):

4.Destination mode (DM) — This bit indicates whether the Destination
ID field should be interpreted as logical or physical APIC ID for
delivery of the lowest priority interrupt. If RH is 1 and DM is 0,
the Destination ID field is in physical destination mode and only the
processor in the system that has the matching APIC ID is considered
for delivery of that interrupt (this means no re-direction). If RH
is 1 and DM is 1, the Destination ID Field is interpreted as in
logical destination mode and the redirection is limited to only those
processors that are part of the logical group of processors based
on the processor’s logical APIC ID and the Destination ID field
in the message. The logical group of processors consists of those
identified by matching the 8-bit Destination ID with the logical
destination identified by the Destination Format Register and the
Logical Destination Register in each local APIC.

Is that intentional?
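For reference, a small sketch of how the two bits in question could be decoded
next to the other MSI fields (assuming the MSI_ADDR_REDIRECTION_SHIFT and
MSI_ADDR_DEST_MODE_SHIFT definitions from asm/msidef.h); what the dispatch code
should then do with the answer is exactly the open question:

/* Returns true for RH=1, DM=1: lowest-priority redirection within the
 * logical group named by the destination ID.  With RH=1, DM=0 only the
 * CPU whose physical APIC ID matches the destination ID is eligible. */
static bool msi_wants_logical_redirection(u32 address_lo)
{
	unsigned long addr = address_lo;

	return test_bit(MSI_ADDR_REDIRECTION_SHIFT, &addr) &&
	       test_bit(MSI_ADDR_DEST_MODE_SHIFT, &addr);
}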


^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [PATCH 09/10] KVM: Update intr delivery func to accept unsigned long* bitmap
  2009-01-07 10:42 ` [PATCH 09/10] KVM: Update intr delivery func to accept unsigned long* bitmap Sheng Yang
@ 2009-01-08  0:38   ` Marcelo Tosatti
  0 siblings, 0 replies; 18+ messages in thread
From: Marcelo Tosatti @ 2009-01-08  0:38 UTC (permalink / raw)
  To: Sheng Yang; +Cc: Avi Kivity, kvm


Better separate the bitmap patches from this series to ease merging of
the MSI changes.

On Wed, Jan 07, 2009 at 06:42:45PM +0800, Sheng Yang wrote:
> Would be used with bit ops, and would be easily extended if KVM_MAX_VCPUS is
> increased.
> 
> Signed-off-by: Sheng Yang <sheng@linux.intel.com>
> ---
>  arch/x86/kvm/lapic.c     |    8 ++++----
>  include/linux/kvm_host.h |    2 +-
>  virt/kvm/ioapic.c        |    4 ++--
>  virt/kvm/ioapic.h        |    4 ++--
>  virt/kvm/irq_comm.c      |    6 +++---
>  5 files changed, 12 insertions(+), 12 deletions(-)

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [PATCH 01/10] KVM: Add a route layer to convert MSI message to GSI
  2009-01-07 16:18   ` Marcelo Tosatti
@ 2009-01-08  7:30     ` Sheng Yang
  0 siblings, 0 replies; 18+ messages in thread
From: Sheng Yang @ 2009-01-08  7:30 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: Avi Kivity, kvm

On Thursday 08 January 2009 00:18:39 Marcelo Tosatti wrote:
> Hi Sheng,
>

Thanks for the careful review! :)
> On Wed, Jan 07, 2009 at 06:42:37PM +0800, Sheng Yang wrote:
> > diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
> > index 5162a41..7460e7f 100644
> > --- a/virt/kvm/irq_comm.c
> > +++ b/virt/kvm/irq_comm.c
> > @@ -123,3 +123,73 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, int
> > irq, bool mask) kimn->func(kimn, mask);
> >  }
> >
> > +int kvm_update_gsi_route(struct kvm *kvm, struct kvm_gsi_route_entry
> > *entry) +{
> > +	struct kvm_gsi_route_entry *found_entry, *new_entry;
> > +	int r, gsi;
> > +
> > +	mutex_lock(&kvm->lock);
> > +	/* Find whether we need a update or a new entry */
> > +	found_entry = kvm_find_gsi_route_entry(kvm, entry->gsi);
> > +	if (found_entry)
> > +		*found_entry = *entry;
> > +	else {
>
> Having a kvm_find_alloc_gsi_route_entry which either returns a present
> entry if found or returns a newly allocated one makes the code easier to
> read for me. Then just
>
>                 entry = kvm_find_alloc_gsi_route_entry
>                 *entry = *new_entry;
>
> Just style, feel free to ignore the comment.

A little different here: we would also have to use kvm_find_alloc_gsi_route_entry()
to find the correlated gsi when injecting an interrupt, so allocating a new entry
in that path is not really proper.
>
> > +		gsi = find_first_zero_bit(kvm->gsi_route_bitmap,
> > +					  KVM_NR_GSI_ROUTE_ENTRIES);
> > +		if (gsi >= KVM_NR_GSI_ROUTE_ENTRIES) {
> > +			r = -ENOSPC;
> > +			goto out;
> > +		}
> > +		__set_bit(gsi, kvm->gsi_route_bitmap);
> > +		entry->gsi = gsi | KVM_GSI_ROUTE_MASK;
> > +		new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
>
> Allocate first, then set the bit in the bitmap?
>
> > +		if (!new_entry) {
> > +			r = -ENOMEM;
> > +			goto out;
>
> Because here you don't clear the state on failure.
>
> </NITPICK MODE>

Oh, you are right. :)
>
> > +		}
> > +		*new_entry = *entry;
> > +		hlist_add_head(&new_entry->link, &kvm->gsi_route_list);
> > +	}
> > +	r = 0;
> > +out:
> > +	mutex_unlock(&kvm->lock);
> > +	return r;
> > +}
> > +
> >
> > +static int kvm_vm_ioctl_request_gsi_route(struct kvm *kvm,
> > +			struct kvm_gsi_route_guest *gsi_route,
> > +			struct kvm_gsi_route_entry_guest *guest_entries)
> > +{
> > +	struct kvm_gsi_route_entry entry;
> > +	int r, i;
> > +
> > +	for (i = 0; i < gsi_route->entries_nr; i++) {
> > +		memcpy(&entry, &guest_entries[i], sizeof(entry));
> > +		r = kvm_update_gsi_route(kvm, &entry);
> > +		if (r == 0)
> > +			guest_entries[i].gsi = entry.gsi;
> > +		else
> > +			break;
> > +	}
> > +	return r;
> > +}
> > +
> > +static int kvm_vm_ioctl_free_gsi_route(struct kvm *kvm,
> > +			struct kvm_gsi_route_guest *gsi_route,
> > +			struct kvm_gsi_route_entry_guest *guest_entries)
> > +{
> > +	struct kvm_gsi_route_entry *entry;
> > +	int r, i;
> > +
> > +	mutex_lock(&kvm->lock);
> > +	for (i = 0; i < gsi_route->entries_nr; i++) {
> > +		entry = kvm_find_gsi_route_entry(kvm, guest_entries[i].gsi);
> > +		if (!entry ||
> > +		    memcmp(entry, &guest_entries[i], sizeof(*entry)) != 0) {
> > +			printk(KERN_WARNING "kvm: illegal gsi mapping!");
>
> Don't think you need that printk?

After reconsidering, I am not sure whether I should verify the whole guest entry
here, or just pass a gsi number to the kernel for freeing. I slightly prefer the
gsi-only approach, since I think I can trust userspace to some degree... I will
update the patch. (In fact, I don't use this ioctl in userspace for now...)
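Something like the following is what the gsi-only variant would boil down to
(a hypothetical sketch, not part of the posted series):

static int kvm_vm_ioctl_free_gsi_route(struct kvm *kvm, u32 *gsis, u32 nr)
{
	struct kvm_gsi_route_entry *entry;
	int i, r = 0;

	mutex_lock(&kvm->lock);
	for (i = 0; i < nr; i++) {
		entry = kvm_find_gsi_route_entry(kvm, gsis[i]);
		if (!entry) {
			r = -EINVAL;
			break;
		}
		kvm_free_gsi_route(kvm, entry);
	}
	mutex_unlock(&kvm->lock);
	return r;
}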
>
> > +			r = -EINVAL;
> > +			goto out;
> > +		}
> > +		kvm_free_gsi_route(kvm, entry);
> > +	}
> > +out:
> > +	mutex_unlock(&kvm->lock);
> > +	return r;
> > +}
> > +
> >  static long kvm_vcpu_ioctl(struct file *filp,
> >  			   unsigned int ioctl, unsigned long arg)
> >  {
> > @@ -1803,6 +1846,7 @@ static long kvm_vm_ioctl(struct file *filp,
> >  {
> >  	struct kvm *kvm = filp->private_data;
> >  	void __user *argp = (void __user *)arg;
> > +	struct kvm_gsi_route_entry_guest *gsi_entries = NULL;
> >  	int r;
> >
> >  	if (kvm->mm != current->mm)
> > @@ -1887,10 +1931,72 @@ static long kvm_vm_ioctl(struct file *filp,
> >  		break;
> >  	}
> >  #endif
> > +	case KVM_REQUEST_GSI_ROUTE: {
> > +		struct kvm_gsi_route_guest gsi_route;
>
>                 r = -EFAULT;
>                 if (copy_from_user...)
>                         goto out;
>
>                 etc...
>
> To conform with the rest of the code in the function?

OK...
>
> > +		r = copy_from_user(&gsi_route, argp, sizeof gsi_route);
> > +		if (r)
> > +			goto out;
> > +		if (gsi_route.entries_nr == 0) {
> > +			r = -EFAULT;
> > +			goto out;
> > +		}
>
> Should check for a max of KVM_NR_GSI_ROUTE_ENTRIES ?

Yeah.
>
> > +		gsi_entries = kmalloc(gsi_route.entries_nr *
> > +				      sizeof(struct kvm_gsi_route_entry_guest),
> > +				      GFP_KERNEL);
>
> And use vmalloc/vfree instead.

Yeah... kmalloc is not necessary here...
>
> > +		if (!gsi_entries) {
> > +			r = -ENOMEM;
> > +			goto out;
> > +		}
> > +		r = copy_from_user(gsi_entries,
> > +				   (void __user *)gsi_route.entries,
> > +				   gsi_route.entries_nr *
> > +				   sizeof(struct kvm_gsi_route_entry_guest));
> > +		if (r)
> > +			goto out;
> > +		r = kvm_vm_ioctl_request_gsi_route(kvm, &gsi_route,
> > +						   gsi_entries);
> > +		if (r)
> > +			goto out;
> > +		r = copy_to_user((void __user *)gsi_route.entries,
> > +				gsi_entries,
> > +				gsi_route.entries_nr *
> > +				sizeof(struct kvm_gsi_route_entry_guest));
> > +		if (r)
> > +			goto out;
> > +		break;
> > +	}
> > +	case KVM_FREE_GSI_ROUTE: {
> > +		struct kvm_gsi_route_guest gsi_route;
> > +		r = copy_from_user(&gsi_route, argp, sizeof gsi_route);
> > +		if (r)
> > +			goto out;
> > +		if (gsi_route.entries_nr == 0) {
> > +			r = -EFAULT;
> > +			goto out;
> > +		}
>
> Check KVM_NR_GSI_ROUTE_ENTRIES and vmalloc.

Sure.

-- 
regards
Yang, Sheng



^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [PATCH 05/10] KVM: Merge MSI handling to kvm_set_irq
  2009-01-07 21:39   ` Marcelo Tosatti
@ 2009-01-08  9:24     ` Sheng Yang
  0 siblings, 0 replies; 18+ messages in thread
From: Sheng Yang @ 2009-01-08  9:24 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: Avi Kivity, kvm

On Thursday 08 January 2009 05:39:32 Marcelo Tosatti wrote:
> On Wed, Jan 07, 2009 at 06:42:41PM +0800, Sheng Yang wrote:
> > Using kvm_set_irq to handle all interrupt injection.
> >
> > Signed-off-by: Sheng Yang <sheng@linux.intel.com>
> > ---
> >  include/linux/kvm_host.h |    2 +-
> >  virt/kvm/irq_comm.c      |   79
> > +++++++++++++++++++++++++++++++++++++++++++-- virt/kvm/kvm_main.c      | 
> >  79 +++------------------------------------------- 3 files changed, 81
> > insertions(+), 79 deletions(-)
> >
> > +static void gsi_dispatch(struct kvm *kvm, u32 gsi)
> > +{
> > +	int vcpu_id;
> > +	struct kvm_vcpu *vcpu;
> > +	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
> > +	struct kvm_gsi_route_entry *gsi_entry;
> > +	int dest_id, vector, dest_mode, trig_mode, delivery_mode;
> > +	u32 deliver_bitmask;
> > +
> > +	BUG_ON(!ioapic);
> > +
> > +	gsi_entry = kvm_find_gsi_route_entry(kvm, gsi);
> > +	if (!gsi_entry) {
> > +		printk(KERN_WARNING "kvm: fail to find correlated gsi entry\n");
> > +		return;
> > +	}
> > +
> > +#ifdef CONFIG_X86
> > +	if (gsi_entry->type & KVM_GSI_ROUTE_MSI) {
> > +		dest_id = (gsi_entry->msi.address_lo & MSI_ADDR_DEST_ID_MASK)
> > +			>> MSI_ADDR_DEST_ID_SHIFT;
> > +		vector = (gsi_entry->msi.data & MSI_DATA_VECTOR_MASK)
> > +			>> MSI_DATA_VECTOR_SHIFT;
> > +		dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
> > +				(unsigned long *)&gsi_entry->msi.address_lo);
> > +		trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
> > +				(unsigned long *)&gsi_entry->msi.data);
> > +		delivery_mode = test_bit(MSI_DATA_DELIVERY_MODE_SHIFT,
> > +				(unsigned long *)&gsi_entry->msi.data);
> > +		deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
> > +				dest_id, dest_mode);
> > +		/* IOAPIC delivery mode value is the same as MSI here */
> > +		switch (delivery_mode) {
>
> Sheng,
>
> This code seems to ignore the RH bit (MSI_ADDR_REDIRECTION_SHIFT):
>
> 4.Destination mode (DM) — This bit indicates whether the Destination
> ID field should be interpreted as logical or physical APIC ID for
> delivery of the lowest priority interrupt. If RH is 1 and DM is 0,
> the Destination ID field is in physical destination mode and only the
> processor in the system that has the matching APIC ID is considered
> for delivery of that interrupt (this means no re-direction). If RH
> is 1 and DM is 1, the Destination ID Field is interpreted as in
> logical destination mode and the redirection is limited to only those
> processors that are part of the logical group of processors based
> on the processor’s logical APIC ID and the Destination ID field
> in the message. The logical group of processors consists of those
> identified by matching the 8-bit Destination ID with the logical
> destination identified by the Destination Format Register and the
> Logical Destination Register in each local APIC.
>
> Is that intentional?

Um... Partly... The RH bit is almost always 1... OK, I will add another
patch for this bit later.

-- 
regards
Yang, Sheng

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: KVM: Merge MSI handling to kvm_set_irq
  2009-01-07 10:42 ` [PATCH 05/10] KVM: Merge MSI handling to kvm_set_irq Sheng Yang
  2009-01-07 21:39   ` Marcelo Tosatti
@ 2009-01-08 13:54   ` Mike Day
  2009-01-09  1:32     ` Sheng Yang
  1 sibling, 1 reply; 18+ messages in thread
From: Mike Day @ 2009-01-08 13:54 UTC (permalink / raw)
  To: Sheng Yang; +Cc: Avi Kivity, Marcelo Tosatti, kvm

On 07/01/09 18:42 +0800, Sheng Yang wrote:
> Using kvm_set_irq to handle all interrupt injection.
> 
> Signed-off-by: Sheng Yang <sheng@linux.intel.com>
> ---

> +static void gsi_dispatch(struct kvm *kvm, u32 gsi)

...

> +		case IOAPIC_FIXED:
> +			for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
> +				if (!(deliver_bitmask & (1 << vcpu_id)))
> +					continue;
> +				deliver_bitmask &= ~(1 << vcpu_id);
> +				vcpu = ioapic->kvm->vcpus[vcpu_id];
> +				if (vcpu)
> +					kvm_apic_set_irq(vcpu, vector,
> +							trig_mode);
> +			}
> +			break;
> +		default:
> +			break;
> +		}

In cases such as the for() loop above, which are numerous in the
patchset, I wonder if using bitops would be slightly better:

		case IOAPIC_FIXED:
			while (deliver_bitmask != 0) {
				vcpu_id = ffs(deliver_bitmask);
				__clear_bit(vcpu_id - 1, &deliver_bitmask);
				vcpu = ioapic->kvm->vcpus[vcpu_id - 1];
				if (vcpu)
					kvm_apic_set_irq(vcpu, vector,
							 trig_mode);
			} ;
			

I did a quick check and the second example compiles to a more
concise set of assembler instructions. The current code uses bitops in
cases like this.

Mike

-- 
Mike Day
http://www.ncultra.org
AIM: ncmikeday |  Yahoo IM: ultra.runner
PGP key: http://www.ncultra.org/ncmike/pubkey.asc

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: KVM: Merge MSI handling to kvm_set_irq
  2009-01-08 13:54   ` Mike Day
@ 2009-01-09  1:32     ` Sheng Yang
  0 siblings, 0 replies; 18+ messages in thread
From: Sheng Yang @ 2009-01-09  1:32 UTC (permalink / raw)
  To: ncmike; +Cc: Avi Kivity, Marcelo Tosatti, kvm

On Thursday 08 January 2009 21:54:41 Mike Day wrote:
> On 07/01/09 18:42 +0800, Sheng Yang wrote:
> > Using kvm_set_irq to handle all interrupt injection.
> >
> > Signed-off-by: Sheng Yang <sheng@linux.intel.com>
> > ---
> >
> > +static void gsi_dispatch(struct kvm *kvm, u32 gsi)
>
> ...
>
> > +		case IOAPIC_FIXED:
> > +			for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
> > +				if (!(deliver_bitmask & (1 << vcpu_id)))
> > +					continue;
> > +				deliver_bitmask &= ~(1 << vcpu_id);
> > +				vcpu = ioapic->kvm->vcpus[vcpu_id];
> > +				if (vcpu)
> > +					kvm_apic_set_irq(vcpu, vector,
> > +							trig_mode);
> > +			}
> > +			break;
> > +		default:
> > +			break;
> > +		}
>
> In cases such as the for() loop above, which are numerous in the
> patchset, I wonder if using bitops would be slightly better:
>
> 		case IOAPIC_FIXED:
> 			while (deliver_bitmask != 0) {
> 				vcpu_id = ffs(deliver_bitmask);
> 				__clear_bit(vcpu_id - 1, &deliver_bitmask);
> 				vcpu = ioapic->kvm->vcpus[vcpu_id - 1];
> 				if (vcpu)
> 					kvm_apic_set_irq(vcpu, vector,
> 							 trig_mode);
> 			} ;
>
>
> I did a quick check and the second example compiles to a more
> consise set of assembler instructions. The current code uses bitops in
> cases like this.
>

Yes, that's what I did for the bitmap changes in the following patches. Please
refer to "[PATCH 10/10] KVM: bit ops for deliver_bitmap", which I sent earlier.

-- 
regards
Yang, Sheng

> Mike


^ permalink raw reply	[flat|nested] 18+ messages in thread

end of thread, other threads:[~2009-01-09  1:32 UTC | newest]

Thread overview: 18+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2009-01-07 10:42 [PATCH 0/10][v4]GSI route layer for MSI/MSI-X Sheng Yang
2009-01-07 10:42 ` [PATCH 01/10] KVM: Add a route layer to convert MSI message to GSI Sheng Yang
2009-01-07 16:18   ` Marcelo Tosatti
2009-01-08  7:30     ` Sheng Yang
2009-01-07 10:42 ` [PATCH 02/10] KVM: Using gsi route for MSI device assignment Sheng Yang
2009-01-07 10:42 ` [PATCH 03/10] KVM: Improve MSI dispatch function Sheng Yang
2009-01-07 10:42 ` [PATCH 04/10] KVM: Using ioapic_irqchip() macro for kvm_set_irq Sheng Yang
2009-01-07 10:42 ` [PATCH 05/10] KVM: Merge MSI handling to kvm_set_irq Sheng Yang
2009-01-07 21:39   ` Marcelo Tosatti
2009-01-08  9:24     ` Sheng Yang
2009-01-08 13:54   ` Mike Day
2009-01-09  1:32     ` Sheng Yang
2009-01-07 10:42 ` [PATCH 06/10] KVM: Split IOAPIC structure Sheng Yang
2009-01-07 10:42 ` [PATCH 07/10] KVM: Unified the delivery of IOAPIC and MSI Sheng Yang
2009-01-07 10:42 ` [PATCH 08/10] KVM: Change API of kvm_ioapic_get_delivery_bitmask Sheng Yang
2009-01-07 10:42 ` [PATCH 09/10] KVM: Update intr delivery func to accept unsigned long* bitmap Sheng Yang
2009-01-08  0:38   ` Marcelo Tosatti
2009-01-07 10:42 ` [PATCH 10/10] KVM: bit ops for deliver_bitmap Sheng Yang

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).