* [PATCH] kvm tools: Add MSI-X support to virtio-net
From: Sasha Levin @ 2011-08-11  9:47 UTC (permalink / raw)
  To: penberg; +Cc: kvm, mingo, asias.hejun, gorcunov, Sasha Levin

The device uses the virtio-preferred method of working with MSI-X:
one vector for configuration and one vector for each vq in the device.

Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
---
 tools/kvm/virtio/net.c |   56 ++++++++++++++++++++++++++++++++++++++++++++---
 1 files changed, 52 insertions(+), 4 deletions(-)

diff --git a/tools/kvm/virtio/net.c b/tools/kvm/virtio/net.c
index e865b7f..e9fe45d 100644
--- a/tools/kvm/virtio/net.c
+++ b/tools/kvm/virtio/net.c
@@ -60,6 +60,9 @@ struct net_dev {
 	u8				isr;
 	u16				queue_selector;
 	u16				base_addr;
+	u32				vq_vector[VIRTIO_NET_NUM_QUEUES];
+	u32				gsis[VIRTIO_NET_NUM_QUEUES];
+	u32				msix_io_block;
 
 	pthread_t			io_rx_thread;
 	pthread_mutex_t			io_rx_lock;
@@ -125,7 +128,8 @@ static void *virtio_net_rx_thread(void *p)
 			virt_queue__set_used_elem(vq, head, len);
 
 			/* We should interrupt guest right now, otherwise latency is huge. */
-			virt_queue__trigger_irq(vq, pci_header.irq_line, &ndev.isr, kvm);
+			kvm__irq_line(kvm, ndev.gsis[VIRTIO_NET_RX_QUEUE], VIRTIO_IRQ_HIGH);
+			kvm__irq_line(kvm, ndev.gsis[VIRTIO_NET_RX_QUEUE], VIRTIO_IRQ_LOW);
 		}
 
 	}
@@ -162,7 +166,9 @@ static void *virtio_net_tx_thread(void *p)
 			virt_queue__set_used_elem(vq, head, len);
 		}
 
-		virt_queue__trigger_irq(vq, pci_header.irq_line, &ndev.isr, kvm);
+		kvm__irq_line(kvm, ndev.gsis[VIRTIO_NET_TX_QUEUE], VIRTIO_IRQ_HIGH);
+		kvm__irq_line(kvm, ndev.gsis[VIRTIO_NET_TX_QUEUE], VIRTIO_IRQ_LOW);
 
 	}
 
@@ -219,6 +225,12 @@ static bool virtio_net_pci_io_in(struct ioport *ioport, struct kvm *kvm, u16 por
 		kvm__irq_line(kvm, pci_header.irq_line, VIRTIO_IRQ_LOW);
 		ndev.isr = VIRTIO_IRQ_LOW;
 		break;
+	case VIRTIO_MSI_CONFIG_VECTOR:
+		ioport__write16(data, ndev.config_vector);
+		break;
+	case VIRTIO_MSI_QUEUE_VECTOR:
+		ioport__write16(data, ndev.vq_vector[ndev.queue_selector]);
+		break;
 	default:
 		ret = virtio_net_pci_io_device_specific_in(data, offset, size, count);
 	};
@@ -285,10 +297,22 @@ static bool virtio_net_pci_io_out(struct ioport *ioport, struct kvm *kvm, u16 po
 		ndev.status		= ioport__read8(data);
 		break;
 	case VIRTIO_MSI_CONFIG_VECTOR:
-		ndev.config_vector	= VIRTIO_MSI_NO_VECTOR;
+		ndev.config_vector	= ioport__read16(data);
 		break;
-	case VIRTIO_MSI_QUEUE_VECTOR:
+	case VIRTIO_MSI_QUEUE_VECTOR: {
+		u32 gsi;
+		u32 vec;
+
+		vec = ndev.vq_vector[ndev.queue_selector] = ioport__read16(data);
+
+		gsi = irq__add_msix_route(kvm,
+					  pci_header.msix.table[vec].low,
+					  pci_header.msix.table[vec].high,
+					  pci_header.msix.table[vec].data);
+
+		ndev.gsis[ndev.queue_selector] = gsi;
 		break;
+	}
 	default:
 		ret			= false;
 	};
@@ -308,6 +332,15 @@ static struct ioport_operations virtio_net_io_ops = {
 	.io_out	= virtio_net_pci_io_out,
 };
 
+static void callback_mmio(u64 addr, u8 *data, u32 len, u8 is_write, void *ptr)
+{
+	void *table = pci_header.msix.table;
+	if (is_write)
+		memcpy(table + addr - ndev.msix_io_block, data, len);
+	else
+		memcpy(data, table + addr - ndev.msix_io_block, len);
+}
+
 static bool virtio_net__tap_init(const struct virtio_net_parameters *params)
 {
 	int sock = socket(AF_INET, SOCK_STREAM, 0);
@@ -467,6 +500,21 @@ void virtio_net__init(const struct virtio_net_parameters *params)
 		ndev.ops = &uip_ops;
 	}
 
+	ndev.msix_io_block = pci_get_io_space_block();
+	kvm__register_mmio(params->kvm, ndev.msix_io_block, 0x100, callback_mmio, NULL);
+	pci_header.bar[1]	= ndev.msix_io_block |
+				PCI_BASE_ADDRESS_SPACE_MEMORY |
+				PCI_BASE_ADDRESS_MEM_TYPE_64;
+	/* bar[2] is the continuation of bar[1] for 64bit addressing */
+	pci_header.bar[2]	= 0;
+	pci_header.status	= PCI_STATUS_CAP_LIST;
+	pci_header.capabilities	= (void *)&pci_header.msix - (void *)&pci_header;
+
+	pci_header.msix.cap = PCI_CAP_ID_MSIX;
+	pci_header.msix.next = 0;
+	pci_header.msix.table_size = (VIRTIO_NET_NUM_QUEUES + 1) | PCI_MSIX_FLAGS_ENABLE;
+	pci_header.msix.table_offset = 1; /* Use BAR 1 */
+
 	virtio_net__io_thread_init(params->kvm);
 
 	for (i = 0; i < VIRTIO_NET_NUM_QUEUES; i++) {
-- 
1.7.6
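
For reference, the MSI-X table that callback_mmio() mirrors behind BAR 1
is laid out, per the PCI spec, as one 16-byte entry per vector. A sketch
of that layout (the low/high/data field names match the patch; the ctrl
field and the struct name itself are assumptions, not the actual
tools/kvm definitions):

	struct msix_table_entry {
		u32 low;	/* message address, low 32 bits */
		u32 high;	/* message address, high 32 bits */
		u32 data;	/* message data written to signal the vector */
		u32 ctrl;	/* vector control; bit 0 masks the vector */
	};

The guest programs these entries through the BAR 1 MMIO window, and
irq__add_msix_route() turns a programmed entry into a GSI that the
device can later raise with kvm__irq_line().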



* [PATCH] kvm tools: Wait for all VCPU threads to exit while rebooting
From: Sasha Levin @ 2011-08-11  9:47 UTC (permalink / raw)
  To: penberg; +Cc: kvm, mingo, asias.hejun, gorcunov, Sasha Levin

This patch changes kvm_cpu__reboot() behaviour to block until all VCPU
threads have ended; this lets us assume that the guest is stopped when
the function returns.

This fixes errors on close caused by releasing the KVM_RUN structure
while VCPUs were still running.

Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
---
 tools/kvm/kvm-cpu.c |   10 ++++++++--
 1 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/tools/kvm/kvm-cpu.c b/tools/kvm/kvm-cpu.c
index 2f5d23c..64c0997 100644
--- a/tools/kvm/kvm-cpu.c
+++ b/tools/kvm/kvm-cpu.c
@@ -421,7 +421,13 @@ static void kvm_cpu__handle_coalesced_mmio(struct kvm_cpu *cpu)
 
 void kvm_cpu__reboot(void)
 {
-	pthread_kill(kvm_cpus[0]->thread, SIGKVMEXIT);
+	int i;
+
+	for (i = 0; i < KVM_NR_CPUS; i++)
+		if (kvm_cpus[i]) {
+			pthread_kill(kvm_cpus[i]->thread, SIGKVMEXIT);
+			pthread_join(kvm_cpus[i]->thread, NULL);
+		}
 }
 
 int kvm_cpu__start(struct kvm_cpu *cpu)
@@ -442,7 +448,7 @@ int kvm_cpu__start(struct kvm_cpu *cpu)
 	if (cpu->kvm->single_step)
 		kvm_cpu__enable_singlestep(cpu);
 
-	for (;;) {
+	while (cpu->is_running) {
 		if (cpu->paused) {
 			kvm__notify_paused();
 			cpu->paused = 0;
-- 
1.7.6



* Re: [PATCH] kvm tools: Add MSI-X support to virtio-net
From: Pekka Enberg @ 2011-08-11  9:56 UTC (permalink / raw)
  To: Sasha Levin; +Cc: kvm, mingo, asias.hejun, gorcunov

On Thu, Aug 11, 2011 at 12:47 PM, Sasha Levin <levinsasha928@gmail.com> wrote:
> The device uses the virtio-preferred method of working with MSI-X:
> one vector for configuration and one vector for each vq in the device.
>
> Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
> ---
>  tools/kvm/virtio/net.c |   56 ++++++++++++++++++++++++++++++++++++++++++++---
>  1 files changed, 52 insertions(+), 4 deletions(-)
>
> diff --git a/tools/kvm/virtio/net.c b/tools/kvm/virtio/net.c
> index e865b7f..e9fe45d 100644
> --- a/tools/kvm/virtio/net.c
> +++ b/tools/kvm/virtio/net.c
> @@ -60,6 +60,9 @@ struct net_dev {
>        u8                              isr;
>        u16                             queue_selector;
>        u16                             base_addr;
> +       u32                             vq_vector[VIRTIO_NET_NUM_QUEUES];
> +       u32                             gsis[VIRTIO_NET_NUM_QUEUES];
> +       u32                             msix_io_block;
>
>        pthread_t                       io_rx_thread;
>        pthread_mutex_t                 io_rx_lock;
> @@ -125,7 +128,8 @@ static void *virtio_net_rx_thread(void *p)
>                        virt_queue__set_used_elem(vq, head, len);
>
>                        /* We should interrupt guest right now, otherwise latency is huge. */
> -                       virt_queue__trigger_irq(vq, pci_header.irq_line, &ndev.isr, kvm);
> +                       kvm__irq_line(kvm, ndev.gsis[VIRTIO_NET_RX_QUEUE], VIRTIO_IRQ_HIGH);
> +                       kvm__irq_line(kvm, ndev.gsis[VIRTIO_NET_RX_QUEUE], VIRTIO_IRQ_LOW);

We should probably make a helper function for this HIGH/LOW pair.
kvm__irq_trigger or something?
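
Something like this, perhaps (a sketch only -- kvm__irq_trigger is just
the name suggested above, wrapping the two kvm__irq_line() calls that
already appear in the patch):

	static inline void kvm__irq_trigger(struct kvm *kvm, int irq)
	{
		/* Pulse the GSI: raise the line, then drop it again. */
		kvm__irq_line(kvm, irq, VIRTIO_IRQ_HIGH);
		kvm__irq_line(kvm, irq, VIRTIO_IRQ_LOW);
	}

The RX and TX threads would then call
kvm__irq_trigger(kvm, ndev.gsis[VIRTIO_NET_RX_QUEUE]) and the TX
equivalent instead of the open-coded pairs.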


* Re: [PATCH] kvm tools: Wait for all VCPU threads to exit while rebooting
From: Pekka Enberg @ 2011-08-11 10:02 UTC (permalink / raw)
  To: Sasha Levin; +Cc: kvm, mingo, asias.hejun, gorcunov

On Thu, Aug 11, 2011 at 12:47 PM, Sasha Levin <levinsasha928@gmail.com> wrote:
> This patch changes kvm_cpu__reboot() behaviour to block until all VCPU
> threads have ended; this lets us assume that the guest is stopped when
> the function returns.
>
> This fixes errors on close caused by releasing the KVM_RUN structure
> while VCPUs were still running.
>
> Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
> ---
>  tools/kvm/kvm-cpu.c |   10 ++++++++--
>  1 files changed, 8 insertions(+), 2 deletions(-)
>
> diff --git a/tools/kvm/kvm-cpu.c b/tools/kvm/kvm-cpu.c
> index 2f5d23c..64c0997 100644
> --- a/tools/kvm/kvm-cpu.c
> +++ b/tools/kvm/kvm-cpu.c
> @@ -421,7 +421,13 @@ static void kvm_cpu__handle_coalesced_mmio(struct kvm_cpu *cpu)
>
>  void kvm_cpu__reboot(void)
>  {
> -       pthread_kill(kvm_cpus[0]->thread, SIGKVMEXIT);
> +       int i;
> +
> +       for (i = 0; i < KVM_NR_CPUS; i++)
> +               if (kvm_cpus[i]) {
> +                       pthread_kill(kvm_cpus[i]->thread, SIGKVMEXIT);
> +                       pthread_join(kvm_cpus[i]->thread, NULL);
> +               }

Wouldn't it be better to pthread_kill() all the CPUs first and then do
the pthread_join()s in a separate loop? That should be faster,
especially if there are lots of CPUs.
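
That is, something along these lines (a sketch of the suggestion, using
the same identifiers as the patch):

	void kvm_cpu__reboot(void)
	{
		int i;

		/* Signal every VCPU thread first... */
		for (i = 0; i < KVM_NR_CPUS; i++)
			if (kvm_cpus[i])
				pthread_kill(kvm_cpus[i]->thread, SIGKVMEXIT);

		/* ...then wait for each of them to exit. */
		for (i = 0; i < KVM_NR_CPUS; i++)
			if (kvm_cpus[i])
				pthread_join(kvm_cpus[i]->thread, NULL);
	}

That way the threads shut down in parallel instead of one at a time.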

>  }
>
>  int kvm_cpu__start(struct kvm_cpu *cpu)
> @@ -442,7 +448,7 @@ int kvm_cpu__start(struct kvm_cpu *cpu)
>        if (cpu->kvm->single_step)
>                kvm_cpu__enable_singlestep(cpu);
>
> -       for (;;) {
> +       while (cpu->is_running) {
>                if (cpu->paused) {
>                        kvm__notify_paused();
>                        cpu->paused = 0;
> --
> 1.7.6
>


* Re: [PATCH] kvm tools: Wait for all VCPU threads to exit while rebooting
From: Sasha Levin @ 2011-08-11 10:14 UTC (permalink / raw)
  To: Pekka Enberg; +Cc: kvm, mingo, asias.hejun, gorcunov

On Thu, 2011-08-11 at 13:02 +0300, Pekka Enberg wrote:
> On Thu, Aug 11, 2011 at 12:47 PM, Sasha Levin <levinsasha928@gmail.com> wrote:
> > This patch changes kvm_cpu__reboot() behaviour to block until all VCPU
> > threads have ended; this lets us assume that the guest is stopped when
> > the function returns.
> >
> > This fixes errors on close caused by releasing the KVM_RUN structure
> > while VCPUs were still running.
> >
> > Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
> > ---
> >  tools/kvm/kvm-cpu.c |   10 ++++++++--
> >  1 files changed, 8 insertions(+), 2 deletions(-)
> >
> > diff --git a/tools/kvm/kvm-cpu.c b/tools/kvm/kvm-cpu.c
> > index 2f5d23c..64c0997 100644
> > --- a/tools/kvm/kvm-cpu.c
> > +++ b/tools/kvm/kvm-cpu.c
> > @@ -421,7 +421,13 @@ static void kvm_cpu__handle_coalesced_mmio(struct kvm_cpu *cpu)
> >
> >  void kvm_cpu__reboot(void)
> >  {
> > -       pthread_kill(kvm_cpus[0]->thread, SIGKVMEXIT);
> > +       int i;
> > +
> > +       for (i = 0; i < KVM_NR_CPUS; i++)
> > +               if (kvm_cpus[i]) {
> > +                       pthread_kill(kvm_cpus[i]->thread, SIGKVMEXIT);
> > +                       pthread_join(kvm_cpus[i]->thread, NULL);
> > +               }
> 
> Wouldn't it be better to first pthread_kill() all CPUs first and do
> pthread_join() in separate loop? That should be faster especially if
> there's lots of CPUs.

Actually, we don't even need a pthread_kill() there. I'll send a new
version :)
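
Something like the following, maybe (an untested guess at the shape, not
the actual v2; the open question is whether a thread blocked inside the
KVM_RUN ioctl notices the flag without being signalled):

	void kvm_cpu__reboot(void)
	{
		int i;

		for (i = 0; i < KVM_NR_CPUS; i++)
			if (kvm_cpus[i]) {
				/* Let the while (cpu->is_running) loop terminate. */
				kvm_cpus[i]->is_running = 0;
				pthread_join(kvm_cpus[i]->thread, NULL);
			}
	}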

-- 

Sasha.


