All of lore.kernel.org
 help / color / mirror / Atom feed
* [Qemu-devel] [PATCH v2 1/2] ivshmem: remove  msix_write_config
@ 2012-11-25  3:51 Liu Ping Fan
  2012-11-25  3:51 ` [Qemu-devel] [PATCH v2 2/2] ivshmem: use irqfd to interrupt among VMs Liu Ping Fan
                   ` (2 more replies)
  0 siblings, 3 replies; 18+ messages in thread
From: Liu Ping Fan @ 2012-11-25  3:51 UTC (permalink / raw)
  To: qemu-devel; +Cc: Jan Kiszka, Cam Macdonell

From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>

This logic has been integrated into pci core, so remove it.

Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
 hw/ivshmem.c |    1 -
 1 files changed, 0 insertions(+), 1 deletions(-)

diff --git a/hw/ivshmem.c b/hw/ivshmem.c
index f6dbb21..7c8630c 100644
--- a/hw/ivshmem.c
+++ b/hw/ivshmem.c
@@ -629,7 +629,6 @@ static void ivshmem_write_config(PCIDevice *pci_dev, uint32_t address,
 				 uint32_t val, int len)
 {
     pci_default_write_config(pci_dev, address, val, len);
-    msix_write_config(pci_dev, address, val, len);
 }
 
 static int pci_ivshmem_init(PCIDevice *dev)
-- 
1.7.4.4

^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [Qemu-devel] [PATCH v2 2/2] ivshmem: use irqfd to interrupt among VMs
  2012-11-25  3:51 [Qemu-devel] [PATCH v2 1/2] ivshmem: remove msix_write_config Liu Ping Fan
@ 2012-11-25  3:51 ` Liu Ping Fan
  2012-11-27 21:48   ` Cam Macdonell
  2012-12-05  5:34   ` Cam Macdonell
  2012-11-28  8:05 ` [Qemu-devel] [PATCH v2 1/2] ivshmem: remove msix_write_config liu ping fan
  2012-12-05  5:38 ` Cam Macdonell
  2 siblings, 2 replies; 18+ messages in thread
From: Liu Ping Fan @ 2012-11-25  3:51 UTC (permalink / raw)
  To: qemu-devel; +Cc: Jan Kiszka, Cam Macdonell

From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>

Using irqfd, we can avoid switching between kernel and user space when
VMs interrupt each other.

Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
 hw/ivshmem.c |   54 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 53 insertions(+), 1 deletions(-)

diff --git a/hw/ivshmem.c b/hw/ivshmem.c
index 7c8630c..5709e89 100644
--- a/hw/ivshmem.c
+++ b/hw/ivshmem.c
@@ -19,6 +19,7 @@
 #include "hw.h"
 #include "pc.h"
 #include "pci.h"
+#include "msi.h"
 #include "msix.h"
 #include "kvm.h"
 #include "migration.h"
@@ -83,6 +84,7 @@ typedef struct IVShmemState {
     uint32_t vectors;
     uint32_t features;
     EventfdEntry *eventfd_table;
+    int *vector_virqs;
 
     Error *migration_blocker;
 
@@ -625,16 +627,62 @@ static int ivshmem_load(QEMUFile* f, void *opaque, int version_id)
     return 0;
 }
 
+static int ivshmem_vector_use(PCIDevice *dev, unsigned vector,
+                                     MSIMessage msg)
+{
+    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
+    int virq;
+    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
+
+    virq = kvm_irqchip_add_msi_route(kvm_state, msg);
+    if (virq >= 0 && kvm_irqchip_add_irqfd_notifier(kvm_state, n, virq) >= 0) {
+        s->vector_virqs[vector] = virq;
+        qemu_chr_add_handlers(s->eventfd_chr[vector], NULL, NULL, NULL, NULL);
+    } else if (virq >= 0) {
+        kvm_irqchip_release_virq(kvm_state, virq);
+        error_report("ivshmem, can not setup irqfd\n");
+        return -1;
+    } else {
+        error_report("ivshmem, no enough msi route to setup irqfd\n");
+        return -1;
+    }
+
+    return 0;
+}
+
+static void ivshmem_vector_release(PCIDevice *dev, unsigned vector)
+{
+    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
+    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
+    int virq = s->vector_virqs[vector];
+
+    if (s->vector_virqs[vector] >= 0) {
+        kvm_irqchip_remove_irqfd_notifier(kvm_state, n, virq);
+        kvm_irqchip_release_virq(kvm_state, virq);
+        s->vector_virqs[vector] = -1;
+    }
+}
+
 static void ivshmem_write_config(PCIDevice *pci_dev, uint32_t address,
 				 uint32_t val, int len)
 {
+    bool is_enabled, was_enabled = msi_enabled(pci_dev);
+
     pci_default_write_config(pci_dev, address, val, len);
+    is_enabled = msi_enabled(pci_dev);
+    if (!was_enabled && is_enabled) {
+        msix_set_vector_notifiers(pci_dev, ivshmem_vector_use,
+            ivshmem_vector_release);
+    } else if (was_enabled && !is_enabled) {
+        msix_unset_vector_notifiers(pci_dev);
+    }
 }
 
 static int pci_ivshmem_init(PCIDevice *dev)
 {
     IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
     uint8_t *pci_conf;
+    int i;
 
     if (s->sizearg == NULL)
         s->ivshmem_size = 4 << 20; /* 4 MB default */
@@ -758,7 +806,10 @@ static int pci_ivshmem_init(PCIDevice *dev)
     }
 
     s->dev.config_write = ivshmem_write_config;
-
+    s->vector_virqs = g_new0(int, s->vectors);
+    for (i = 0; i < s->vectors; i++) {
+        s->vector_virqs[i] = -1;
+    }
     return 0;
 }
 
@@ -770,6 +821,7 @@ static void pci_ivshmem_uninit(PCIDevice *dev)
         migrate_del_blocker(s->migration_blocker);
         error_free(s->migration_blocker);
     }
+    g_free(s->vector_virqs);
 
     memory_region_destroy(&s->ivshmem_mmio);
     memory_region_del_subregion(&s->bar, &s->ivshmem);
-- 
1.7.4.4

^ permalink raw reply related	[flat|nested] 18+ messages in thread

* Re: [Qemu-devel] [PATCH v2 2/2] ivshmem: use irqfd to interrupt among VMs
  2012-11-25  3:51 ` [Qemu-devel] [PATCH v2 2/2] ivshmem: use irqfd to interrupt among VMs Liu Ping Fan
@ 2012-11-27 21:48   ` Cam Macdonell
  2012-11-28  2:53     ` liu ping fan
  2012-12-05  5:34   ` Cam Macdonell
  1 sibling, 1 reply; 18+ messages in thread
From: Cam Macdonell @ 2012-11-27 21:48 UTC (permalink / raw)
  To: Liu Ping Fan; +Cc: Jan Kiszka, qemu-devel

On Sat, Nov 24, 2012 at 8:51 PM, Liu Ping Fan <qemulist@gmail.com> wrote:
> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>
> Using irqfd, so we can avoid switch between kernel and user when
> VMs interrupts each other.

Nice work.  Due to a hardware failure, there will be a small delay in
me being able to test this.  I'll follow up as soon as I can.

>
> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
> ---
>  hw/ivshmem.c |   54 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
>  1 files changed, 53 insertions(+), 1 deletions(-)
>
> diff --git a/hw/ivshmem.c b/hw/ivshmem.c
> index 7c8630c..5709e89 100644
> --- a/hw/ivshmem.c
> +++ b/hw/ivshmem.c
> @@ -19,6 +19,7 @@
>  #include "hw.h"
>  #include "pc.h"
>  #include "pci.h"
> +#include "msi.h"
>  #include "msix.h"
>  #include "kvm.h"
>  #include "migration.h"
> @@ -83,6 +84,7 @@ typedef struct IVShmemState {
>      uint32_t vectors;
>      uint32_t features;
>      EventfdEntry *eventfd_table;
> +    int *vector_virqs;
>
>      Error *migration_blocker;
>
> @@ -625,16 +627,62 @@ static int ivshmem_load(QEMUFile* f, void *opaque, int version_id)
>      return 0;
>  }
>
> +static int ivshmem_vector_use(PCIDevice *dev, unsigned vector,
> +                                     MSIMessage msg)
> +{
> +    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
> +    int virq;
> +    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
> +
> +    virq = kvm_irqchip_add_msi_route(kvm_state, msg);
> +    if (virq >= 0 && kvm_irqchip_add_irqfd_notifier(kvm_state, n, virq) >= 0) {
> +        s->vector_virqs[vector] = virq;
> +        qemu_chr_add_handlers(s->eventfd_chr[vector], NULL, NULL, NULL, NULL);
> +    } else if (virq >= 0) {
> +        kvm_irqchip_release_virq(kvm_state, virq);
> +        error_report("ivshmem, can not setup irqfd\n");
> +        return -1;
> +    } else {
> +        error_report("ivshmem, no enough msi route to setup irqfd\n");
> +        return -1;
> +    }
> +
> +    return 0;
> +}
> +
> +static void ivshmem_vector_release(PCIDevice *dev, unsigned vector)
> +{
> +    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
> +    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
> +    int virq = s->vector_virqs[vector];
> +
> +    if (s->vector_virqs[vector] >= 0) {
> +        kvm_irqchip_remove_irqfd_notifier(kvm_state, n, virq);
> +        kvm_irqchip_release_virq(kvm_state, virq);
> +        s->vector_virqs[vector] = -1;
> +    }
> +}
> +
>  static void ivshmem_write_config(PCIDevice *pci_dev, uint32_t address,
>                                  uint32_t val, int len)
>  {
> +    bool is_enabled, was_enabled = msi_enabled(pci_dev);
> +
>      pci_default_write_config(pci_dev, address, val, len);
> +    is_enabled = msi_enabled(pci_dev);
> +    if (!was_enabled && is_enabled) {
> +        msix_set_vector_notifiers(pci_dev, ivshmem_vector_use,
> +            ivshmem_vector_release);
> +    } else if (was_enabled && !is_enabled) {
> +        msix_unset_vector_notifiers(pci_dev);
> +    }
>  }
>
>  static int pci_ivshmem_init(PCIDevice *dev)
>  {
>      IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
>      uint8_t *pci_conf;
> +    int i;
>
>      if (s->sizearg == NULL)
>          s->ivshmem_size = 4 << 20; /* 4 MB default */
> @@ -758,7 +806,10 @@ static int pci_ivshmem_init(PCIDevice *dev)
>      }
>
>      s->dev.config_write = ivshmem_write_config;
> -
> +    s->vector_virqs = g_new0(int, s->vectors);
> +    for (i = 0; i < s->vectors; i++) {
> +        s->vector_virqs[i] = -1;
> +    }
>      return 0;
>  }
>
> @@ -770,6 +821,7 @@ static void pci_ivshmem_uninit(PCIDevice *dev)
>          migrate_del_blocker(s->migration_blocker);
>          error_free(s->migration_blocker);
>      }
> +    g_free(s->vector_virqs);
>
>      memory_region_destroy(&s->ivshmem_mmio);
>      memory_region_del_subregion(&s->bar, &s->ivshmem);
> --
> 1.7.4.4
>

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [Qemu-devel] [PATCH v2 2/2] ivshmem: use irqfd to interrupt among VMs
  2012-11-27 21:48   ` Cam Macdonell
@ 2012-11-28  2:53     ` liu ping fan
  2012-11-29  4:42       ` Cam Macdonell
  0 siblings, 1 reply; 18+ messages in thread
From: liu ping fan @ 2012-11-28  2:53 UTC (permalink / raw)
  To: Cam Macdonell; +Cc: Jan Kiszka, qemu-devel

On Wed, Nov 28, 2012 at 5:48 AM, Cam Macdonell <cam@cs.ualberta.ca> wrote:
> On Sat, Nov 24, 2012 at 8:51 PM, Liu Ping Fan <qemulist@gmail.com> wrote:
>> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>>
>> Using irqfd, so we can avoid switch between kernel and user when
>> VMs interrupts each other.
>
> Nice work.  Due to a hardware failure, there will be a small delay in
> me being able to test this.  I'll follow up as soon as I can.
>
BTW, where can I find the latest guest code for testing?
I got the guest code from git://gitorious.org/nahanni/guest-code.git,
but it seems outdated; after fixing the unlocked_ioctl and the vm-id bit
positions (the code conflicts with the ivshmem spec), it works (I have
tested it with V1).

Regards,
Pingfan

>>
>> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>> ---
>>  hw/ivshmem.c |   54 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
>>  1 files changed, 53 insertions(+), 1 deletions(-)
>>
>> diff --git a/hw/ivshmem.c b/hw/ivshmem.c
>> index 7c8630c..5709e89 100644
>> --- a/hw/ivshmem.c
>> +++ b/hw/ivshmem.c
>> @@ -19,6 +19,7 @@
>>  #include "hw.h"
>>  #include "pc.h"
>>  #include "pci.h"
>> +#include "msi.h"
>>  #include "msix.h"
>>  #include "kvm.h"
>>  #include "migration.h"
>> @@ -83,6 +84,7 @@ typedef struct IVShmemState {
>>      uint32_t vectors;
>>      uint32_t features;
>>      EventfdEntry *eventfd_table;
>> +    int *vector_virqs;
>>
>>      Error *migration_blocker;
>>
>> @@ -625,16 +627,62 @@ static int ivshmem_load(QEMUFile* f, void *opaque, int version_id)
>>      return 0;
>>  }
>>
>> +static int ivshmem_vector_use(PCIDevice *dev, unsigned vector,
>> +                                     MSIMessage msg)
>> +{
>> +    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
>> +    int virq;
>> +    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
>> +
>> +    virq = kvm_irqchip_add_msi_route(kvm_state, msg);
>> +    if (virq >= 0 && kvm_irqchip_add_irqfd_notifier(kvm_state, n, virq) >= 0) {
>> +        s->vector_virqs[vector] = virq;
>> +        qemu_chr_add_handlers(s->eventfd_chr[vector], NULL, NULL, NULL, NULL);
>> +    } else if (virq >= 0) {
>> +        kvm_irqchip_release_virq(kvm_state, virq);
>> +        error_report("ivshmem, can not setup irqfd\n");
>> +        return -1;
>> +    } else {
>> +        error_report("ivshmem, no enough msi route to setup irqfd\n");
>> +        return -1;
>> +    }
>> +
>> +    return 0;
>> +}
>> +
>> +static void ivshmem_vector_release(PCIDevice *dev, unsigned vector)
>> +{
>> +    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
>> +    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
>> +    int virq = s->vector_virqs[vector];
>> +
>> +    if (s->vector_virqs[vector] >= 0) {
>> +        kvm_irqchip_remove_irqfd_notifier(kvm_state, n, virq);
>> +        kvm_irqchip_release_virq(kvm_state, virq);
>> +        s->vector_virqs[vector] = -1;
>> +    }
>> +}
>> +
>>  static void ivshmem_write_config(PCIDevice *pci_dev, uint32_t address,
>>                                  uint32_t val, int len)
>>  {
>> +    bool is_enabled, was_enabled = msi_enabled(pci_dev);
>> +
>>      pci_default_write_config(pci_dev, address, val, len);
>> +    is_enabled = msi_enabled(pci_dev);
>> +    if (!was_enabled && is_enabled) {
>> +        msix_set_vector_notifiers(pci_dev, ivshmem_vector_use,
>> +            ivshmem_vector_release);
>> +    } else if (was_enabled && !is_enabled) {
>> +        msix_unset_vector_notifiers(pci_dev);
>> +    }
>>  }
>>
>>  static int pci_ivshmem_init(PCIDevice *dev)
>>  {
>>      IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
>>      uint8_t *pci_conf;
>> +    int i;
>>
>>      if (s->sizearg == NULL)
>>          s->ivshmem_size = 4 << 20; /* 4 MB default */
>> @@ -758,7 +806,10 @@ static int pci_ivshmem_init(PCIDevice *dev)
>>      }
>>
>>      s->dev.config_write = ivshmem_write_config;
>> -
>> +    s->vector_virqs = g_new0(int, s->vectors);
>> +    for (i = 0; i < s->vectors; i++) {
>> +        s->vector_virqs[i] = -1;
>> +    }
>>      return 0;
>>  }
>>
>> @@ -770,6 +821,7 @@ static void pci_ivshmem_uninit(PCIDevice *dev)
>>          migrate_del_blocker(s->migration_blocker);
>>          error_free(s->migration_blocker);
>>      }
>> +    g_free(s->vector_virqs);
>>
>>      memory_region_destroy(&s->ivshmem_mmio);
>>      memory_region_del_subregion(&s->bar, &s->ivshmem);
>> --
>> 1.7.4.4
>>

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [Qemu-devel] [PATCH v2 1/2] ivshmem: remove msix_write_config
  2012-11-25  3:51 [Qemu-devel] [PATCH v2 1/2] ivshmem: remove msix_write_config Liu Ping Fan
  2012-11-25  3:51 ` [Qemu-devel] [PATCH v2 2/2] ivshmem: use irqfd to interrupt among VMs Liu Ping Fan
@ 2012-11-28  8:05 ` liu ping fan
  2012-12-05  5:38 ` Cam Macdonell
  2 siblings, 0 replies; 18+ messages in thread
From: liu ping fan @ 2012-11-28  8:05 UTC (permalink / raw)
  To: qemu-devel; +Cc: Jan Kiszka, Cam Macdonell

ping?

On Sun, Nov 25, 2012 at 11:51 AM, Liu Ping Fan <qemulist@gmail.com> wrote:
> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>
> This logic has been integrated into pci core, so remove it.
>
> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
> ---
>  hw/ivshmem.c |    1 -
>  1 files changed, 0 insertions(+), 1 deletions(-)
>
> diff --git a/hw/ivshmem.c b/hw/ivshmem.c
> index f6dbb21..7c8630c 100644
> --- a/hw/ivshmem.c
> +++ b/hw/ivshmem.c
> @@ -629,7 +629,6 @@ static void ivshmem_write_config(PCIDevice *pci_dev, uint32_t address,
>                                  uint32_t val, int len)
>  {
>      pci_default_write_config(pci_dev, address, val, len);
> -    msix_write_config(pci_dev, address, val, len);
>  }
>
>  static int pci_ivshmem_init(PCIDevice *dev)
> --
> 1.7.4.4
>

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [Qemu-devel] [PATCH v2 2/2] ivshmem: use irqfd to interrupt among VMs
  2012-11-28  2:53     ` liu ping fan
@ 2012-11-29  4:42       ` Cam Macdonell
  2012-11-29  8:34         ` liu ping fan
  0 siblings, 1 reply; 18+ messages in thread
From: Cam Macdonell @ 2012-11-29  4:42 UTC (permalink / raw)
  To: liu ping fan; +Cc: Jan Kiszka, qemu-devel

On Tue, Nov 27, 2012 at 7:53 PM, liu ping fan <qemulist@gmail.com> wrote:
> On Wed, Nov 28, 2012 at 5:48 AM, Cam Macdonell <cam@cs.ualberta.ca> wrote:
>> On Sat, Nov 24, 2012 at 8:51 PM, Liu Ping Fan <qemulist@gmail.com> wrote:
>>> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>>>
>>> Using irqfd, so we can avoid switch between kernel and user when
>>> VMs interrupts each other.
>>
>> Nice work.  Due to a hardware failure, there will be a small delay in
>> me being able to test this.  I'll follow up as soon as I can.
>>
> BTW where can I find the latest guest code for test?
> I got the guest code from git://gitorious.org/nahanni/guest-code.git.
> But it seems outdated, after fixing the unlocked_ioctl, and vm-id bits
> position (the codes conflict with ivshmem spec), it works (I have
> tested for V1).

Hello,

Which device driver are you using?

Cam

>
> Regards,
> Pingfan
>
>>>
>>> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>>> ---
>>>  hw/ivshmem.c |   54 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
>>>  1 files changed, 53 insertions(+), 1 deletions(-)
>>>
>>> diff --git a/hw/ivshmem.c b/hw/ivshmem.c
>>> index 7c8630c..5709e89 100644
>>> --- a/hw/ivshmem.c
>>> +++ b/hw/ivshmem.c
>>> @@ -19,6 +19,7 @@
>>>  #include "hw.h"
>>>  #include "pc.h"
>>>  #include "pci.h"
>>> +#include "msi.h"
>>>  #include "msix.h"
>>>  #include "kvm.h"
>>>  #include "migration.h"
>>> @@ -83,6 +84,7 @@ typedef struct IVShmemState {
>>>      uint32_t vectors;
>>>      uint32_t features;
>>>      EventfdEntry *eventfd_table;
>>> +    int *vector_virqs;
>>>
>>>      Error *migration_blocker;
>>>
>>> @@ -625,16 +627,62 @@ static int ivshmem_load(QEMUFile* f, void *opaque, int version_id)
>>>      return 0;
>>>  }
>>>
>>> +static int ivshmem_vector_use(PCIDevice *dev, unsigned vector,
>>> +                                     MSIMessage msg)
>>> +{
>>> +    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
>>> +    int virq;
>>> +    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
>>> +
>>> +    virq = kvm_irqchip_add_msi_route(kvm_state, msg);
>>> +    if (virq >= 0 && kvm_irqchip_add_irqfd_notifier(kvm_state, n, virq) >= 0) {
>>> +        s->vector_virqs[vector] = virq;
>>> +        qemu_chr_add_handlers(s->eventfd_chr[vector], NULL, NULL, NULL, NULL);
>>> +    } else if (virq >= 0) {
>>> +        kvm_irqchip_release_virq(kvm_state, virq);
>>> +        error_report("ivshmem, can not setup irqfd\n");
>>> +        return -1;
>>> +    } else {
>>> +        error_report("ivshmem, no enough msi route to setup irqfd\n");
>>> +        return -1;
>>> +    }
>>> +
>>> +    return 0;
>>> +}
>>> +
>>> +static void ivshmem_vector_release(PCIDevice *dev, unsigned vector)
>>> +{
>>> +    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
>>> +    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
>>> +    int virq = s->vector_virqs[vector];
>>> +
>>> +    if (s->vector_virqs[vector] >= 0) {
>>> +        kvm_irqchip_remove_irqfd_notifier(kvm_state, n, virq);
>>> +        kvm_irqchip_release_virq(kvm_state, virq);
>>> +        s->vector_virqs[vector] = -1;
>>> +    }
>>> +}
>>> +
>>>  static void ivshmem_write_config(PCIDevice *pci_dev, uint32_t address,
>>>                                  uint32_t val, int len)
>>>  {
>>> +    bool is_enabled, was_enabled = msi_enabled(pci_dev);
>>> +
>>>      pci_default_write_config(pci_dev, address, val, len);
>>> +    is_enabled = msi_enabled(pci_dev);
>>> +    if (!was_enabled && is_enabled) {
>>> +        msix_set_vector_notifiers(pci_dev, ivshmem_vector_use,
>>> +            ivshmem_vector_release);
>>> +    } else if (was_enabled && !is_enabled) {
>>> +        msix_unset_vector_notifiers(pci_dev);
>>> +    }
>>>  }
>>>
>>>  static int pci_ivshmem_init(PCIDevice *dev)
>>>  {
>>>      IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
>>>      uint8_t *pci_conf;
>>> +    int i;
>>>
>>>      if (s->sizearg == NULL)
>>>          s->ivshmem_size = 4 << 20; /* 4 MB default */
>>> @@ -758,7 +806,10 @@ static int pci_ivshmem_init(PCIDevice *dev)
>>>      }
>>>
>>>      s->dev.config_write = ivshmem_write_config;
>>> -
>>> +    s->vector_virqs = g_new0(int, s->vectors);
>>> +    for (i = 0; i < s->vectors; i++) {
>>> +        s->vector_virqs[i] = -1;
>>> +    }
>>>      return 0;
>>>  }
>>>
>>> @@ -770,6 +821,7 @@ static void pci_ivshmem_uninit(PCIDevice *dev)
>>>          migrate_del_blocker(s->migration_blocker);
>>>          error_free(s->migration_blocker);
>>>      }
>>> +    g_free(s->vector_virqs);
>>>
>>>      memory_region_destroy(&s->ivshmem_mmio);
>>>      memory_region_del_subregion(&s->bar, &s->ivshmem);
>>> --
>>> 1.7.4.4
>>>
>

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [Qemu-devel] [PATCH v2 2/2] ivshmem: use irqfd to interrupt among VMs
  2012-11-29  4:42       ` Cam Macdonell
@ 2012-11-29  8:34         ` liu ping fan
  2012-11-29 17:33           ` Cam Macdonell
  0 siblings, 1 reply; 18+ messages in thread
From: liu ping fan @ 2012-11-29  8:34 UTC (permalink / raw)
  To: Cam Macdonell; +Cc: Jan Kiszka, qemu-devel

On Thu, Nov 29, 2012 at 12:42 PM, Cam Macdonell <cam@cs.ualberta.ca> wrote:
> On Tue, Nov 27, 2012 at 7:53 PM, liu ping fan <qemulist@gmail.com> wrote:
>> On Wed, Nov 28, 2012 at 5:48 AM, Cam Macdonell <cam@cs.ualberta.ca> wrote:
>>> On Sat, Nov 24, 2012 at 8:51 PM, Liu Ping Fan <qemulist@gmail.com> wrote:
>>>> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>>>>
>>>> Using irqfd, so we can avoid switch between kernel and user when
>>>> VMs interrupts each other.
>>>
>>> Nice work.  Due to a hardware failure, there will be a small delay in
>>> me being able to test this.  I'll follow up as soon as I can.
>>>
>> BTW where can I find the latest guest code for test?
>> I got the guest code from git://gitorious.org/nahanni/guest-code.git.
>> But it seems outdated, after fixing the unlocked_ioctl, and vm-id bits
>> position (the codes conflict with ivshmem spec), it works (I have
>> tested for V1).
>
> Hello,
>
> Which device driver are you using?
>
guest-code/kernel_module/standard/kvm_ivshmem.c

> Cam
>
>>
>> Regards,
>> Pingfan
>>
>>>>
>>>> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>>>> ---
>>>>  hw/ivshmem.c |   54 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
>>>>  1 files changed, 53 insertions(+), 1 deletions(-)
>>>>
>>>> diff --git a/hw/ivshmem.c b/hw/ivshmem.c
>>>> index 7c8630c..5709e89 100644
>>>> --- a/hw/ivshmem.c
>>>> +++ b/hw/ivshmem.c
>>>> @@ -19,6 +19,7 @@
>>>>  #include "hw.h"
>>>>  #include "pc.h"
>>>>  #include "pci.h"
>>>> +#include "msi.h"
>>>>  #include "msix.h"
>>>>  #include "kvm.h"
>>>>  #include "migration.h"
>>>> @@ -83,6 +84,7 @@ typedef struct IVShmemState {
>>>>      uint32_t vectors;
>>>>      uint32_t features;
>>>>      EventfdEntry *eventfd_table;
>>>> +    int *vector_virqs;
>>>>
>>>>      Error *migration_blocker;
>>>>
>>>> @@ -625,16 +627,62 @@ static int ivshmem_load(QEMUFile* f, void *opaque, int version_id)
>>>>      return 0;
>>>>  }
>>>>
>>>> +static int ivshmem_vector_use(PCIDevice *dev, unsigned vector,
>>>> +                                     MSIMessage msg)
>>>> +{
>>>> +    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
>>>> +    int virq;
>>>> +    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
>>>> +
>>>> +    virq = kvm_irqchip_add_msi_route(kvm_state, msg);
>>>> +    if (virq >= 0 && kvm_irqchip_add_irqfd_notifier(kvm_state, n, virq) >= 0) {
>>>> +        s->vector_virqs[vector] = virq;
>>>> +        qemu_chr_add_handlers(s->eventfd_chr[vector], NULL, NULL, NULL, NULL);
>>>> +    } else if (virq >= 0) {
>>>> +        kvm_irqchip_release_virq(kvm_state, virq);
>>>> +        error_report("ivshmem, can not setup irqfd\n");
>>>> +        return -1;
>>>> +    } else {
>>>> +        error_report("ivshmem, no enough msi route to setup irqfd\n");
>>>> +        return -1;
>>>> +    }
>>>> +
>>>> +    return 0;
>>>> +}
>>>> +
>>>> +static void ivshmem_vector_release(PCIDevice *dev, unsigned vector)
>>>> +{
>>>> +    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
>>>> +    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
>>>> +    int virq = s->vector_virqs[vector];
>>>> +
>>>> +    if (s->vector_virqs[vector] >= 0) {
>>>> +        kvm_irqchip_remove_irqfd_notifier(kvm_state, n, virq);
>>>> +        kvm_irqchip_release_virq(kvm_state, virq);
>>>> +        s->vector_virqs[vector] = -1;
>>>> +    }
>>>> +}
>>>> +
>>>>  static void ivshmem_write_config(PCIDevice *pci_dev, uint32_t address,
>>>>                                  uint32_t val, int len)
>>>>  {
>>>> +    bool is_enabled, was_enabled = msi_enabled(pci_dev);
>>>> +
>>>>      pci_default_write_config(pci_dev, address, val, len);
>>>> +    is_enabled = msi_enabled(pci_dev);
>>>> +    if (!was_enabled && is_enabled) {
>>>> +        msix_set_vector_notifiers(pci_dev, ivshmem_vector_use,
>>>> +            ivshmem_vector_release);
>>>> +    } else if (was_enabled && !is_enabled) {
>>>> +        msix_unset_vector_notifiers(pci_dev);
>>>> +    }
>>>>  }
>>>>
>>>>  static int pci_ivshmem_init(PCIDevice *dev)
>>>>  {
>>>>      IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
>>>>      uint8_t *pci_conf;
>>>> +    int i;
>>>>
>>>>      if (s->sizearg == NULL)
>>>>          s->ivshmem_size = 4 << 20; /* 4 MB default */
>>>> @@ -758,7 +806,10 @@ static int pci_ivshmem_init(PCIDevice *dev)
>>>>      }
>>>>
>>>>      s->dev.config_write = ivshmem_write_config;
>>>> -
>>>> +    s->vector_virqs = g_new0(int, s->vectors);
>>>> +    for (i = 0; i < s->vectors; i++) {
>>>> +        s->vector_virqs[i] = -1;
>>>> +    }
>>>>      return 0;
>>>>  }
>>>>
>>>> @@ -770,6 +821,7 @@ static void pci_ivshmem_uninit(PCIDevice *dev)
>>>>          migrate_del_blocker(s->migration_blocker);
>>>>          error_free(s->migration_blocker);
>>>>      }
>>>> +    g_free(s->vector_virqs);
>>>>
>>>>      memory_region_destroy(&s->ivshmem_mmio);
>>>>      memory_region_del_subregion(&s->bar, &s->ivshmem);
>>>> --
>>>> 1.7.4.4
>>>>
>>

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [Qemu-devel] [PATCH v2 2/2] ivshmem: use irqfd to interrupt among VMs
  2012-11-29  8:34         ` liu ping fan
@ 2012-11-29 17:33           ` Cam Macdonell
  2012-12-04 11:10             ` Andrew Jones
  0 siblings, 1 reply; 18+ messages in thread
From: Cam Macdonell @ 2012-11-29 17:33 UTC (permalink / raw)
  To: liu ping fan; +Cc: Jan Kiszka, qemu-devel

On Thu, Nov 29, 2012 at 1:34 AM, liu ping fan <qemulist@gmail.com> wrote:
> On Thu, Nov 29, 2012 at 12:42 PM, Cam Macdonell <cam@cs.ualberta.ca> wrote:
>> On Tue, Nov 27, 2012 at 7:53 PM, liu ping fan <qemulist@gmail.com> wrote:
>>> On Wed, Nov 28, 2012 at 5:48 AM, Cam Macdonell <cam@cs.ualberta.ca> wrote:
>>>> On Sat, Nov 24, 2012 at 8:51 PM, Liu Ping Fan <qemulist@gmail.com> wrote:
>>>>> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>>>>>
>>>>> Using irqfd, so we can avoid switch between kernel and user when
>>>>> VMs interrupts each other.
>>>>
>>>> Nice work.  Due to a hardware failure, there will be a small delay in
>>>> me being able to test this.  I'll follow up as soon as I can.
>>>>
>>> BTW where can I find the latest guest code for test?
>>> I got the guest code from git://gitorious.org/nahanni/guest-code.git.
>>> But it seems outdated, after fixing the unlocked_ioctl, and vm-id bits
>>> position (the codes conflict with ivshmem spec), it works (I have
>>> tested for V1).
>>
>> Hello,
>>
>> Which device driver are you using?
>>
> guest-code/kernel_module/standard/kvm_ivshmem.c

The uio driver is the recommended one, however if you want to use the
kvm_ivshmem one and have it working, then feel free to continue.

I had deleted it from the repo, but some users had based solutions off
it, so I added it back.

btw, my hardware issue has been resolved, so I'll get to testing your
patch soon.

Sincerely,
Cam

>
>> Cam
>>
>>>
>>> Regards,
>>> Pingfan
>>>
>>>>>
>>>>> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>>>>> ---
>>>>>  hw/ivshmem.c |   54 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
>>>>>  1 files changed, 53 insertions(+), 1 deletions(-)
>>>>>
>>>>> diff --git a/hw/ivshmem.c b/hw/ivshmem.c
>>>>> index 7c8630c..5709e89 100644
>>>>> --- a/hw/ivshmem.c
>>>>> +++ b/hw/ivshmem.c
>>>>> @@ -19,6 +19,7 @@
>>>>>  #include "hw.h"
>>>>>  #include "pc.h"
>>>>>  #include "pci.h"
>>>>> +#include "msi.h"
>>>>>  #include "msix.h"
>>>>>  #include "kvm.h"
>>>>>  #include "migration.h"
>>>>> @@ -83,6 +84,7 @@ typedef struct IVShmemState {
>>>>>      uint32_t vectors;
>>>>>      uint32_t features;
>>>>>      EventfdEntry *eventfd_table;
>>>>> +    int *vector_virqs;
>>>>>
>>>>>      Error *migration_blocker;
>>>>>
>>>>> @@ -625,16 +627,62 @@ static int ivshmem_load(QEMUFile* f, void *opaque, int version_id)
>>>>>      return 0;
>>>>>  }
>>>>>
>>>>> +static int ivshmem_vector_use(PCIDevice *dev, unsigned vector,
>>>>> +                                     MSIMessage msg)
>>>>> +{
>>>>> +    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
>>>>> +    int virq;
>>>>> +    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
>>>>> +
>>>>> +    virq = kvm_irqchip_add_msi_route(kvm_state, msg);
>>>>> +    if (virq >= 0 && kvm_irqchip_add_irqfd_notifier(kvm_state, n, virq) >= 0) {
>>>>> +        s->vector_virqs[vector] = virq;
>>>>> +        qemu_chr_add_handlers(s->eventfd_chr[vector], NULL, NULL, NULL, NULL);
>>>>> +    } else if (virq >= 0) {
>>>>> +        kvm_irqchip_release_virq(kvm_state, virq);
>>>>> +        error_report("ivshmem, can not setup irqfd\n");
>>>>> +        return -1;
>>>>> +    } else {
>>>>> +        error_report("ivshmem, no enough msi route to setup irqfd\n");
>>>>> +        return -1;
>>>>> +    }
>>>>> +
>>>>> +    return 0;
>>>>> +}
>>>>> +
>>>>> +static void ivshmem_vector_release(PCIDevice *dev, unsigned vector)
>>>>> +{
>>>>> +    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
>>>>> +    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
>>>>> +    int virq = s->vector_virqs[vector];
>>>>> +
>>>>> +    if (s->vector_virqs[vector] >= 0) {
>>>>> +        kvm_irqchip_remove_irqfd_notifier(kvm_state, n, virq);
>>>>> +        kvm_irqchip_release_virq(kvm_state, virq);
>>>>> +        s->vector_virqs[vector] = -1;
>>>>> +    }
>>>>> +}
>>>>> +
>>>>>  static void ivshmem_write_config(PCIDevice *pci_dev, uint32_t address,
>>>>>                                  uint32_t val, int len)
>>>>>  {
>>>>> +    bool is_enabled, was_enabled = msi_enabled(pci_dev);
>>>>> +
>>>>>      pci_default_write_config(pci_dev, address, val, len);
>>>>> +    is_enabled = msi_enabled(pci_dev);
>>>>> +    if (!was_enabled && is_enabled) {
>>>>> +        msix_set_vector_notifiers(pci_dev, ivshmem_vector_use,
>>>>> +            ivshmem_vector_release);
>>>>> +    } else if (was_enabled && !is_enabled) {
>>>>> +        msix_unset_vector_notifiers(pci_dev);
>>>>> +    }
>>>>>  }
>>>>>
>>>>>  static int pci_ivshmem_init(PCIDevice *dev)
>>>>>  {
>>>>>      IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
>>>>>      uint8_t *pci_conf;
>>>>> +    int i;
>>>>>
>>>>>      if (s->sizearg == NULL)
>>>>>          s->ivshmem_size = 4 << 20; /* 4 MB default */
>>>>> @@ -758,7 +806,10 @@ static int pci_ivshmem_init(PCIDevice *dev)
>>>>>      }
>>>>>
>>>>>      s->dev.config_write = ivshmem_write_config;
>>>>> -
>>>>> +    s->vector_virqs = g_new0(int, s->vectors);
>>>>> +    for (i = 0; i < s->vectors; i++) {
>>>>> +        s->vector_virqs[i] = -1;
>>>>> +    }
>>>>>      return 0;
>>>>>  }
>>>>>
>>>>> @@ -770,6 +821,7 @@ static void pci_ivshmem_uninit(PCIDevice *dev)
>>>>>          migrate_del_blocker(s->migration_blocker);
>>>>>          error_free(s->migration_blocker);
>>>>>      }
>>>>> +    g_free(s->vector_virqs);
>>>>>
>>>>>      memory_region_destroy(&s->ivshmem_mmio);
>>>>>      memory_region_del_subregion(&s->bar, &s->ivshmem);
>>>>> --
>>>>> 1.7.4.4
>>>>>
>>>
>

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [Qemu-devel] [PATCH v2 2/2] ivshmem: use irqfd to interrupt among VMs
  2012-11-29 17:33           ` Cam Macdonell
@ 2012-12-04 11:10             ` Andrew Jones
  2012-12-05  3:17               ` Cam Macdonell
  0 siblings, 1 reply; 18+ messages in thread
From: Andrew Jones @ 2012-12-04 11:10 UTC (permalink / raw)
  To: Cam Macdonell; +Cc: Jan Kiszka, qemu-devel, liu ping fan



----- Original Message -----
> On Thu, Nov 29, 2012 at 1:34 AM, liu ping fan <qemulist@gmail.com>
> wrote:
> > On Thu, Nov 29, 2012 at 12:42 PM, Cam Macdonell
> > <cam@cs.ualberta.ca> wrote:
> >> On Tue, Nov 27, 2012 at 7:53 PM, liu ping fan <qemulist@gmail.com>
> >> wrote:
> >>> On Wed, Nov 28, 2012 at 5:48 AM, Cam Macdonell
> >>> <cam@cs.ualberta.ca> wrote:
> >>>> On Sat, Nov 24, 2012 at 8:51 PM, Liu Ping Fan
> >>>> <qemulist@gmail.com> wrote:
> >>>>> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
> >>>>>
> >>>>> Using irqfd, so we can avoid switch between kernel and user
> >>>>> when
> >>>>> VMs interrupts each other.
> >>>>
> >>>> Nice work.  Due to a hardware failure, there will be a small
> >>>> delay in
> >>>> me being able to test this.  I'll follow up as soon as I can.
> >>>>
> >>> BTW where can I find the latest guest code for test?
> >>> I got the guest code from
> >>> git://gitorious.org/nahanni/guest-code.git.
> >>> But it seems outdated, after fixing the unlocked_ioctl, and vm-id
> >>> bits
> >>> position (the codes conflict with ivshmem spec), it works (I have
> >>> tested for V1).
> >>
> >> Hello,
> >>
> >> Which device driver are you using?
> >>
> > guest-code/kernel_module/standard/kvm_ivshmem.c
> 
> The uio driver is the recommended one, however if you want to use the
> kvm_ivshmem one and have it working, then feel free to continue.

If the uio driver is the recommended one, then can you please post it
to lkml? It should be integrated into drivers/virt with an appropriate
Kconfig update.

Thanks,
Drew

> 
> I had deleted it from the repo, but some users had based solutions
> off
> it, so I added it back.
> 
> btw, my hardware issue has been resolved, so I'll get to testing your
> patch soon.
> 
> Sincerely,
> Cam
> 
> >
> >> Cam
> >>
> >>>
> >>> Regards,
> >>> Pingfan
> >>>
> >>>>>
> >>>>> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
> >>>>> ---
> >>>>>  hw/ivshmem.c |   54
> >>>>>  +++++++++++++++++++++++++++++++++++++++++++++++++++++-
> >>>>>  1 files changed, 53 insertions(+), 1 deletions(-)
> >>>>>
> >>>>> diff --git a/hw/ivshmem.c b/hw/ivshmem.c
> >>>>> index 7c8630c..5709e89 100644
> >>>>> --- a/hw/ivshmem.c
> >>>>> +++ b/hw/ivshmem.c
> >>>>> @@ -19,6 +19,7 @@
> >>>>>  #include "hw.h"
> >>>>>  #include "pc.h"
> >>>>>  #include "pci.h"
> >>>>> +#include "msi.h"
> >>>>>  #include "msix.h"
> >>>>>  #include "kvm.h"
> >>>>>  #include "migration.h"
> >>>>> @@ -83,6 +84,7 @@ typedef struct IVShmemState {
> >>>>>      uint32_t vectors;
> >>>>>      uint32_t features;
> >>>>>      EventfdEntry *eventfd_table;
> >>>>> +    int *vector_virqs;
> >>>>>
> >>>>>      Error *migration_blocker;
> >>>>>
> >>>>> @@ -625,16 +627,62 @@ static int ivshmem_load(QEMUFile* f, void
> >>>>> *opaque, int version_id)
> >>>>>      return 0;
> >>>>>  }
> >>>>>
> >>>>> +static int ivshmem_vector_use(PCIDevice *dev, unsigned vector,
> >>>>> +                                     MSIMessage msg)
> >>>>> +{
> >>>>> +    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
> >>>>> +    int virq;
> >>>>> +    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
> >>>>> +
> >>>>> +    virq = kvm_irqchip_add_msi_route(kvm_state, msg);
> >>>>> +    if (virq >= 0 && kvm_irqchip_add_irqfd_notifier(kvm_state,
> >>>>> n, virq) >= 0) {
> >>>>> +        s->vector_virqs[vector] = virq;
> >>>>> +        qemu_chr_add_handlers(s->eventfd_chr[vector], NULL,
> >>>>> NULL, NULL, NULL);
> >>>>> +    } else if (virq >= 0) {
> >>>>> +        kvm_irqchip_release_virq(kvm_state, virq);
> >>>>> +        error_report("ivshmem, can not setup irqfd\n");
> >>>>> +        return -1;
> >>>>> +    } else {
> >>>>> +        error_report("ivshmem, no enough msi route to setup
> >>>>> irqfd\n");
> >>>>> +        return -1;
> >>>>> +    }
> >>>>> +
> >>>>> +    return 0;
> >>>>> +}
> >>>>> +
> >>>>> +static void ivshmem_vector_release(PCIDevice *dev, unsigned
> >>>>> vector)
> >>>>> +{
> >>>>> +    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
> >>>>> +    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
> >>>>> +    int virq = s->vector_virqs[vector];
> >>>>> +
> >>>>> +    if (s->vector_virqs[vector] >= 0) {
> >>>>> +        kvm_irqchip_remove_irqfd_notifier(kvm_state, n, virq);
> >>>>> +        kvm_irqchip_release_virq(kvm_state, virq);
> >>>>> +        s->vector_virqs[vector] = -1;
> >>>>> +    }
> >>>>> +}
> >>>>> +
> >>>>>  static void ivshmem_write_config(PCIDevice *pci_dev, uint32_t
> >>>>>  address,
> >>>>>                                  uint32_t val, int len)
> >>>>>  {
> >>>>> +    bool is_enabled, was_enabled = msi_enabled(pci_dev);
> >>>>> +
> >>>>>      pci_default_write_config(pci_dev, address, val, len);
> >>>>> +    is_enabled = msi_enabled(pci_dev);
> >>>>> +    if (!was_enabled && is_enabled) {
> >>>>> +        msix_set_vector_notifiers(pci_dev, ivshmem_vector_use,
> >>>>> +            ivshmem_vector_release);
> >>>>> +    } else if (was_enabled && !is_enabled) {
> >>>>> +        msix_unset_vector_notifiers(pci_dev);
> >>>>> +    }
> >>>>>  }
> >>>>>
> >>>>>  static int pci_ivshmem_init(PCIDevice *dev)
> >>>>>  {
> >>>>>      IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
> >>>>>      uint8_t *pci_conf;
> >>>>> +    int i;
> >>>>>
> >>>>>      if (s->sizearg == NULL)
> >>>>>          s->ivshmem_size = 4 << 20; /* 4 MB default */
> >>>>> @@ -758,7 +806,10 @@ static int pci_ivshmem_init(PCIDevice
> >>>>> *dev)
> >>>>>      }
> >>>>>
> >>>>>      s->dev.config_write = ivshmem_write_config;
> >>>>> -
> >>>>> +    s->vector_virqs = g_new0(int, s->vectors);
> >>>>> +    for (i = 0; i < s->vectors; i++) {
> >>>>> +        s->vector_virqs[i] = -1;
> >>>>> +    }
> >>>>>      return 0;
> >>>>>  }
> >>>>>
> >>>>> @@ -770,6 +821,7 @@ static void pci_ivshmem_uninit(PCIDevice
> >>>>> *dev)
> >>>>>          migrate_del_blocker(s->migration_blocker);
> >>>>>          error_free(s->migration_blocker);
> >>>>>      }
> >>>>> +    g_free(s->vector_virqs);
> >>>>>
> >>>>>      memory_region_destroy(&s->ivshmem_mmio);
> >>>>>      memory_region_del_subregion(&s->bar, &s->ivshmem);
> >>>>> --
> >>>>> 1.7.4.4
> >>>>>
> >>>
> >
> 
> 

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [Qemu-devel] [PATCH v2 2/2] ivshmem: use irqfd to interrupt among VMs
  2012-12-04 11:10             ` Andrew Jones
@ 2012-12-05  3:17               ` Cam Macdonell
  2012-12-05  9:25                 ` Andrew Jones
  0 siblings, 1 reply; 18+ messages in thread
From: Cam Macdonell @ 2012-12-05  3:17 UTC (permalink / raw)
  To: Andrew Jones; +Cc: Jan Kiszka, qemu-devel, liu ping fan

On Tue, Dec 4, 2012 at 4:10 AM, Andrew Jones <drjones@redhat.com> wrote:
>
>
> ----- Original Message -----
>> On Thu, Nov 29, 2012 at 1:34 AM, liu ping fan <qemulist@gmail.com>
>> wrote:
>> > On Thu, Nov 29, 2012 at 12:42 PM, Cam Macdonell
>> > <cam@cs.ualberta.ca> wrote:
>> >> On Tue, Nov 27, 2012 at 7:53 PM, liu ping fan <qemulist@gmail.com>
>> >> wrote:
>> >>> On Wed, Nov 28, 2012 at 5:48 AM, Cam Macdonell
>> >>> <cam@cs.ualberta.ca> wrote:
>> >>>> On Sat, Nov 24, 2012 at 8:51 PM, Liu Ping Fan
>> >>>> <qemulist@gmail.com> wrote:
>> >>>>> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>> >>>>>
>> >>>>> Using irqfd, so we can avoid switch between kernel and user
>> >>>>> when
>> >>>>> VMs interrupts each other.
>> >>>>
>> >>>> Nice work.  Due to a hardware failure, there will be a small
>> >>>> delay in
>> >>>> me being able to test this.  I'll follow up as soon as I can.
>> >>>>
>> >>> BTW where can I find the latest guest code for test?
>> >>> I got the guest code from
>> >>> git://gitorious.org/nahanni/guest-code.git.
>> >>> But it seems outdated, after fixing the unlocked_ioctl, and vm-id
>> >>> bits
>> >>> position (the codes conflict with ivshmem spec), it works (I have
>> >>> tested for V1).
>> >>
>> >> Hello,
>> >>
>> >> Which device driver are you using?
>> >>
>> > guest-code/kernel_module/standard/kvm_ivshmem.c
>>
>> The uio driver is the recommended one, however if you want to use the
>> kvm_ivshmem one and have it working, then feel free to continue.
>
> If the uio driver is the recommended one, then can you please post it
> to lkml? It should be integrated into drivers/virt with an appropriate
> Kconfig update.
>

Sure.  Should it go under drivers/virt or drivers/uio?  It seems the
uio drivers all get grouped together.

Thanks,
Cam

> Thanks,
> Drew
>
>>
>> I had deleted it from the repo, but some users had based solutions
>> off
>> it, so I added it back.
>>
>> btw, my hardware issue has been resolved, so I'll get to testing your
>> patch soon.
>>
>> Sincerely,
>> Cam
>>
>> >
>> >> Cam
>> >>
>> >>>
>> >>> Regards,
>> >>> Pingfan
>> >>>
>> >>>>>
>> >>>>> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>> >>>>> ---
>> >>>>>  hw/ivshmem.c |   54
>> >>>>>  +++++++++++++++++++++++++++++++++++++++++++++++++++++-
>> >>>>>  1 files changed, 53 insertions(+), 1 deletions(-)
>> >>>>>
>> >>>>> diff --git a/hw/ivshmem.c b/hw/ivshmem.c
>> >>>>> index 7c8630c..5709e89 100644
>> >>>>> --- a/hw/ivshmem.c
>> >>>>> +++ b/hw/ivshmem.c
>> >>>>> @@ -19,6 +19,7 @@
>> >>>>>  #include "hw.h"
>> >>>>>  #include "pc.h"
>> >>>>>  #include "pci.h"
>> >>>>> +#include "msi.h"
>> >>>>>  #include "msix.h"
>> >>>>>  #include "kvm.h"
>> >>>>>  #include "migration.h"
>> >>>>> @@ -83,6 +84,7 @@ typedef struct IVShmemState {
>> >>>>>      uint32_t vectors;
>> >>>>>      uint32_t features;
>> >>>>>      EventfdEntry *eventfd_table;
>> >>>>> +    int *vector_virqs;
>> >>>>>
>> >>>>>      Error *migration_blocker;
>> >>>>>
>> >>>>> @@ -625,16 +627,62 @@ static int ivshmem_load(QEMUFile* f, void
>> >>>>> *opaque, int version_id)
>> >>>>>      return 0;
>> >>>>>  }
>> >>>>>
>> >>>>> +static int ivshmem_vector_use(PCIDevice *dev, unsigned vector,
>> >>>>> +                                     MSIMessage msg)
>> >>>>> +{
>> >>>>> +    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
>> >>>>> +    int virq;
>> >>>>> +    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
>> >>>>> +
>> >>>>> +    virq = kvm_irqchip_add_msi_route(kvm_state, msg);
>> >>>>> +    if (virq >= 0 && kvm_irqchip_add_irqfd_notifier(kvm_state,
>> >>>>> n, virq) >= 0) {
>> >>>>> +        s->vector_virqs[vector] = virq;
>> >>>>> +        qemu_chr_add_handlers(s->eventfd_chr[vector], NULL,
>> >>>>> NULL, NULL, NULL);
>> >>>>> +    } else if (virq >= 0) {
>> >>>>> +        kvm_irqchip_release_virq(kvm_state, virq);
>> >>>>> +        error_report("ivshmem, can not setup irqfd\n");
>> >>>>> +        return -1;
>> >>>>> +    } else {
>> >>>>> +        error_report("ivshmem, no enough msi route to setup
>> >>>>> irqfd\n");
>> >>>>> +        return -1;
>> >>>>> +    }
>> >>>>> +
>> >>>>> +    return 0;
>> >>>>> +}
>> >>>>> +
>> >>>>> +static void ivshmem_vector_release(PCIDevice *dev, unsigned
>> >>>>> vector)
>> >>>>> +{
>> >>>>> +    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
>> >>>>> +    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
>> >>>>> +    int virq = s->vector_virqs[vector];
>> >>>>> +
>> >>>>> +    if (s->vector_virqs[vector] >= 0) {
>> >>>>> +        kvm_irqchip_remove_irqfd_notifier(kvm_state, n, virq);
>> >>>>> +        kvm_irqchip_release_virq(kvm_state, virq);
>> >>>>> +        s->vector_virqs[vector] = -1;
>> >>>>> +    }
>> >>>>> +}
>> >>>>> +
>> >>>>>  static void ivshmem_write_config(PCIDevice *pci_dev, uint32_t
>> >>>>>  address,
>> >>>>>                                  uint32_t val, int len)
>> >>>>>  {
>> >>>>> +    bool is_enabled, was_enabled = msi_enabled(pci_dev);
>> >>>>> +
>> >>>>>      pci_default_write_config(pci_dev, address, val, len);
>> >>>>> +    is_enabled = msi_enabled(pci_dev);
>> >>>>> +    if (!was_enabled && is_enabled) {
>> >>>>> +        msix_set_vector_notifiers(pci_dev, ivshmem_vector_use,
>> >>>>> +            ivshmem_vector_release);
>> >>>>> +    } else if (was_enabled && !is_enabled) {
>> >>>>> +        msix_unset_vector_notifiers(pci_dev);
>> >>>>> +    }
>> >>>>>  }
>> >>>>>
>> >>>>>  static int pci_ivshmem_init(PCIDevice *dev)
>> >>>>>  {
>> >>>>>      IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
>> >>>>>      uint8_t *pci_conf;
>> >>>>> +    int i;
>> >>>>>
>> >>>>>      if (s->sizearg == NULL)
>> >>>>>          s->ivshmem_size = 4 << 20; /* 4 MB default */
>> >>>>> @@ -758,7 +806,10 @@ static int pci_ivshmem_init(PCIDevice
>> >>>>> *dev)
>> >>>>>      }
>> >>>>>
>> >>>>>      s->dev.config_write = ivshmem_write_config;
>> >>>>> -
>> >>>>> +    s->vector_virqs = g_new0(int, s->vectors);
>> >>>>> +    for (i = 0; i < s->vectors; i++) {
>> >>>>> +        s->vector_virqs[i] = -1;
>> >>>>> +    }
>> >>>>>      return 0;
>> >>>>>  }
>> >>>>>
>> >>>>> @@ -770,6 +821,7 @@ static void pci_ivshmem_uninit(PCIDevice
>> >>>>> *dev)
>> >>>>>          migrate_del_blocker(s->migration_blocker);
>> >>>>>          error_free(s->migration_blocker);
>> >>>>>      }
>> >>>>> +    g_free(s->vector_virqs);
>> >>>>>
>> >>>>>      memory_region_destroy(&s->ivshmem_mmio);
>> >>>>>      memory_region_del_subregion(&s->bar, &s->ivshmem);
>> >>>>> --
>> >>>>> 1.7.4.4
>> >>>>>
>> >>>
>> >
>>
>>
>

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [Qemu-devel] [PATCH v2 2/2] ivshmem: use irqfd to interrupt among VMs
  2012-11-25  3:51 ` [Qemu-devel] [PATCH v2 2/2] ivshmem: use irqfd to interrupt among VMs Liu Ping Fan
  2012-11-27 21:48   ` Cam Macdonell
@ 2012-12-05  5:34   ` Cam Macdonell
  2012-12-05  8:50     ` Jan Kiszka
  1 sibling, 1 reply; 18+ messages in thread
From: Cam Macdonell @ 2012-12-05  5:34 UTC (permalink / raw)
  To: Liu Ping Fan; +Cc: Jan Kiszka, qemu-devel

On Sat, Nov 24, 2012 at 8:51 PM, Liu Ping Fan <qemulist@gmail.com> wrote:
> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>
> Using irqfd, so we can avoid switch between kernel and user when
> VMs interrupts each other.
>

Hi Liu Ping,

With this patch applied I was still seeing transitions to user-level
on the receipt of an msi interrupt.

uncomment DEBUG_IVSHMEM in hw/ivshmem.c (and fix one compile error in
the debug statement :) )

IVSHMEM: msix initialized (1 vectors)
...
IVSHMEM: interrupt on vector 0x7f2971b1d8d0 0
IVSHMEM: interrupt on vector 0x7f2971b1d8d0 0

if we're using irqfd, this should be avoided.  Here's my command-line:

-device ivshmem,chardev=nahanni,msi=on,ioeventfd=on,size=2048,use64=1,role=peer

There are two issues as I see it:
1) irqfd is not being enabled in my tests
2) the defaults handlers are still being added

One difference is that I'm using the UIO driver, which enables PCI
using pci_enable_msix as follows

pci_enable_msix(ivs_info->dev, ivs_info->msix_entries,
                    ivs_info->nvectors);

and succeeds

[    2.651253] uio_ivshmem 0000:00:04.0: irq 43 for MSI/MSI-X
[    2.651326] MSI-X enabled

(continued below)

> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
> ---
>  hw/ivshmem.c |   54 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
>  1 files changed, 53 insertions(+), 1 deletions(-)
>
> diff --git a/hw/ivshmem.c b/hw/ivshmem.c
> index 7c8630c..5709e89 100644
> --- a/hw/ivshmem.c
> +++ b/hw/ivshmem.c
> @@ -19,6 +19,7 @@
>  #include "hw.h"
>  #include "pc.h"
>  #include "pci.h"
> +#include "msi.h"
>  #include "msix.h"
>  #include "kvm.h"
>  #include "migration.h"
> @@ -83,6 +84,7 @@ typedef struct IVShmemState {
>      uint32_t vectors;
>      uint32_t features;
>      EventfdEntry *eventfd_table;
> +    int *vector_virqs;
>
>      Error *migration_blocker;
>
> @@ -625,16 +627,62 @@ static int ivshmem_load(QEMUFile* f, void *opaque, int version_id)
>      return 0;
>  }
>
> +static int ivshmem_vector_use(PCIDevice *dev, unsigned vector,
> +                                     MSIMessage msg)
> +{
> +    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
> +    int virq;
> +    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
> +
> +    virq = kvm_irqchip_add_msi_route(kvm_state, msg);
> +    if (virq >= 0 && kvm_irqchip_add_irqfd_notifier(kvm_state, n, virq) >= 0) {
> +        s->vector_virqs[vector] = virq;
> +        qemu_chr_add_handlers(s->eventfd_chr[vector], NULL, NULL, NULL, NULL);
> +    } else if (virq >= 0) {
> +        kvm_irqchip_release_virq(kvm_state, virq);
> +        error_report("ivshmem, can not setup irqfd\n");
> +        return -1;
> +    } else {
> +        error_report("ivshmem, no enough msi route to setup irqfd\n");
> +        return -1;
> +    }
> +
> +    return 0;
> +}
> +
> +static void ivshmem_vector_release(PCIDevice *dev, unsigned vector)
> +{
> +    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
> +    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
> +    int virq = s->vector_virqs[vector];
> +
> +    if (s->vector_virqs[vector] >= 0) {
> +        kvm_irqchip_remove_irqfd_notifier(kvm_state, n, virq);
> +        kvm_irqchip_release_virq(kvm_state, virq);
> +        s->vector_virqs[vector] = -1;
> +    }
> +}
> +
>  static void ivshmem_write_config(PCIDevice *pci_dev, uint32_t address,
>                                  uint32_t val, int len)
>  {
> +    bool is_enabled, was_enabled = msi_enabled(pci_dev);
> +
>      pci_default_write_config(pci_dev, address, val, len);
> +    is_enabled = msi_enabled(pci_dev);

Problem 1)  in my tests is_enabled is always 0, so I don't think the
irqfds are getting setup

> +    if (!was_enabled && is_enabled) {
> +        msix_set_vector_notifiers(pci_dev, ivshmem_vector_use,
> +            ivshmem_vector_release);
> +    } else if (was_enabled && !is_enabled) {
> +        msix_unset_vector_notifiers(pci_dev);
> +    }
>  }
>
>  static int pci_ivshmem_init(PCIDevice *dev)
>  {
>      IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
>      uint8_t *pci_conf;
> +    int i;
>
>      if (s->sizearg == NULL)
>          s->ivshmem_size = 4 << 20; /* 4 MB default */
> @@ -758,7 +806,10 @@ static int pci_ivshmem_init(PCIDevice *dev)
>      }
>
>      s->dev.config_write = ivshmem_write_config;
> -
> +    s->vector_virqs = g_new0(int, s->vectors);
> +    for (i = 0; i < s->vectors; i++) {
> +        s->vector_virqs[i] = -1;
> +    }
>      return 0;
>  }
>
> @@ -770,6 +821,7 @@ static void pci_ivshmem_uninit(PCIDevice *dev)
>          migrate_del_blocker(s->migration_blocker);
>          error_free(s->migration_blocker);
>      }
> +    g_free(s->vector_virqs);
>
>      memory_region_destroy(&s->ivshmem_mmio);
>      memory_region_del_subregion(&s->bar, &s->ivshmem);
> --
> 1.7.4.4
>

Problem 2)
We'll also have to not add the handlers as below if irqfd is present
otherwise we'll get double interrupts, so we'll have to add a check
here too.

    /* if MSI is supported we need multiple interrupts */
    if (!ivshmem_has_feature(s, IVSHMEM_MSI)) {
        s->eventfd_table[vector].pdev = &s->dev;
        s->eventfd_table[vector].vector = vector;

        qemu_chr_add_handlers(chr, ivshmem_can_receive, fake_irqfd,
                      ivshmem_event, &s->eventfd_table[vector]);
    } else {
        qemu_chr_add_handlers(chr, ivshmem_can_receive, ivshmem_receive,
                      ivshmem_event, s);
    }

Sincerely,
Cam

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [Qemu-devel] [PATCH v2 1/2] ivshmem: remove msix_write_config
  2012-11-25  3:51 [Qemu-devel] [PATCH v2 1/2] ivshmem: remove msix_write_config Liu Ping Fan
  2012-11-25  3:51 ` [Qemu-devel] [PATCH v2 2/2] ivshmem: use irqfd to interrupt among VMs Liu Ping Fan
  2012-11-28  8:05 ` [Qemu-devel] [PATCH v2 1/2] ivshmem: remove msix_write_config liu ping fan
@ 2012-12-05  5:38 ` Cam Macdonell
  2 siblings, 0 replies; 18+ messages in thread
From: Cam Macdonell @ 2012-12-05  5:38 UTC (permalink / raw)
  To: Liu Ping Fan; +Cc: Jan Kiszka, qemu-devel

On Sat, Nov 24, 2012 at 8:51 PM, Liu Ping Fan <qemulist@gmail.com> wrote:
> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>
> This logic has been integrated into pci core, so remove it.
>
> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
Signed-off-by: Cam Macdonell <cam@cs.ualberta.ca>
> ---
>  hw/ivshmem.c |    1 -
>  1 files changed, 0 insertions(+), 1 deletions(-)
>
> diff --git a/hw/ivshmem.c b/hw/ivshmem.c
> index f6dbb21..7c8630c 100644
> --- a/hw/ivshmem.c
> +++ b/hw/ivshmem.c
> @@ -629,7 +629,6 @@ static void ivshmem_write_config(PCIDevice *pci_dev, uint32_t address,
>                                  uint32_t val, int len)
>  {
>      pci_default_write_config(pci_dev, address, val, len);
> -    msix_write_config(pci_dev, address, val, len);
>  }
>
>  static int pci_ivshmem_init(PCIDevice *dev)
> --
> 1.7.4.4
>

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [Qemu-devel] [PATCH v2 2/2] ivshmem: use irqfd to interrupt among VMs
  2012-12-05  5:34   ` Cam Macdonell
@ 2012-12-05  8:50     ` Jan Kiszka
  2012-12-06  5:10       ` Cam Macdonell
  0 siblings, 1 reply; 18+ messages in thread
From: Jan Kiszka @ 2012-12-05  8:50 UTC (permalink / raw)
  To: Cam Macdonell; +Cc: Liu Ping Fan, qemu-devel

On 2012-12-05 06:34, Cam Macdonell wrote:
>>  static void ivshmem_write_config(PCIDevice *pci_dev, uint32_t address,
>>                                  uint32_t val, int len)
>>  {
>> +    bool is_enabled, was_enabled = msi_enabled(pci_dev);
>> +
>>      pci_default_write_config(pci_dev, address, val, len);
>> +    is_enabled = msi_enabled(pci_dev);
> 
> Problem 1)  in my tests is_enabled is always 0, so I don't think the
> irqfds are getting setup

You likely want to call msix_enabled here.

Jan

-- 
Siemens AG, Corporate Technology, CT RTC ITP SDP-DE
Corporate Competence Center Embedded Linux

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [Qemu-devel] [PATCH v2 2/2] ivshmem: use irqfd to interrupt among VMs
  2012-12-05  3:17               ` Cam Macdonell
@ 2012-12-05  9:25                 ` Andrew Jones
  0 siblings, 0 replies; 18+ messages in thread
From: Andrew Jones @ 2012-12-05  9:25 UTC (permalink / raw)
  To: Cam Macdonell; +Cc: Jan Kiszka, qemu-devel, liu ping fan



----- Original Message -----
> On Tue, Dec 4, 2012 at 4:10 AM, Andrew Jones <drjones@redhat.com>
> wrote:
> >
> >
> > ----- Original Message -----
> >> On Thu, Nov 29, 2012 at 1:34 AM, liu ping fan <qemulist@gmail.com>
> >> wrote:
> >> > On Thu, Nov 29, 2012 at 12:42 PM, Cam Macdonell
> >> > <cam@cs.ualberta.ca> wrote:
> >> >> On Tue, Nov 27, 2012 at 7:53 PM, liu ping fan
> >> >> <qemulist@gmail.com>
> >> >> wrote:
> >> >>> On Wed, Nov 28, 2012 at 5:48 AM, Cam Macdonell
> >> >>> <cam@cs.ualberta.ca> wrote:
> >> >>>> On Sat, Nov 24, 2012 at 8:51 PM, Liu Ping Fan
> >> >>>> <qemulist@gmail.com> wrote:
> >> >>>>> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
> >> >>>>>
> >> >>>>> Using irqfd, so we can avoid switch between kernel and user
> >> >>>>> when
> >> >>>>> VMs interrupts each other.
> >> >>>>
> >> >>>> Nice work.  Due to a hardware failure, there will be a small
> >> >>>> delay in
> >> >>>> me being able to test this.  I'll follow up as soon as I can.
> >> >>>>
> >> >>> BTW where can I find the latest guest code for test?
> >> >>> I got the guest code from
> >> >>> git://gitorious.org/nahanni/guest-code.git.
> >> >>> But it seems outdated, after fixing the unlocked_ioctl, and
> >> >>> vm-id
> >> >>> bits
> >> >>> position (the codes conflict with ivshmem spec), it works (I
> >> >>> have
> >> >>> tested for V1).
> >> >>
> >> >> Hello,
> >> >>
> >> >> Which device driver are you using?
> >> >>
> >> > guest-code/kernel_module/standard/kvm_ivshmem.c
> >>
> >> The uio driver is the recommended one, however if you want to use
> >> the
> >> kvm_ivshmem one and have it working, then feel free to continue.
> >
> > If the uio driver is the recommended one, then can you please post
> > it
> > to lkml? It should be integrated into drivers/virt with an
> > appropriate
> > Kconfig update.
> >
> 
> Sure.  Should it go under drivers/virt or drivers/uio?  It seems the
> uio drivers all get grouped together.

Good point. That is the current practice. As there's still only a
handful of uio drivers, then I guess it doesn't make sense to try and
change that at this point. It seems that it would make more sense to
use drivers/uio just for generic uio drivers though, and then the other
uio drivers would go under drivers/<type>/uio, e.g. drivers/virt/uio

Drew

> 
> Thanks,
> Cam
> 
> > Thanks,
> > Drew
> >
> >>
> >> I had deleted it from the repo, but some users had based solutions
> >> off
> >> it, so I added it back.
> >>
> >> btw, my hardware issue has been resolved, so I'll get to testing
> >> your
> >> patch soon.
> >>
> >> Sincerely,
> >> Cam
> >>
> >> >
> >> >> Cam
> >> >>
> >> >>>
> >> >>> Regards,
> >> >>> Pingfan
> >> >>>
> >> >>>>>
> >> >>>>> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
> >> >>>>> ---
> >> >>>>>  hw/ivshmem.c |   54
> >> >>>>>  +++++++++++++++++++++++++++++++++++++++++++++++++++++-
> >> >>>>>  1 files changed, 53 insertions(+), 1 deletions(-)
> >> >>>>>
> >> >>>>> diff --git a/hw/ivshmem.c b/hw/ivshmem.c
> >> >>>>> index 7c8630c..5709e89 100644
> >> >>>>> --- a/hw/ivshmem.c
> >> >>>>> +++ b/hw/ivshmem.c
> >> >>>>> @@ -19,6 +19,7 @@
> >> >>>>>  #include "hw.h"
> >> >>>>>  #include "pc.h"
> >> >>>>>  #include "pci.h"
> >> >>>>> +#include "msi.h"
> >> >>>>>  #include "msix.h"
> >> >>>>>  #include "kvm.h"
> >> >>>>>  #include "migration.h"
> >> >>>>> @@ -83,6 +84,7 @@ typedef struct IVShmemState {
> >> >>>>>      uint32_t vectors;
> >> >>>>>      uint32_t features;
> >> >>>>>      EventfdEntry *eventfd_table;
> >> >>>>> +    int *vector_virqs;
> >> >>>>>
> >> >>>>>      Error *migration_blocker;
> >> >>>>>
> >> >>>>> @@ -625,16 +627,62 @@ static int ivshmem_load(QEMUFile* f,
> >> >>>>> void
> >> >>>>> *opaque, int version_id)
> >> >>>>>      return 0;
> >> >>>>>  }
> >> >>>>>
> >> >>>>> +static int ivshmem_vector_use(PCIDevice *dev, unsigned
> >> >>>>> vector,
> >> >>>>> +                                     MSIMessage msg)
> >> >>>>> +{
> >> >>>>> +    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
> >> >>>>> +    int virq;
> >> >>>>> +    EventNotifier *n =
> >> >>>>> &s->peers[s->vm_id].eventfds[vector];
> >> >>>>> +
> >> >>>>> +    virq = kvm_irqchip_add_msi_route(kvm_state, msg);
> >> >>>>> +    if (virq >= 0 &&
> >> >>>>> kvm_irqchip_add_irqfd_notifier(kvm_state,
> >> >>>>> n, virq) >= 0) {
> >> >>>>> +        s->vector_virqs[vector] = virq;
> >> >>>>> +        qemu_chr_add_handlers(s->eventfd_chr[vector], NULL,
> >> >>>>> NULL, NULL, NULL);
> >> >>>>> +    } else if (virq >= 0) {
> >> >>>>> +        kvm_irqchip_release_virq(kvm_state, virq);
> >> >>>>> +        error_report("ivshmem, can not setup irqfd\n");
> >> >>>>> +        return -1;
> >> >>>>> +    } else {
> >> >>>>> +        error_report("ivshmem, no enough msi route to setup
> >> >>>>> irqfd\n");
> >> >>>>> +        return -1;
> >> >>>>> +    }
> >> >>>>> +
> >> >>>>> +    return 0;
> >> >>>>> +}
> >> >>>>> +
> >> >>>>> +static void ivshmem_vector_release(PCIDevice *dev, unsigned
> >> >>>>> vector)
> >> >>>>> +{
> >> >>>>> +    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
> >> >>>>> +    EventNotifier *n =
> >> >>>>> &s->peers[s->vm_id].eventfds[vector];
> >> >>>>> +    int virq = s->vector_virqs[vector];
> >> >>>>> +
> >> >>>>> +    if (s->vector_virqs[vector] >= 0) {
> >> >>>>> +        kvm_irqchip_remove_irqfd_notifier(kvm_state, n,
> >> >>>>> virq);
> >> >>>>> +        kvm_irqchip_release_virq(kvm_state, virq);
> >> >>>>> +        s->vector_virqs[vector] = -1;
> >> >>>>> +    }
> >> >>>>> +}
> >> >>>>> +
> >> >>>>>  static void ivshmem_write_config(PCIDevice *pci_dev,
> >> >>>>>  uint32_t
> >> >>>>>  address,
> >> >>>>>                                  uint32_t val, int len)
> >> >>>>>  {
> >> >>>>> +    bool is_enabled, was_enabled = msi_enabled(pci_dev);
> >> >>>>> +
> >> >>>>>      pci_default_write_config(pci_dev, address, val, len);
> >> >>>>> +    is_enabled = msi_enabled(pci_dev);
> >> >>>>> +    if (!was_enabled && is_enabled) {
> >> >>>>> +        msix_set_vector_notifiers(pci_dev,
> >> >>>>> ivshmem_vector_use,
> >> >>>>> +            ivshmem_vector_release);
> >> >>>>> +    } else if (was_enabled && !is_enabled) {
> >> >>>>> +        msix_unset_vector_notifiers(pci_dev);
> >> >>>>> +    }
> >> >>>>>  }
> >> >>>>>
> >> >>>>>  static int pci_ivshmem_init(PCIDevice *dev)
> >> >>>>>  {
> >> >>>>>      IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
> >> >>>>>      uint8_t *pci_conf;
> >> >>>>> +    int i;
> >> >>>>>
> >> >>>>>      if (s->sizearg == NULL)
> >> >>>>>          s->ivshmem_size = 4 << 20; /* 4 MB default */
> >> >>>>> @@ -758,7 +806,10 @@ static int pci_ivshmem_init(PCIDevice
> >> >>>>> *dev)
> >> >>>>>      }
> >> >>>>>
> >> >>>>>      s->dev.config_write = ivshmem_write_config;
> >> >>>>> -
> >> >>>>> +    s->vector_virqs = g_new0(int, s->vectors);
> >> >>>>> +    for (i = 0; i < s->vectors; i++) {
> >> >>>>> +        s->vector_virqs[i] = -1;
> >> >>>>> +    }
> >> >>>>>      return 0;
> >> >>>>>  }
> >> >>>>>
> >> >>>>> @@ -770,6 +821,7 @@ static void pci_ivshmem_uninit(PCIDevice
> >> >>>>> *dev)
> >> >>>>>          migrate_del_blocker(s->migration_blocker);
> >> >>>>>          error_free(s->migration_blocker);
> >> >>>>>      }
> >> >>>>> +    g_free(s->vector_virqs);
> >> >>>>>
> >> >>>>>      memory_region_destroy(&s->ivshmem_mmio);
> >> >>>>>      memory_region_del_subregion(&s->bar, &s->ivshmem);
> >> >>>>> --
> >> >>>>> 1.7.4.4
> >> >>>>>
> >> >>>
> >> >
> >>
> >>
> >
> 
> 

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [Qemu-devel] [PATCH v2 2/2] ivshmem: use irqfd to interrupt among VMs
  2012-12-05  8:50     ` Jan Kiszka
@ 2012-12-06  5:10       ` Cam Macdonell
  2012-12-06  6:26         ` liu ping fan
  0 siblings, 1 reply; 18+ messages in thread
From: Cam Macdonell @ 2012-12-06  5:10 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: Liu Ping Fan, qemu-devel

On Wed, Dec 5, 2012 at 1:50 AM, Jan Kiszka <jan.kiszka@siemens.com> wrote:
> On 2012-12-05 06:34, Cam Macdonell wrote:
>>>  static void ivshmem_write_config(PCIDevice *pci_dev, uint32_t address,
>>>                                  uint32_t val, int len)
>>>  {
>>> +    bool is_enabled, was_enabled = msi_enabled(pci_dev);
>>> +
>>>      pci_default_write_config(pci_dev, address, val, len);
>>> +    is_enabled = msi_enabled(pci_dev);
>>
>> Problem 1)  in my tests is_enabled is always 0, so I don't think the
>> irqfds are getting setup
>
> You likely want to call msix_enabled here.

Yup, that gets it working.

Liu Ping, can you update the patch to use msix_enabled()?

Also, it seems that with irqfd enabled the user-level handlers are not
triggered, but it may still be a better idea to not add the user-level
handlers to the char devices at all if irqfd is enabled.

Cam

>
> Jan
>
> --
> Siemens AG, Corporate Technology, CT RTC ITP SDP-DE
> Corporate Competence Center Embedded Linux
>

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [Qemu-devel] [PATCH v2 2/2] ivshmem: use irqfd to interrupt among VMs
  2012-12-06  5:10       ` Cam Macdonell
@ 2012-12-06  6:26         ` liu ping fan
  0 siblings, 0 replies; 18+ messages in thread
From: liu ping fan @ 2012-12-06  6:26 UTC (permalink / raw)
  To: Cam Macdonell; +Cc: Jan Kiszka, qemu-devel

On Thu, Dec 6, 2012 at 1:10 PM, Cam Macdonell <cam@cs.ualberta.ca> wrote:
> On Wed, Dec 5, 2012 at 1:50 AM, Jan Kiszka <jan.kiszka@siemens.com> wrote:
>> On 2012-12-05 06:34, Cam Macdonell wrote:
>>>>  static void ivshmem_write_config(PCIDevice *pci_dev, uint32_t address,
>>>>                                  uint32_t val, int len)
>>>>  {
>>>> +    bool is_enabled, was_enabled = msi_enabled(pci_dev);
>>>> +
>>>>      pci_default_write_config(pci_dev, address, val, len);
>>>> +    is_enabled = msi_enabled(pci_dev);
>>>
>>> Problem 1)  in my tests is_enabled is always 0, so I don't think the
>>> irqfds are getting setup
>>
>> You likely want to call msix_enabled here.
>
> Yup, that gets it working.
>
> Liu Ping, can you update the patch to use msix_enabled()?
>
:-) , I had just finished my test on uio before reading this

> Also, it seems that with irqfd enabled the user-level handlers are not
> triggered, but it may still be a better idea to not add the user-level
> handlers to the char devices at all if irqfd is enabled.
>
Here, in ivshmem_vector_use(),
qemu_chr_add_handlers(s->eventfd_chr[vector], NULL, NULL, NULL, NULL)
serves this purpose. When irqfd enabled, the fake_irqfd will be
removed from eventfd's listeners.

I will fix it to use msix_enabled(), and send out the next version.

Regards,
Pingfan
> Cam
>
>>
>> Jan
>>
>> --
>> Siemens AG, Corporate Technology, CT RTC ITP SDP-DE
>> Corporate Competence Center Embedded Linux
>>

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [Qemu-devel] [PATCH v2 2/2] ivshmem: use irqfd to interrupt among VMs
  2012-12-06  6:37 ` [Qemu-devel] [PATCH v2 2/2] ivshmem: use irqfd to interrupt among VMs Liu Ping Fan
@ 2012-12-13  7:34   ` liu ping fan
  0 siblings, 0 replies; 18+ messages in thread
From: liu ping fan @ 2012-12-13  7:34 UTC (permalink / raw)
  To: qemu-devel; +Cc: Jan Kiszka, Cam Macdonell, Anthony Liguori

Hi Jan and Cam,

It has been tested with the uio driver. Any other opinions on the code?

Regards,
Pingfan

On Thu, Dec 6, 2012 at 2:37 PM, Liu Ping Fan <qemulist@gmail.com> wrote:
> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>
> Using irqfd, so we can avoid switch between kernel and user when
> VMs interrupts each other.
>
> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
> Signed-off-by: Cam Macdonell <cam@cs.ualberta.ca>
> ---
>  hw/ivshmem.c |   54 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
>  1 files changed, 53 insertions(+), 1 deletions(-)
>
> diff --git a/hw/ivshmem.c b/hw/ivshmem.c
> index 7c8630c..b394b07 100644
> --- a/hw/ivshmem.c
> +++ b/hw/ivshmem.c
> @@ -19,6 +19,7 @@
>  #include "hw.h"
>  #include "pc.h"
>  #include "pci.h"
> +#include "msi.h"
>  #include "msix.h"
>  #include "kvm.h"
>  #include "migration.h"
> @@ -83,6 +84,7 @@ typedef struct IVShmemState {
>      uint32_t vectors;
>      uint32_t features;
>      EventfdEntry *eventfd_table;
> +    int *vector_virqs;
>
>      Error *migration_blocker;
>
> @@ -625,16 +627,62 @@ static int ivshmem_load(QEMUFile* f, void *opaque, int version_id)
>      return 0;
>  }
>
> +static int ivshmem_vector_use(PCIDevice *dev, unsigned vector,
> +                                     MSIMessage msg)
> +{
> +    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
> +    int virq;
> +    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
> +
> +    virq = kvm_irqchip_add_msi_route(kvm_state, msg);
> +    if (virq >= 0 && kvm_irqchip_add_irqfd_notifier(kvm_state, n, virq) >= 0) {
> +        s->vector_virqs[vector] = virq;
> +        qemu_chr_add_handlers(s->eventfd_chr[vector], NULL, NULL, NULL, NULL);
> +    } else if (virq >= 0) {
> +        kvm_irqchip_release_virq(kvm_state, virq);
> +        error_report("ivshmem, can not setup irqfd\n");
> +        return -1;
> +    } else {
> +        error_report("ivshmem, no enough msi route to setup irqfd\n");
> +        return -1;
> +    }
> +
> +    return 0;
> +}
> +
> +static void ivshmem_vector_release(PCIDevice *dev, unsigned vector)
> +{
> +    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
> +    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
> +    int virq = s->vector_virqs[vector];
> +
> +    if (s->vector_virqs[vector] >= 0) {
> +        kvm_irqchip_remove_irqfd_notifier(kvm_state, n, virq);
> +        kvm_irqchip_release_virq(kvm_state, virq);
> +        s->vector_virqs[vector] = -1;
> +    }
> +}
> +
>  static void ivshmem_write_config(PCIDevice *pci_dev, uint32_t address,
>                                  uint32_t val, int len)
>  {
> +    bool is_enabled, was_enabled = msi_enabled(pci_dev);
> +
>      pci_default_write_config(pci_dev, address, val, len);
> +    is_enabled = msix_enabled(pci_dev);
> +    if (!was_enabled && is_enabled) {
> +        msix_set_vector_notifiers(pci_dev, ivshmem_vector_use,
> +            ivshmem_vector_release);
> +    } else if (was_enabled && !is_enabled) {
> +        msix_unset_vector_notifiers(pci_dev);
> +    }
>  }
>
>  static int pci_ivshmem_init(PCIDevice *dev)
>  {
>      IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
>      uint8_t *pci_conf;
> +    int i;
>
>      if (s->sizearg == NULL)
>          s->ivshmem_size = 4 << 20; /* 4 MB default */
> @@ -758,7 +806,10 @@ static int pci_ivshmem_init(PCIDevice *dev)
>      }
>
>      s->dev.config_write = ivshmem_write_config;
> -
> +    s->vector_virqs = g_new0(int, s->vectors);
> +    for (i = 0; i < s->vectors; i++) {
> +        s->vector_virqs[i] = -1;
> +    }
>      return 0;
>  }
>
> @@ -770,6 +821,7 @@ static void pci_ivshmem_uninit(PCIDevice *dev)
>          migrate_del_blocker(s->migration_blocker);
>          error_free(s->migration_blocker);
>      }
> +    g_free(s->vector_virqs);
>
>      memory_region_destroy(&s->ivshmem_mmio);
>      memory_region_del_subregion(&s->bar, &s->ivshmem);
> --
> 1.7.4.4
>

^ permalink raw reply	[flat|nested] 18+ messages in thread

* [Qemu-devel] [PATCH v2 2/2] ivshmem: use irqfd to interrupt among VMs
  2012-12-06  6:37 Liu Ping Fan
@ 2012-12-06  6:37 ` Liu Ping Fan
  2012-12-13  7:34   ` liu ping fan
  0 siblings, 1 reply; 18+ messages in thread
From: Liu Ping Fan @ 2012-12-06  6:37 UTC (permalink / raw)
  To: qemu-devel; +Cc: Jan Kiszka, Cam Macdonell, Anthony Liguori

From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>

Using irqfd, so we can avoid switch between kernel and user when
VMs interrupts each other.

Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
Signed-off-by: Cam Macdonell <cam@cs.ualberta.ca>
---
 hw/ivshmem.c |   54 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 53 insertions(+), 1 deletions(-)

diff --git a/hw/ivshmem.c b/hw/ivshmem.c
index 7c8630c..b394b07 100644
--- a/hw/ivshmem.c
+++ b/hw/ivshmem.c
@@ -19,6 +19,7 @@
 #include "hw.h"
 #include "pc.h"
 #include "pci.h"
+#include "msi.h"
 #include "msix.h"
 #include "kvm.h"
 #include "migration.h"
@@ -83,6 +84,7 @@ typedef struct IVShmemState {
     uint32_t vectors;
     uint32_t features;
     EventfdEntry *eventfd_table;
+    int *vector_virqs;
 
     Error *migration_blocker;
 
@@ -625,16 +627,62 @@ static int ivshmem_load(QEMUFile* f, void *opaque, int version_id)
     return 0;
 }
 
+static int ivshmem_vector_use(PCIDevice *dev, unsigned vector,
+                                     MSIMessage msg)
+{
+    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
+    int virq;
+    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
+
+    virq = kvm_irqchip_add_msi_route(kvm_state, msg);
+    if (virq >= 0 && kvm_irqchip_add_irqfd_notifier(kvm_state, n, virq) >= 0) {
+        s->vector_virqs[vector] = virq;
+        qemu_chr_add_handlers(s->eventfd_chr[vector], NULL, NULL, NULL, NULL);
+    } else if (virq >= 0) {
+        kvm_irqchip_release_virq(kvm_state, virq);
+        error_report("ivshmem, can not setup irqfd\n");
+        return -1;
+    } else {
+        error_report("ivshmem, no enough msi route to setup irqfd\n");
+        return -1;
+    }
+
+    return 0;
+}
+
+static void ivshmem_vector_release(PCIDevice *dev, unsigned vector)
+{
+    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
+    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
+    int virq = s->vector_virqs[vector];
+
+    if (s->vector_virqs[vector] >= 0) {
+        kvm_irqchip_remove_irqfd_notifier(kvm_state, n, virq);
+        kvm_irqchip_release_virq(kvm_state, virq);
+        s->vector_virqs[vector] = -1;
+    }
+}
+
 static void ivshmem_write_config(PCIDevice *pci_dev, uint32_t address,
 				 uint32_t val, int len)
 {
+    bool is_enabled, was_enabled = msi_enabled(pci_dev);
+
     pci_default_write_config(pci_dev, address, val, len);
+    is_enabled = msix_enabled(pci_dev);
+    if (!was_enabled && is_enabled) {
+        msix_set_vector_notifiers(pci_dev, ivshmem_vector_use,
+            ivshmem_vector_release);
+    } else if (was_enabled && !is_enabled) {
+        msix_unset_vector_notifiers(pci_dev);
+    }
 }
 
 static int pci_ivshmem_init(PCIDevice *dev)
 {
     IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
     uint8_t *pci_conf;
+    int i;
 
     if (s->sizearg == NULL)
         s->ivshmem_size = 4 << 20; /* 4 MB default */
@@ -758,7 +806,10 @@ static int pci_ivshmem_init(PCIDevice *dev)
     }
 
     s->dev.config_write = ivshmem_write_config;
-
+    s->vector_virqs = g_new0(int, s->vectors);
+    for (i = 0; i < s->vectors; i++) {
+        s->vector_virqs[i] = -1;
+    }
     return 0;
 }
 
@@ -770,6 +821,7 @@ static void pci_ivshmem_uninit(PCIDevice *dev)
         migrate_del_blocker(s->migration_blocker);
         error_free(s->migration_blocker);
     }
+    g_free(s->vector_virqs);
 
     memory_region_destroy(&s->ivshmem_mmio);
     memory_region_del_subregion(&s->bar, &s->ivshmem);
-- 
1.7.4.4

^ permalink raw reply related	[flat|nested] 18+ messages in thread

end of thread, other threads:[~2012-12-13  7:34 UTC | newest]

Thread overview: 18+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2012-11-25  3:51 [Qemu-devel] [PATCH v2 1/2] ivshmem: remove msix_write_config Liu Ping Fan
2012-11-25  3:51 ` [Qemu-devel] [PATCH v2 2/2] ivshmem: use irqfd to interrupt among VMs Liu Ping Fan
2012-11-27 21:48   ` Cam Macdonell
2012-11-28  2:53     ` liu ping fan
2012-11-29  4:42       ` Cam Macdonell
2012-11-29  8:34         ` liu ping fan
2012-11-29 17:33           ` Cam Macdonell
2012-12-04 11:10             ` Andrew Jones
2012-12-05  3:17               ` Cam Macdonell
2012-12-05  9:25                 ` Andrew Jones
2012-12-05  5:34   ` Cam Macdonell
2012-12-05  8:50     ` Jan Kiszka
2012-12-06  5:10       ` Cam Macdonell
2012-12-06  6:26         ` liu ping fan
2012-11-28  8:05 ` [Qemu-devel] [PATCH v2 1/2] ivshmem: remove msix_write_config liu ping fan
2012-12-05  5:38 ` Cam Macdonell
2012-12-06  6:37 Liu Ping Fan
2012-12-06  6:37 ` [Qemu-devel] [PATCH v2 2/2] ivshmem: use irqfd to interrupt among VMs Liu Ping Fan
2012-12-13  7:34   ` liu ping fan

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.