* [Qemu-devel] [PATCH v2] ivshmem: add a new PIO BAR3 (Doorbell) alongside MMIO BAR0 to reduce notification time
@ 2011-11-18  5:50 ` zanghongyong
From: zanghongyong @ 2011-11-18  5:50 UTC (permalink / raw)
  To: avi, qemu-devel, kvm
  Cc: james.chenjiabo, wuchangyi, xiaowei.yang, hanweidong,
	zanghongyong, wusongwei, louzhengwei, cam

From: Hongyong Zang <zanghongyong@huawei.com>

This patch adds a PIO BAR3 for the guest to notify qemu. We find that notification through the new PIO BAR3 takes about 30% less time than the original MMIO BAR0 path.

Signed-off-by: Hongyong Zang <zanghongyong@huawei.com>
---
 hw/ivshmem.c |   24 ++++++++++++++++++++++--
 kvm-all.c    |   23 +++++++++++++++++++++++
 kvm.h        |    1 +
 3 files changed, 46 insertions(+), 2 deletions(-)

diff --git a/hw/ivshmem.c b/hw/ivshmem.c
index 242fbea..031cdd8 100644
--- a/hw/ivshmem.c
+++ b/hw/ivshmem.c
@@ -29,6 +29,7 @@
 #define IVSHMEM_MASTER  1
 
 #define IVSHMEM_REG_BAR_SIZE 0x100
+#define IVSHIO_REG_BAR_SIZE 0x10
 
 //#define DEBUG_IVSHMEM
 #ifdef DEBUG_IVSHMEM
@@ -57,8 +58,10 @@ typedef struct IVShmemState {
     CharDriverState **eventfd_chr;
     CharDriverState *server_chr;
     MemoryRegion ivshmem_mmio;
+    MemoryRegion ivshmem_pio;
 
     pcibus_t mmio_addr;
+    pcibus_t pio_addr;
     /* We might need to register the BAR before we actually have the memory.
      * So prepare a container MemoryRegion for the BAR immediately and
      * add a subregion when we have the memory.
@@ -234,7 +237,7 @@ static uint64_t ivshmem_io_read(void *opaque, target_phys_addr_t addr,
     return ret;
 }
 
-static const MemoryRegionOps ivshmem_mmio_ops = {
+static const MemoryRegionOps ivshmem_io_ops = {
     .read = ivshmem_io_read,
     .write = ivshmem_io_write,
     .endianness = DEVICE_NATIVE_ENDIAN,
@@ -348,6 +351,8 @@ static void close_guest_eventfds(IVShmemState *s, int posn)
     for (i = 0; i < guest_curr_max; i++) {
         kvm_set_ioeventfd_mmio_long(s->peers[posn].eventfds[i],
                     s->mmio_addr + DOORBELL, (posn << 16) | i, 0);
+        kvm_set_ioeventfd_pio_long(s->peers[posn].eventfds[i],
+                    s->pio_addr + DOORBELL, (posn << 16) | i, 0);
         close(s->peers[posn].eventfds[i]);
     }
 
@@ -367,6 +372,12 @@ static void setup_ioeventfds(IVShmemState *s) {
                                       true,
                                       (i << 16) | j,
                                       s->peers[i].eventfds[j]);
+            memory_region_add_eventfd(&s->ivshmem_pio,
+                                      DOORBELL,
+                                      4,
+                                      true,
+                                      (i << 16) | j,
+                                      s->peers[i].eventfds[j]);
         }
     }
 }
@@ -495,6 +506,10 @@ static void ivshmem_read(void *opaque, const uint8_t * buf, int flags)
                         (incoming_posn << 16) | guest_max_eventfd, 1) < 0) {
             fprintf(stderr, "ivshmem: ioeventfd not available\n");
         }
+        if (kvm_set_ioeventfd_pio_long(incoming_fd, s->pio_addr + DOORBELL,
+                        (incoming_posn << 16) | guest_max_eventfd, 1) < 0) {
+            fprintf(stderr, "ivshmem: ioeventfd not available\n");
+        }
     }
 
     return;
@@ -656,8 +671,10 @@ static int pci_ivshmem_init(PCIDevice *dev)
 
     s->shm_fd = 0;
 
-    memory_region_init_io(&s->ivshmem_mmio, &ivshmem_mmio_ops, s,
+    memory_region_init_io(&s->ivshmem_mmio, &ivshmem_io_ops, s,
                           "ivshmem-mmio", IVSHMEM_REG_BAR_SIZE);
+    memory_region_init_io(&s->ivshmem_pio, &ivshmem_io_ops, s,
+                          "ivshmem-pio", IVSHIO_REG_BAR_SIZE);
 
     if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
         setup_ioeventfds(s);
@@ -666,6 +683,8 @@ static int pci_ivshmem_init(PCIDevice *dev)
     /* region for registers*/
     pci_register_bar(&s->dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY,
                      &s->ivshmem_mmio);
+    pci_register_bar(&s->dev, 3, PCI_BASE_ADDRESS_SPACE_IO,
+                     &s->ivshmem_pio);
 
     memory_region_init(&s->bar, "ivshmem-bar2-container", s->ivshmem_size);
 
@@ -742,6 +761,7 @@ static int pci_ivshmem_uninit(PCIDevice *dev)
     IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
 
     memory_region_destroy(&s->ivshmem_mmio);
+    memory_region_destroy(&s->ivshmem_pio);
     memory_region_del_subregion(&s->bar, &s->ivshmem);
     memory_region_destroy(&s->ivshmem);
     memory_region_destroy(&s->bar);
diff --git a/kvm-all.c b/kvm-all.c
index 5d500e1..737c2e2 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -1396,6 +1396,29 @@ int kvm_set_ioeventfd_mmio_long(int fd, uint32_t addr, uint32_t val, bool assign
     return 0;
 }
 
+int kvm_set_ioeventfd_pio_long(int fd, uint32_t addr, uint32_t val, bool assign)
+{
+    struct kvm_ioeventfd kick = {
+        .datamatch = val,
+        .addr = addr,
+        .len = 4,
+        .flags = KVM_IOEVENTFD_FLAG_DATAMATCH | KVM_IOEVENTFD_FLAG_PIO,
+        .fd = fd,
+    };
+    int r;
+    if (!kvm_enabled()) {
+        return -ENOSYS;
+    }
+    if (!assign) {
+        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
+    }
+    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
+    if (r < 0) {
+        return r;
+    }
+    return 0;
+}
+
 int kvm_set_ioeventfd_pio_word(int fd, uint16_t addr, uint16_t val, bool assign)
 {
     struct kvm_ioeventfd kick = {
diff --git a/kvm.h b/kvm.h
index b15e1dd..c2373c9 100644
--- a/kvm.h
+++ b/kvm.h
@@ -198,6 +198,7 @@ int kvm_set_ioeventfd_mmio_long(int fd, uint32_t adr, uint32_t val, bool assign)
 
 int kvm_set_irqfd(int gsi, int fd, bool assigned);
 
+int kvm_set_ioeventfd_pio_long(int fd, uint32_t adr, uint32_t val, bool assign);
 int kvm_set_ioeventfd_pio_word(int fd, uint16_t adr, uint16_t val, bool assign);
 
 typedef struct KVMMsiMessage {
-- 
1.7.1
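
For context on the 30% figure above: the saving comes from the guest-to-host exit path, since a port I/O exit hands KVM the port, size and data directly, while an MMIO exit has to resolve the fault and decode the guest instruction. Below is a minimal sketch of the two doorbell writes as a Linux guest driver might issue them; it is illustrative only (the function names are made up, BAR0 is assumed to be ioremap'd, the BAR3 port range claimed, and the doorbell register assumed at offset 12 as in the ivshmem register layout).

#include <linux/io.h>

#define IVSHMEM_DOORBELL_OFF 12  /* doorbell register offset within the BAR */

/* MMIO doorbell: a 32-bit store into BAR0; the resulting exit has to be
 * decoded by KVM before the write is recognised. */
static void ivshmem_kick_mmio(void __iomem *bar0, u16 peer, u16 vector)
{
    writel(((u32)peer << 16) | vector, bar0 + IVSHMEM_DOORBELL_OFF);
}

/* PIO doorbell: an outl to BAR3; the I/O-port exit already carries the port
 * and data, which is what makes this path cheaper. */
static void ivshmem_kick_pio(unsigned long bar3_base, u16 peer, u16 vector)
{
    outl(((u32)peer << 16) | vector, bar3_base + IVSHMEM_DOORBELL_OFF);
}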


* Re: [PATCH v2] ivshmem: add a new PIO BAR3 (Doorbell) alongside MMIO BAR0 to reduce notification time
  2011-11-18  5:50 ` zanghongyong
@ 2011-11-20  9:27   ` Avi Kivity
From: Avi Kivity @ 2011-11-20  9:27 UTC (permalink / raw)
  To: zanghongyong
  Cc: wusongwei, kvm, hanweidong, qemu-devel, wuchangyi, xiaowei.yang,
	james.chenjiabo, louzhengwei, cam

On 11/18/2011 07:50 AM, zanghongyong@huawei.com wrote:
> From: Hongyong Zang <zanghongyong@huawei.com>
>
> This patch adds a PIO BAR3 for the guest to notify qemu. We find that notification through the new PIO BAR3 takes about 30% less time than the original MMIO BAR0 path.

Please update the spec, and split the patch into an infrastructure patch
(for long eventfds in the memory API) and an ivshmem patch.

>  
>  //#define DEBUG_IVSHMEM
>  #ifdef DEBUG_IVSHMEM
> @@ -57,8 +58,10 @@ typedef struct IVShmemState {
>      CharDriverState **eventfd_chr;
>      CharDriverState *server_chr;
>      MemoryRegion ivshmem_mmio;
> +    MemoryRegion ivshmem_pio;
>  
>      pcibus_t mmio_addr;
> +    pcibus_t pio_addr;
>      /* We might need to register the BAR before we actually have the memory.
>       * So prepare a container MemoryRegion for the BAR immediately and
>       * add a subregion when we have the memory.
> @@ -234,7 +237,7 @@ static uint64_t ivshmem_io_read(void *opaque, target_phys_addr_t addr,
>      return ret;
>  }
>  
> -static const MemoryRegionOps ivshmem_mmio_ops = {
> +static const MemoryRegionOps ivshmem_io_ops = {
>      .read = ivshmem_io_read,
>      .write = ivshmem_io_write,
>      .endianness = DEVICE_NATIVE_ENDIAN,
> @@ -348,6 +351,8 @@ static void close_guest_eventfds(IVShmemState *s, int posn)
>      for (i = 0; i < guest_curr_max; i++) {
>          kvm_set_ioeventfd_mmio_long(s->peers[posn].eventfds[i],
>                      s->mmio_addr + DOORBELL, (posn << 16) | i, 0);
> +        kvm_set_ioeventfd_pio_long(s->peers[posn].eventfds[i],
> +                    s->pio_addr + DOORBELL, (posn << 16) | i, 0);

This really shouldn't be needed - the memory API should take care of it.

>          close(s->peers[posn].eventfds[i]);
>      }
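
To illustrate the direction suggested here: once the memory API owns the KVM eventfd registration, the device would only remove its eventfds from the regions and let the core issue the matching deassign. A rough sketch follows (hedged: it assumes a memory_region_del_eventfd() counterpart with the same argument order as the memory_region_add_eventfd() calls in this patch; the function name is hypothetical):

static void close_guest_eventfds_sketch(IVShmemState *s, int posn)
{
    int i, guest_curr_max = s->peers[posn].nb_eventfds;

    for (i = 0; i < guest_curr_max; i++) {
        /* Drop the eventfd from both regions; the memory core, not the
         * device, would then perform the KVM deassign for whichever
         * address space each region is mapped into. */
        memory_region_del_eventfd(&s->ivshmem_mmio, DOORBELL, 4, true,
                                  (posn << 16) | i,
                                  s->peers[posn].eventfds[i]);
        memory_region_del_eventfd(&s->ivshmem_pio, DOORBELL, 4, true,
                                  (posn << 16) | i,
                                  s->peers[posn].eventfds[i]);
        close(s->peers[posn].eventfds[i]);
    }
}
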
>  
> @@ -367,6 +372,12 @@ static void setup_ioeventfds(IVShmemState *s) {
>                                        true,
>                                        (i << 16) | j,
>                                        s->peers[i].eventfds[j]);
> +            memory_region_add_eventfd(&s->ivshmem_pio,
> +                                      DOORBELL,
> +                                      4,
> +                                      true,
> +                                      (i << 16) | j,
> +                                      s->peers[i].eventfds[j]);
>          }
>      }

Where is the memory API support for this?

>  }
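
One possible shape for that infrastructure, sketched from the kvm_set_ioeventfd_* helpers this patch already adds (the routine below is hypothetical, not an existing QEMU entry point): a single core-side helper that the memory layer calls when an eventfd is attached to or detached from a region, selecting the PIO or MMIO flavour of KVM_IOEVENTFD by address space.

/* Hypothetical core-side helper (sketch); the flags and the ioctl are the
 * ones the patch already uses, only the is_pio dispatch is new. */
static int kvm_core_set_ioeventfd(bool is_pio, int fd, uint32_t addr,
                                  uint32_t val, uint32_t len, bool assign)
{
    struct kvm_ioeventfd kick = {
        .datamatch = val,
        .addr = addr,
        .len = len,
        .flags = KVM_IOEVENTFD_FLAG_DATAMATCH |
                 (is_pio ? KVM_IOEVENTFD_FLAG_PIO : 0),
        .fd = fd,
    };

    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
}
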
> @@ -495,6 +506,10 @@ static void ivshmem_read(void *opaque, const uint8_t * buf, int flags)
>                          (incoming_posn << 16) | guest_max_eventfd, 1) < 0) {
>              fprintf(stderr, "ivshmem: ioeventfd not available\n");
>          }
> +        if (kvm_set_ioeventfd_pio_long(incoming_fd, s->pio_addr + DOORBELL,
> +                        (incoming_posn << 16) | guest_max_eventfd, 1) < 0) {
> +            fprintf(stderr, "ivshmem: ioeventfd not available\n");
> +        }
>      }

Nor should this be needed.

Please make BAR 3 disappear if started with -M pc-1.0.
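
One hedged way to do that (the "use_pio" property name and the compat wiring are assumptions for illustration, not something the patch defines): gate the new BAR behind a device property and clear it from the pc-1.0 machine's compat properties.

/* In pci_ivshmem_init(): register the doorbell I/O BAR only when the
 * (assumed) "use_pio" property is left enabled. */
if (s->use_pio) {
    pci_register_bar(&s->dev, 3, PCI_BASE_ADDRESS_SPACE_IO,
                     &s->ivshmem_pio);
}

/* Illustrative compat entry for the pc-1.0 machine description, so that
 * guests started with -M pc-1.0 keep the old BAR layout:
 *     { .driver = "ivshmem", .property = "use_pio", .value = "off" },
 */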


-- 
error compiling committee.c: too many arguments to function

* Re: [Qemu-devel] [PATCH v2] ivshmem: add a new PIO BAR3 (Doorbell) alongside MMIO BAR0 to reduce notification time
  2011-11-18  5:50 ` zanghongyong
@ 2011-11-29  6:38   ` Cam Macdonell
From: Cam Macdonell @ 2011-11-29  6:38 UTC (permalink / raw)
  To: zanghongyong
  Cc: avi, qemu-devel, kvm, james.chenjiabo, wuchangyi, xiaowei.yang,
	hanweidong, wusongwei, louzhengwei

On Thu, Nov 17, 2011 at 10:50 PM,  <zanghongyong@huawei.com> wrote:
> From: Hongyong Zang <zanghongyong@huawei.com>
>
> This patch adds a PIO BAR3 for the guest to notify qemu. We find that notification through the new PIO BAR3 takes about 30% less time than the original MMIO BAR0 path.

Come to think of it, should we bump the PIO to BAR4 so that the shared
memory region could be made a 64-bit BAR and therefore take up BAR2
and BAR3?
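
A sketch of the layout that suggestion implies (the flag names are the standard ones from QEMU's PCI headers; treat this as illustrative rather than a tested change): the shared memory becomes a 64-bit prefetchable BAR at index 2, consuming BAR2+BAR3, and the doorbell I/O region moves up to BAR4.

/* Shared memory as a 64-bit prefetchable memory BAR (occupies BAR2+BAR3). */
pci_register_bar(&s->dev, 2,
                 PCI_BASE_ADDRESS_SPACE_MEMORY |
                 PCI_BASE_ADDRESS_MEM_TYPE_64 |
                 PCI_BASE_ADDRESS_MEM_PREFETCH,
                 &s->bar);

/* Doorbell port-I/O region at BAR4 instead of BAR3. */
pci_register_bar(&s->dev, 4, PCI_BASE_ADDRESS_SPACE_IO,
                 &s->ivshmem_pio);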

* Re: [Qemu-devel] [PATCH v2] ivshmem: add a new PIO BAR3 (Doorbell) alongside MMIO BAR0 to reduce notification time
  2011-11-29  6:38   ` Cam Macdonell
@ 2011-11-29  7:02     ` Zang Hongyong
From: Zang Hongyong @ 2011-11-29  7:02 UTC (permalink / raw)
  To: Cam Macdonell
  Cc: avi, qemu-devel, kvm, james.chenjiabo, wuchangyi, xiaowei.yang,
	hanweidong, wusongwei, louzhengwei

On 2011/11/29 (Tuesday) 14:38, Cam Macdonell wrote:
> On Thu, Nov 17, 2011 at 10:50 PM,<zanghongyong@huawei.com>  wrote:
>> From: Hongyong Zang<zanghongyong@huawei.com>
>>
>> This patch adds a PIO BAR3 for the guest to notify qemu. We find that notification through the new PIO BAR3 takes about 30% less time than the original MMIO BAR0 path.
> Come to think of it, should we bump the PIO to BAR4 so that the shared
> memory region could be made a 64-bit BAR and therefore take up BAR2
> and BAR3?
OK. The PIO BAR can be placed at BAR4; we'll send an updated patch.

Regards,
Hongyong


