All of lore.kernel.org
 help / color / mirror / Atom feed
From: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
To: joro@8bytes.org
Cc: paul@codesourcery.com, blauwirbel@gmail.com,
	anthony@codemonkey.ws, avi@redhat.com, kvm@vger.kernel.org,
	qemu-devel@nongnu.org,
	Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
Subject: [PATCH 4/7] ide: use the PCI memory access interface
Date: Sun, 15 Aug 2010 22:27:19 +0300	[thread overview]
Message-ID: <1281900442-29971-5-git-send-email-eduard.munteanu@linux360.ro> (raw)
In-Reply-To: <1281900442-29971-1-git-send-email-eduard.munteanu@linux360.ro>

Emulated PCI IDE controllers now use the memory access interface. This
also allows an emulated IOMMU to translate and check accesses.

Map invalidation results in cancelling DMA transfers. Since the guest OS
can't properly recover the DMA results if the mapping is changed,
this is a fairly good approximation.

Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
---
 dma-helpers.c     |   46 +++++++++++++++++++++++++++++++++++++++++-----
 dma.h             |   21 ++++++++++++++++++++-
 hw/ide/core.c     |   15 ++++++++-------
 hw/ide/internal.h |   39 +++++++++++++++++++++++++++++++++++++++
 hw/ide/pci.c      |    7 +++++++
 5 files changed, 115 insertions(+), 13 deletions(-)

diff --git a/dma-helpers.c b/dma-helpers.c
index d4fc077..9c3a21a 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -10,12 +10,36 @@
 #include "dma.h"
 #include "block_int.h"
 
-void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
+static void *qemu_sglist_default_map(void *opaque,
+                                     QEMUSGInvalMapFunc *inval_cb,
+                                     void *inval_opaque,
+                                     target_phys_addr_t addr,
+                                     target_phys_addr_t *len,
+                                     int is_write)
+{
+    return cpu_physical_memory_map(addr, len, is_write);
+}
+
+static void qemu_sglist_default_unmap(void *opaque,
+                                      void *buffer,
+                                      target_phys_addr_t len,
+                                      int is_write,
+                                      target_phys_addr_t access_len)
+{
+    cpu_physical_memory_unmap(buffer, len, is_write, access_len);
+}
+
+void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint,
+                      QEMUSGMapFunc *map, QEMUSGUnmapFunc *unmap, void *opaque)
 {
     qsg->sg = qemu_malloc(alloc_hint * sizeof(ScatterGatherEntry));
     qsg->nsg = 0;
     qsg->nalloc = alloc_hint;
     qsg->size = 0;
+
+    qsg->map = map ? map : qemu_sglist_default_map;
+    qsg->unmap = unmap ? unmap : qemu_sglist_default_unmap;
+    qsg->opaque = opaque;
 }
 
 void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
@@ -73,12 +97,23 @@ static void dma_bdrv_unmap(DMAAIOCB *dbs)
     int i;
 
     for (i = 0; i < dbs->iov.niov; ++i) {
-        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
-                                  dbs->iov.iov[i].iov_len, !dbs->is_write,
-                                  dbs->iov.iov[i].iov_len);
+        dbs->sg->unmap(dbs->sg->opaque,
+                       dbs->iov.iov[i].iov_base,
+                       dbs->iov.iov[i].iov_len, !dbs->is_write,
+                       dbs->iov.iov[i].iov_len);
     }
 }
 
+static void dma_bdrv_cancel(void *opaque)
+{
+    DMAAIOCB *dbs = opaque;
+
+    bdrv_aio_cancel(dbs->acb);
+    dma_bdrv_unmap(dbs);
+    qemu_iovec_destroy(&dbs->iov);
+    qemu_aio_release(dbs);
+}
+
 static void dma_bdrv_cb(void *opaque, int ret)
 {
     DMAAIOCB *dbs = (DMAAIOCB *)opaque;
@@ -100,7 +135,8 @@ static void dma_bdrv_cb(void *opaque, int ret)
     while (dbs->sg_cur_index < dbs->sg->nsg) {
         cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
         cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
-        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
+        mem = dbs->sg->map(dbs->sg->opaque, dma_bdrv_cancel, dbs,
+                           cur_addr, &cur_len, !dbs->is_write);
         if (!mem)
             break;
         qemu_iovec_add(&dbs->iov, mem, cur_len);
diff --git a/dma.h b/dma.h
index f3bb275..d48f35c 100644
--- a/dma.h
+++ b/dma.h
@@ -15,6 +15,19 @@
 #include "hw/hw.h"
 #include "block.h"
 
+typedef void QEMUSGInvalMapFunc(void *opaque);
+typedef void *QEMUSGMapFunc(void *opaque,
+                            QEMUSGInvalMapFunc *inval_cb,
+                            void *inval_opaque,
+                            target_phys_addr_t addr,
+                            target_phys_addr_t *len,
+                            int is_write);
+typedef void QEMUSGUnmapFunc(void *opaque,
+                             void *buffer,
+                             target_phys_addr_t len,
+                             int is_write,
+                             target_phys_addr_t access_len);
+
 typedef struct {
     target_phys_addr_t base;
     target_phys_addr_t len;
@@ -25,9 +38,15 @@ typedef struct {
     int nsg;
     int nalloc;
     target_phys_addr_t size;
+
+    QEMUSGMapFunc *map;
+    QEMUSGUnmapFunc *unmap;
+    void *opaque;
 } QEMUSGList;
 
-void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint);
+void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint,
+                      QEMUSGMapFunc *map, QEMUSGUnmapFunc *unmap,
+                      void *opaque);
 void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
                      target_phys_addr_t len);
 void qemu_sglist_destroy(QEMUSGList *qsg);
diff --git a/hw/ide/core.c b/hw/ide/core.c
index 0b3b7c2..c19013a 100644
--- a/hw/ide/core.c
+++ b/hw/ide/core.c
@@ -435,7 +435,8 @@ static int dma_buf_prepare(BMDMAState *bm, int is_write)
     } prd;
     int l, len;
 
-    qemu_sglist_init(&s->sg, s->nsector / (IDE_PAGE_SIZE / 512) + 1);
+    qemu_sglist_init(&s->sg, s->nsector / (IDE_PAGE_SIZE / 512) + 1,
+                     bm->map, bm->unmap, bm->opaque);
     s->io_buffer_size = 0;
     for(;;) {
         if (bm->cur_prd_len == 0) {
@@ -443,7 +444,7 @@ static int dma_buf_prepare(BMDMAState *bm, int is_write)
             if (bm->cur_prd_last ||
                 (bm->cur_addr - bm->addr) >= IDE_PAGE_SIZE)
                 return s->io_buffer_size != 0;
-            cpu_physical_memory_read(bm->cur_addr, (uint8_t *)&prd, 8);
+            bmdma_memory_read(bm, bm->cur_addr, (uint8_t *)&prd, 8);
             bm->cur_addr += 8;
             prd.addr = le32_to_cpu(prd.addr);
             prd.size = le32_to_cpu(prd.size);
@@ -526,7 +527,7 @@ static int dma_buf_rw(BMDMAState *bm, int is_write)
             if (bm->cur_prd_last ||
                 (bm->cur_addr - bm->addr) >= IDE_PAGE_SIZE)
                 return 0;
-            cpu_physical_memory_read(bm->cur_addr, (uint8_t *)&prd, 8);
+            bmdma_memory_read(bm, bm->cur_addr, (uint8_t *)&prd, 8);
             bm->cur_addr += 8;
             prd.addr = le32_to_cpu(prd.addr);
             prd.size = le32_to_cpu(prd.size);
@@ -541,11 +542,11 @@ static int dma_buf_rw(BMDMAState *bm, int is_write)
             l = bm->cur_prd_len;
         if (l > 0) {
             if (is_write) {
-                cpu_physical_memory_write(bm->cur_prd_addr,
-                                          s->io_buffer + s->io_buffer_index, l);
+                bmdma_memory_write(bm, bm->cur_prd_addr,
+                                   s->io_buffer + s->io_buffer_index, l);
             } else {
-                cpu_physical_memory_read(bm->cur_prd_addr,
-                                          s->io_buffer + s->io_buffer_index, l);
+                bmdma_memory_read(bm, bm->cur_prd_addr,
+                                  s->io_buffer + s->io_buffer_index, l);
             }
             bm->cur_prd_addr += l;
             bm->cur_prd_len -= l;
diff --git a/hw/ide/internal.h b/hw/ide/internal.h
index eef1ee1..0f3b707 100644
--- a/hw/ide/internal.h
+++ b/hw/ide/internal.h
@@ -476,6 +476,24 @@ struct IDEDeviceInfo {
 #define BM_CMD_START     0x01
 #define BM_CMD_READ      0x08
 
+typedef void BMDMAInvalMapFunc(void *opaque);
+typedef void BMDMARWFunc(void *opaque,
+                         target_phys_addr_t addr,
+                         uint8_t *buf,
+                         target_phys_addr_t len,
+                         int is_write);
+typedef void *BMDMAMapFunc(void *opaque,
+                           BMDMAInvalMapFunc *inval_cb,
+                           void *inval_opaque,
+                           target_phys_addr_t addr,
+                           target_phys_addr_t *len,
+                           int is_write);
+typedef void BMDMAUnmapFunc(void *opaque,
+                            void *buffer,
+                            target_phys_addr_t len,
+                            int is_write,
+                            target_phys_addr_t access_len);
+
 struct BMDMAState {
     uint8_t cmd;
     uint8_t status;
@@ -495,8 +513,29 @@ struct BMDMAState {
     int64_t sector_num;
     uint32_t nsector;
     QEMUBH *bh;
+
+    BMDMARWFunc *rw;
+    BMDMAMapFunc *map;
+    BMDMAUnmapFunc *unmap;
+    void *opaque;
 };
 
+static inline void bmdma_memory_read(BMDMAState *bm,
+                                     target_phys_addr_t addr,
+                                     uint8_t *buf,
+                                     target_phys_addr_t len)
+{
+    bm->rw(bm->opaque, addr, buf, len, 0);
+}
+
+static inline void bmdma_memory_write(BMDMAState *bm,
+                                      target_phys_addr_t addr,
+                                      uint8_t *buf,
+                                      target_phys_addr_t len)
+{
+    bm->rw(bm->opaque, addr, buf, len, 1);
+}
+
 static inline IDEState *idebus_active_if(IDEBus *bus)
 {
     return bus->ifs + bus->unit;
diff --git a/hw/ide/pci.c b/hw/ide/pci.c
index 4d95cc5..5879044 100644
--- a/hw/ide/pci.c
+++ b/hw/ide/pci.c
@@ -183,4 +183,11 @@ void pci_ide_create_devs(PCIDevice *dev, DriveInfo **hd_table)
             continue;
         ide_create_drive(d->bus+bus[i], unit[i], hd_table[i]);
     }
+
+    for (i = 0; i < 2; i++) {
+        d->bmdma[i].rw = (void *) pci_memory_rw;
+        d->bmdma[i].map = (void *) pci_memory_map;
+        d->bmdma[i].unmap = (void *) pci_memory_unmap;
+        d->bmdma[i].opaque = dev;
+    }
 }
-- 
1.7.1


WARNING: multiple messages have this Message-ID (diff)
From: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
To: joro@8bytes.org
Cc: kvm@vger.kernel.org, qemu-devel@nongnu.org, blauwirbel@gmail.com,
	paul@codesourcery.com,
	Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>,
	avi@redhat.com
Subject: [Qemu-devel] [PATCH 4/7] ide: use the PCI memory access interface
Date: Sun, 15 Aug 2010 22:27:19 +0300	[thread overview]
Message-ID: <1281900442-29971-5-git-send-email-eduard.munteanu@linux360.ro> (raw)
In-Reply-To: <1281900442-29971-1-git-send-email-eduard.munteanu@linux360.ro>

Emulated PCI IDE controllers now use the memory access interface. This
also allows an emulated IOMMU to translate and check accesses.

Map invalidation results in cancelling DMA transfers. Since the guest OS
can't properly recover the DMA results if the mapping is changed,
this is a fairly good approximation.

Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
---
 dma-helpers.c     |   46 +++++++++++++++++++++++++++++++++++++++++-----
 dma.h             |   21 ++++++++++++++++++++-
 hw/ide/core.c     |   15 ++++++++-------
 hw/ide/internal.h |   39 +++++++++++++++++++++++++++++++++++++++
 hw/ide/pci.c      |    7 +++++++
 5 files changed, 115 insertions(+), 13 deletions(-)

diff --git a/dma-helpers.c b/dma-helpers.c
index d4fc077..9c3a21a 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -10,12 +10,36 @@
 #include "dma.h"
 #include "block_int.h"
 
-void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
+static void *qemu_sglist_default_map(void *opaque,
+                                     QEMUSGInvalMapFunc *inval_cb,
+                                     void *inval_opaque,
+                                     target_phys_addr_t addr,
+                                     target_phys_addr_t *len,
+                                     int is_write)
+{
+    return cpu_physical_memory_map(addr, len, is_write);
+}
+
+static void qemu_sglist_default_unmap(void *opaque,
+                                      void *buffer,
+                                      target_phys_addr_t len,
+                                      int is_write,
+                                      target_phys_addr_t access_len)
+{
+    cpu_physical_memory_unmap(buffer, len, is_write, access_len);
+}
+
+void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint,
+                      QEMUSGMapFunc *map, QEMUSGUnmapFunc *unmap, void *opaque)
 {
     qsg->sg = qemu_malloc(alloc_hint * sizeof(ScatterGatherEntry));
     qsg->nsg = 0;
     qsg->nalloc = alloc_hint;
     qsg->size = 0;
+
+    qsg->map = map ? map : qemu_sglist_default_map;
+    qsg->unmap = unmap ? unmap : qemu_sglist_default_unmap;
+    qsg->opaque = opaque;
 }
 
 void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
@@ -73,12 +97,23 @@ static void dma_bdrv_unmap(DMAAIOCB *dbs)
     int i;
 
     for (i = 0; i < dbs->iov.niov; ++i) {
-        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
-                                  dbs->iov.iov[i].iov_len, !dbs->is_write,
-                                  dbs->iov.iov[i].iov_len);
+        dbs->sg->unmap(dbs->sg->opaque,
+                       dbs->iov.iov[i].iov_base,
+                       dbs->iov.iov[i].iov_len, !dbs->is_write,
+                       dbs->iov.iov[i].iov_len);
     }
 }
 
+static void dma_bdrv_cancel(void *opaque)
+{
+    DMAAIOCB *dbs = opaque;
+
+    bdrv_aio_cancel(dbs->acb);
+    dma_bdrv_unmap(dbs);
+    qemu_iovec_destroy(&dbs->iov);
+    qemu_aio_release(dbs);
+}
+
 static void dma_bdrv_cb(void *opaque, int ret)
 {
     DMAAIOCB *dbs = (DMAAIOCB *)opaque;
@@ -100,7 +135,8 @@ static void dma_bdrv_cb(void *opaque, int ret)
     while (dbs->sg_cur_index < dbs->sg->nsg) {
         cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
         cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
-        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
+        mem = dbs->sg->map(dbs->sg->opaque, dma_bdrv_cancel, dbs,
+                           cur_addr, &cur_len, !dbs->is_write);
         if (!mem)
             break;
         qemu_iovec_add(&dbs->iov, mem, cur_len);
diff --git a/dma.h b/dma.h
index f3bb275..d48f35c 100644
--- a/dma.h
+++ b/dma.h
@@ -15,6 +15,19 @@
 #include "hw/hw.h"
 #include "block.h"
 
+typedef void QEMUSGInvalMapFunc(void *opaque);
+typedef void *QEMUSGMapFunc(void *opaque,
+                            QEMUSGInvalMapFunc *inval_cb,
+                            void *inval_opaque,
+                            target_phys_addr_t addr,
+                            target_phys_addr_t *len,
+                            int is_write);
+typedef void QEMUSGUnmapFunc(void *opaque,
+                             void *buffer,
+                             target_phys_addr_t len,
+                             int is_write,
+                             target_phys_addr_t access_len);
+
 typedef struct {
     target_phys_addr_t base;
     target_phys_addr_t len;
@@ -25,9 +38,15 @@ typedef struct {
     int nsg;
     int nalloc;
     target_phys_addr_t size;
+
+    QEMUSGMapFunc *map;
+    QEMUSGUnmapFunc *unmap;
+    void *opaque;
 } QEMUSGList;
 
-void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint);
+void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint,
+                      QEMUSGMapFunc *map, QEMUSGUnmapFunc *unmap,
+                      void *opaque);
 void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
                      target_phys_addr_t len);
 void qemu_sglist_destroy(QEMUSGList *qsg);
diff --git a/hw/ide/core.c b/hw/ide/core.c
index 0b3b7c2..c19013a 100644
--- a/hw/ide/core.c
+++ b/hw/ide/core.c
@@ -435,7 +435,8 @@ static int dma_buf_prepare(BMDMAState *bm, int is_write)
     } prd;
     int l, len;
 
-    qemu_sglist_init(&s->sg, s->nsector / (IDE_PAGE_SIZE / 512) + 1);
+    qemu_sglist_init(&s->sg, s->nsector / (IDE_PAGE_SIZE / 512) + 1,
+                     bm->map, bm->unmap, bm->opaque);
     s->io_buffer_size = 0;
     for(;;) {
         if (bm->cur_prd_len == 0) {
@@ -443,7 +444,7 @@ static int dma_buf_prepare(BMDMAState *bm, int is_write)
             if (bm->cur_prd_last ||
                 (bm->cur_addr - bm->addr) >= IDE_PAGE_SIZE)
                 return s->io_buffer_size != 0;
-            cpu_physical_memory_read(bm->cur_addr, (uint8_t *)&prd, 8);
+            bmdma_memory_read(bm, bm->cur_addr, (uint8_t *)&prd, 8);
             bm->cur_addr += 8;
             prd.addr = le32_to_cpu(prd.addr);
             prd.size = le32_to_cpu(prd.size);
@@ -526,7 +527,7 @@ static int dma_buf_rw(BMDMAState *bm, int is_write)
             if (bm->cur_prd_last ||
                 (bm->cur_addr - bm->addr) >= IDE_PAGE_SIZE)
                 return 0;
-            cpu_physical_memory_read(bm->cur_addr, (uint8_t *)&prd, 8);
+            bmdma_memory_read(bm, bm->cur_addr, (uint8_t *)&prd, 8);
             bm->cur_addr += 8;
             prd.addr = le32_to_cpu(prd.addr);
             prd.size = le32_to_cpu(prd.size);
@@ -541,11 +542,11 @@ static int dma_buf_rw(BMDMAState *bm, int is_write)
             l = bm->cur_prd_len;
         if (l > 0) {
             if (is_write) {
-                cpu_physical_memory_write(bm->cur_prd_addr,
-                                          s->io_buffer + s->io_buffer_index, l);
+                bmdma_memory_write(bm, bm->cur_prd_addr,
+                                   s->io_buffer + s->io_buffer_index, l);
             } else {
-                cpu_physical_memory_read(bm->cur_prd_addr,
-                                          s->io_buffer + s->io_buffer_index, l);
+                bmdma_memory_read(bm, bm->cur_prd_addr,
+                                  s->io_buffer + s->io_buffer_index, l);
             }
             bm->cur_prd_addr += l;
             bm->cur_prd_len -= l;
diff --git a/hw/ide/internal.h b/hw/ide/internal.h
index eef1ee1..0f3b707 100644
--- a/hw/ide/internal.h
+++ b/hw/ide/internal.h
@@ -476,6 +476,24 @@ struct IDEDeviceInfo {
 #define BM_CMD_START     0x01
 #define BM_CMD_READ      0x08
 
+typedef void BMDMAInvalMapFunc(void *opaque);
+typedef void BMDMARWFunc(void *opaque,
+                         target_phys_addr_t addr,
+                         uint8_t *buf,
+                         target_phys_addr_t len,
+                         int is_write);
+typedef void *BMDMAMapFunc(void *opaque,
+                           BMDMAInvalMapFunc *inval_cb,
+                           void *inval_opaque,
+                           target_phys_addr_t addr,
+                           target_phys_addr_t *len,
+                           int is_write);
+typedef void BMDMAUnmapFunc(void *opaque,
+                            void *buffer,
+                            target_phys_addr_t len,
+                            int is_write,
+                            target_phys_addr_t access_len);
+
 struct BMDMAState {
     uint8_t cmd;
     uint8_t status;
@@ -495,8 +513,29 @@ struct BMDMAState {
     int64_t sector_num;
     uint32_t nsector;
     QEMUBH *bh;
+
+    BMDMARWFunc *rw;
+    BMDMAMapFunc *map;
+    BMDMAUnmapFunc *unmap;
+    void *opaque;
 };
 
+static inline void bmdma_memory_read(BMDMAState *bm,
+                                     target_phys_addr_t addr,
+                                     uint8_t *buf,
+                                     target_phys_addr_t len)
+{
+    bm->rw(bm->opaque, addr, buf, len, 0);
+}
+
+static inline void bmdma_memory_write(BMDMAState *bm,
+                                      target_phys_addr_t addr,
+                                      uint8_t *buf,
+                                      target_phys_addr_t len)
+{
+    bm->rw(bm->opaque, addr, buf, len, 1);
+}
+
 static inline IDEState *idebus_active_if(IDEBus *bus)
 {
     return bus->ifs + bus->unit;
diff --git a/hw/ide/pci.c b/hw/ide/pci.c
index 4d95cc5..5879044 100644
--- a/hw/ide/pci.c
+++ b/hw/ide/pci.c
@@ -183,4 +183,11 @@ void pci_ide_create_devs(PCIDevice *dev, DriveInfo **hd_table)
             continue;
         ide_create_drive(d->bus+bus[i], unit[i], hd_table[i]);
     }
+
+    for (i = 0; i < 2; i++) {
+        d->bmdma[i].rw = (void *) pci_memory_rw;
+        d->bmdma[i].map = (void *) pci_memory_map;
+        d->bmdma[i].unmap = (void *) pci_memory_unmap;
+        d->bmdma[i].opaque = dev;
+    }
 }
-- 
1.7.1

  parent reply	other threads:[~2010-08-15 19:30 UTC|newest]

Thread overview: 35+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2010-08-15 19:27 [PATCH 0/7] AMD IOMMU emulation patches v3 Eduard - Gabriel Munteanu
2010-08-15 19:27 ` [Qemu-devel] " Eduard - Gabriel Munteanu
2010-08-15 19:27 ` [PATCH 1/7] pci: add range_covers_range() Eduard - Gabriel Munteanu
2010-08-15 19:27   ` [Qemu-devel] " Eduard - Gabriel Munteanu
2010-08-18  4:39   ` Isaku Yamahata
2010-08-18  4:39     ` Isaku Yamahata
2010-08-15 19:27 ` [PATCH 2/7] pci: memory access API and IOMMU support Eduard - Gabriel Munteanu
2010-08-15 19:27   ` [Qemu-devel] " Eduard - Gabriel Munteanu
2010-08-18  5:02   ` Isaku Yamahata
2010-08-18  5:02     ` Isaku Yamahata
2010-08-15 19:27 ` [PATCH 3/7] AMD IOMMU emulation Eduard - Gabriel Munteanu
2010-08-15 19:27   ` [Qemu-devel] " Eduard - Gabriel Munteanu
2010-08-16 17:57   ` Blue Swirl
2010-08-16 17:57     ` [Qemu-devel] " Blue Swirl
2010-08-15 19:27 ` Eduard - Gabriel Munteanu [this message]
2010-08-15 19:27   ` [Qemu-devel] [PATCH 4/7] ide: use the PCI memory access interface Eduard - Gabriel Munteanu
2010-08-15 19:27 ` [PATCH 5/7] rtl8139: " Eduard - Gabriel Munteanu
2010-08-15 19:27   ` [Qemu-devel] " Eduard - Gabriel Munteanu
2010-08-15 19:27 ` [PATCH 6/7] eepro100: " Eduard - Gabriel Munteanu
2010-08-15 19:27   ` [Qemu-devel] " Eduard - Gabriel Munteanu
2010-08-15 19:27 ` [PATCH 7/7] ac97: " Eduard - Gabriel Munteanu
2010-08-15 19:27   ` [Qemu-devel] " Eduard - Gabriel Munteanu
2010-08-15 20:42   ` malc
2010-08-15 20:42     ` malc
2010-08-16  1:47 ` [PATCH 0/7] AMD IOMMU emulation patches v3 Anthony Liguori
2010-08-16  1:47   ` [Qemu-devel] " Anthony Liguori
2010-08-28 14:54 [PATCH 0/7] AMD IOMMU emulation patchset v4 Eduard - Gabriel Munteanu
2010-08-28 14:54 ` [PATCH 4/7] ide: use the PCI memory access interface Eduard - Gabriel Munteanu
2010-09-02  5:19   ` Michael S. Tsirkin
2010-09-02  9:12     ` Eduard - Gabriel Munteanu
2010-09-02  9:58       ` Michael S. Tsirkin
2010-09-02 15:01         ` Eduard - Gabriel Munteanu
2010-09-02 15:24           ` Avi Kivity
2010-09-02 15:39             ` Michael S. Tsirkin
2010-09-02 16:07               ` Avi Kivity
2010-09-02 15:31           ` Michael S. Tsirkin

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1281900442-29971-5-git-send-email-eduard.munteanu@linux360.ro \
    --to=eduard.munteanu@linux360.ro \
    --cc=anthony@codemonkey.ws \
    --cc=avi@redhat.com \
    --cc=blauwirbel@gmail.com \
    --cc=joro@8bytes.org \
    --cc=kvm@vger.kernel.org \
    --cc=paul@codesourcery.com \
    --cc=qemu-devel@nongnu.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.