From: "Michael S. Tsirkin" <mst@redhat.com>
To: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
Cc: joro@8bytes.org, blauwirbel@gmail.com, paul@codesourcery.com,
	avi@redhat.com, anthony@codemonkey.ws, av1474@comtv.ru,
	yamahata@valinux.co.jp, kvm@vger.kernel.org,
	qemu-devel@nongnu.org
Subject: Re: [PATCH 4/7] ide: use the PCI memory access interface
Date: Thu, 2 Sep 2010 08:19:11 +0300
Message-ID: <20100902051911.GA4273@redhat.com>
In-Reply-To: <1283007298-10942-5-git-send-email-eduard.munteanu@linux360.ro>

On Sat, Aug 28, 2010 at 05:54:55PM +0300, Eduard - Gabriel Munteanu wrote:
> Emulated PCI IDE controllers now use the memory access interface. This
> also allows an emulated IOMMU to translate and check accesses.
> 
> Map invalidation results in cancelling DMA transfers. Since the guest OS
> can't properly recover the DMA results in case the mapping is changed,
> this is a fairly good approximation.
> 
> Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
> ---
>  dma-helpers.c     |   46 +++++++++++++++++++++++++++++++++++++++++-----
>  dma.h             |   21 ++++++++++++++++++++-
>  hw/ide/core.c     |   15 ++++++++-------
>  hw/ide/internal.h |   39 +++++++++++++++++++++++++++++++++++++++
>  hw/ide/macio.c    |    4 ++--
>  hw/ide/pci.c      |    7 +++++++
>  6 files changed, 117 insertions(+), 15 deletions(-)
> 
> diff --git a/dma-helpers.c b/dma-helpers.c
> index 712ed89..a0dcdb8 100644
> --- a/dma-helpers.c
> +++ b/dma-helpers.c
> @@ -10,12 +10,36 @@
>  #include "dma.h"
>  #include "block_int.h"
>  
> -void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
> +static void *qemu_sglist_default_map(void *opaque,
> +                                     QEMUSGInvalMapFunc *inval_cb,
> +                                     void *inval_opaque,
> +                                     target_phys_addr_t addr,
> +                                     target_phys_addr_t *len,
> +                                     int is_write)
> +{
> +    return cpu_physical_memory_map(addr, len, is_write);
> +}
> +
> +static void qemu_sglist_default_unmap(void *opaque,
> +                                      void *buffer,
> +                                      target_phys_addr_t len,
> +                                      int is_write,
> +                                      target_phys_addr_t access_len)
> +{
> +    cpu_physical_memory_unmap(buffer, len, is_write, access_len);
> +}
> +
> +void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint,
> +                      QEMUSGMapFunc *map, QEMUSGUnmapFunc *unmap, void *opaque)
>  {
>      qsg->sg = qemu_malloc(alloc_hint * sizeof(ScatterGatherEntry));
>      qsg->nsg = 0;
>      qsg->nalloc = alloc_hint;
>      qsg->size = 0;
> +
> +    qsg->map = map ? map : qemu_sglist_default_map;
> +    qsg->unmap = unmap ? unmap : qemu_sglist_default_unmap;
> +    qsg->opaque = opaque;
>  }
>  
>  void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
> @@ -73,12 +97,23 @@ static void dma_bdrv_unmap(DMAAIOCB *dbs)
>      int i;
>  
>      for (i = 0; i < dbs->iov.niov; ++i) {
> -        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
> -                                  dbs->iov.iov[i].iov_len, !dbs->is_write,
> -                                  dbs->iov.iov[i].iov_len);
> +        dbs->sg->unmap(dbs->sg->opaque,
> +                       dbs->iov.iov[i].iov_base,
> +                       dbs->iov.iov[i].iov_len, !dbs->is_write,
> +                       dbs->iov.iov[i].iov_len);
>      }
>  }
>  
> +static void dma_bdrv_cancel(void *opaque)
> +{
> +    DMAAIOCB *dbs = opaque;
> +
> +    bdrv_aio_cancel(dbs->acb);
> +    dma_bdrv_unmap(dbs);
> +    qemu_iovec_destroy(&dbs->iov);
> +    qemu_aio_release(dbs);
> +}
> +
>  static void dma_bdrv_cb(void *opaque, int ret)
>  {
>      DMAAIOCB *dbs = (DMAAIOCB *)opaque;
> @@ -100,7 +135,8 @@ static void dma_bdrv_cb(void *opaque, int ret)
>      while (dbs->sg_cur_index < dbs->sg->nsg) {
>          cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
>          cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
> -        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
> +        mem = dbs->sg->map(dbs->sg->opaque, dma_bdrv_cancel, dbs,
> +                           cur_addr, &cur_len, !dbs->is_write);
>          if (!mem)
>              break;
>          qemu_iovec_add(&dbs->iov, mem, cur_len);
> diff --git a/dma.h b/dma.h
> index f3bb275..d48f35c 100644
> --- a/dma.h
> +++ b/dma.h
> @@ -15,6 +15,19 @@
>  #include "hw/hw.h"
>  #include "block.h"
>  
> +typedef void QEMUSGInvalMapFunc(void *opaque);
> +typedef void *QEMUSGMapFunc(void *opaque,
> +                            QEMUSGInvalMapFunc *inval_cb,
> +                            void *inval_opaque,
> +                            target_phys_addr_t addr,
> +                            target_phys_addr_t *len,
> +                            int is_write);
> +typedef void QEMUSGUnmapFunc(void *opaque,
> +                             void *buffer,
> +                             target_phys_addr_t len,
> +                             int is_write,
> +                             target_phys_addr_t access_len);
> +
>  typedef struct {
>      target_phys_addr_t base;
>      target_phys_addr_t len;
> @@ -25,9 +38,15 @@ typedef struct {
>      int nsg;
>      int nalloc;
>      target_phys_addr_t size;
> +
> +    QEMUSGMapFunc *map;
> +    QEMUSGUnmapFunc *unmap;
> +    void *opaque;
>  } QEMUSGList;
>  
> -void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint);
> +void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint,
> +                      QEMUSGMapFunc *map, QEMUSGUnmapFunc *unmap,
> +                      void *opaque);
>  void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
>                       target_phys_addr_t len);
>  void qemu_sglist_destroy(QEMUSGList *qsg);
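
Side note on the API above: with the NULL fallback, existing callers keep
the old behaviour, and IOMMU-aware code supplies its own hooks.  Roughly
(the hook and device names here are made up, just to illustrate):

    /* No hooks: falls back to cpu_physical_memory_map()/unmap(). */
    qemu_sglist_init(&qsg, 16, NULL, NULL, NULL);

    /* IOMMU-aware caller: supply map/unmap hooks plus an opaque. */
    qemu_sglist_init(&qsg, 16, my_iommu_map, my_iommu_unmap, my_dev);
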
> diff --git a/hw/ide/core.c b/hw/ide/core.c
> index af52c2c..024a125 100644
> --- a/hw/ide/core.c
> +++ b/hw/ide/core.c
> @@ -436,7 +436,8 @@ static int dma_buf_prepare(BMDMAState *bm, int is_write)
>      } prd;
>      int l, len;
>  
> -    qemu_sglist_init(&s->sg, s->nsector / (IDE_PAGE_SIZE / 512) + 1);
> +    qemu_sglist_init(&s->sg, s->nsector / (IDE_PAGE_SIZE / 512) + 1,
> +                     bm->map, bm->unmap, bm->opaque);
>      s->io_buffer_size = 0;
>      for(;;) {
>          if (bm->cur_prd_len == 0) {
> @@ -444,7 +445,7 @@ static int dma_buf_prepare(BMDMAState *bm, int is_write)
>              if (bm->cur_prd_last ||
>                  (bm->cur_addr - bm->addr) >= IDE_PAGE_SIZE)
>                  return s->io_buffer_size != 0;
> -            cpu_physical_memory_read(bm->cur_addr, (uint8_t *)&prd, 8);
> +            bmdma_memory_read(bm, bm->cur_addr, (uint8_t *)&prd, 8);
>              bm->cur_addr += 8;
>              prd.addr = le32_to_cpu(prd.addr);
>              prd.size = le32_to_cpu(prd.size);
> @@ -527,7 +528,7 @@ static int dma_buf_rw(BMDMAState *bm, int is_write)
>              if (bm->cur_prd_last ||
>                  (bm->cur_addr - bm->addr) >= IDE_PAGE_SIZE)
>                  return 0;
> -            cpu_physical_memory_read(bm->cur_addr, (uint8_t *)&prd, 8);
> +            bmdma_memory_read(bm, bm->cur_addr, (uint8_t *)&prd, 8);
>              bm->cur_addr += 8;
>              prd.addr = le32_to_cpu(prd.addr);
>              prd.size = le32_to_cpu(prd.size);
> @@ -542,11 +543,11 @@ static int dma_buf_rw(BMDMAState *bm, int is_write)
>              l = bm->cur_prd_len;
>          if (l > 0) {
>              if (is_write) {
> -                cpu_physical_memory_write(bm->cur_prd_addr,
> -                                          s->io_buffer + s->io_buffer_index, l);
> +                bmdma_memory_write(bm, bm->cur_prd_addr,
> +                                   s->io_buffer + s->io_buffer_index, l);
>              } else {
> -                cpu_physical_memory_read(bm->cur_prd_addr,
> -                                          s->io_buffer + s->io_buffer_index, l);
> +                bmdma_memory_read(bm, bm->cur_prd_addr,
> +                                  s->io_buffer + s->io_buffer_index, l);
>              }
>              bm->cur_prd_addr += l;
>              bm->cur_prd_len -= l;
> diff --git a/hw/ide/internal.h b/hw/ide/internal.h
> index 4165543..f686d38 100644
> --- a/hw/ide/internal.h
> +++ b/hw/ide/internal.h
> @@ -477,6 +477,24 @@ struct IDEDeviceInfo {
>  #define BM_CMD_START     0x01
>  #define BM_CMD_READ      0x08
>  
> +typedef void BMDMAInvalMapFunc(void *opaque);
> +typedef void BMDMARWFunc(void *opaque,
> +                         target_phys_addr_t addr,
> +                         uint8_t *buf,
> +                         target_phys_addr_t len,
> +                         int is_write);
> +typedef void *BMDMAMapFunc(void *opaque,
> +                           BMDMAInvalMapFunc *inval_cb,
> +                           void *inval_opaque,
> +                           target_phys_addr_t addr,
> +                           target_phys_addr_t *len,
> +                           int is_write);
> +typedef void BMDMAUnmapFunc(void *opaque,
> +                            void *buffer,
> +                            target_phys_addr_t len,
> +                            int is_write,
> +                            target_phys_addr_t access_len);
> +
>  struct BMDMAState {
>      uint8_t cmd;
>      uint8_t status;
> @@ -496,8 +514,29 @@ struct BMDMAState {
>      int64_t sector_num;
>      uint32_t nsector;
>      QEMUBH *bh;
> +
> +    BMDMARWFunc *rw;
> +    BMDMAMapFunc *map;
> +    BMDMAUnmapFunc *unmap;
> +    void *opaque;
>  };
>  
> +static inline void bmdma_memory_read(BMDMAState *bm,
> +                                     target_phys_addr_t addr,
> +                                     uint8_t *buf,
> +                                     target_phys_addr_t len)
> +{
> +    bm->rw(bm->opaque, addr, buf, len, 0);
> +}
> +
> +static inline void bmdma_memory_write(BMDMAState *bm,
> +                                      target_phys_addr_t addr,
> +                                      uint8_t *buf,
> +                                      target_phys_addr_t len)
> +{
> +    bm->rw(bm->opaque, addr, buf, len, 1);
> +}
> +

Here again, I am concerned about indirection and pointer chasing on the
data path.  Can we have an IOMMU pointer in the device, and take a fast
path when there is no IOMMU?
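
Something like the following, roughly (just a sketch -- the "iommu" field
below is hypothetical; the posted patch always goes through bm->rw):

    static inline void bmdma_memory_read(BMDMAState *bm,
                                         target_phys_addr_t addr,
                                         uint8_t *buf,
                                         target_phys_addr_t len)
    {
        if (!bm->iommu) {
            /* Fast path: no IOMMU, access guest memory directly. */
            cpu_physical_memory_read(addr, buf, len);
        } else {
            /* Slow path: translate and check through the IOMMU hook. */
            bm->rw(bm->opaque, addr, buf, len, 0);
        }
    }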

>  static inline IDEState *idebus_active_if(IDEBus *bus)
>  {
>      return bus->ifs + bus->unit;
> diff --git a/hw/ide/macio.c b/hw/ide/macio.c
> index bd1c73e..962ae13 100644
> --- a/hw/ide/macio.c
> +++ b/hw/ide/macio.c
> @@ -79,7 +79,7 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
>  
>      s->io_buffer_size = io->len;
>  
> -    qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1);
> +    qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1, NULL, NULL, NULL);
>      qemu_sglist_add(&s->sg, io->addr, io->len);
>      io->addr += io->len;
>      io->len = 0;
> @@ -141,7 +141,7 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
>      s->io_buffer_index = 0;
>      s->io_buffer_size = io->len;
>  
> -    qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1);
> +    qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1, NULL, NULL, NULL);
>      qemu_sglist_add(&s->sg, io->addr, io->len);
>      io->addr += io->len;
>      io->len = 0;
> diff --git a/hw/ide/pci.c b/hw/ide/pci.c
> index 4d95cc5..5879044 100644
> --- a/hw/ide/pci.c
> +++ b/hw/ide/pci.c
> @@ -183,4 +183,11 @@ void pci_ide_create_devs(PCIDevice *dev, DriveInfo **hd_table)
>              continue;
>          ide_create_drive(d->bus+bus[i], unit[i], hd_table[i]);
>      }
> +
> +    for (i = 0; i < 2; i++) {
> +        d->bmdma[i].rw = (void *) pci_memory_rw;
> +        d->bmdma[i].map = (void *) pci_memory_map;
> +        d->bmdma[i].unmap = (void *) pci_memory_unmap;
> +        d->bmdma[i].opaque = dev;
> +    }
>  }

These casts show something is wrong with the API, IMO.
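
One way out would be thin adapter functions with the exact BMDMA*
signatures instead of casting function pointers.  A sketch, assuming
pci_memory_rw() takes a PCIDevice * plus the same addr/buf/len/is_write
arguments (which is what the cast implies):

    /* Adapter: the void * -> PCIDevice * conversion happens implicitly
     * inside the call, so no function-pointer cast is needed. */
    static void bmdma_pci_rw(void *opaque, target_phys_addr_t addr,
                             uint8_t *buf, target_phys_addr_t len,
                             int is_write)
    {
        pci_memory_rw(opaque, addr, buf, len, is_write);
    }

and then simply d->bmdma[i].rw = bmdma_pci_rw; with no cast.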

> -- 
> 1.7.1
> 
> --
> To unsubscribe from this list: send the line "unsubscribe kvm" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html

Thread overview: 97+ messages
2010-08-28 14:54 [PATCH 0/7] AMD IOMMU emulation patchset v4 Eduard - Gabriel Munteanu
2010-08-28 14:54 ` [PATCH 1/7] pci: expand tabs to spaces in pci_regs.h Eduard - Gabriel Munteanu
2010-08-31 20:29   ` Michael S. Tsirkin
2010-08-31 22:58     ` Eduard - Gabriel Munteanu
2010-09-01 10:39       ` Michael S. Tsirkin
2010-08-28 14:54 ` [PATCH 2/7] pci: memory access API and IOMMU support Eduard - Gabriel Munteanu
2010-09-02  5:28   ` Michael S. Tsirkin
2010-09-02  8:40     ` Eduard - Gabriel Munteanu
2010-09-02  9:49       ` Michael S. Tsirkin
2010-09-04  9:01         ` Blue Swirl
2010-09-05  7:10           ` Michael S. Tsirkin
2010-08-28 14:54 ` [PATCH 3/7] AMD IOMMU emulation Eduard - Gabriel Munteanu
2010-08-28 15:58   ` Blue Swirl
2010-08-28 21:53     ` Eduard - Gabriel Munteanu
2010-08-29 20:37       ` Blue Swirl
2010-08-30  3:07   ` Isaku Yamahata
2010-08-30  5:54     ` Eduard - Gabriel Munteanu
2010-08-28 14:54 ` [PATCH 4/7] ide: use the PCI memory access interface Eduard - Gabriel Munteanu
2010-09-02  5:19   ` Michael S. Tsirkin [this message]
2010-09-02  9:12     ` Eduard - Gabriel Munteanu
2010-09-02  9:58       ` Michael S. Tsirkin
2010-09-02 15:01         ` Eduard - Gabriel Munteanu
2010-09-02 15:24           ` Avi Kivity
2010-09-02 15:39             ` Michael S. Tsirkin
2010-09-02 16:07               ` Avi Kivity
2010-09-02 15:31           ` Michael S. Tsirkin
2010-08-28 14:54 ` [PATCH 5/7] rtl8139: " Eduard - Gabriel Munteanu
2010-08-28 14:54 ` [PATCH 6/7] eepro100: " Eduard - Gabriel Munteanu
2010-08-28 14:54 ` [PATCH 7/7] ac97: " Eduard - Gabriel Munteanu
2010-08-28 16:00 ` [PATCH 0/7] AMD IOMMU emulation patchset v4 Blue Swirl
2010-08-29  9:55   ` Joerg Roedel
2010-08-29 20:44     ` Blue Swirl
2010-08-29 22:08       ` [PATCH 2/7] pci: memory access API and IOMMU support Eduard - Gabriel Munteanu
2010-08-29 22:11         ` Eduard - Gabriel Munteanu
2010-09-01 20:10         ` Stefan Weil
2010-09-02  6:00           ` Michael S. Tsirkin
2010-09-02  9:08             ` Eduard - Gabriel Munteanu
2010-09-02 13:24               ` Anthony Liguori
2010-09-02  8:51           ` Eduard - Gabriel Munteanu
2010-09-02 16:05             ` Stefan Weil
2010-09-02 16:14               ` Eduard - Gabriel Munteanu
2010-09-13 20:01 ` [PATCH RFC] dma_rw.h (was Re: [PATCH 0/7] AMD IOMMU emulation patchset v4) Michael S. Tsirkin
2010-09-13 20:45   ` Anthony Liguori
2010-09-16  7:12     ` Eduard - Gabriel Munteanu
2010-09-16  9:35     ` Michael S. Tsirkin
2010-09-16  7:06   ` Eduard - Gabriel Munteanu
2010-09-16  9:20     ` Michael S. Tsirkin
2010-09-16 11:15       ` Eduard - Gabriel Munteanu
  -- strict thread matches above, loose matches on Subject: below --
2010-08-15 19:27 [PATCH 0/7] AMD IOMMU emulation patches v3 Eduard - Gabriel Munteanu
2010-08-15 19:27 ` [PATCH 4/7] ide: use the PCI memory access interface Eduard - Gabriel Munteanu
