From mboxrd@z Thu Jan 1 00:00:00 1970
Received: from eggs.gnu.org ([208.118.235.92]:40838) by lists.gnu.org
	with esmtp (Exim 4.71) (envelope-from ) id 1S5gwV-0004j2-6D
	for qemu-devel@nongnu.org; Thu, 08 Mar 2012 12:16:01 -0500
Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71)
	(envelope-from ) id 1S5gwO-0004qO-KS
	for qemu-devel@nongnu.org; Thu, 08 Mar 2012 12:15:54 -0500
Received: from mail-yw0-f45.google.com ([209.85.213.45]:52155) by eggs.gnu.org
	with esmtp (Exim 4.71) (envelope-from ) id 1S5gwO-0004oW-E2
	for qemu-devel@nongnu.org; Thu, 08 Mar 2012 12:15:48 -0500
Received: by mail-yw0-f45.google.com with SMTP id o21so436011yho.4
	for ; Thu, 08 Mar 2012 09:15:47 -0800 (PST)
Sender: Paolo Bonzini
From: Paolo Bonzini
Date: Thu, 8 Mar 2012 18:15:09 +0100
Message-Id: <1331226917-6658-10-git-send-email-pbonzini@redhat.com>
In-Reply-To: <1331226917-6658-1-git-send-email-pbonzini@redhat.com>
References: <1331226917-6658-1-git-send-email-pbonzini@redhat.com>
Subject: [Qemu-devel] [RFC PATCH 09/17] ide: issue discard asynchronously but
	serialize the pieces
List-Id: 
List-Unsubscribe: ,
List-Archive: 
List-Post: 
List-Help: 
List-Subscribe: ,
To: qemu-devel@nongnu.org

Now that discard can actually issue writes, make it asynchronous.
At the same time, avoid consuming too much memory while it is running.

Signed-off-by: Paolo Bonzini
---
 hw/ide/core.c |   78 ++++++++++++++++++++++++++++++++++++++-------------------
 1 files changed, 53 insertions(+), 25 deletions(-)

diff --git a/hw/ide/core.c b/hw/ide/core.c
index 4d568ac..f12470e 100644
--- a/hw/ide/core.c
+++ b/hw/ide/core.c
@@ -312,14 +312,26 @@ typedef struct TrimAIOCB {
     BlockDriverAIOCB common;
     QEMUBH *bh;
     int ret;
+    QEMUIOVector *qiov;
+    BlockDriverAIOCB *aiocb;
+    int i, j;
 } TrimAIOCB;
 
 static void trim_aio_cancel(BlockDriverAIOCB *acb)
 {
     TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
 
+    /* Exit the loop in case bdrv_aio_cancel calls ide_issue_trim_cb again.  */
+    iocb->j = iocb->qiov->niov - 1;
+    iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;
+
+    /* Tell ide_issue_trim_cb not to trigger the completion, too.  */
     qemu_bh_delete(iocb->bh);
     iocb->bh = NULL;
+
+    if (iocb->aiocb) {
+        bdrv_aio_cancel(iocb->aiocb);
+    }
     qemu_aio_release(iocb);
 }
 
@@ -336,43 +346,59 @@ static void ide_trim_bh_cb(void *opaque)
 
     qemu_bh_delete(iocb->bh);
     iocb->bh = NULL;
-
     qemu_aio_release(iocb);
 }
 
+static void ide_issue_trim_cb(void *opaque, int ret)
+{
+    TrimAIOCB *iocb = opaque;
+    if (ret >= 0) {
+        do {
+            int j = iocb->j;
+            if (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
+                int i = iocb->i;
+                uint64_t *buffer = iocb->qiov->iov[j].iov_base;
+
+                /* 6-byte LBA + 2-byte range per entry */
+                uint64_t entry = le64_to_cpu(buffer[i]);
+                uint64_t sector = entry & 0x0000ffffffffffffULL;
+                uint16_t count = entry >> 48;
+
+                if (count > 0) {
+                    iocb->aiocb = bdrv_aio_discard(iocb->common.bs,
+                                                   sector, count,
+                                                   ide_issue_trim_cb, opaque);
+                    return;
+                }
+
+                /* If count = 0, advance to next iov.  */
+            }
+
+            iocb->i = -1;
+        } while (++iocb->j < iocb->qiov->niov);
+    } else {
+        iocb->ret = ret;
+    }
+
+    iocb->aiocb = NULL;
+    if (iocb->bh) {
+        qemu_bh_schedule(iocb->bh);
+    }
+}
+
 BlockDriverAIOCB *ide_issue_trim(BlockDriverState *bs,
         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
         BlockDriverCompletionFunc *cb, void *opaque)
 {
     TrimAIOCB *iocb;
-    int i, j, ret;
 
     iocb = qemu_aio_get(&trim_aio_pool, bs, cb, opaque);
    iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
     iocb->ret = 0;
-
-    for (j = 0; j < qiov->niov; j++) {
-        uint64_t *buffer = qiov->iov[j].iov_base;
-
-        for (i = 0; i < qiov->iov[j].iov_len / 8; i++) {
-            /* 6-byte LBA + 2-byte range per entry */
-            uint64_t entry = le64_to_cpu(buffer[i]);
-            uint64_t sector = entry & 0x0000ffffffffffffULL;
-            uint16_t count = entry >> 48;
-
-            if (count == 0) {
-                break;
-            }
-
-            ret = bdrv_discard(bs, sector, count);
-            if (!iocb->ret) {
-                iocb->ret = ret;
-            }
-        }
-    }
-
-    qemu_bh_schedule(iocb->bh);
-
+    iocb->qiov = qiov;
+    iocb->i = -1;
+    iocb->j = 0;
+    ide_issue_trim_cb(iocb, 0);
     return &iocb->common;
 }
 
-- 
1.7.7.6