From: Paolo Bonzini <pbonzini@redhat.com>
Date: Thu, 8 Sep 2011 17:25:05 +0200
Message-Id: <1315495505-28906-13-git-send-email-pbonzini@redhat.com>
In-Reply-To: <1315495505-28906-1-git-send-email-pbonzini@redhat.com>
References: <1315495505-28906-1-git-send-email-pbonzini@redhat.com>
Subject: [Qemu-devel] [PATCH 12/12] nbd: split requests
To: qemu-devel@nongnu.org

qemu-nbd has a limit of slightly less than 1M per request.  Work around
this in the nbd block driver.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 block/nbd.c |   52 ++++++++++++++++++++++++++++++++++++++++++++++------
 1 files changed, 46 insertions(+), 6 deletions(-)

diff --git a/block/nbd.c b/block/nbd.c
index 5a75263..468a517 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -213,8 +213,9 @@ static int nbd_open(BlockDriverState *bs, const char* filename, int flags)
     return result;
 }
 
-static int nbd_co_readv(BlockDriverState *bs, int64_t sector_num,
-                        int nb_sectors, QEMUIOVector *qiov)
+static int nbd_co_readv_1(BlockDriverState *bs, int64_t sector_num,
+                          int nb_sectors, QEMUIOVector *qiov,
+                          int offset)
 {
     BDRVNBDState *s = bs->opaque;
     struct nbd_request request;
@@ -241,7 +242,7 @@ static int nbd_co_readv(BlockDriverState *bs, int64_t sector_num,
         reply.error = EIO;
         goto done;
     }
-    if (qemu_co_recvv(s->sock, qiov->iov, request.len, 0) != request.len) {
+    if (qemu_co_recvv(s->sock, qiov->iov, request.len, offset) != request.len) {
         reply.error = EIO;
     }
 
@@ -251,8 +252,9 @@ done:
 
 }
 
-static int nbd_co_writev(BlockDriverState *bs, int64_t sector_num,
-                         int nb_sectors, QEMUIOVector *qiov)
+static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num,
+                           int nb_sectors, QEMUIOVector *qiov,
+                           int offset)
 {
     BDRVNBDState *s = bs->opaque;
     struct nbd_request request;
@@ -273,7 +275,7 @@ static int nbd_co_writev(BlockDriverState *bs, int64_t sector_num,
         reply.error = errno;
         goto done;
     }
-    ret = qemu_co_sendv(s->sock, qiov->iov, request.len, 0);
+    ret = qemu_co_sendv(s->sock, qiov->iov, request.len, offset);
     if (ret != request.len) {
         reply.error = EIO;
         goto done;
@@ -291,6 +293,44 @@ done:
     return -reply.error;
 }
 
+/* qemu-nbd has a limit of slightly less than 1M per request.  For safety,
+ * transfer at most 512K per request. */
+#define NBD_MAX_SECTORS 1024
+
+static int nbd_co_readv(BlockDriverState *bs, int64_t sector_num,
+                        int nb_sectors, QEMUIOVector *qiov)
+{
+    int offset = 0;
+    int ret;
+    while (nb_sectors > NBD_MAX_SECTORS) {
+        ret = nbd_co_readv_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset);
+        if (ret < 0) {
+            return ret;
+        }
+        offset += NBD_MAX_SECTORS * 512;
+        sector_num += NBD_MAX_SECTORS;
+        nb_sectors -= NBD_MAX_SECTORS;
+    }
+    return nbd_co_readv_1(bs, sector_num, nb_sectors, qiov, offset);
+}
+
+static int nbd_co_writev(BlockDriverState *bs, int64_t sector_num,
+                         int nb_sectors, QEMUIOVector *qiov)
+{
+    int offset = 0;
+    int ret;
+    while (nb_sectors > NBD_MAX_SECTORS) {
+        ret = nbd_co_writev_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset);
+        if (ret < 0) {
+            return ret;
+        }
+        offset += NBD_MAX_SECTORS * 512;
+        sector_num += NBD_MAX_SECTORS;
+        nb_sectors -= NBD_MAX_SECTORS;
+    }
+    return nbd_co_writev_1(bs, sector_num, nb_sectors, qiov, offset);
+}
+
 static int nbd_co_flush(BlockDriverState *bs)
 {
     BDRVNBDState *s = bs->opaque;
-- 
1.7.6