From: Paolo Bonzini
Date: Wed, 24 Sep 2014 13:18:44 +0200
Message-ID: <5422A894.6070806@redhat.com>
In-Reply-To: <1411547278-25915-8-git-send-email-famz@redhat.com>
Subject: Re: [Qemu-devel] [PATCH v2 7/7] virtio-scsi: Handle TMF request cancellation asynchronously
To: Fam Zheng, qemu-devel@nongnu.org
Cc: Kevin Wolf, Stefan Hajnoczi

On 24/09/2014 10:27, Fam Zheng wrote:
> For VIRTIO_SCSI_T_TMF_ABORT_TASK and VIRTIO_SCSI_T_TMF_ABORT_TASK_SET,
> use scsi_req_cancel_async to start the cancellation.
>
> Because each tmf command may cancel multiple requests, we need to use a
> counter to track the number of remaining requests we still need to wait
> for.
>
> Signed-off-by: Fam Zheng <famz@redhat.com>
> ---
>  hw/scsi/virtio-scsi.c | 84 ++++++++++++++++++++++++++++++++++++++++++++++-----
>  1 file changed, 77 insertions(+), 7 deletions(-)
>
> diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
> index fa36e23..9bd7d8a 100644
> --- a/hw/scsi/virtio-scsi.c
> +++ b/hw/scsi/virtio-scsi.c
> @@ -208,12 +208,40 @@ static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq)
>      return req;
>  }
>
> -static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
> +typedef struct {
> +    VirtIOSCSIReq *tmf_req;
> +    int remaining;
> +} VirtIOSCSICancelTracker;

What about putting "remaining" directly in VirtIOSCSIReq?

> +
> +typedef struct {
> +    Notifier notifier;
> +    VirtIOSCSICancelTracker *tracker;
> +} VirtIOSCSICancelNotifier;
> +
> +static void virtio_scsi_cancel_notify(Notifier *notifier, void *data)
> +{
> +    VirtIOSCSICancelNotifier *n = container_of(notifier,
> +                                               VirtIOSCSICancelNotifier,
> +                                               notifier);
> +
> +    if (--n->tracker->remaining == 0) {
> +        virtio_scsi_complete_req(n->tracker->tmf_req);
> +        g_free(n->tracker);
> +    }
> +    g_free(n);
> +}
> +
> +/* Return true if the request is ready to be completed and return to guest;
> + * false if the request will be completed (by some other events) later, for
> + * example in the case of async cancellation. */
> +static bool virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)

Perhaps return 0/-EINPROGRESS so that it's easier to remember the
calling convention?
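For instance, an untested sketch of what I have in mind (same names as
in your patch, only the return type and values change):

    /* Return 0 if the request was completed synchronously, or
     * -EINPROGRESS if completion is deferred, e.g. to
     * virtio_scsi_cancel_notify after an async cancellation.  */
    static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
    {
        int ret = 0;
        ...
        ret = -EINPROGRESS;    /* instead of "ret = false" */
        ...
        return ret;
    }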
>  {
>      SCSIDevice *d = virtio_scsi_device_find(s, req->req.tmf.lun);
>      SCSIRequest *r, *next;
>      BusChild *kid;
>      int target;
> +    bool ret = true;
> +    int cancel_count;
>
>      if (s->dataplane_started && bdrv_get_aio_context(d->conf.bs) != s->ctx) {
>          aio_context_acquire(s->ctx);
> @@ -251,7 +279,18 @@ static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
>                   */
>                  req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
>              } else {
> -                scsi_req_cancel(r);
> +                VirtIOSCSICancelNotifier *notifier;
> +                VirtIOSCSICancelTracker *tracker;
> +
> +                notifier = g_new(VirtIOSCSICancelNotifier, 1);

Slice allocator?

> +                notifier->notifier.notify
> +                    = virtio_scsi_cancel_notify;
> +                tracker = g_new(VirtIOSCSICancelTracker, 1);

Same here if you keep VirtIOSCSICancelTracker.

> +                tracker->tmf_req = req;
> +                tracker->remaining = 1;
> +                notifier->tracker = tracker;
> +                scsi_req_cancel_async(r, &notifier->notifier);
> +                ret = false;
>              }
>          }
>          break;
>
> @@ -277,6 +316,7 @@ static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
>          if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
>              goto incorrect_lun;
>          }
> +        cancel_count = 0;
>          QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
>              if (r->hba_private) {
>                  if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK_SET) {
> @@ -286,10 +326,36 @@ static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
>                      req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
>                      break;
>                  } else {
> -                    scsi_req_cancel(r);
> +                    /* Before we actually cancel any requests in the next for
> +                     * loop, let's count them. This way, if the bus starts
> +                     * calling back to the notifier even before we finish the
> +                     * loop, the counter, whose value is already seen in
> +                     * virtio_scsi_cancel_notify, will prevent us from
> +                     * completing the tmf too quickly. */
> +                    cancel_count++;
>                  }
>              }
>          }
> +        if (cancel_count) {
> +            VirtIOSCSICancelNotifier *notifier;
> +            VirtIOSCSICancelTracker *tracker;
> +
> +            tracker = g_new(VirtIOSCSICancelTracker, 1);

Same as above.

> +            tracker->tmf_req = req;
> +            tracker->remaining = cancel_count;
> +
> +            QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
> +                if (r->hba_private) {
> +                    notifier = g_new(VirtIOSCSICancelNotifier, 1);

Same as above.
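With GLib's slice allocator, both allocations and the matching frees in
virtio_scsi_cancel_notify would become something like this (untested):

    notifier = g_slice_new(VirtIOSCSICancelNotifier);
    tracker = g_slice_new(VirtIOSCSICancelTracker);

    /* ...and in virtio_scsi_cancel_notify: */
    g_slice_free(VirtIOSCSICancelTracker, n->tracker);
    g_slice_free(VirtIOSCSICancelNotifier, n);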
> +                    notifier->notifier.notify
> +                        = virtio_scsi_cancel_notify;
> +                    notifier->tracker = tracker;
> +                    scsi_req_cancel_async(r, &notifier->notifier);
> +                }
> +            }
> +            ret = false;
> +        }
> +
>          break;
>
>      case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
> @@ -310,20 +376,22 @@ static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
>          break;
>      }
>
> -    return;
> +    return ret;
>
>  incorrect_lun:
>      req->resp.tmf.response = VIRTIO_SCSI_S_INCORRECT_LUN;
> -    return;
> +    return ret;
>
>  fail:
>      req->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
> +    return ret;
>  }
>
>  void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
>  {
>      VirtIODevice *vdev = (VirtIODevice *)s;
>      int type;
> +    bool should_complete = true;
>
>      if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
>                     &type, sizeof(type)) < sizeof(type)) {
> @@ -337,7 +405,7 @@ void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
>                         sizeof(VirtIOSCSICtrlTMFResp)) < 0) {
>              virtio_scsi_bad_req();
>          } else {
> -            virtio_scsi_do_tmf(s, req);
> +            should_complete = virtio_scsi_do_tmf(s, req);
>          }
>
>      } else if (req->req.tmf.type == VIRTIO_SCSI_T_AN_QUERY ||
> @@ -350,7 +418,9 @@ void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
>              req->resp.an.response = VIRTIO_SCSI_S_OK;
>          }
>      }
> -    virtio_scsi_complete_req(req);
> +    if (should_complete) {
> +        virtio_scsi_complete_req(req);
> +    }
>  }
>
>  static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
>

Very nice apart from these comments.

Paolo
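PS: if you adopt the 0/-EINPROGRESS convention, the caller above could
drop the bool entirely, roughly (untested):

    if (virtio_scsi_do_tmf(s, req) == -EINPROGRESS) {
        /* virtio_scsi_cancel_notify will complete the request later.  */
        return;
    }
    ...
    virtio_scsi_complete_req(req);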