From: Liu Yuan <namei.unix@gmail.com>
To: Badari Pulavarty <pbadari@us.ibm.com>
Cc: Stefan Hajnoczi <stefanha@gmail.com>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	Rusty Russell <rusty@rustcorp.com.au>,
	Avi Kivity <avi@redhat.com>,
	kvm@vger.kernel.org, linux-kernel@vger.kernel.org,
	Khoa Huynh <khoa@us.ibm.com>
Subject: Re: [RFC PATCH]vhost-blk: In-kernel accelerator for virtio block device
Date: Fri, 05 Aug 2011 19:04:45 +0800	[thread overview]
Message-ID: <4E3BCE4D.7090809@gmail.com> (raw)
In-Reply-To: <1312495132.9603.4.camel@badari-desktop>

[-- Attachment #1: Type: text/plain, Size: 993 bytes --]

On 08/05/2011 05:58 AM, Badari Pulavarty wrote:
> Hi Liu Yuan,
>
> I started testing your patches. I applied your kernel patch to 3.0
> and the QEMU patch to latest git.
>
> I passed 6 block devices from the host to the guest (4 vcpus, 4GB RAM).
> I ran simple "dd" read tests from the guest on all block devices
> (with various block sizes, iflag=direct).
>
> Unfortunately, the system doesn't stay up. I immediately get a
> panic on the host. I didn't get time to debug the problem. Wondering
> if you have seen this issue before and/or have a new patchset
> to try?
>
> Let me know.
>
> Thanks,
> Badari
>

Okay, it is actually the bug pointed out by MST on the other thread,
that it needs a mutex for the completion thread.

Now would you please try this attachment? This patch only applies to
the kernel part, on top of the v1 kernel patch.

This patch mainly moves the completion thread into the vhost thread as
a work function. As a result, both request submission and completion
signalling happen in the same thread.
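
To make the flow concrete, here is a rough sketch of the new wiring
(same struct and function names as in the attached patch; treat it as
an illustration, not the exact code). The AIO completion eventfd is
registered with the vhost worker via vhost_poll, so the completion
handler runs as vhost work on the same thread that submits requests:

	static void eventfd_setup(struct vhost_blk *blk)
	{
		/* eventfd signalled by Linux AIO when submitted
		 * requests complete */
		blk->efile = eventfd_file_create(0, 0);
		blk->ectx  = eventfd_ctx_fileget(blk->efile);

		/* When the eventfd becomes readable, the vhost worker
		 * calls handle_completetion(), which reaps the AIO
		 * events, fills in the status bytes and pushes the
		 * heads onto the used ring. */
		vhost_poll_init(&blk->poll, handle_completetion,
				POLLIN, &blk->dev);
		vhost_poll_start(&blk->poll, blk->efile);
	}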

Yuan

[-- Attachment #2: vblk-for-kernel-2.patch --]
[-- Type: text/x-patch, Size: 3505 bytes --]

diff --git a/drivers/vhost/blk.c b/drivers/vhost/blk.c
index ecaf6fe..5cba543 100644
--- a/drivers/vhost/blk.c
+++ b/drivers/vhost/blk.c
@@ -47,6 +47,7 @@ struct vhost_blk {
 	struct eventfd_ctx *ectx;
 	struct file *efile;
 	struct task_struct *worker;
+	struct vhost_poll poll;
 };
 
 struct used_info {
@@ -62,6 +63,7 @@ static struct kmem_cache *used_info_cachep;
 static void blk_flush(struct vhost_blk *blk)
 {
        vhost_poll_flush(&blk->vq.poll);
+       vhost_poll_flush(&blk->poll);
 }
 
 static long blk_set_features(struct vhost_blk *blk, u64 features)
@@ -146,11 +148,11 @@ static long blk_reset_owner(struct vhost_blk *b)
         blk_stop(b);
         blk_flush(b);
         ret = vhost_dev_reset_owner(&b->dev);
-	if (b->worker) {
-		b->should_stop = 1;
-		smp_mb();
-		eventfd_signal(b->ectx, 1);
-	}
+//	if (b->worker) {
+//		b->should_stop = 1;
+//		smp_mb();
+//		eventfd_signal(b->ectx, 1);
+//	}
 err:
         mutex_unlock(&b->dev.mutex);
         return ret;
@@ -323,6 +325,7 @@ static void completion_thread_destory(struct vhost_blk *blk)
 
 static long blk_set_owner(struct vhost_blk *blk)
 {
+	eventfd_signal(blk->ectx, 1);
 	return completion_thread_setup(blk);
 }
 
@@ -361,8 +364,8 @@ static long vhost_blk_ioctl(struct file *f, unsigned int ioctl,
 		default:
 			mutex_lock(&blk->dev.mutex);
 			ret = vhost_dev_ioctl(&blk->dev, ioctl, arg);
-			if (!ret && ioctl == VHOST_SET_OWNER)
-				ret = blk_set_owner(blk);
+//			if (!ret && ioctl == VHOST_SET_OWNER)
+//				ret = blk_set_owner(blk);
 			blk_flush(blk);
 			mutex_unlock(&blk->dev.mutex);
 			break;
@@ -480,10 +483,51 @@ static void handle_guest_kick(struct vhost_work *work)
 	handle_kick(blk);
 }
 
+static void handle_completetion(struct vhost_work* work)
+{
+	struct vhost_blk *blk = container_of(work, struct vhost_blk, poll.work);
+	struct timespec ts = { 0 };
+	int ret, i, nr;
+	u64 count;
+
+	do {
+		ret = eventfd_ctx_read(blk->ectx, 1, &count);
+	} while (unlikely(ret == -ERESTARTSYS));
+
+	do {
+		nr = kernel_read_events(blk->ioctx, count, MAX_EVENTS, events, &ts);
+	} while (unlikely(nr == -EINTR));
+	dprintk("%s, count %llu, nr %d\n", __func__, count, nr);
+
+	if (unlikely(nr <= 0))
+		return;
+
+	for (i = 0; i < nr; i++) {
+		struct used_info *u = (struct used_info *)events[i].obj;
+		int len, status;
+
+		dprintk("%s, head %d complete in %d\n", __func__, u->head, i);
+		len = io_event_ret(&events[i]);
+		//status = u->len == len ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR;
+		status = len > 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR;
+		if (copy_to_user(u->status, &status, sizeof status)) {
+			vq_err(&blk->vq, "%s failed to write status\n", __func__);
+			BUG(); /* FIXME: maybe a bit radical? */
+		}
+		vhost_add_used(&blk->vq, u->head, u->len);
+		kfree(u);
+	}
+
+	vhost_signal(&blk->dev, &blk->vq);
+}
+
 static void eventfd_setup(struct vhost_blk *blk)
 {
+	//struct vhost_virtqueue *vq = &blk->vq;
 	blk->efile = eventfd_file_create(0, 0);
 	blk->ectx = eventfd_ctx_fileget(blk->efile);
+	vhost_poll_init(&blk->poll, handle_completetion, POLLIN, &blk->dev);
+	vhost_poll_start(&blk->poll, blk->efile);
 }
 
 static int vhost_blk_open(struct inode *inode, struct file *f)
@@ -528,7 +572,7 @@ static int vhost_blk_release(struct inode *inode, struct file *f)
 	vhost_dev_cleanup(&blk->dev);
 	/* Yet another flush? See comments in vhost_net_release() */
 	blk_flush(blk);
-	completion_thread_destory(blk);
+//	completion_thread_destory(blk);
 	eventfd_destroy(blk);
 	kfree(blk);
 

Thread overview: 42+ messages
2011-07-28 14:29 [RFC PATCH]vhost-blk: In-kernel accelerator for virtio block device Liu Yuan
2011-07-28 14:29 ` [RFC PATCH] vhost-blk: An in-kernel accelerator for virtio-blk Liu Yuan
2011-07-28 14:47   ` Christoph Hellwig
2011-07-29 11:19     ` Liu Yuan
2011-07-28 15:18   ` Stefan Hajnoczi
2011-07-28 15:22   ` Michael S. Tsirkin
2011-07-29 15:09     ` Liu Yuan
2011-08-01  6:25     ` Liu Yuan
2011-08-01  8:12       ` Michael S. Tsirkin
2011-08-01  8:55         ` Liu Yuan
2011-08-01 10:26           ` Michael S. Tsirkin
2011-08-11 19:59     ` Dongsu Park
2011-08-12  8:56       ` Alan Cox
2011-07-28 14:29 ` [RFC PATCH] vhost: Enable vhost-blk support Liu Yuan
2011-07-28 15:44 ` [RFC PATCH]vhost-blk: In-kernel accelerator for virtio block device Stefan Hajnoczi
2011-07-29  4:48   ` Stefan Hajnoczi
2011-07-29  7:59     ` Liu Yuan
2011-07-29 10:55       ` Christoph Hellwig
2011-07-29  7:22   ` Liu Yuan
2011-07-29  9:06     ` Stefan Hajnoczi
2011-07-29 12:01       ` Liu Yuan
2011-07-29 12:29         ` Stefan Hajnoczi
2011-07-29 12:50           ` Stefan Hajnoczi
2011-07-29 14:45             ` Liu Yuan
2011-07-29 14:50               ` Liu Yuan
2011-07-29 15:25         ` Sasha Levin
2011-08-01  8:17           ` Avi Kivity
2011-08-01  9:18             ` Liu Yuan
2011-08-01  9:37               ` Avi Kivity
2011-07-29 18:12     ` Badari Pulavarty
2011-08-01  5:46       ` Liu Yuan
2011-08-01  8:12         ` Christoph Hellwig
2011-08-04 21:58         ` Badari Pulavarty
2011-08-05  7:56           ` Liu Yuan
2011-08-05 11:04           ` Liu Yuan [this message]
2011-08-05 18:02             ` Badari Pulavarty
2011-08-08  1:35               ` Liu Yuan
2011-08-08  5:04                 ` Badari Pulavarty
2011-08-08  7:31                   ` Liu Yuan
2011-08-08 17:16                     ` Badari Pulavarty
2011-08-10  2:19                       ` Liu Yuan
2011-08-10 20:37                         ` Badari Pulavarty
