From: Tonghao Zhang <xiangxia.m.yue@gmail.com>
Subject: [PATCH net-next v9 1/6] net: vhost: lock the vqs one by one
Date: Sun, 9 Sep 2018 04:51:22 -0700
Message-ID: <1536493887-2637-2-git-send-email-xiangxia.m.yue@gmail.com>
References: <1536493887-2637-1-git-send-email-xiangxia.m.yue@gmail.com>
In-Reply-To: <1536493887-2637-1-git-send-email-xiangxia.m.yue@gmail.com>
To: jasowang@redhat.com, mst@redhat.com, makita.toshiaki@lab.ntt.co.jp
Cc: virtualization@lists.linux-foundation.org, netdev@vger.kernel.org

From: Tonghao Zhang <xiangxia.m.yue@gmail.com>

This patch changes the way the vqs are locked: instead of locking them
all at the same time, lock them one by one. The next patch will rely on
this to avoid a deadlock.

Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 drivers/vhost/vhost.c | 24 +++++++-----------------
 1 file changed, 7 insertions(+), 17 deletions(-)

diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index a502f1a..a1c06e7 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -294,8 +294,11 @@ static void vhost_vq_meta_reset(struct vhost_dev *d)
 {
 	int i;
 
-	for (i = 0; i < d->nvqs; ++i)
+	for (i = 0; i < d->nvqs; ++i) {
+		mutex_lock(&d->vqs[i]->mutex);
 		__vhost_vq_meta_reset(d->vqs[i]);
+		mutex_unlock(&d->vqs[i]->mutex);
+	}
 }
 
 static void vhost_vq_reset(struct vhost_dev *dev,
@@ -890,20 +893,6 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
 #define vhost_get_used(vq, x, ptr) \
 	vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
 
-static void vhost_dev_lock_vqs(struct vhost_dev *d)
-{
-	int i = 0;
-	for (i = 0; i < d->nvqs; ++i)
-		mutex_lock_nested(&d->vqs[i]->mutex, i);
-}
-
-static void vhost_dev_unlock_vqs(struct vhost_dev *d)
-{
-	int i = 0;
-	for (i = 0; i < d->nvqs; ++i)
-		mutex_unlock(&d->vqs[i]->mutex);
-}
-
 static int vhost_new_umem_range(struct vhost_umem *umem,
 				u64 start, u64 size, u64 end,
 				u64 userspace_addr, int perm)
@@ -953,7 +942,10 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
 		if (msg->iova <= vq_msg->iova &&
 		    msg->iova + msg->size - 1 > vq_msg->iova &&
 		    vq_msg->type == VHOST_IOTLB_MISS) {
+			mutex_lock(&node->vq->mutex);
 			vhost_poll_queue(&node->vq->poll);
+			mutex_unlock(&node->vq->mutex);
+
 			list_del(&node->node);
 			kfree(node);
 		}
@@ -985,7 +977,6 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
 	int ret = 0;
 
 	mutex_lock(&dev->mutex);
-	vhost_dev_lock_vqs(dev);
 	switch (msg->type) {
 	case VHOST_IOTLB_UPDATE:
 		if (!dev->iotlb) {
@@ -1019,7 +1010,6 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
 		break;
 	}
 
-	vhost_dev_unlock_vqs(dev);
 	mutex_unlock(&dev->mutex);
 
 	return ret;
-- 
1.8.3.1
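
As an aside for readers following the locking change: below is a minimal
userspace sketch, not part of the patch, that contrasts the removed
vhost_dev_lock_vqs()-style "lock every vq up front" pattern with the per-vq
locking the patch switches to. The toy_vq type and both function names are
invented for illustration, and pthread mutexes stand in for the kernel's
mutex_lock()/mutex_unlock().

#include <pthread.h>
#include <stdio.h>

#define NVQS 4

struct toy_vq {
	pthread_mutex_t mutex;
	int meta;			/* stands in for vq metadata */
};

static struct toy_vq vqs[NVQS];

/* Old pattern: hold every vq mutex for the whole operation. */
static void lock_all_then_work(void)
{
	int i;

	for (i = 0; i < NVQS; ++i)
		pthread_mutex_lock(&vqs[i].mutex);
	for (i = 0; i < NVQS; ++i)
		vqs[i].meta = 0;	/* e.g. __vhost_vq_meta_reset() */
	for (i = 0; i < NVQS; ++i)
		pthread_mutex_unlock(&vqs[i].mutex);
}

/* New pattern: take each vq mutex only around the work on that vq. */
static void lock_one_by_one(void)
{
	int i;

	for (i = 0; i < NVQS; ++i) {
		pthread_mutex_lock(&vqs[i].mutex);
		vqs[i].meta = 0;	/* e.g. __vhost_vq_meta_reset() */
		pthread_mutex_unlock(&vqs[i].mutex);
	}
}

int main(void)
{
	int i;

	for (i = 0; i < NVQS; ++i)
		pthread_mutex_init(&vqs[i].mutex, NULL);

	lock_all_then_work();
	lock_one_by_one();
	printf("both patterns reset %d vqs\n", NVQS);
	return 0;
}

Holding only one vq mutex at a time shortens each critical section and
means a thread never holds more than dev->mutex plus a single vq mutex;
per the commit message, the next patch in the series builds on this to
avoid a deadlock.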