KVM Archive on lore.kernel.org
From: Alex Williamson <alex.williamson@redhat.com>
To: yan.y.zhao@intel.com
Cc: kvm@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [RFC PATCH 3/3] vfio/type1: Introduce pfn_list mutex
Date: Thu, 16 Jan 2020 11:18:13 -0700
Message-ID: <157919869385.21002.5744246004583751102.stgit@gimli.home>
In-Reply-To: <157919849533.21002.4782774695733669879.stgit@gimli.home>

We can promote external page {un}pinning to a reader lock, allowing
concurrency, since these paths don't change the vfio_iommu state.  We
do, however, need to protect the per-vfio_dma vpfn list in place of
that serialization.
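
For illustration only, a minimal userspace sketch of the locking
pattern this change introduces: a coarse reader/writer lock taken for
reading around the lookup, plus a per-range mutex that serializes
updates to that range's pinned-pfn list.  It uses pthreads rather than
the kernel's rwsem and struct mutex, and all names here (iommu_state,
dma_range, pin_page, find_dma) are invented for the example, not taken
from the driver.

	/* locking_sketch.c - illustrative only, not kernel code */
	#include <pthread.h>
	#include <stdlib.h>

	struct pfn_node {
		unsigned long pfn;
		struct pfn_node *next;
	};

	struct dma_range {
		unsigned long iova;
		unsigned long size;
		pthread_mutex_t pfn_list_lock;	/* protects pfn_list only */
		struct pfn_node *pfn_list;	/* externally pinned pfns */
	};

	struct iommu_state {
		pthread_rwlock_t lock;		/* protects the set of ranges */
		struct dma_range range;		/* single range, for brevity */
	};

	/* Trivial lookup: one range only, enough for the sketch.
	 * Locks are assumed initialized (pthread_rwlock_init/mutex_init). */
	static struct dma_range *find_dma(struct iommu_state *s,
					  unsigned long iova)
	{
		if (iova >= s->range.iova && iova < s->range.iova + s->range.size)
			return &s->range;
		return NULL;
	}

	/*
	 * Pin path: take the coarse lock for *reading* so pinners can run
	 * concurrently, then serialize pfn_list updates with the per-range
	 * mutex, mirroring dma->pfn_list_lock in the patch.
	 */
	int pin_page(struct iommu_state *s, unsigned long iova,
		     unsigned long pfn)
	{
		int ret = -1;
		struct dma_range *dma;
		struct pfn_node *n;

		pthread_rwlock_rdlock(&s->lock);
		dma = find_dma(s, iova);
		if (!dma)
			goto out;

		pthread_mutex_lock(&dma->pfn_list_lock);
		n = malloc(sizeof(*n));
		if (n) {
			n->pfn = pfn;
			n->next = dma->pfn_list;
			dma->pfn_list = n;
			ret = 0;
		}
		pthread_mutex_unlock(&dma->pfn_list_lock);
	out:
		pthread_rwlock_unlock(&s->lock);
		return ret;
	}

The point is the lock nesting: the coarse lock is only ever held
shared on this path, so concurrent pinners contend only when they
touch the same dma_range.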

Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
---
 drivers/vfio/vfio_iommu_type1.c |   24 +++++++++++++++++++-----
 1 file changed, 19 insertions(+), 5 deletions(-)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index e78067cc74b3..ea63306c16f7 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -90,6 +90,7 @@ struct vfio_dma {
 	bool			iommu_mapped;
 	bool			lock_cap;	/* capable(CAP_IPC_LOCK) */
 	struct task_struct	*task;
+	struct mutex		pfn_list_lock;
 	struct rb_root		pfn_list;	/* Ex-user pinned pfn list */
 };
 
@@ -539,7 +540,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
 	if (!iommu->v2)
 		return -EACCES;
 
-	down_write(&iommu->lock);
+	down_read(&iommu->lock);
 
 	/* Fail if notifier list is empty */
 	if (!iommu->notifier.head) {
@@ -570,8 +571,11 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
 			goto pin_unwind;
 		}
 
+		mutex_lock(&dma->pfn_list_lock);
+
 		vpfn = vfio_iova_get_vfio_pfn(dma, iova);
 		if (vpfn) {
+			mutex_unlock(&dma->pfn_list_lock);
 			phys_pfn[i] = vpfn->pfn;
 			continue;
 		}
@@ -579,14 +583,19 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
 		remote_vaddr = dma->vaddr + iova - dma->iova;
 		ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
 					     do_accounting);
-		if (ret)
+		if (ret) {
+			mutex_unlock(&dma->pfn_list_lock);
 			goto pin_unwind;
+		}
 
 		ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
 		if (ret) {
 			vfio_unpin_page_external(dma, iova, do_accounting);
+			mutex_unlock(&dma->pfn_list_lock);
 			goto pin_unwind;
 		}
+
+		mutex_unlock(&dma->pfn_list_lock);
 	}
 
 	ret = i;
@@ -599,11 +608,13 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
 
 		iova = user_pfn[j] << PAGE_SHIFT;
 		dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
+		mutex_lock(&dma->pfn_list_lock);
 		vfio_unpin_page_external(dma, iova, do_accounting);
+		mutex_unlock(&dma->pfn_list_lock);
 		phys_pfn[j] = 0;
 	}
 pin_done:
-	up_write(&iommu->lock);
+	up_read(&iommu->lock);
 	return ret;
 }
 
@@ -622,7 +633,7 @@ static int vfio_iommu_type1_unpin_pages(void *iommu_data,
 	if (!iommu->v2)
 		return -EACCES;
 
-	down_write(&iommu->lock);
+	down_read(&iommu->lock);
 
 	do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);
 	for (i = 0; i < npage; i++) {
@@ -633,11 +644,13 @@ static int vfio_iommu_type1_unpin_pages(void *iommu_data,
 		dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
 		if (!dma)
 			goto unpin_exit;
+		mutex_lock(&dma->pfn_list_lock);
 		vfio_unpin_page_external(dma, iova, do_accounting);
+		mutex_unlock(&dma->pfn_list_lock);
 	}
 
 unpin_exit:
-	up_write(&iommu->lock);
+	up_read(&iommu->lock);
 	return i > npage ? npage : (i > 0 ? i : -EINVAL);
 }
 
@@ -1109,6 +1122,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 	dma->iova = iova;
 	dma->vaddr = vaddr;
 	dma->prot = prot;
+	mutex_init(&dma->pfn_list_lock);
 
 	/*
 	 * We need to be able to both add to a task's locked memory and test



Thread overview: 5+ messages
2020-01-16 18:17 [RFC PATCH 0/3] vfio/type1: Reduce vfio_iommu.lock contention Alex Williamson
2020-01-16 18:17 ` [RFC PATCH 1/3] vfio/type1: Convert vfio_iommu.lock from mutex to rwsem Alex Williamson
2020-01-16 18:18 ` [RFC PATCH 2/3] vfio/type1: Replace obvious read lock instances Alex Williamson
2020-01-16 18:18 ` Alex Williamson [this message]
2020-01-17  1:10 ` [RFC PATCH 0/3] vfio/type1: Reduce vfio_iommu.lock contention Yan Zhao
