KVM Archive on lore.kernel.org
 help / color / Atom feed
* [RFC PATCH 0/3] vfio/type1: Reduce vfio_iommu.lock contention
@ 2020-01-16 18:17 Alex Williamson
  2020-01-16 18:17 ` [RFC PATCH 1/3] vfio/type1: Convert vfio_iommu.lock from mutex to rwsem Alex Williamson
                   ` (3 more replies)
  0 siblings, 4 replies; 8+ messages in thread
From: Alex Williamson @ 2020-01-16 18:17 UTC (permalink / raw)
  To: yan.y.zhao; +Cc: kvm, linux-kernel

Hi Yan,

I wonder if this might reduce the lock contention you're seeing in the
vfio_dma_rw series.  These are only compile tested on my end, so I hope
they're not too broken to test.  Thanks,

Alex

---

Alex Williamson (3):
      vfio/type1: Convert vfio_iommu.lock from mutex to rwsem
      vfio/type1: Replace obvious read lock instances
      vfio/type1: Introduce pfn_list mutex


 drivers/vfio/vfio_iommu_type1.c |   67 ++++++++++++++++++++++++---------------
 1 file changed, 41 insertions(+), 26 deletions(-)


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [RFC PATCH 1/3] vfio/type1: Convert vfio_iommu.lock from mutex to rwsem
  2020-01-16 18:17 [RFC PATCH 0/3] vfio/type1: Reduce vfio_iommu.lock contention Alex Williamson
@ 2020-01-16 18:17 ` Alex Williamson
  2020-01-16 18:18 ` [RFC PATCH 2/3] vfio/type1: Replace obvious read lock instances Alex Williamson
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 8+ messages in thread
From: Alex Williamson @ 2020-01-16 18:17 UTC (permalink / raw)
  To: yan.y.zhao; +Cc: kvm, linux-kernel

As a first step reducing lock contention, maintain the same locking
granularity using a rwsem rather than a mutex.

Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
---
 drivers/vfio/vfio_iommu_type1.c |   51 ++++++++++++++++++++-------------------
 1 file changed, 26 insertions(+), 25 deletions(-)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 2ada8e6cdb88..7ae58350af5b 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -28,6 +28,7 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/rbtree.h>
+#include <linux/rwsem.h>
 #include <linux/sched/signal.h>
 #include <linux/sched/mm.h>
 #include <linux/slab.h>
@@ -64,7 +65,7 @@ struct vfio_iommu {
 	struct list_head	domain_list;
 	struct list_head	iova_list;
 	struct vfio_domain	*external_domain; /* domain for external user */
-	struct mutex		lock;
+	struct rw_semaphore	lock;
 	struct rb_root		dma_list;
 	struct blocking_notifier_head notifier;
 	unsigned int		dma_avail;
@@ -538,7 +539,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
 	if (!iommu->v2)
 		return -EACCES;
 
-	mutex_lock(&iommu->lock);
+	down_write(&iommu->lock);
 
 	/* Fail if notifier list is empty */
 	if (!iommu->notifier.head) {
@@ -602,7 +603,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
 		phys_pfn[j] = 0;
 	}
 pin_done:
-	mutex_unlock(&iommu->lock);
+	up_write(&iommu->lock);
 	return ret;
 }
 
@@ -621,7 +622,7 @@ static int vfio_iommu_type1_unpin_pages(void *iommu_data,
 	if (!iommu->v2)
 		return -EACCES;
 
-	mutex_lock(&iommu->lock);
+	down_write(&iommu->lock);
 
 	do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);
 	for (i = 0; i < npage; i++) {
@@ -636,7 +637,7 @@ static int vfio_iommu_type1_unpin_pages(void *iommu_data,
 	}
 
 unpin_exit:
-	mutex_unlock(&iommu->lock);
+	up_write(&iommu->lock);
 	return i > npage ? npage : (i > 0 ? i : -EINVAL);
 }
 
@@ -829,10 +830,10 @@ static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
 	struct vfio_domain *domain;
 	unsigned long bitmap = ULONG_MAX;
 
-	mutex_lock(&iommu->lock);
+	down_write(&iommu->lock);
 	list_for_each_entry(domain, &iommu->domain_list, next)
 		bitmap &= domain->domain->pgsize_bitmap;
-	mutex_unlock(&iommu->lock);
+	up_write(&iommu->lock);
 
 	/*
 	 * In case the IOMMU supports page sizes smaller than PAGE_SIZE
@@ -870,7 +871,7 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
 
 	WARN_ON(mask & PAGE_MASK);
 again:
-	mutex_lock(&iommu->lock);
+	down_write(&iommu->lock);
 
 	/*
 	 * vfio-iommu-type1 (v1) - User mappings were coalesced together to
@@ -945,7 +946,7 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
 			 * Vendor drivers MUST unpin pages in response to an
 			 * invalidation.
 			 */
-			mutex_unlock(&iommu->lock);
+			up_write(&iommu->lock);
 			blocking_notifier_call_chain(&iommu->notifier,
 						    VFIO_IOMMU_NOTIFY_DMA_UNMAP,
 						    &nb_unmap);
@@ -956,7 +957,7 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
 	}
 
 unlock:
-	mutex_unlock(&iommu->lock);
+	up_write(&iommu->lock);
 
 	/* Report how much was unmapped */
 	unmap->size = unmapped;
@@ -1081,7 +1082,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 	if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
 		return -EINVAL;
 
-	mutex_lock(&iommu->lock);
+	down_write(&iommu->lock);
 
 	if (vfio_find_dma(iommu, iova, size)) {
 		ret = -EEXIST;
@@ -1150,7 +1151,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 		ret = vfio_pin_map_dma(iommu, dma, size);
 
 out_unlock:
-	mutex_unlock(&iommu->lock);
+	up_write(&iommu->lock);
 	return ret;
 }
 
@@ -1645,18 +1646,18 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	LIST_HEAD(iova_copy);
 	LIST_HEAD(group_resv_regions);
 
-	mutex_lock(&iommu->lock);
+	down_write(&iommu->lock);
 
 	list_for_each_entry(d, &iommu->domain_list, next) {
 		if (find_iommu_group(d, iommu_group)) {
-			mutex_unlock(&iommu->lock);
+			up_write(&iommu->lock);
 			return -EINVAL;
 		}
 	}
 
 	if (iommu->external_domain) {
 		if (find_iommu_group(iommu->external_domain, iommu_group)) {
-			mutex_unlock(&iommu->lock);
+			up_write(&iommu->lock);
 			return -EINVAL;
 		}
 	}
@@ -1693,7 +1694,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 
 			list_add(&group->next,
 				 &iommu->external_domain->group_list);
-			mutex_unlock(&iommu->lock);
+			up_write(&iommu->lock);
 
 			return 0;
 		}
@@ -1815,7 +1816,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 done:
 	/* Delete the old one and insert new iova list */
 	vfio_iommu_iova_insert_copy(iommu, &iova_copy);
-	mutex_unlock(&iommu->lock);
+	up_write(&iommu->lock);
 	vfio_iommu_resv_free(&group_resv_regions);
 
 	return 0;
@@ -1829,7 +1830,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 out_free:
 	kfree(domain);
 	kfree(group);
-	mutex_unlock(&iommu->lock);
+	up_write(&iommu->lock);
 	return ret;
 }
 
@@ -1969,7 +1970,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
 	struct vfio_group *group;
 	LIST_HEAD(iova_copy);
 
-	mutex_lock(&iommu->lock);
+	down_write(&iommu->lock);
 
 	if (iommu->external_domain) {
 		group = find_iommu_group(iommu->external_domain, iommu_group);
@@ -2033,7 +2034,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
 		vfio_iommu_iova_free(&iova_copy);
 
 detach_group_done:
-	mutex_unlock(&iommu->lock);
+	up_write(&iommu->lock);
 }
 
 static void *vfio_iommu_type1_open(unsigned long arg)
@@ -2062,7 +2063,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
 	INIT_LIST_HEAD(&iommu->iova_list);
 	iommu->dma_list = RB_ROOT;
 	iommu->dma_avail = dma_entry_limit;
-	mutex_init(&iommu->lock);
+	init_rwsem(&iommu->lock);
 	BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
 
 	return iommu;
@@ -2114,14 +2115,14 @@ static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
 	struct vfio_domain *domain;
 	int ret = 1;
 
-	mutex_lock(&iommu->lock);
+	down_write(&iommu->lock);
 	list_for_each_entry(domain, &iommu->domain_list, next) {
 		if (!(domain->prot & IOMMU_CACHE)) {
 			ret = 0;
 			break;
 		}
 	}
-	mutex_unlock(&iommu->lock);
+	up_write(&iommu->lock);
 
 	return ret;
 }
@@ -2155,7 +2156,7 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
 	size_t size;
 	int iovas = 0, i = 0, ret;
 
-	mutex_lock(&iommu->lock);
+	down_write(&iommu->lock);
 
 	list_for_each_entry(iova, &iommu->iova_list, list)
 		iovas++;
@@ -2189,7 +2190,7 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
 
 	kfree(cap_iovas);
 out_unlock:
-	mutex_unlock(&iommu->lock);
+	up_write(&iommu->lock);
 	return ret;
 }
 


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [RFC PATCH 2/3] vfio/type1: Replace obvious read lock instances
  2020-01-16 18:17 [RFC PATCH 0/3] vfio/type1: Reduce vfio_iommu.lock contention Alex Williamson
  2020-01-16 18:17 ` [RFC PATCH 1/3] vfio/type1: Convert vfio_iommu.lock from mutex to rwsem Alex Williamson
@ 2020-01-16 18:18 ` Alex Williamson
  2020-01-16 18:18 ` [RFC PATCH 3/3] vfio/type1: Introduce pfn_list mutex Alex Williamson
  2020-01-17  1:10 ` [RFC PATCH 0/3] vfio/type1: Reduce vfio_iommu.lock contention Yan Zhao
  3 siblings, 0 replies; 8+ messages in thread
From: Alex Williamson @ 2020-01-16 18:18 UTC (permalink / raw)
  To: yan.y.zhao; +Cc: kvm, linux-kernel

Replace some instances where no internal state is changed to read locks.

Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
---
 drivers/vfio/vfio_iommu_type1.c |   12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 7ae58350af5b..e78067cc74b3 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -830,10 +830,10 @@ static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
 	struct vfio_domain *domain;
 	unsigned long bitmap = ULONG_MAX;
 
-	down_write(&iommu->lock);
+	down_read(&iommu->lock);
 	list_for_each_entry(domain, &iommu->domain_list, next)
 		bitmap &= domain->domain->pgsize_bitmap;
-	up_write(&iommu->lock);
+	up_read(&iommu->lock);
 
 	/*
 	 * In case the IOMMU supports page sizes smaller than PAGE_SIZE
@@ -2115,14 +2115,14 @@ static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
 	struct vfio_domain *domain;
 	int ret = 1;
 
-	down_write(&iommu->lock);
+	down_read(&iommu->lock);
 	list_for_each_entry(domain, &iommu->domain_list, next) {
 		if (!(domain->prot & IOMMU_CACHE)) {
 			ret = 0;
 			break;
 		}
 	}
-	up_write(&iommu->lock);
+	up_read(&iommu->lock);
 
 	return ret;
 }
@@ -2156,7 +2156,7 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
 	size_t size;
 	int iovas = 0, i = 0, ret;
 
-	down_write(&iommu->lock);
+	down_read(&iommu->lock);
 
 	list_for_each_entry(iova, &iommu->iova_list, list)
 		iovas++;
@@ -2190,7 +2190,7 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
 
 	kfree(cap_iovas);
 out_unlock:
-	up_write(&iommu->lock);
+	up_read(&iommu->lock);
 	return ret;
 }
 


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [RFC PATCH 3/3] vfio/type1: Introduce pfn_list mutex
  2020-01-16 18:17 [RFC PATCH 0/3] vfio/type1: Reduce vfio_iommu.lock contention Alex Williamson
  2020-01-16 18:17 ` [RFC PATCH 1/3] vfio/type1: Convert vfio_iommu.lock from mutex to rwsem Alex Williamson
  2020-01-16 18:18 ` [RFC PATCH 2/3] vfio/type1: Replace obvious read lock instances Alex Williamson
@ 2020-01-16 18:18 ` Alex Williamson
  2020-01-17  1:10 ` [RFC PATCH 0/3] vfio/type1: Reduce vfio_iommu.lock contention Yan Zhao
  3 siblings, 0 replies; 8+ messages in thread
From: Alex Williamson @ 2020-01-16 18:18 UTC (permalink / raw)
  To: yan.y.zhao; +Cc: kvm, linux-kernel

We can promote external page {un}pinning to a reader lock, allowing
concurrency since these don't change the vfio_iommu state.  We do need
to protect the vpfn list per vfio_dma in place of that serialization
though.

Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
---
 drivers/vfio/vfio_iommu_type1.c |   24 +++++++++++++++++++-----
 1 file changed, 19 insertions(+), 5 deletions(-)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index e78067cc74b3..ea63306c16f7 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -90,6 +90,7 @@ struct vfio_dma {
 	bool			iommu_mapped;
 	bool			lock_cap;	/* capable(CAP_IPC_LOCK) */
 	struct task_struct	*task;
+	struct mutex		pfn_list_lock;
 	struct rb_root		pfn_list;	/* Ex-user pinned pfn list */
 };
 
@@ -539,7 +540,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
 	if (!iommu->v2)
 		return -EACCES;
 
-	down_write(&iommu->lock);
+	down_read(&iommu->lock);
 
 	/* Fail if notifier list is empty */
 	if (!iommu->notifier.head) {
@@ -570,8 +571,11 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
 			goto pin_unwind;
 		}
 
+		mutex_lock(&dma->pfn_list_lock);
+
 		vpfn = vfio_iova_get_vfio_pfn(dma, iova);
 		if (vpfn) {
+			mutex_unlock(&dma->pfn_list_lock);
 			phys_pfn[i] = vpfn->pfn;
 			continue;
 		}
@@ -579,14 +583,19 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
 		remote_vaddr = dma->vaddr + iova - dma->iova;
 		ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
 					     do_accounting);
-		if (ret)
+		if (ret) {
+			mutex_unlock(&dma->pfn_list_lock);
 			goto pin_unwind;
+		}
 
 		ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
 		if (ret) {
 			vfio_unpin_page_external(dma, iova, do_accounting);
+			mutex_unlock(&dma->pfn_list_lock);
 			goto pin_unwind;
 		}
+
+		mutex_unlock(&dma->pfn_list_lock);
 	}
 
 	ret = i;
@@ -599,11 +608,13 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
 
 		iova = user_pfn[j] << PAGE_SHIFT;
 		dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
+		mutex_lock(&dma->pfn_list_lock);
 		vfio_unpin_page_external(dma, iova, do_accounting);
+		mutex_unlock(&dma->pfn_list_lock);
 		phys_pfn[j] = 0;
 	}
 pin_done:
-	up_write(&iommu->lock);
+	up_read(&iommu->lock);
 	return ret;
 }
 
@@ -622,7 +633,7 @@ static int vfio_iommu_type1_unpin_pages(void *iommu_data,
 	if (!iommu->v2)
 		return -EACCES;
 
-	down_write(&iommu->lock);
+	down_read(&iommu->lock);
 
 	do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);
 	for (i = 0; i < npage; i++) {
@@ -633,11 +644,13 @@ static int vfio_iommu_type1_unpin_pages(void *iommu_data,
 		dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
 		if (!dma)
 			goto unpin_exit;
+		mutex_lock(&dma->pfn_list_lock);
 		vfio_unpin_page_external(dma, iova, do_accounting);
+		mutex_unlock(&dma->pfn_list_lock);
 	}
 
 unpin_exit:
-	up_write(&iommu->lock);
+	up_read(&iommu->lock);
 	return i > npage ? npage : (i > 0 ? i : -EINVAL);
 }
 
@@ -1109,6 +1122,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 	dma->iova = iova;
 	dma->vaddr = vaddr;
 	dma->prot = prot;
+	mutex_init(&dma->pfn_list_lock);
 
 	/*
 	 * We need to be able to both add to a task's locked memory and test


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [RFC PATCH 0/3] vfio/type1: Reduce vfio_iommu.lock contention
  2020-01-16 18:17 [RFC PATCH 0/3] vfio/type1: Reduce vfio_iommu.lock contention Alex Williamson
                   ` (2 preceding siblings ...)
  2020-01-16 18:18 ` [RFC PATCH 3/3] vfio/type1: Introduce pfn_list mutex Alex Williamson
@ 2020-01-17  1:10 ` Yan Zhao
  2020-02-19  9:04   ` Yan Zhao
  3 siblings, 1 reply; 8+ messages in thread
From: Yan Zhao @ 2020-01-17  1:10 UTC (permalink / raw)
  To: Alex Williamson; +Cc: kvm, linux-kernel

Thank you, Alex!
I'll try it and let you know the result soon. :)

On Fri, Jan 17, 2020 at 02:17:49AM +0800, Alex Williamson wrote:
> Hi Yan,
> 
> I wonder if this might reduce the lock contention you're seeing in the
> vfio_dma_rw series.  These are only compile tested on my end, so I hope
> they're not too broken to test.  Thanks,
> 
> Alex
> 
> ---
> 
> Alex Williamson (3):
>       vfio/type1: Convert vfio_iommu.lock from mutex to rwsem
>       vfio/type1: Replace obvious read lock instances
>       vfio/type1: Introduce pfn_list mutex
> 
> 
>  drivers/vfio/vfio_iommu_type1.c |   67 ++++++++++++++++++++++++---------------
>  1 file changed, 41 insertions(+), 26 deletions(-)
> 

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [RFC PATCH 0/3] vfio/type1: Reduce vfio_iommu.lock contention
  2020-01-17  1:10 ` [RFC PATCH 0/3] vfio/type1: Reduce vfio_iommu.lock contention Yan Zhao
@ 2020-02-19  9:04   ` Yan Zhao
  2020-02-19 20:52     ` Alex Williamson
  0 siblings, 1 reply; 8+ messages in thread
From: Yan Zhao @ 2020-02-19  9:04 UTC (permalink / raw)
  To: Alex Williamson; +Cc: kvm, linux-kernel

On Fri, Jan 17, 2020 at 09:10:51AM +0800, Yan Zhao wrote:
> Thank you, Alex!
> I'll try it and let you know the result soon. :)
> 
> On Fri, Jan 17, 2020 at 02:17:49AM +0800, Alex Williamson wrote:
> > Hi Yan,
> > 
> > I wonder if this might reduce the lock contention you're seeing in the
> > vfio_dma_rw series.  These are only compile tested on my end, so I hope
> > they're not too broken to test.  Thanks,
> > 
> > Alex
> > 
> > ---
> > 
> > Alex Williamson (3):
> >       vfio/type1: Convert vfio_iommu.lock from mutex to rwsem
> >       vfio/type1: Replace obvious read lock instances
> >       vfio/type1: Introduce pfn_list mutex
> > 
> > 
> >  drivers/vfio/vfio_iommu_type1.c |   67 ++++++++++++++++++++++++---------------
> >  1 file changed, 41 insertions(+), 26 deletions(-)
> >

hi Alex
I have finished testing of this series.
It's quite stable and passed our MTBF testing :)

However, after comparing the performance data obtained from several
benchmarks in guests (see below),
it seems that this series does not bring in obvious benefit.
(at least to cases we have tested, and though I cannot fully explain it yet).
So, do you think it's good for me not to include this series into my next
version of "use vfio_dma_rw to read/write IOVAs from CPU side"?


B: stands for baseline code, where mutex is used for vfio_iommu.lock
B+S: applied rwsem patches to convert vfio_iommu.lock from mutex to
rwsem.

==== comparison: benchmark scores ====
(1) with 1 VM:

 score  |     glmark2    |   lightsmark    |   openarena
-----------------------------------------------------------
      B | 1248 (100%)    | 219.70 (100%)   | 114.9 (100%)
    B+S | 1252 (100.3%)  | 222.76 (101.2%) | 114.8 ( 99.9%)


(2) with 2 VMs:

 score  |     glmark2    |   lightsmark    |   openarena                                       
-----------------------------------------------------------                                    
      B | 812   (100%)   | 211.46 (100%)   | 115.3 (100%)                                      
    B+S | 812.8 (100.1%) | 212.96 (100.7%) | 114.9 (99.6%) 


==== comparison: average cycles spent on vfio_iommu.lock =====
(1) with 1 VM:

 cycles | glmark2   | lightsmark | openarena | VM boot up
---------------------------------------------------------
      B | 107       | 113        | 110       | 107
    B+S | 112 (+5)  | 111  (-2)  | 108 (-2)  | 104 (-3)

Note:
a. during VM boot up, for rwsem, there are 24921 reads vs 67 writes
(372:1)
b. for the 3 measured benchmarks, no writes for rwsem.


(2) with 2 VMs:

 cycles | glmark2   | lightsmark | openarena | VM boot up
----------------------------------------------------------
      B | 113       | 119        | 112       | 119
    B+S | 118 (+5)  | 138  (+19) | 110 (-2)  | 114 (-5)


similar results obtained after applying patches of vfio_dma_rw.

B: stands for baseline code, where mutex is used for vfio_iommu.lock
B+V: baseline code + patches to convert from using kvm_read/write_guest
to using vfio_dma_rw
B+V+S: baseline code + patches to using vfio_dma_rw + patches to use
rwsem

==== comparison: benchmark scores =====
(1) with 1 VM:

 score  |     glmark2    |   lightsmark    |   openarena
----------------------------------------------------------
    B+V | 1244 (100%)    | 222.18 (100%)   | 114.4 (100%)
  B+V+S | 1241 ( 99.8%)  | 223.90 (100.8%) | 114.6 (100.2%)

(2) with 2 VMs:

        |     glmark2    |   lightsmark    |   openarena
----------------------------------------------------------
    B+V | 811.2 (100%)   | 211.20 (100%)   | 115.4 (100%)
  B+V+S | 811   (99.98%) | 211.81 (100.3%) | 115.5 (100.1%)


==== comparison: average cycles spent on vfio_dma_rw =====
(1) with 1 VM:

cycles  |    glmark2  | lightsmark | openarena
--------------------------------------------------
    B+V | 1396        | 1592       | 1351 
  B+V+S | 1415 (+19 ) | 1650 (+58) | 1357 (+6)

(2) with 2 VMs:

cycles  |    glmark2  | lightsmark | openarena
--------------------------------------------------
    B+V | 1974        | 2024       | 1636
  B+V+S | 1979 (+5)   | 2051 (+27) | 1644 (+8)


==== comparison: average cycles spent on vfio_iommu.lock =====
(1) with 1 VM:

 cycles | glmark2   | lightsmark | openarena | VM boot up
---------------------------------------------------------
    B+V | 137       | 139        | 156       | 124
  B+V+S | 142 (+5)  | 143 (+4)   | 149 (-7)  | 114 (-10)

(2) with 2 VMs:

 cycles | glmark2   | lightsmark | openarena | VM boot up
---------------------------------------------------------
    B+V | 153       | 148        | 146       | 111
  B+V+S | 155 (+2)  | 157 (+9)   | 156 (+10) | 118 (+7)


P.S.
You may find some inconsistency when comparing to the test result I sent
at https://lkml.org/lkml/2020/1/14/1486. It is because I had to change
my test machine for personal reasons and also because I made lightsmark
not sync on vblank events.


Thanks
Yan



^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [RFC PATCH 0/3] vfio/type1: Reduce vfio_iommu.lock contention
  2020-02-19  9:04   ` Yan Zhao
@ 2020-02-19 20:52     ` Alex Williamson
  2020-02-20  4:38       ` Yan Zhao
  0 siblings, 1 reply; 8+ messages in thread
From: Alex Williamson @ 2020-02-19 20:52 UTC (permalink / raw)
  To: Yan Zhao; +Cc: kvm, linux-kernel

On Wed, 19 Feb 2020 04:04:17 -0500
Yan Zhao <yan.y.zhao@intel.com> wrote:

> On Fri, Jan 17, 2020 at 09:10:51AM +0800, Yan Zhao wrote:
> > Thank you, Alex!
> > I'll try it and let you know the result soon. :)
> > 
> > On Fri, Jan 17, 2020 at 02:17:49AM +0800, Alex Williamson wrote:  
> > > Hi Yan,
> > > 
> > > I wonder if this might reduce the lock contention you're seeing in the
> > > vfio_dma_rw series.  These are only compile tested on my end, so I hope
> > > they're not too broken to test.  Thanks,
> > > 
> > > Alex
> > > 
> > > ---
> > > 
> > > Alex Williamson (3):
> > >       vfio/type1: Convert vfio_iommu.lock from mutex to rwsem
> > >       vfio/type1: Replace obvious read lock instances
> > >       vfio/type1: Introduce pfn_list mutex
> > > 
> > > 
> > >  drivers/vfio/vfio_iommu_type1.c |   67 ++++++++++++++++++++++++---------------
> > >  1 file changed, 41 insertions(+), 26 deletions(-)
> > >  
> 
> hi Alex
> I have finished testing of this series.
> It's quite stable and passed our MTBF testing :)
> 
> However, after comparing the performance data obtained from several
> benchmarks in guests (see below),
> it seems that this series does not bring in obvious benefit.
> (at least to cases we have tested, and though I cannot fully explain it yet).
> So, do you think it's good for me not to include this series into my next
> version of "use vfio_dma_rw to read/write IOVAs from CPU side"?

Yes, I would not include it in your series.  No reason to bloat your
series for a feature that doesn't clearly show an improvement.  Thanks
for the additional testing, we can revive them if this lock ever
resurfaces.  I was actually more hopeful that holding an external
group reference might provide a better performance improvement, the
lookup on every vfio_dma_rw is not very efficient.  Thanks,

Alex


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [RFC PATCH 0/3] vfio/type1: Reduce vfio_iommu.lock contention
  2020-02-19 20:52     ` Alex Williamson
@ 2020-02-20  4:38       ` Yan Zhao
  0 siblings, 0 replies; 8+ messages in thread
From: Yan Zhao @ 2020-02-20  4:38 UTC (permalink / raw)
  To: Alex Williamson; +Cc: kvm, linux-kernel

On Thu, Feb 20, 2020 at 04:52:47AM +0800, Alex Williamson wrote:
> On Wed, 19 Feb 2020 04:04:17 -0500
> Yan Zhao <yan.y.zhao@intel.com> wrote:
> 
> > On Fri, Jan 17, 2020 at 09:10:51AM +0800, Yan Zhao wrote:
> > > Thank you, Alex!
> > > I'll try it and let you know the result soon. :)
> > > 
> > > On Fri, Jan 17, 2020 at 02:17:49AM +0800, Alex Williamson wrote:  
> > > > Hi Yan,
> > > > 
> > > > I wonder if this might reduce the lock contention you're seeing in the
> > > > vfio_dma_rw series.  These are only compile tested on my end, so I hope
> > > > they're not too broken to test.  Thanks,
> > > > 
> > > > Alex
> > > > 
> > > > ---
> > > > 
> > > > Alex Williamson (3):
> > > >       vfio/type1: Convert vfio_iommu.lock from mutex to rwsem
> > > >       vfio/type1: Replace obvious read lock instances
> > > >       vfio/type1: Introduce pfn_list mutex
> > > > 
> > > > 
> > > >  drivers/vfio/vfio_iommu_type1.c |   67 ++++++++++++++++++++++++---------------
> > > >  1 file changed, 41 insertions(+), 26 deletions(-)
> > > >  
> > 
> > hi Alex
> > I have finished testing of this series.
> > It's quite stable and passed our MTBF testing :)
> > 
> > However, after comparing the performance data obtained from several
> > benchmarks in guests (see below),
> > it seems that this series does not bring in obvious benefit.
> > (at least to cases we have tested, and though I cannot fully explain it yet).
> > So, do you think it's good for me not to include this series into my next
> > version of "use vfio_dma_rw to read/write IOVAs from CPU side"?
> 
> Yes, I would not include it in your series.  No reason to bloat your
> series for a feature that doesn't clearly show an improvement.  Thanks
> for the additional testing, we can revive them if this lock ever
> resurfaces.  I was actually more hopeful that holding an external
> group reference might provide a better performance improvement, the
> lookup on every vfio_dma_rw is not very efficient.  Thanks,
> 
got it. thanks~

Yan

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, back to index

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-01-16 18:17 [RFC PATCH 0/3] vfio/type1: Reduce vfio_iommu.lock contention Alex Williamson
2020-01-16 18:17 ` [RFC PATCH 1/3] vfio/type1: Convert vfio_iommu.lock from mutex to rwsem Alex Williamson
2020-01-16 18:18 ` [RFC PATCH 2/3] vfio/type1: Replace obvious read lock instances Alex Williamson
2020-01-16 18:18 ` [RFC PATCH 3/3] vfio/type1: Introduce pfn_list mutex Alex Williamson
2020-01-17  1:10 ` [RFC PATCH 0/3] vfio/type1: Reduce vfio_iommu.lock contention Yan Zhao
2020-02-19  9:04   ` Yan Zhao
2020-02-19 20:52     ` Alex Williamson
2020-02-20  4:38       ` Yan Zhao

KVM Archive on lore.kernel.org

Archives are clonable:
	git clone --mirror https://lore.kernel.org/kvm/0 kvm/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 kvm kvm/ https://lore.kernel.org/kvm \
		kvm@vger.kernel.org
	public-inbox-index kvm

Example config snippet for mirrors

Newsgroup available over NNTP:
	nntp://nntp.lore.kernel.org/org.kernel.vger.kvm


AGPL code for this site: git clone https://public-inbox.org/public-inbox.git