All of lore.kernel.org
From: Keqian Zhu <zhukeqian1@huawei.com>
To: <linux-kernel@vger.kernel.org>,
	<linux-arm-kernel@lists.infradead.org>,
	<iommu@lists.linux-foundation.org>, <kvm@vger.kernel.org>,
	<kvmarm@lists.cs.columbia.edu>,
	Alex Williamson <alex.williamson@redhat.com>,
	Cornelia Huck <cohuck@redhat.com>, Marc Zyngier <maz@kernel.org>,
	Will Deacon <will@kernel.org>,
	Robin Murphy <robin.murphy@arm.com>
Cc: Joerg Roedel <joro@8bytes.org>,
	Catalin Marinas <catalin.marinas@arm.com>,
	James Morse <james.morse@arm.com>,
	Suzuki K Poulose <suzuki.poulose@arm.com>,
	Sean Christopherson <sean.j.christopherson@intel.com>,
	Julien Thierry <julien.thierry.kdev@gmail.com>,
	Mark Brown <broonie@kernel.org>,
	"Thomas Gleixner" <tglx@linutronix.de>,
	Andrew Morton <akpm@linux-foundation.org>,
	Alexios Zavras <alexios.zavras@intel.com>,
	<wanghaibin.wang@huawei.com>, <jiangkunkun@huawei.com>,
	Keqian Zhu <zhukeqian1@huawei.com>
Subject: [PATCH 3/7] vfio: iommu_type1: Make an explicit "promote" semantic
Date: Thu, 10 Dec 2020 15:34:21 +0800	[thread overview]
Message-ID: <20201210073425.25960-4-zhukeqian1@huawei.com> (raw)
In-Reply-To: <20201210073425.25960-1-zhukeqian1@huawei.com>

When we want to promote the pinned_page_dirty_scope of a vfio_iommu, we
must call the "update" function to walk all vfio_groups, but when we
want to downgrade it, we can simply clear the flag directly.

Given the above, we can give that function an explicit "promote"
semantic. As a bonus, if the vfio_iommu has already been promoted, the
function can return early.

Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
---
 drivers/vfio/vfio_iommu_type1.c | 27 +++++++++++++--------------
 1 file changed, 13 insertions(+), 14 deletions(-)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index c52bcefba96b..bd9a94590ebc 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -148,7 +148,7 @@ static int put_pfn(unsigned long pfn, int prot);
 static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
 					       struct iommu_group *iommu_group);
 
-static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu);
+static void promote_pinned_page_dirty_scope(struct vfio_iommu *iommu);
 /*
  * This code handles mapping and unmapping of user data buffers
  * into DMA'ble space using the IOMMU
@@ -719,7 +719,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
 	group = vfio_iommu_find_iommu_group(iommu, iommu_group);
 	if (!group->pinned_page_dirty_scope) {
 		group->pinned_page_dirty_scope = true;
-		update_pinned_page_dirty_scope(iommu);
+		promote_pinned_page_dirty_scope(iommu);
 	}
 
 	goto pin_done;
@@ -1633,27 +1633,26 @@ static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
 	return group;
 }
 
-static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu)
+static void promote_pinned_page_dirty_scope(struct vfio_iommu *iommu)
 {
 	struct vfio_domain *domain;
 	struct vfio_group *group;
 
+	if (iommu->pinned_page_dirty_scope)
+		return;
+
 	list_for_each_entry(domain, &iommu->domain_list, next) {
 		list_for_each_entry(group, &domain->group_list, next) {
-			if (!group->pinned_page_dirty_scope) {
-				iommu->pinned_page_dirty_scope = false;
+			if (!group->pinned_page_dirty_scope)
 				return;
-			}
 		}
 	}
 
 	if (iommu->external_domain) {
 		domain = iommu->external_domain;
 		list_for_each_entry(group, &domain->group_list, next) {
-			if (!group->pinned_page_dirty_scope) {
-				iommu->pinned_page_dirty_scope = false;
+			if (!group->pinned_page_dirty_scope)
 				return;
-			}
 		}
 	}
 
@@ -2348,7 +2347,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
 	struct vfio_iommu *iommu = iommu_data;
 	struct vfio_domain *domain;
 	struct vfio_group *group;
-	bool update_dirty_scope = false;
+	bool promote_dirty_scope = false;
 	LIST_HEAD(iova_copy);
 
 	mutex_lock(&iommu->lock);
@@ -2356,7 +2355,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
 	if (iommu->external_domain) {
 		group = find_iommu_group(iommu->external_domain, iommu_group);
 		if (group) {
-			update_dirty_scope = !group->pinned_page_dirty_scope;
+			promote_dirty_scope = !group->pinned_page_dirty_scope;
 			list_del(&group->next);
 			kfree(group);
 
@@ -2386,7 +2385,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
 			continue;
 
 		vfio_iommu_detach_group(domain, group);
-		update_dirty_scope = !group->pinned_page_dirty_scope;
+		promote_dirty_scope = !group->pinned_page_dirty_scope;
 		list_del(&group->next);
 		kfree(group);
 		/*
@@ -2422,8 +2421,8 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
 	 * Removal of a group without dirty tracking may allow the iommu scope
 	 * to be promoted.
 	 */
-	if (update_dirty_scope)
-		update_pinned_page_dirty_scope(iommu);
+	if (promote_dirty_scope)
+		promote_pinned_page_dirty_scope(iommu);
 	mutex_unlock(&iommu->lock);
 }
 
-- 
2.23.0
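To make the asymmetry described in the commit message concrete, below is a
minimal standalone C sketch of the same pattern: promotion has to verify
every group before setting the iommu-wide flag, while downgrading only
clears it. This is not the kernel code; the model_iommu/model_group
structs, the fixed-size group array, and the helper names are invented
for illustration, whereas the real promote_pinned_page_dirty_scope()
walks the domain and group lists under iommu->lock as the diff above
shows.

#include <stdbool.h>
#include <stdio.h>

#define NGROUPS 3

struct model_group {
	bool pinned_page_dirty_scope;
};

struct model_iommu {
	bool pinned_page_dirty_scope;
	struct model_group groups[NGROUPS];
};

/*
 * Promotion must prove that every group limits dirty tracking to its
 * pinned pages, so it walks all of them.  If the iommu is already
 * promoted there is nothing to do, hence the early return.
 */
static void promote_dirty_scope(struct model_iommu *iommu)
{
	int i;

	if (iommu->pinned_page_dirty_scope)
		return;

	for (i = 0; i < NGROUPS; i++)
		if (!iommu->groups[i].pinned_page_dirty_scope)
			return;

	iommu->pinned_page_dirty_scope = true;
}

/*
 * Downgrading needs no walk: one non-conforming group is enough to
 * invalidate the iommu-wide scope, so the caller clears the flag directly.
 */
static void downgrade_dirty_scope(struct model_iommu *iommu)
{
	iommu->pinned_page_dirty_scope = false;
}

int main(void)
{
	struct model_iommu iommu = {
		.groups = { { true }, { true }, { false } },
	};

	promote_dirty_scope(&iommu);		/* blocked: group 2 not ready */
	printf("after first attempt:  %d\n", iommu.pinned_page_dirty_scope);

	iommu.groups[2].pinned_page_dirty_scope = true;
	promote_dirty_scope(&iommu);		/* now all groups qualify */
	printf("after second attempt: %d\n", iommu.pinned_page_dirty_scope);

	downgrade_dirty_scope(&iommu);		/* direct clear, no walk */
	printf("after downgrade:      %d\n", iommu.pinned_page_dirty_scope);
	return 0;
}

Built with a plain cc invocation it prints 0, 1 and then 0: the blocked
promotion, the successful promotion once every group qualifies, and the
direct downgrade.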


Thread overview: 66+ messages

2020-12-10  7:34 [PATCH 0/7] vfio: iommu_type1: Some fixes and optimization Keqian Zhu
2020-12-10  7:34 ` [PATCH 1/7] vfio: iommu_type1: Clear added dirty bit when unwind pin Keqian Zhu
2020-12-10 19:16   ` Alex Williamson
2020-12-11  6:51     ` zhukeqian
2020-12-15  0:16       ` Alex Williamson
2020-12-16  7:22   ` [kbuild] " Dan Carpenter
2020-12-10  7:34 ` [PATCH 2/7] vfio: iommu_type1: Initially set the pinned_page_dirty_scope Keqian Zhu
2020-12-10  7:34 ` [PATCH 3/7] vfio: iommu_type1: Make an explicit "promote" semantic Keqian Zhu [this message]
2020-12-10  7:34 ` [PATCH 4/7] vfio: iommu_type1: Fix missing dirty page when promote pinned_scope Keqian Zhu
2020-12-15  0:04   ` Alex Williamson
2020-12-15  9:37     ` zhukeqian
2020-12-15 15:53       ` Alex Williamson
2020-12-18  8:21         ` Keqian Zhu
2020-12-10  7:34 ` [PATCH 5/7] vfio: iommu_type1: Drop parameter "pgsize" of vfio_dma_bitmap_alloc_all Keqian Zhu
2020-12-10  7:34 ` [PATCH 6/7] vfio: iommu_type1: Drop parameter "pgsize" of vfio_iova_dirty_bitmap Keqian Zhu
2020-12-10  7:34 ` [PATCH 7/7] vfio: iommu_type1: Drop parameter "pgsize" of update_user_bitmap Keqian Zhu

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20201210073425.25960-4-zhukeqian1@huawei.com \
    --to=zhukeqian1@huawei.com \
    --cc=akpm@linux-foundation.org \
    --cc=alex.williamson@redhat.com \
    --cc=alexios.zavras@intel.com \
    --cc=broonie@kernel.org \
    --cc=catalin.marinas@arm.com \
    --cc=cohuck@redhat.com \
    --cc=iommu@lists.linux-foundation.org \
    --cc=james.morse@arm.com \
    --cc=jiangkunkun@huawei.com \
    --cc=joro@8bytes.org \
    --cc=julien.thierry.kdev@gmail.com \
    --cc=kvm@vger.kernel.org \
    --cc=kvmarm@lists.cs.columbia.edu \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=maz@kernel.org \
    --cc=robin.murphy@arm.com \
    --cc=sean.j.christopherson@intel.com \
    --cc=suzuki.poulose@arm.com \
    --cc=tglx@linutronix.de \
    --cc=wanghaibin.wang@huawei.com \
    --cc=will@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

Be sure your reply has a Subject: header at the top and a blank line
before the message body.