linux-kernel.vger.kernel.org archive mirror
* [PATCH 1/2] Revert "vhost-vdpa: fix page pinning leakage in error path"
@ 2020-10-30  7:45 Si-Wei Liu
  2020-10-30  7:45 ` [PATCH 2/2] vhost-vdpa: fix page pinning leakage in error path (rework) Si-Wei Liu
  2020-11-03  8:53 ` [PATCH 1/2] Revert "vhost-vdpa: fix page pinning leakage in error path" Jason Wang
  0 siblings, 2 replies; 12+ messages in thread
From: Si-Wei Liu @ 2020-10-30  7:45 UTC (permalink / raw)
  To: mst, jasowang, lingshan.zhu
  Cc: joao.m.martins, boris.ostrovsky, linux-kernel, virtualization

This reverts commit 7ed9e3d97c32d969caded2dfb6e67c1a2cc5a0b1.

Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
---
 drivers/vhost/vdpa.c | 119 +++++++++++++++++++++------------------------------
 1 file changed, 48 insertions(+), 71 deletions(-)

diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index a2dbc85..b6d9016 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -588,19 +588,21 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 	struct vhost_dev *dev = &v->vdev;
 	struct vhost_iotlb *iotlb = dev->iotlb;
 	struct page **page_list;
-	struct vm_area_struct **vmas;
+	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
 	unsigned int gup_flags = FOLL_LONGTERM;
-	unsigned long map_pfn, last_pfn = 0;
-	unsigned long npages, lock_limit;
-	unsigned long i, nmap = 0;
+	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
+	unsigned long locked, lock_limit, pinned, i;
 	u64 iova = msg->iova;
-	long pinned;
 	int ret = 0;
 
 	if (vhost_iotlb_itree_first(iotlb, msg->iova,
 				    msg->iova + msg->size - 1))
 		return -EEXIST;
 
+	page_list = (struct page **) __get_free_page(GFP_KERNEL);
+	if (!page_list)
+		return -ENOMEM;
+
 	if (msg->perm & VHOST_ACCESS_WO)
 		gup_flags |= FOLL_WRITE;
 
@@ -608,86 +610,61 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 	if (!npages)
 		return -EINVAL;
 
-	page_list = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
-	vmas = kvmalloc_array(npages, sizeof(struct vm_area_struct *),
-			      GFP_KERNEL);
-	if (!page_list || !vmas) {
-		ret = -ENOMEM;
-		goto free;
-	}
-
 	mmap_read_lock(dev->mm);
 
+	locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
-		ret = -ENOMEM;
-		goto unlock;
-	}
 
-	pinned = pin_user_pages(msg->uaddr & PAGE_MASK, npages, gup_flags,
-				page_list, vmas);
-	if (npages != pinned) {
-		if (pinned < 0) {
-			ret = pinned;
-		} else {
-			unpin_user_pages(page_list, pinned);
-			ret = -ENOMEM;
-		}
-		goto unlock;
+	if (locked > lock_limit) {
+		ret = -ENOMEM;
+		goto out;
 	}
 
+	cur_base = msg->uaddr & PAGE_MASK;
 	iova &= PAGE_MASK;
-	map_pfn = page_to_pfn(page_list[0]);
-
-	/* One more iteration to avoid extra vdpa_map() call out of loop. */
-	for (i = 0; i <= npages; i++) {
-		unsigned long this_pfn;
-		u64 csize;
-
-		/* The last chunk may have no valid PFN next to it */
-		this_pfn = i < npages ? page_to_pfn(page_list[i]) : -1UL;
-
-		if (last_pfn && (this_pfn == -1UL ||
-				 this_pfn != last_pfn + 1)) {
-			/* Pin a contiguous chunk of memory */
-			csize = last_pfn - map_pfn + 1;
-			ret = vhost_vdpa_map(v, iova, csize << PAGE_SHIFT,
-					     map_pfn << PAGE_SHIFT,
-					     msg->perm);
-			if (ret) {
-				/*
-				 * Unpin the rest chunks of memory on the
-				 * flight with no corresponding vdpa_map()
-				 * calls having been made yet. On the other
-				 * hand, vdpa_unmap() in the failure path
-				 * is in charge of accounting the number of
-				 * pinned pages for its own.
-				 * This asymmetrical pattern of accounting
-				 * is for efficiency to pin all pages at
-				 * once, while there is no other callsite
-				 * of vdpa_map() than here above.
-				 */
-				unpin_user_pages(&page_list[nmap],
-						 npages - nmap);
-				goto out;
+
+	while (npages) {
+		pinned = min_t(unsigned long, npages, list_size);
+		ret = pin_user_pages(cur_base, pinned,
+				     gup_flags, page_list, NULL);
+		if (ret != pinned)
+			goto out;
+
+		if (!last_pfn)
+			map_pfn = page_to_pfn(page_list[0]);
+
+		for (i = 0; i < ret; i++) {
+			unsigned long this_pfn = page_to_pfn(page_list[i]);
+			u64 csize;
+
+			if (last_pfn && (this_pfn != last_pfn + 1)) {
+				/* Pin a contiguous chunk of memory */
+				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
+				if (vhost_vdpa_map(v, iova, csize,
+						   map_pfn << PAGE_SHIFT,
+						   msg->perm))
+					goto out;
+				map_pfn = this_pfn;
+				iova += csize;
 			}
-			atomic64_add(csize, &dev->mm->pinned_vm);
-			nmap += csize;
-			iova += csize << PAGE_SHIFT;
-			map_pfn = this_pfn;
+
+			last_pfn = this_pfn;
 		}
-		last_pfn = this_pfn;
+
+		cur_base += ret << PAGE_SHIFT;
+		npages -= ret;
 	}
 
-	WARN_ON(nmap != npages);
+	/* Pin the rest chunk */
+	ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
+			     map_pfn << PAGE_SHIFT, msg->perm);
 out:
-	if (ret)
+	if (ret) {
 		vhost_vdpa_unmap(v, msg->iova, msg->size);
-unlock:
+		atomic64_sub(npages, &dev->mm->pinned_vm);
+	}
 	mmap_read_unlock(dev->mm);
-free:
-	kvfree(vmas);
-	kvfree(page_list);
+	free_page((unsigned long)page_list);
 	return ret;
 }
 
-- 
1.8.3.1



* [PATCH 2/2] vhost-vdpa: fix page pinning leakage in error path (rework)
  2020-10-30  7:45 [PATCH 1/2] Revert "vhost-vdpa: fix page pinning leakage in error path" Si-Wei Liu
@ 2020-10-30  7:45 ` Si-Wei Liu
  2020-11-03 13:00   ` Jason Wang
  2020-11-04  2:42   ` Jason Wang
  2020-11-03  8:53 ` [PATCH 1/2] Revert "vhost-vdpa: fix page pinning leakage in error path" Jason Wang
  1 sibling, 2 replies; 12+ messages in thread
From: Si-Wei Liu @ 2020-10-30  7:45 UTC (permalink / raw)
  To: mst, jasowang, lingshan.zhu
  Cc: joao.m.martins, boris.ostrovsky, linux-kernel, virtualization

Pinned pages are not properly accounted particularly when
mapping error occurs on IOTLB update. Clean up dangling
pinned pages for the error path.

The memory usage for bookkeeping pinned pages is reverted
to what it was before: only one single free page is needed.
This helps reduce the host memory demand for VM with a large
amount of memory, or in the situation where host is running
short of free memory.

Fixes: 4c8cf31885f6 ("vhost: introduce vDPA-based backend")
Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
---
 drivers/vhost/vdpa.c | 64 +++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 46 insertions(+), 18 deletions(-)

diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index b6d9016..8da8558 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -560,6 +560,8 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
 
 	if (r)
 		vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
+	else
+		atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);
 
 	return r;
 }
@@ -591,14 +593,16 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
 	unsigned int gup_flags = FOLL_LONGTERM;
 	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
-	unsigned long locked, lock_limit, pinned, i;
+	unsigned long lock_limit, sz2pin, nchunks, i;
 	u64 iova = msg->iova;
+	long pinned;
 	int ret = 0;
 
 	if (vhost_iotlb_itree_first(iotlb, msg->iova,
 				    msg->iova + msg->size - 1))
 		return -EEXIST;
 
+	/* Limit the use of memory for bookkeeping */
 	page_list = (struct page **) __get_free_page(GFP_KERNEL);
 	if (!page_list)
 		return -ENOMEM;
@@ -607,52 +611,64 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 		gup_flags |= FOLL_WRITE;
 
 	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
-	if (!npages)
-		return -EINVAL;
+	if (!npages) {
+		ret = -EINVAL;
+		goto free;
+	}
 
 	mmap_read_lock(dev->mm);
 
-	locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-
-	if (locked > lock_limit) {
+	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
 		ret = -ENOMEM;
-		goto out;
+		goto unlock;
 	}
 
 	cur_base = msg->uaddr & PAGE_MASK;
 	iova &= PAGE_MASK;
+	nchunks = 0;
 
 	while (npages) {
-		pinned = min_t(unsigned long, npages, list_size);
-		ret = pin_user_pages(cur_base, pinned,
-				     gup_flags, page_list, NULL);
-		if (ret != pinned)
+		sz2pin = min_t(unsigned long, npages, list_size);
+		pinned = pin_user_pages(cur_base, sz2pin,
+					gup_flags, page_list, NULL);
+		if (sz2pin != pinned) {
+			if (pinned < 0) {
+				ret = pinned;
+			} else {
+				unpin_user_pages(page_list, pinned);
+				ret = -ENOMEM;
+			}
 			goto out;
+		}
+		nchunks++;
 
 		if (!last_pfn)
 			map_pfn = page_to_pfn(page_list[0]);
 
-		for (i = 0; i < ret; i++) {
+		for (i = 0; i < pinned; i++) {
 			unsigned long this_pfn = page_to_pfn(page_list[i]);
 			u64 csize;
 
 			if (last_pfn && (this_pfn != last_pfn + 1)) {
 				/* Pin a contiguous chunk of memory */
 				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
-				if (vhost_vdpa_map(v, iova, csize,
-						   map_pfn << PAGE_SHIFT,
-						   msg->perm))
+				ret = vhost_vdpa_map(v, iova, csize,
+						     map_pfn << PAGE_SHIFT,
+						     msg->perm);
+				if (ret)
 					goto out;
+
 				map_pfn = this_pfn;
 				iova += csize;
+				nchunks = 0;
 			}
 
 			last_pfn = this_pfn;
 		}
 
-		cur_base += ret << PAGE_SHIFT;
-		npages -= ret;
+		cur_base += pinned << PAGE_SHIFT;
+		npages -= pinned;
 	}
 
 	/* Pin the rest chunk */
@@ -660,10 +676,22 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 			     map_pfn << PAGE_SHIFT, msg->perm);
 out:
 	if (ret) {
+		if (nchunks && last_pfn) {
+			unsigned long pfn;
+
+			/*
+			 * Unpin the outstanding pages which are unmapped.
+			 * Mapped pages are accounted in vdpa_map(), thus
+			 * will be handled by vdpa_unmap().
+			 */
+			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
+				unpin_user_page(pfn_to_page(pfn));
+		}
 		vhost_vdpa_unmap(v, msg->iova, msg->size);
-		atomic64_sub(npages, &dev->mm->pinned_vm);
 	}
+unlock:
 	mmap_read_unlock(dev->mm);
+free:
 	free_page((unsigned long)page_list);
 	return ret;
 }
-- 
1.8.3.1



* Re: [PATCH 1/2] Revert "vhost-vdpa: fix page pinning leakage in error path"
  2020-10-30  7:45 [PATCH 1/2] Revert "vhost-vdpa: fix page pinning leakage in error path" Si-Wei Liu
  2020-10-30  7:45 ` [PATCH 2/2] vhost-vdpa: fix page pinning leakage in error path (rework) Si-Wei Liu
@ 2020-11-03  8:53 ` Jason Wang
  1 sibling, 0 replies; 12+ messages in thread
From: Jason Wang @ 2020-11-03  8:53 UTC (permalink / raw)
  To: Si-Wei Liu, mst, lingshan.zhu
  Cc: joao.m.martins, boris.ostrovsky, linux-kernel, virtualization


On 2020/10/30 3:45 PM, Si-Wei Liu wrote:
> This reverts commit 7ed9e3d97c32d969caded2dfb6e67c1a2cc5a0b1.
>
> Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
> ---
>   drivers/vhost/vdpa.c | 119 +++++++++++++++++++++------------------------------
>   1 file changed, 48 insertions(+), 71 deletions(-)


I saw this has already been reverted here: 
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/drivers/vhost?id=5e1a3149eec8675c2767cc465903f5e4829de5b0.

:)

Thanks


>
> diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
> index a2dbc85..b6d9016 100644
> --- a/drivers/vhost/vdpa.c
> +++ b/drivers/vhost/vdpa.c
> @@ -588,19 +588,21 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>   	struct vhost_dev *dev = &v->vdev;
>   	struct vhost_iotlb *iotlb = dev->iotlb;
>   	struct page **page_list;
> -	struct vm_area_struct **vmas;
> +	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
>   	unsigned int gup_flags = FOLL_LONGTERM;
> -	unsigned long map_pfn, last_pfn = 0;
> -	unsigned long npages, lock_limit;
> -	unsigned long i, nmap = 0;
> +	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
> +	unsigned long locked, lock_limit, pinned, i;
>   	u64 iova = msg->iova;
> -	long pinned;
>   	int ret = 0;
>   
>   	if (vhost_iotlb_itree_first(iotlb, msg->iova,
>   				    msg->iova + msg->size - 1))
>   		return -EEXIST;
>   
> +	page_list = (struct page **) __get_free_page(GFP_KERNEL);
> +	if (!page_list)
> +		return -ENOMEM;
> +
>   	if (msg->perm & VHOST_ACCESS_WO)
>   		gup_flags |= FOLL_WRITE;
>   
> @@ -608,86 +610,61 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>   	if (!npages)
>   		return -EINVAL;
>   
> -	page_list = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
> -	vmas = kvmalloc_array(npages, sizeof(struct vm_area_struct *),
> -			      GFP_KERNEL);
> -	if (!page_list || !vmas) {
> -		ret = -ENOMEM;
> -		goto free;
> -	}
> -
>   	mmap_read_lock(dev->mm);
>   
> +	locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
>   	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
> -	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
> -		ret = -ENOMEM;
> -		goto unlock;
> -	}
>   
> -	pinned = pin_user_pages(msg->uaddr & PAGE_MASK, npages, gup_flags,
> -				page_list, vmas);
> -	if (npages != pinned) {
> -		if (pinned < 0) {
> -			ret = pinned;
> -		} else {
> -			unpin_user_pages(page_list, pinned);
> -			ret = -ENOMEM;
> -		}
> -		goto unlock;
> +	if (locked > lock_limit) {
> +		ret = -ENOMEM;
> +		goto out;
>   	}
>   
> +	cur_base = msg->uaddr & PAGE_MASK;
>   	iova &= PAGE_MASK;
> -	map_pfn = page_to_pfn(page_list[0]);
> -
> -	/* One more iteration to avoid extra vdpa_map() call out of loop. */
> -	for (i = 0; i <= npages; i++) {
> -		unsigned long this_pfn;
> -		u64 csize;
> -
> -		/* The last chunk may have no valid PFN next to it */
> -		this_pfn = i < npages ? page_to_pfn(page_list[i]) : -1UL;
> -
> -		if (last_pfn && (this_pfn == -1UL ||
> -				 this_pfn != last_pfn + 1)) {
> -			/* Pin a contiguous chunk of memory */
> -			csize = last_pfn - map_pfn + 1;
> -			ret = vhost_vdpa_map(v, iova, csize << PAGE_SHIFT,
> -					     map_pfn << PAGE_SHIFT,
> -					     msg->perm);
> -			if (ret) {
> -				/*
> -				 * Unpin the rest chunks of memory on the
> -				 * flight with no corresponding vdpa_map()
> -				 * calls having been made yet. On the other
> -				 * hand, vdpa_unmap() in the failure path
> -				 * is in charge of accounting the number of
> -				 * pinned pages for its own.
> -				 * This asymmetrical pattern of accounting
> -				 * is for efficiency to pin all pages at
> -				 * once, while there is no other callsite
> -				 * of vdpa_map() than here above.
> -				 */
> -				unpin_user_pages(&page_list[nmap],
> -						 npages - nmap);
> -				goto out;
> +
> +	while (npages) {
> +		pinned = min_t(unsigned long, npages, list_size);
> +		ret = pin_user_pages(cur_base, pinned,
> +				     gup_flags, page_list, NULL);
> +		if (ret != pinned)
> +			goto out;
> +
> +		if (!last_pfn)
> +			map_pfn = page_to_pfn(page_list[0]);
> +
> +		for (i = 0; i < ret; i++) {
> +			unsigned long this_pfn = page_to_pfn(page_list[i]);
> +			u64 csize;
> +
> +			if (last_pfn && (this_pfn != last_pfn + 1)) {
> +				/* Pin a contiguous chunk of memory */
> +				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
> +				if (vhost_vdpa_map(v, iova, csize,
> +						   map_pfn << PAGE_SHIFT,
> +						   msg->perm))
> +					goto out;
> +				map_pfn = this_pfn;
> +				iova += csize;
>   			}
> -			atomic64_add(csize, &dev->mm->pinned_vm);
> -			nmap += csize;
> -			iova += csize << PAGE_SHIFT;
> -			map_pfn = this_pfn;
> +
> +			last_pfn = this_pfn;
>   		}
> -		last_pfn = this_pfn;
> +
> +		cur_base += ret << PAGE_SHIFT;
> +		npages -= ret;
>   	}
>   
> -	WARN_ON(nmap != npages);
> +	/* Pin the rest chunk */
> +	ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
> +			     map_pfn << PAGE_SHIFT, msg->perm);
>   out:
> -	if (ret)
> +	if (ret) {
>   		vhost_vdpa_unmap(v, msg->iova, msg->size);
> -unlock:
> +		atomic64_sub(npages, &dev->mm->pinned_vm);
> +	}
>   	mmap_read_unlock(dev->mm);
> -free:
> -	kvfree(vmas);
> -	kvfree(page_list);
> +	free_page((unsigned long)page_list);
>   	return ret;
>   }
>   



* Re: [PATCH 2/2] vhost-vdpa: fix page pinning leakage in error path (rework)
  2020-10-30  7:45 ` [PATCH 2/2] vhost-vdpa: fix page pinning leakage in error path (rework) Si-Wei Liu
@ 2020-11-03 13:00   ` Jason Wang
  2020-11-04  1:06     ` si-wei liu
  2020-11-04  2:42   ` Jason Wang
  1 sibling, 1 reply; 12+ messages in thread
From: Jason Wang @ 2020-11-03 13:00 UTC (permalink / raw)
  To: Si-Wei Liu, mst, lingshan.zhu
  Cc: joao.m.martins, boris.ostrovsky, linux-kernel, virtualization


On 2020/10/30 3:45 PM, Si-Wei Liu wrote:
> Pinned pages are not properly accounted particularly when
> mapping error occurs on IOTLB update. Clean up dangling
> pinned pages for the error path.
>
> The memory usage for bookkeeping pinned pages is reverted
> to what it was before: only one single free page is needed.
> This helps reduce the host memory demand for VM with a large
> amount of memory, or in the situation where host is running
> short of free memory.
>
> Fixes: 4c8cf31885f6 ("vhost: introduce vDPA-based backend")
> Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
> ---
>   drivers/vhost/vdpa.c | 64 +++++++++++++++++++++++++++++++++++++---------------
>   1 file changed, 46 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
> index b6d9016..8da8558 100644
> --- a/drivers/vhost/vdpa.c
> +++ b/drivers/vhost/vdpa.c
> @@ -560,6 +560,8 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
>   
>   	if (r)
>   		vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
> +	else
> +		atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);
>   
>   	return r;
>   }
> @@ -591,14 +593,16 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>   	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
>   	unsigned int gup_flags = FOLL_LONGTERM;
>   	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
> -	unsigned long locked, lock_limit, pinned, i;
> +	unsigned long lock_limit, sz2pin, nchunks, i;
>   	u64 iova = msg->iova;
> +	long pinned;
>   	int ret = 0;
>   
>   	if (vhost_iotlb_itree_first(iotlb, msg->iova,
>   				    msg->iova + msg->size - 1))
>   		return -EEXIST;
>   
> +	/* Limit the use of memory for bookkeeping */
>   	page_list = (struct page **) __get_free_page(GFP_KERNEL);
>   	if (!page_list)
>   		return -ENOMEM;
> @@ -607,52 +611,64 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>   		gup_flags |= FOLL_WRITE;
>   
>   	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
> -	if (!npages)
> -		return -EINVAL;
> +	if (!npages) {
> +		ret = -EINVAL;
> +		goto free;
> +	}
>   
>   	mmap_read_lock(dev->mm);
>   
> -	locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
>   	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
> -
> -	if (locked > lock_limit) {
> +	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
>   		ret = -ENOMEM;
> -		goto out;
> +		goto unlock;
>   	}
>   
>   	cur_base = msg->uaddr & PAGE_MASK;
>   	iova &= PAGE_MASK;
> +	nchunks = 0;
>   
>   	while (npages) {
> -		pinned = min_t(unsigned long, npages, list_size);
> -		ret = pin_user_pages(cur_base, pinned,
> -				     gup_flags, page_list, NULL);
> -		if (ret != pinned)
> +		sz2pin = min_t(unsigned long, npages, list_size);
> +		pinned = pin_user_pages(cur_base, sz2pin,
> +					gup_flags, page_list, NULL);
> +		if (sz2pin != pinned) {
> +			if (pinned < 0) {
> +				ret = pinned;
> +			} else {
> +				unpin_user_pages(page_list, pinned);
> +				ret = -ENOMEM;
> +			}
>   			goto out;
> +		}
> +		nchunks++;
>   
>   		if (!last_pfn)
>   			map_pfn = page_to_pfn(page_list[0]);
>   
> -		for (i = 0; i < ret; i++) {
> +		for (i = 0; i < pinned; i++) {
>   			unsigned long this_pfn = page_to_pfn(page_list[i]);
>   			u64 csize;
>   
>   			if (last_pfn && (this_pfn != last_pfn + 1)) {
>   				/* Pin a contiguous chunk of memory */
>   				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
> -				if (vhost_vdpa_map(v, iova, csize,
> -						   map_pfn << PAGE_SHIFT,
> -						   msg->perm))
> +				ret = vhost_vdpa_map(v, iova, csize,
> +						     map_pfn << PAGE_SHIFT,
> +						     msg->perm);
> +				if (ret)
>   					goto out;
> +
>   				map_pfn = this_pfn;
>   				iova += csize;
> +				nchunks = 0;
>   			}
>   
>   			last_pfn = this_pfn;
>   		}
>   
> -		cur_base += ret << PAGE_SHIFT;
> -		npages -= ret;
> +		cur_base += pinned << PAGE_SHIFT;
> +		npages -= pinned;
>   	}
>   
>   	/* Pin the rest chunk */
> @@ -660,10 +676,22 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>   			     map_pfn << PAGE_SHIFT, msg->perm);
>   out:
>   	if (ret) {
> +		if (nchunks && last_pfn) {
> +			unsigned long pfn;
> +
> +			/*
> +			 * Unpin the outstanding pages which are unmapped.
> +			 * Mapped pages are accounted in vdpa_map(), thus
> +			 * will be handled by vdpa_unmap().
> +			 */
> +			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
> +				unpin_user_page(pfn_to_page(pfn));
> +		}
>   		vhost_vdpa_unmap(v, msg->iova, msg->size);


I want to know what's wrong with current code.

We call vhost_vdpa_unmap() on error which calls vhost_vdpa_iotlb_unmap() 
that will unpin and reduce the pinned_vm.
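
For reference, the unwind path being referred to looks roughly like the
sketch below (a simplified rendering of vhost_vdpa_iotlb_unmap() in
drivers/vhost/vdpa.c around this time, not part of either patch; details
such as the dirty-page handling may differ slightly):

	static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
					   u64 start, u64 last)
	{
		struct vhost_dev *dev = &v->vdev;
		struct vhost_iotlb *iotlb = dev->iotlb;
		struct vhost_iotlb_map *map;
		struct page *page;
		unsigned long pfn, pinned;

		while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
			pinned = map->size >> PAGE_SHIFT;
			for (pfn = map->addr >> PAGE_SHIFT;
			     pinned > 0; pfn++, pinned--) {
				page = pfn_to_page(pfn);
				if (map->perm & VHOST_ACCESS_WO)
					set_page_dirty_lock(page);
				/* drop the pin taken in the update path */
				unpin_user_page(page);
			}
			/* undo the accounting for this mapped range */
			atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
			vhost_iotlb_map_free(iotlb, map);
		}
	}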

Thanks


> -		atomic64_sub(npages, &dev->mm->pinned_vm);
>   	}
> +unlock:
>   	mmap_read_unlock(dev->mm);
> +free:
>   	free_page((unsigned long)page_list);
>   	return ret;
>   }



* Re: [PATCH 2/2] vhost-vdpa: fix page pinning leakage in error path (rework)
  2020-11-03 13:00   ` Jason Wang
@ 2020-11-04  1:06     ` si-wei liu
  2020-11-04  1:08       ` si-wei liu
  0 siblings, 1 reply; 12+ messages in thread
From: si-wei liu @ 2020-11-04  1:06 UTC (permalink / raw)
  To: Jason Wang, mst, lingshan.zhu
  Cc: joao.m.martins, boris.ostrovsky, linux-kernel, virtualization


On 11/3/2020 5:00 AM, Jason Wang wrote:
>
> On 2020/10/30 3:45 PM, Si-Wei Liu wrote:
>> Pinned pages are not properly accounted particularly when
>> mapping error occurs on IOTLB update. Clean up dangling
>> pinned pages for the error path.
>>
>> The memory usage for bookkeeping pinned pages is reverted
>> to what it was before: only one single free page is needed.
>> This helps reduce the host memory demand for VM with a large
>> amount of memory, or in the situation where host is running
>> short of free memory.
>>
>> Fixes: 4c8cf31885f6 ("vhost: introduce vDPA-based backend")
>> Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
>> ---
>>   drivers/vhost/vdpa.c | 64 
>> +++++++++++++++++++++++++++++++++++++---------------
>>   1 file changed, 46 insertions(+), 18 deletions(-)
>>
>> diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
>> index b6d9016..8da8558 100644
>> --- a/drivers/vhost/vdpa.c
>> +++ b/drivers/vhost/vdpa.c
>> @@ -560,6 +560,8 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
>>         if (r)
>>           vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
>> +    else
>> +        atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);
>>         return r;
>>   }
>> @@ -591,14 +593,16 @@ static int 
>> vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>>       unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
>>       unsigned int gup_flags = FOLL_LONGTERM;
>>       unsigned long npages, cur_base, map_pfn, last_pfn = 0;
>> -    unsigned long locked, lock_limit, pinned, i;
>> +    unsigned long lock_limit, sz2pin, nchunks, i;
>>       u64 iova = msg->iova;
>> +    long pinned;
>>       int ret = 0;
>>         if (vhost_iotlb_itree_first(iotlb, msg->iova,
>>                       msg->iova + msg->size - 1))
>>           return -EEXIST;
>>   +    /* Limit the use of memory for bookkeeping */
>>       page_list = (struct page **) __get_free_page(GFP_KERNEL);
>>       if (!page_list)
>>           return -ENOMEM;
>> @@ -607,52 +611,64 @@ static int 
>> vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>>           gup_flags |= FOLL_WRITE;
>>         npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> 
>> PAGE_SHIFT;
>> -    if (!npages)
>> -        return -EINVAL;
>> +    if (!npages) {
>> +        ret = -EINVAL;
>> +        goto free;
>> +    }
>>         mmap_read_lock(dev->mm);
>>   -    locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
>>       lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
>> -
>> -    if (locked > lock_limit) {
>> +    if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
>>           ret = -ENOMEM;
>> -        goto out;
>> +        goto unlock;
>>       }
>>         cur_base = msg->uaddr & PAGE_MASK;
>>       iova &= PAGE_MASK;
>> +    nchunks = 0;
>>         while (npages) {
>> -        pinned = min_t(unsigned long, npages, list_size);
>> -        ret = pin_user_pages(cur_base, pinned,
>> -                     gup_flags, page_list, NULL);
>> -        if (ret != pinned)
>> +        sz2pin = min_t(unsigned long, npages, list_size);
>> +        pinned = pin_user_pages(cur_base, sz2pin,
>> +                    gup_flags, page_list, NULL);
>> +        if (sz2pin != pinned) {
>> +            if (pinned < 0) {
>> +                ret = pinned;
>> +            } else {
>> +                unpin_user_pages(page_list, pinned);
>> +                ret = -ENOMEM;
>> +            }
>>               goto out;
>> +        }
>> +        nchunks++;
>>             if (!last_pfn)
>>               map_pfn = page_to_pfn(page_list[0]);
>>   -        for (i = 0; i < ret; i++) {
>> +        for (i = 0; i < pinned; i++) {
>>               unsigned long this_pfn = page_to_pfn(page_list[i]);
>>               u64 csize;
>>                 if (last_pfn && (this_pfn != last_pfn + 1)) {
>>                   /* Pin a contiguous chunk of memory */
>>                   csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
>> -                if (vhost_vdpa_map(v, iova, csize,
>> -                           map_pfn << PAGE_SHIFT,
>> -                           msg->perm))
>> +                ret = vhost_vdpa_map(v, iova, csize,
>> +                             map_pfn << PAGE_SHIFT,
>> +                             msg->perm);
>> +                if (ret)
>>                       goto out;
>> +
>>                   map_pfn = this_pfn;
>>                   iova += csize;
>> +                nchunks = 0;
>>               }
>>                 last_pfn = this_pfn;
>>           }
>>   -        cur_base += ret << PAGE_SHIFT;
>> -        npages -= ret;
>> +        cur_base += pinned << PAGE_SHIFT;
>> +        npages -= pinned;
>>       }
>>         /* Pin the rest chunk */
>> @@ -660,10 +676,22 @@ static int 
>> vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>>                    map_pfn << PAGE_SHIFT, msg->perm);
>>   out:
>>       if (ret) {
>> +        if (nchunks && last_pfn) {
>> +            unsigned long pfn;
>> +
>> +            /*
>> +             * Unpin the outstanding pages which are unmapped.
>> +             * Mapped pages are accounted in vdpa_map(), thus
>> +             * will be handled by vdpa_unmap().
>> +             */
>> +            for (pfn = map_pfn; pfn <= last_pfn; pfn++)
>> +                unpin_user_page(pfn_to_page(pfn));
>> +        }
>>           vhost_vdpa_unmap(v, msg->iova, msg->size);
>
>
> I want to know what's wrong with current code.
>
> We call vhost_vdpa_unmap() on error which calls 
> vhost_vdpa_iotlb_unmap() that will unpin and reduce the pinned_vm.
Think about the case where vhost_vdpa_map() fails in the middle after 
making a few successful ones. In the current code, the 
vhost_vdpa_iotlb_unmap() unpins what had been mapped, but does not unpin 
those that have not yet been mapped. These outstanding pinned pages will 
be leaked after leaving the vhost_vdpa_map() function.

Also, the subtraction accounting at the end of the function is incorrect 
in that case: @npages is decremented by @pinned in each iteration. That's 
why I moved the accounting to vhost_vdpa_map() to be symmetric with 
vhost_vdpa_unmap().
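
As a concrete illustration (the range numbers here are hypothetical),
suppose two contiguous ranges have been pinned and mapped successfully
and the next vhost_vdpa_map() call fails:

	range 0: pinned + mapped    -> unpinned by vhost_vdpa_iotlb_unmap()
	range 1: pinned + mapped    -> unpinned by vhost_vdpa_iotlb_unmap()
	range 2: pinned, not mapped -> leaked unless the caller unpins it

The error path added by this patch unpins that outstanding range:

	if (nchunks && last_pfn) {
		/* contiguous range pinned but not yet mapped */
		for (pfn = map_pfn; pfn <= last_pfn; pfn++)
			unpin_user_page(pfn_to_page(pfn));
	}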


-Siwei
>
> Thanks
>
>
>> -        atomic64_sub(npages, &dev->mm->pinned_vm);
>>       }
>> +unlock:
>>       mmap_read_unlock(dev->mm);
>> +free:
>>       free_page((unsigned long)page_list);
>>       return ret;
>>   }
>



* Re: [PATCH 2/2] vhost-vdpa: fix page pinning leakage in error path (rework)
  2020-11-04  1:06     ` si-wei liu
@ 2020-11-04  1:08       ` si-wei liu
  2020-11-04  1:58         ` Jason Wang
  0 siblings, 1 reply; 12+ messages in thread
From: si-wei liu @ 2020-11-04  1:08 UTC (permalink / raw)
  To: Jason Wang, mst, lingshan.zhu
  Cc: joao.m.martins, boris.ostrovsky, linux-kernel, virtualization


On 11/3/2020 5:06 PM, si-wei liu wrote:
>
> On 11/3/2020 5:00 AM, Jason Wang wrote:
>>
>> On 2020/10/30 3:45 PM, Si-Wei Liu wrote:
>>> Pinned pages are not properly accounted particularly when
>>> mapping error occurs on IOTLB update. Clean up dangling
>>> pinned pages for the error path.
>>>
>>> The memory usage for bookkeeping pinned pages is reverted
>>> to what it was before: only one single free page is needed.
>>> This helps reduce the host memory demand for VM with a large
>>> amount of memory, or in the situation where host is running
>>> short of free memory.
>>>
>>> Fixes: 4c8cf31885f6 ("vhost: introduce vDPA-based backend")
>>> Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
>>> ---
>>>   drivers/vhost/vdpa.c | 64 
>>> +++++++++++++++++++++++++++++++++++++---------------
>>>   1 file changed, 46 insertions(+), 18 deletions(-)
>>>
>>> diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
>>> index b6d9016..8da8558 100644
>>> --- a/drivers/vhost/vdpa.c
>>> +++ b/drivers/vhost/vdpa.c
>>> @@ -560,6 +560,8 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
>>>         if (r)
>>>           vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
>>> +    else
>>> +        atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);
>>>         return r;
>>>   }
>>> @@ -591,14 +593,16 @@ static int 
>>> vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>>>       unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
>>>       unsigned int gup_flags = FOLL_LONGTERM;
>>>       unsigned long npages, cur_base, map_pfn, last_pfn = 0;
>>> -    unsigned long locked, lock_limit, pinned, i;
>>> +    unsigned long lock_limit, sz2pin, nchunks, i;
>>>       u64 iova = msg->iova;
>>> +    long pinned;
>>>       int ret = 0;
>>>         if (vhost_iotlb_itree_first(iotlb, msg->iova,
>>>                       msg->iova + msg->size - 1))
>>>           return -EEXIST;
>>>   +    /* Limit the use of memory for bookkeeping */
>>>       page_list = (struct page **) __get_free_page(GFP_KERNEL);
>>>       if (!page_list)
>>>           return -ENOMEM;
>>> @@ -607,52 +611,64 @@ static int 
>>> vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>>>           gup_flags |= FOLL_WRITE;
>>>         npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> 
>>> PAGE_SHIFT;
>>> -    if (!npages)
>>> -        return -EINVAL;
>>> +    if (!npages) {
>>> +        ret = -EINVAL;
>>> +        goto free;
>>> +    }
>>>         mmap_read_lock(dev->mm);
>>>   -    locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
>>>       lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
>>> -
>>> -    if (locked > lock_limit) {
>>> +    if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
>>>           ret = -ENOMEM;
>>> -        goto out;
>>> +        goto unlock;
>>>       }
>>>         cur_base = msg->uaddr & PAGE_MASK;
>>>       iova &= PAGE_MASK;
>>> +    nchunks = 0;
>>>         while (npages) {
>>> -        pinned = min_t(unsigned long, npages, list_size);
>>> -        ret = pin_user_pages(cur_base, pinned,
>>> -                     gup_flags, page_list, NULL);
>>> -        if (ret != pinned)
>>> +        sz2pin = min_t(unsigned long, npages, list_size);
>>> +        pinned = pin_user_pages(cur_base, sz2pin,
>>> +                    gup_flags, page_list, NULL);
>>> +        if (sz2pin != pinned) {
>>> +            if (pinned < 0) {
>>> +                ret = pinned;
>>> +            } else {
>>> +                unpin_user_pages(page_list, pinned);
>>> +                ret = -ENOMEM;
>>> +            }
>>>               goto out;
>>> +        }
>>> +        nchunks++;
>>>             if (!last_pfn)
>>>               map_pfn = page_to_pfn(page_list[0]);
>>>   -        for (i = 0; i < ret; i++) {
>>> +        for (i = 0; i < pinned; i++) {
>>>               unsigned long this_pfn = page_to_pfn(page_list[i]);
>>>               u64 csize;
>>>                 if (last_pfn && (this_pfn != last_pfn + 1)) {
>>>                   /* Pin a contiguous chunk of memory */
>>>                   csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
>>> -                if (vhost_vdpa_map(v, iova, csize,
>>> -                           map_pfn << PAGE_SHIFT,
>>> -                           msg->perm))
>>> +                ret = vhost_vdpa_map(v, iova, csize,
>>> +                             map_pfn << PAGE_SHIFT,
>>> +                             msg->perm);
>>> +                if (ret)
>>>                       goto out;
>>> +
>>>                   map_pfn = this_pfn;
>>>                   iova += csize;
>>> +                nchunks = 0;
>>>               }
>>>                 last_pfn = this_pfn;
>>>           }
>>>   -        cur_base += ret << PAGE_SHIFT;
>>> -        npages -= ret;
>>> +        cur_base += pinned << PAGE_SHIFT;
>>> +        npages -= pinned;
>>>       }
>>>         /* Pin the rest chunk */
>>> @@ -660,10 +676,22 @@ static int 
>>> vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>>>                    map_pfn << PAGE_SHIFT, msg->perm);
>>>   out:
>>>       if (ret) {
>>> +        if (nchunks && last_pfn) {
>>> +            unsigned long pfn;
>>> +
>>> +            /*
>>> +             * Unpin the outstanding pages which are unmapped.
>>> +             * Mapped pages are accounted in vdpa_map(), thus
>>> +             * will be handled by vdpa_unmap().
>>> +             */
>>> +            for (pfn = map_pfn; pfn <= last_pfn; pfn++)
>>> +                unpin_user_page(pfn_to_page(pfn));
>>> +        }
>>>           vhost_vdpa_unmap(v, msg->iova, msg->size);
>>
>>
>> I want to know what's wrong with current code.
>>
>> We call vhost_vdpa_unmap() on error which calls 
>> vhost_vdpa_iotlb_unmap() that will unpin and reduce the pinned_vm.
> Think about the case where vhost_vdpa_map() fails in the middle after 
> making a few successful ones. In the current code, the 
> vhost_vdpa_iotlb_unmap() unpins what had been mapped, but does not 
> unpin those that have not yet been mapped. These outstanding pinned 
> pages will be leaked after leaving the vhost_vdpa_map() function.
Typo: ... leaving the vhost_vdpa_process_iotlb_update() function.
>
> Also, the subtraction accounting at the end of the function is 
> incorrect in that case: @npages is deduced by @pinned in each 
> iteration. That's why I moved the accounting to vhost_vdpa_map() to be 
> symmetric with vhost_vdpa_unmap().
>
>
> -Siwei
>>
>> Thanks
>>
>>
>>> -        atomic64_sub(npages, &dev->mm->pinned_vm);
>>>       }
>>> +unlock:
>>>       mmap_read_unlock(dev->mm);
>>> +free:
>>>       free_page((unsigned long)page_list);
>>>       return ret;
>>>   }
>>
>



* Re: [PATCH 2/2] vhost-vdpa: fix page pinning leakage in error path (rework)
  2020-11-04  1:08       ` si-wei liu
@ 2020-11-04  1:58         ` Jason Wang
  2020-11-04  2:14           ` si-wei liu
  0 siblings, 1 reply; 12+ messages in thread
From: Jason Wang @ 2020-11-04  1:58 UTC (permalink / raw)
  To: si-wei liu, mst, lingshan.zhu
  Cc: joao.m.martins, boris.ostrovsky, linux-kernel, virtualization


On 2020/11/4 9:08 AM, si-wei liu wrote:
>
> On 11/3/2020 5:06 PM, si-wei liu wrote:
>>
>> On 11/3/2020 5:00 AM, Jason Wang wrote:
>>>
>>> On 2020/10/30 3:45 PM, Si-Wei Liu wrote:
>>>> Pinned pages are not properly accounted particularly when
>>>> mapping error occurs on IOTLB update. Clean up dangling
>>>> pinned pages for the error path.
>>>>
>>>> The memory usage for bookkeeping pinned pages is reverted
>>>> to what it was before: only one single free page is needed.
>>>> This helps reduce the host memory demand for VM with a large
>>>> amount of memory, or in the situation where host is running
>>>> short of free memory.
>>>>
>>>> Fixes: 4c8cf31885f6 ("vhost: introduce vDPA-based backend")
>>>> Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
>>>> ---
>>>>   drivers/vhost/vdpa.c | 64 
>>>> +++++++++++++++++++++++++++++++++++++---------------
>>>>   1 file changed, 46 insertions(+), 18 deletions(-)
>>>>
>>>> diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
>>>> index b6d9016..8da8558 100644
>>>> --- a/drivers/vhost/vdpa.c
>>>> +++ b/drivers/vhost/vdpa.c
>>>> @@ -560,6 +560,8 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
>>>>         if (r)
>>>>           vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
>>>> +    else
>>>> +        atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);
>>>>         return r;
>>>>   }
>>>> @@ -591,14 +593,16 @@ static int 
>>>> vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>>>>       unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
>>>>       unsigned int gup_flags = FOLL_LONGTERM;
>>>>       unsigned long npages, cur_base, map_pfn, last_pfn = 0;
>>>> -    unsigned long locked, lock_limit, pinned, i;
>>>> +    unsigned long lock_limit, sz2pin, nchunks, i;
>>>>       u64 iova = msg->iova;
>>>> +    long pinned;
>>>>       int ret = 0;
>>>>         if (vhost_iotlb_itree_first(iotlb, msg->iova,
>>>>                       msg->iova + msg->size - 1))
>>>>           return -EEXIST;
>>>>   +    /* Limit the use of memory for bookkeeping */
>>>>       page_list = (struct page **) __get_free_page(GFP_KERNEL);
>>>>       if (!page_list)
>>>>           return -ENOMEM;
>>>> @@ -607,52 +611,64 @@ static int 
>>>> vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>>>>           gup_flags |= FOLL_WRITE;
>>>>         npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> 
>>>> PAGE_SHIFT;
>>>> -    if (!npages)
>>>> -        return -EINVAL;
>>>> +    if (!npages) {
>>>> +        ret = -EINVAL;
>>>> +        goto free;
>>>> +    }
>>>>         mmap_read_lock(dev->mm);
>>>>   -    locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
>>>>       lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
>>>> -
>>>> -    if (locked > lock_limit) {
>>>> +    if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
>>>>           ret = -ENOMEM;
>>>> -        goto out;
>>>> +        goto unlock;
>>>>       }
>>>>         cur_base = msg->uaddr & PAGE_MASK;
>>>>       iova &= PAGE_MASK;
>>>> +    nchunks = 0;
>>>>         while (npages) {
>>>> -        pinned = min_t(unsigned long, npages, list_size);
>>>> -        ret = pin_user_pages(cur_base, pinned,
>>>> -                     gup_flags, page_list, NULL);
>>>> -        if (ret != pinned)
>>>> +        sz2pin = min_t(unsigned long, npages, list_size);
>>>> +        pinned = pin_user_pages(cur_base, sz2pin,
>>>> +                    gup_flags, page_list, NULL);
>>>> +        if (sz2pin != pinned) {
>>>> +            if (pinned < 0) {
>>>> +                ret = pinned;
>>>> +            } else {
>>>> +                unpin_user_pages(page_list, pinned);
>>>> +                ret = -ENOMEM;
>>>> +            }
>>>>               goto out;
>>>> +        }
>>>> +        nchunks++;
>>>>             if (!last_pfn)
>>>>               map_pfn = page_to_pfn(page_list[0]);
>>>>   -        for (i = 0; i < ret; i++) {
>>>> +        for (i = 0; i < pinned; i++) {
>>>>               unsigned long this_pfn = page_to_pfn(page_list[i]);
>>>>               u64 csize;
>>>>                 if (last_pfn && (this_pfn != last_pfn + 1)) {
>>>>                   /* Pin a contiguous chunk of memory */
>>>>                   csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
>>>> -                if (vhost_vdpa_map(v, iova, csize,
>>>> -                           map_pfn << PAGE_SHIFT,
>>>> -                           msg->perm))
>>>> +                ret = vhost_vdpa_map(v, iova, csize,
>>>> +                             map_pfn << PAGE_SHIFT,
>>>> +                             msg->perm);
>>>> +                if (ret)
>>>>                       goto out;
>>>> +
>>>>                   map_pfn = this_pfn;
>>>>                   iova += csize;
>>>> +                nchunks = 0;
>>>>               }
>>>>                 last_pfn = this_pfn;
>>>>           }
>>>>   -        cur_base += ret << PAGE_SHIFT;
>>>> -        npages -= ret;
>>>> +        cur_base += pinned << PAGE_SHIFT;
>>>> +        npages -= pinned;
>>>>       }
>>>>         /* Pin the rest chunk */
>>>> @@ -660,10 +676,22 @@ static int 
>>>> vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>>>>                    map_pfn << PAGE_SHIFT, msg->perm);
>>>>   out:
>>>>       if (ret) {
>>>> +        if (nchunks && last_pfn) {
>>>> +            unsigned long pfn;
>>>> +
>>>> +            /*
>>>> +             * Unpin the outstanding pages which are unmapped.
>>>> +             * Mapped pages are accounted in vdpa_map(), thus
>>>> +             * will be handled by vdpa_unmap().
>>>> +             */
>>>> +            for (pfn = map_pfn; pfn <= last_pfn; pfn++)
>>>> +                unpin_user_page(pfn_to_page(pfn));
>>>> +        }
>>>>           vhost_vdpa_unmap(v, msg->iova, msg->size);
>>>
>>>
>>> I want to know what's wrong with current code.
>>>
>>> We call vhost_vdpa_unmap() on error which calls 
>>> vhost_vdpa_iotlb_unmap() that will unpin and reduce the pinned_vm.
>> Think about the case where vhost_vdpa_map() fails in the middle after 
>> making a few successful ones. In the current code, the 
>> vhost_vdpa_iotlb_unmap() unpins what had been mapped, but does not 
>> unpin those that have not yet been mapped. These outstanding pinned 
>> pages will be leaked after leaving the vhost_vdpa_map() function.
> Typo: ... leaving the vhost_vdpa_process_iotlb_update() function.
>>
>> Also, the subtraction accounting at the end of the function is 
>> incorrect in that case: @npages is decremented by @pinned in each 
>> iteration. That's why I moved the accounting to vhost_vdpa_map() to 
>> be symmetric with vhost_vdpa_unmap().
>>

I see, then I wonder if the code would be easier to read if we did the 
(un)pinning/accounting only in vhost_vdpa_map()/vhost_vdpa_unmap()?
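
A rough sketch of one way to read that suggestion (entirely hypothetical,
not what this patch does; the helper name and its division of labour are
invented for illustration):

	/* Map one pinned, contiguous pfn range; on failure undo its pins. */
	static int vhost_vdpa_map_chunk(struct vhost_vdpa *v, u64 iova,
					u64 size, unsigned long map_pfn,
					u32 perm)
	{
		int r = vhost_vdpa_map(v, iova, size,
				       map_pfn << PAGE_SHIFT, perm);

		if (r) {
			unsigned long pfn;
			unsigned long last = map_pfn + (size >> PAGE_SHIFT) - 1;

			/* this range was pinned but never mapped: unpin here */
			for (pfn = map_pfn; pfn <= last; pfn++)
				unpin_user_page(pfn_to_page(pfn));
		}
		return r;
	}

Even with a helper like this, the caller would still need to release any
pages that were pinned but never handed to it, which is the point made in
the reply below.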

Thanks


>>
>> -Siwei
>>>
>>> Thanks
>>>
>>>
>>>> -        atomic64_sub(npages, &dev->mm->pinned_vm);
>>>>       }
>>>> +unlock:
>>>>       mmap_read_unlock(dev->mm);
>>>> +free:
>>>>       free_page((unsigned long)page_list);
>>>>       return ret;
>>>>   }
>>>
>>
>



* Re: [PATCH 2/2] vhost-vdpa: fix page pinning leakage in error path (rework)
  2020-11-04  1:58         ` Jason Wang
@ 2020-11-04  2:14           ` si-wei liu
  0 siblings, 0 replies; 12+ messages in thread
From: si-wei liu @ 2020-11-04  2:14 UTC (permalink / raw)
  To: Jason Wang, mst, lingshan.zhu
  Cc: joao.m.martins, boris.ostrovsky, linux-kernel, virtualization


On 11/3/2020 5:58 PM, Jason Wang wrote:
>
> On 2020/11/4 9:08 AM, si-wei liu wrote:
>>
>> On 11/3/2020 5:06 PM, si-wei liu wrote:
>>>
>>> On 11/3/2020 5:00 AM, Jason Wang wrote:
>>>>
>>>> On 2020/10/30 3:45 PM, Si-Wei Liu wrote:
>>>>> Pinned pages are not properly accounted particularly when
>>>>> mapping error occurs on IOTLB update. Clean up dangling
>>>>> pinned pages for the error path.
>>>>>
>>>>> The memory usage for bookkeeping pinned pages is reverted
>>>>> to what it was before: only one single free page is needed.
>>>>> This helps reduce the host memory demand for VM with a large
>>>>> amount of memory, or in the situation where host is running
>>>>> short of free memory.
>>>>>
>>>>> Fixes: 4c8cf31885f6 ("vhost: introduce vDPA-based backend")
>>>>> Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
>>>>> ---
>>>>>   drivers/vhost/vdpa.c | 64 
>>>>> +++++++++++++++++++++++++++++++++++++---------------
>>>>>   1 file changed, 46 insertions(+), 18 deletions(-)
>>>>>
>>>>> diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
>>>>> index b6d9016..8da8558 100644
>>>>> --- a/drivers/vhost/vdpa.c
>>>>> +++ b/drivers/vhost/vdpa.c
>>>>> @@ -560,6 +560,8 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
>>>>>         if (r)
>>>>>           vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
>>>>> +    else
>>>>> +        atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);
>>>>>         return r;
>>>>>   }
>>>>> @@ -591,14 +593,16 @@ static int 
>>>>> vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>>>>>       unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
>>>>>       unsigned int gup_flags = FOLL_LONGTERM;
>>>>>       unsigned long npages, cur_base, map_pfn, last_pfn = 0;
>>>>> -    unsigned long locked, lock_limit, pinned, i;
>>>>> +    unsigned long lock_limit, sz2pin, nchunks, i;
>>>>>       u64 iova = msg->iova;
>>>>> +    long pinned;
>>>>>       int ret = 0;
>>>>>         if (vhost_iotlb_itree_first(iotlb, msg->iova,
>>>>>                       msg->iova + msg->size - 1))
>>>>>           return -EEXIST;
>>>>>   +    /* Limit the use of memory for bookkeeping */
>>>>>       page_list = (struct page **) __get_free_page(GFP_KERNEL);
>>>>>       if (!page_list)
>>>>>           return -ENOMEM;
>>>>> @@ -607,52 +611,64 @@ static int 
>>>>> vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>>>>>           gup_flags |= FOLL_WRITE;
>>>>>         npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> 
>>>>> PAGE_SHIFT;
>>>>> -    if (!npages)
>>>>> -        return -EINVAL;
>>>>> +    if (!npages) {
>>>>> +        ret = -EINVAL;
>>>>> +        goto free;
>>>>> +    }
>>>>>         mmap_read_lock(dev->mm);
>>>>>   -    locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
>>>>>       lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
>>>>> -
>>>>> -    if (locked > lock_limit) {
>>>>> +    if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
>>>>>           ret = -ENOMEM;
>>>>> -        goto out;
>>>>> +        goto unlock;
>>>>>       }
>>>>>         cur_base = msg->uaddr & PAGE_MASK;
>>>>>       iova &= PAGE_MASK;
>>>>> +    nchunks = 0;
>>>>>         while (npages) {
>>>>> -        pinned = min_t(unsigned long, npages, list_size);
>>>>> -        ret = pin_user_pages(cur_base, pinned,
>>>>> -                     gup_flags, page_list, NULL);
>>>>> -        if (ret != pinned)
>>>>> +        sz2pin = min_t(unsigned long, npages, list_size);
>>>>> +        pinned = pin_user_pages(cur_base, sz2pin,
>>>>> +                    gup_flags, page_list, NULL);
>>>>> +        if (sz2pin != pinned) {
>>>>> +            if (pinned < 0) {
>>>>> +                ret = pinned;
>>>>> +            } else {
>>>>> +                unpin_user_pages(page_list, pinned);
>>>>> +                ret = -ENOMEM;
>>>>> +            }
>>>>>               goto out;
>>>>> +        }
>>>>> +        nchunks++;
>>>>>             if (!last_pfn)
>>>>>               map_pfn = page_to_pfn(page_list[0]);
>>>>>   -        for (i = 0; i < ret; i++) {
>>>>> +        for (i = 0; i < pinned; i++) {
>>>>>               unsigned long this_pfn = page_to_pfn(page_list[i]);
>>>>>               u64 csize;
>>>>>                 if (last_pfn && (this_pfn != last_pfn + 1)) {
>>>>>                   /* Pin a contiguous chunk of memory */
>>>>>                   csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
>>>>> -                if (vhost_vdpa_map(v, iova, csize,
>>>>> -                           map_pfn << PAGE_SHIFT,
>>>>> -                           msg->perm))
>>>>> +                ret = vhost_vdpa_map(v, iova, csize,
>>>>> +                             map_pfn << PAGE_SHIFT,
>>>>> +                             msg->perm);
>>>>> +                if (ret)
>>>>>                       goto out;
>>>>> +
>>>>>                   map_pfn = this_pfn;
>>>>>                   iova += csize;
>>>>> +                nchunks = 0;
>>>>>               }
>>>>>                 last_pfn = this_pfn;
>>>>>           }
>>>>>   -        cur_base += ret << PAGE_SHIFT;
>>>>> -        npages -= ret;
>>>>> +        cur_base += pinned << PAGE_SHIFT;
>>>>> +        npages -= pinned;
>>>>>       }
>>>>>         /* Pin the rest chunk */
>>>>> @@ -660,10 +676,22 @@ static int 
>>>>> vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>>>>>                    map_pfn << PAGE_SHIFT, msg->perm);
>>>>>   out:
>>>>>       if (ret) {
>>>>> +        if (nchunks && last_pfn) {
>>>>> +            unsigned long pfn;
>>>>> +
>>>>> +            /*
>>>>> +             * Unpin the outstanding pages which are unmapped.
>>>>> +             * Mapped pages are accounted in vdpa_map(), thus
>>>>> +             * will be handled by vdpa_unmap().
>>>>> +             */
>>>>> +            for (pfn = map_pfn; pfn <= last_pfn; pfn++)
>>>>> +                unpin_user_page(pfn_to_page(pfn));
>>>>> +        }
>>>>>           vhost_vdpa_unmap(v, msg->iova, msg->size);
>>>>
>>>>
>>>> I want to know what's wrong with current code.
>>>>
>>>> We call vhost_vdpa_unmap() on error which calls 
>>>> vhost_vdpa_iotlb_unmap() that will unpin and reduce the pinned_vm.
>>> Think about the case where vhost_vdpa_map() fails in the middle 
>>> after making a few successful ones. In the current code, the 
>>> vhost_vdpa_iotlb_unmap() unpins what had been mapped, but does not 
>>> unpin those that have not yet been mapped. These outstanding pinned 
>>> pages will be leaked after leaving the vhost_vdpa_map() function.
>> Typo: ... leaving the vhost_vdpa_process_iotlb_update() function.
>>>
>>> Also, the subtraction accounting at the end of the function is 
>>> incorrect in that case: @npages is decremented by @pinned in each 
>>> iteration. That's why I moved the accounting to vhost_vdpa_map() to 
>>> be symmetric with vhost_vdpa_unmap().
>>>
>
> I see, then I wonder if the code would be easier to read if we did the 
> (un)pinning/accounting only in vhost_vdpa_map()/vhost_vdpa_unmap()?

Yes. That's what I've done in my new code. Though, the caller still has 
to unpin the outstanding pages that aren't accounted for in 
vhost_vdpa_map().

-Siwei


>
> Thanks
>
>
>>>
>>> -Siwei
>>>>
>>>> Thanks
>>>>
>>>>
>>>>> -        atomic64_sub(npages, &dev->mm->pinned_vm);
>>>>>       }
>>>>> +unlock:
>>>>>       mmap_read_unlock(dev->mm);
>>>>> +free:
>>>>>       free_page((unsigned long)page_list);
>>>>>       return ret;
>>>>>   }
>>>>
>>>
>>
>



* Re: [PATCH 2/2] vhost-vdpa: fix page pinning leakage in error path (rework)
  2020-10-30  7:45 ` [PATCH 2/2] vhost-vdpa: fix page pinning leakage in error path (rework) Si-Wei Liu
  2020-11-03 13:00   ` Jason Wang
@ 2020-11-04  2:42   ` Jason Wang
  2020-11-04 23:40     ` si-wei liu
  1 sibling, 1 reply; 12+ messages in thread
From: Jason Wang @ 2020-11-04  2:42 UTC (permalink / raw)
  To: Si-Wei Liu, mst, lingshan.zhu
  Cc: joao.m.martins, boris.ostrovsky, linux-kernel, virtualization


On 2020/10/30 3:45 PM, Si-Wei Liu wrote:
> Pinned pages are not properly accounted particularly when
> mapping error occurs on IOTLB update. Clean up dangling
> pinned pages for the error path.
>
> The memory usage for bookkeeping pinned pages is reverted
> to what it was before: only one single free page is needed.
> This helps reduce the host memory demand for VM with a large
> amount of memory, or in the situation where host is running
> short of free memory.
>
> Fixes: 4c8cf31885f6 ("vhost: introduce vDPA-based backend")
> Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
> ---
>   drivers/vhost/vdpa.c | 64 +++++++++++++++++++++++++++++++++++++---------------
>   1 file changed, 46 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
> index b6d9016..8da8558 100644
> --- a/drivers/vhost/vdpa.c
> +++ b/drivers/vhost/vdpa.c
> @@ -560,6 +560,8 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
>   
>   	if (r)
>   		vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
> +	else
> +		atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);
>   
>   	return r;
>   }
> @@ -591,14 +593,16 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>   	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
>   	unsigned int gup_flags = FOLL_LONGTERM;
>   	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
> -	unsigned long locked, lock_limit, pinned, i;
> +	unsigned long lock_limit, sz2pin, nchunks, i;
>   	u64 iova = msg->iova;
> +	long pinned;
>   	int ret = 0;
>   
>   	if (vhost_iotlb_itree_first(iotlb, msg->iova,
>   				    msg->iova + msg->size - 1))
>   		return -EEXIST;
>   
> +	/* Limit the use of memory for bookkeeping */
>   	page_list = (struct page **) __get_free_page(GFP_KERNEL);
>   	if (!page_list)
>   		return -ENOMEM;
> @@ -607,52 +611,64 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>   		gup_flags |= FOLL_WRITE;
>   
>   	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
> -	if (!npages)
> -		return -EINVAL;
> +	if (!npages) {
> +		ret = -EINVAL;
> +		goto free;
> +	}
>   
>   	mmap_read_lock(dev->mm);
>   
> -	locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
>   	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
> -
> -	if (locked > lock_limit) {
> +	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
>   		ret = -ENOMEM;
> -		goto out;
> +		goto unlock;
>   	}
>   
>   	cur_base = msg->uaddr & PAGE_MASK;
>   	iova &= PAGE_MASK;
> +	nchunks = 0;
>   
>   	while (npages) {
> -		pinned = min_t(unsigned long, npages, list_size);
> -		ret = pin_user_pages(cur_base, pinned,
> -				     gup_flags, page_list, NULL);
> -		if (ret != pinned)
> +		sz2pin = min_t(unsigned long, npages, list_size);
> +		pinned = pin_user_pages(cur_base, sz2pin,
> +					gup_flags, page_list, NULL);
> +		if (sz2pin != pinned) {
> +			if (pinned < 0) {
> +				ret = pinned;
> +			} else {
> +				unpin_user_pages(page_list, pinned);
> +				ret = -ENOMEM;
> +			}
>   			goto out;
> +		}
> +		nchunks++;
>   
>   		if (!last_pfn)
>   			map_pfn = page_to_pfn(page_list[0]);
>   
> -		for (i = 0; i < ret; i++) {
> +		for (i = 0; i < pinned; i++) {
>   			unsigned long this_pfn = page_to_pfn(page_list[i]);
>   			u64 csize;
>   
>   			if (last_pfn && (this_pfn != last_pfn + 1)) {
>   				/* Pin a contiguous chunk of memory */
>   				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
> -				if (vhost_vdpa_map(v, iova, csize,
> -						   map_pfn << PAGE_SHIFT,
> -						   msg->perm))
> +				ret = vhost_vdpa_map(v, iova, csize,
> +						     map_pfn << PAGE_SHIFT,
> +						     msg->perm);
> +				if (ret)
>   					goto out;
> +
>   				map_pfn = this_pfn;
>   				iova += csize;
> +				nchunks = 0;
>   			}
>   
>   			last_pfn = this_pfn;
>   		}
>   
> -		cur_base += ret << PAGE_SHIFT;
> -		npages -= ret;
> +		cur_base += pinned << PAGE_SHIFT;
> +		npages -= pinned;
>   	}
>   
>   	/* Pin the rest chunk */
> @@ -660,10 +676,22 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>   			     map_pfn << PAGE_SHIFT, msg->perm);
>   out:
>   	if (ret) {
> +		if (nchunks && last_pfn) {


Can we decrease npages where you did "nchunks++", so that we can check 
npages here instead?

Thanks


> +			unsigned long pfn;
> +
> +			/*
> +			 * Unpin the outstanding pages which are unmapped.
> +			 * Mapped pages are accounted in vdpa_map(), thus
> +			 * will be handled by vdpa_unmap().
> +			 */
> +			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
> +				unpin_user_page(pfn_to_page(pfn));
> +		}
>   		vhost_vdpa_unmap(v, msg->iova, msg->size);
> -		atomic64_sub(npages, &dev->mm->pinned_vm);
>   	}
> +unlock:
>   	mmap_read_unlock(dev->mm);
> +free:
>   	free_page((unsigned long)page_list);
>   	return ret;
>   }



* Re: [PATCH 2/2] vhost-vdpa: fix page pinning leakage in error path (rework)
  2020-11-04  2:42   ` Jason Wang
@ 2020-11-04 23:40     ` si-wei liu
  2020-11-05  3:12       ` Jason Wang
  0 siblings, 1 reply; 12+ messages in thread
From: si-wei liu @ 2020-11-04 23:40 UTC (permalink / raw)
  To: Jason Wang, mst, lingshan.zhu
  Cc: joao.m.martins, boris.ostrovsky, linux-kernel, virtualization


On 11/3/2020 6:42 PM, Jason Wang wrote:
>
> On 2020/10/30 3:45 PM, Si-Wei Liu wrote:
>> Pinned pages are not properly accounted particularly when
>> mapping error occurs on IOTLB update. Clean up dangling
>> pinned pages for the error path.
>>
>> The memory usage for bookkeeping pinned pages is reverted
>> to what it was before: only one single free page is needed.
>> This helps reduce the host memory demand for VM with a large
>> amount of memory, or in the situation where host is running
>> short of free memory.
>>
>> Fixes: 4c8cf31885f6 ("vhost: introduce vDPA-based backend")
>> Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
>> ---
>>   drivers/vhost/vdpa.c | 64 
>> +++++++++++++++++++++++++++++++++++++---------------
>>   1 file changed, 46 insertions(+), 18 deletions(-)
>>
>> diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
>> index b6d9016..8da8558 100644
>> --- a/drivers/vhost/vdpa.c
>> +++ b/drivers/vhost/vdpa.c
>> @@ -560,6 +560,8 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
>>         if (r)
>>           vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
>> +    else
>> +        atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);
>>         return r;
>>   }
>> @@ -591,14 +593,16 @@ static int 
>> vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>>       unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
>>       unsigned int gup_flags = FOLL_LONGTERM;
>>       unsigned long npages, cur_base, map_pfn, last_pfn = 0;
>> -    unsigned long locked, lock_limit, pinned, i;
>> +    unsigned long lock_limit, sz2pin, nchunks, i;
>>       u64 iova = msg->iova;
>> +    long pinned;
>>       int ret = 0;
>>         if (vhost_iotlb_itree_first(iotlb, msg->iova,
>>                       msg->iova + msg->size - 1))
>>           return -EEXIST;
>>   +    /* Limit the use of memory for bookkeeping */
>>       page_list = (struct page **) __get_free_page(GFP_KERNEL);
>>       if (!page_list)
>>           return -ENOMEM;
>> @@ -607,52 +611,64 @@ static int 
>> vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>>           gup_flags |= FOLL_WRITE;
>>         npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> 
>> PAGE_SHIFT;
>> -    if (!npages)
>> -        return -EINVAL;
>> +    if (!npages) {
>> +        ret = -EINVAL;
>> +        goto free;
>> +    }
>>         mmap_read_lock(dev->mm);
>>   -    locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
>>       lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
>> -
>> -    if (locked > lock_limit) {
>> +    if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
>>           ret = -ENOMEM;
>> -        goto out;
>> +        goto unlock;
>>       }
>>         cur_base = msg->uaddr & PAGE_MASK;
>>       iova &= PAGE_MASK;
>> +    nchunks = 0;
>>         while (npages) {
>> -        pinned = min_t(unsigned long, npages, list_size);
>> -        ret = pin_user_pages(cur_base, pinned,
>> -                     gup_flags, page_list, NULL);
>> -        if (ret != pinned)
>> +        sz2pin = min_t(unsigned long, npages, list_size);
>> +        pinned = pin_user_pages(cur_base, sz2pin,
>> +                    gup_flags, page_list, NULL);
>> +        if (sz2pin != pinned) {
>> +            if (pinned < 0) {
>> +                ret = pinned;
>> +            } else {
>> +                unpin_user_pages(page_list, pinned);
>> +                ret = -ENOMEM;
>> +            }
>>               goto out;
>> +        }
>> +        nchunks++;
>>             if (!last_pfn)
>>               map_pfn = page_to_pfn(page_list[0]);
>>   -        for (i = 0; i < ret; i++) {
>> +        for (i = 0; i < pinned; i++) {
>>               unsigned long this_pfn = page_to_pfn(page_list[i]);
>>               u64 csize;
>>                 if (last_pfn && (this_pfn != last_pfn + 1)) {
>>                   /* Pin a contiguous chunk of memory */
>>                   csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
>> -                if (vhost_vdpa_map(v, iova, csize,
>> -                           map_pfn << PAGE_SHIFT,
>> -                           msg->perm))
>> +                ret = vhost_vdpa_map(v, iova, csize,
>> +                             map_pfn << PAGE_SHIFT,
>> +                             msg->perm);
>> +                if (ret)
>>                       goto out;
>> +
>>                   map_pfn = this_pfn;
>>                   iova += csize;
>> +                nchunks = 0;
>>               }
>>                 last_pfn = this_pfn;
>>           }
>>   -        cur_base += ret << PAGE_SHIFT;
>> -        npages -= ret;
>> +        cur_base += pinned << PAGE_SHIFT;
>> +        npages -= pinned;
>>       }
>>         /* Pin the rest chunk */
>> @@ -660,10 +676,22 @@ static int 
>> vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>>                    map_pfn << PAGE_SHIFT, msg->perm);
>>   out:
>>       if (ret) {
>> +        if (nchunks && last_pfn) {
>
>
> Can we decrease npages where you did "nchunks++" then we can check 
> npages here instead?
Hmmm, I am not sure I get what you want... @nchunks gets reset to 0 
whenever a certain range of pinned pages is successfully mapped. The 
conditional (when nchunks is non-zero) here indicates whether there is 
any _outstanding_ pinned page that has to be cleaned up in the error 
handling path. The decrement of @npages, however, does not necessarily 
happen at the point where the @nchunks counter is reset, so checking 
@npages would lead to incorrect cleanup in the error path.
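
The behaviour described above boils down to the following tiny standalone
sketch (plain userspace C, not the driver code, and simplified to one map
per pin batch): @nchunks counts pin batches since the last successful map
and is cleared whenever a map succeeds, so a non-zero value in the error
path flags pinned pages that were never mapped.

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	/* three page_list-sized batches; pretend the map of batch 3 fails */
	const bool map_ok[] = { true, true, false };
	unsigned long nchunks = 0;

	for (int b = 0; b < 3; b++) {
		/* pin_user_pages() filled page_list for this batch */
		nchunks++;

		if (!map_ok[b])
			break;	/* error path: this run is pinned but unmapped */

		/*
		 * vhost_vdpa_map() succeeded: everything pinned so far is
		 * now tracked by the IOTLB and will be handled by vdpa_unmap().
		 */
		nchunks = 0;
	}

	if (nchunks)
		printf("%lu batch(es) pinned but never mapped -> unpin here\n",
		       nchunks);
	else
		printf("nothing outstanding, vdpa_unmap() covers the rest\n");
	return 0;
}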

BTW, while reviewing it I noticed an error in my code. There may still be 
a page pinning leak from the point where the vhost_vdpa_map() error occurs 
through the end of page_list. I will post a v2 to fix this.

Regards,
-Siwei

--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -656,8 +656,19 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
                                 ret = vhost_vdpa_map(v, iova, csize,
                                                      map_pfn << PAGE_SHIFT,
                                                      msg->perm);
-                               if (ret)
+                               if (ret) {
+                                       /*
+                                        * Unpin the pages that are left unmapped
+                                        * from this point on in the current
+                                        * page_list. The remaining outstanding
+                                        * ones which may stride across several
+                                        * chunks will be covered in the common
+                                        * error path subsequently.
+                                        */
+                                       unpin_user_pages(&page_list[i],
+                                                        pinned - i);
                                         goto out;
+                               }

                                 map_pfn = this_pfn;
                                 iova += csize;



>
> Thanks
>
>
>> +            unsigned long pfn;
>> +
>> +            /*
>> +             * Unpin the outstanding pages which are unmapped.
>> +             * Mapped pages are accounted in vdpa_map(), thus
>> +             * will be handled by vdpa_unmap().
>> +             */
>> +            for (pfn = map_pfn; pfn <= last_pfn; pfn++)
>> +                unpin_user_page(pfn_to_page(pfn));
>> +        }
>>           vhost_vdpa_unmap(v, msg->iova, msg->size);
>> -        atomic64_sub(npages, &dev->mm->pinned_vm);
>>       }
>> +unlock:
>>       mmap_read_unlock(dev->mm);
>> +free:
>>       free_page((unsigned long)page_list);
>>       return ret;
>>   }
>


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] vhost-vdpa: fix page pinning leakage in error path (rework)
  2020-11-04 23:40     ` si-wei liu
@ 2020-11-05  3:12       ` Jason Wang
  2020-11-05 22:40         ` si-wei liu
  0 siblings, 1 reply; 12+ messages in thread
From: Jason Wang @ 2020-11-05  3:12 UTC (permalink / raw)
  To: si-wei liu, mst, lingshan.zhu
  Cc: joao.m.martins, boris.ostrovsky, linux-kernel, virtualization


On 2020/11/5 上午7:40, si-wei liu wrote:
>
> On 11/3/2020 6:42 PM, Jason Wang wrote:
>>
>> On 2020/10/30 下午3:45, Si-Wei Liu wrote:
>>> Pinned pages are not properly accounted particularly when
>>> mapping error occurs on IOTLB update. Clean up dangling
>>> pinned pages for the error path.
>>>
>>> The memory usage for bookkeeping pinned pages is reverted
>>> to what it was before: only one single free page is needed.
>>> This helps reduce the host memory demand for VM with a large
>>> amount of memory, or in the situation where host is running
>>> short of free memory.
>>>
>>> Fixes: 4c8cf31885f6 ("vhost: introduce vDPA-based backend")
>>> Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
>>> ---
>>>   drivers/vhost/vdpa.c | 64 
>>> +++++++++++++++++++++++++++++++++++++---------------
>>>   1 file changed, 46 insertions(+), 18 deletions(-)
>>>
>>> diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
>>> index b6d9016..8da8558 100644
>>> --- a/drivers/vhost/vdpa.c
>>> +++ b/drivers/vhost/vdpa.c
>>> @@ -560,6 +560,8 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
>>>         if (r)
>>>           vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
>>> +    else
>>> +        atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);
>>>         return r;
>>>   }
>>> @@ -591,14 +593,16 @@ static int 
>>> vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>>>       unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
>>>       unsigned int gup_flags = FOLL_LONGTERM;
>>>       unsigned long npages, cur_base, map_pfn, last_pfn = 0;
>>> -    unsigned long locked, lock_limit, pinned, i;
>>> +    unsigned long lock_limit, sz2pin, nchunks, i;
>>>       u64 iova = msg->iova;
>>> +    long pinned;
>>>       int ret = 0;
>>>         if (vhost_iotlb_itree_first(iotlb, msg->iova,
>>>                       msg->iova + msg->size - 1))
>>>           return -EEXIST;
>>>   +    /* Limit the use of memory for bookkeeping */
>>>       page_list = (struct page **) __get_free_page(GFP_KERNEL);
>>>       if (!page_list)
>>>           return -ENOMEM;
>>> @@ -607,52 +611,64 @@ static int 
>>> vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>>>           gup_flags |= FOLL_WRITE;
>>>         npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> 
>>> PAGE_SHIFT;
>>> -    if (!npages)
>>> -        return -EINVAL;
>>> +    if (!npages) {
>>> +        ret = -EINVAL;
>>> +        goto free;
>>> +    }
>>>         mmap_read_lock(dev->mm);
>>>   -    locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
>>>       lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
>>> -
>>> -    if (locked > lock_limit) {
>>> +    if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
>>>           ret = -ENOMEM;
>>> -        goto out;
>>> +        goto unlock;
>>>       }
>>>         cur_base = msg->uaddr & PAGE_MASK;
>>>       iova &= PAGE_MASK;
>>> +    nchunks = 0;
>>>         while (npages) {
>>> -        pinned = min_t(unsigned long, npages, list_size);
>>> -        ret = pin_user_pages(cur_base, pinned,
>>> -                     gup_flags, page_list, NULL);
>>> -        if (ret != pinned)
>>> +        sz2pin = min_t(unsigned long, npages, list_size);
>>> +        pinned = pin_user_pages(cur_base, sz2pin,
>>> +                    gup_flags, page_list, NULL);
>>> +        if (sz2pin != pinned) {
>>> +            if (pinned < 0) {
>>> +                ret = pinned;
>>> +            } else {
>>> +                unpin_user_pages(page_list, pinned);
>>> +                ret = -ENOMEM;
>>> +            }
>>>               goto out;
>>> +        }
>>> +        nchunks++;
>>>             if (!last_pfn)
>>>               map_pfn = page_to_pfn(page_list[0]);
>>>   -        for (i = 0; i < ret; i++) {
>>> +        for (i = 0; i < pinned; i++) {
>>>               unsigned long this_pfn = page_to_pfn(page_list[i]);
>>>               u64 csize;
>>>                 if (last_pfn && (this_pfn != last_pfn + 1)) {
>>>                   /* Pin a contiguous chunk of memory */
>>>                   csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
>>> -                if (vhost_vdpa_map(v, iova, csize,
>>> -                           map_pfn << PAGE_SHIFT,
>>> -                           msg->perm))
>>> +                ret = vhost_vdpa_map(v, iova, csize,
>>> +                             map_pfn << PAGE_SHIFT,
>>> +                             msg->perm);
>>> +                if (ret)
>>>                       goto out;
>>> +
>>>                   map_pfn = this_pfn;
>>>                   iova += csize;
>>> +                nchunks = 0;
>>>               }
>>>                 last_pfn = this_pfn;
>>>           }
>>>   -        cur_base += ret << PAGE_SHIFT;
>>> -        npages -= ret;
>>> +        cur_base += pinned << PAGE_SHIFT;
>>> +        npages -= pinned;
>>>       }
>>>         /* Pin the rest chunk */
>>> @@ -660,10 +676,22 @@ static int 
>>> vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>>>                    map_pfn << PAGE_SHIFT, msg->perm);
>>>   out:
>>>       if (ret) {
>>> +        if (nchunks && last_pfn) {
>>
>>
>> Can we decrease npages where you did "nchunks++" then we can check 
>> npages here instead?
> Hmmm, I am not sure I get what you want... @nchunks gets reset to 0 
> whenever a certain range of pinned pages is successfully mapped. The 
> conditional (when nchunks is non-zero) here indicates whether there is 
> any _outstanding_ pinned page that has to be cleaned up in the error 
> handling path. The decrement of @npages, however, does not necessarily 
> happen at the point where the @nchunks counter is reset, so checking 
> @npages would lead to incorrect cleanup in the error path.


Yes, I meant we can decrease npages where you did "nchunks++". Anyhow, 
it's just an optimization to avoid a local variable, which is not a must.
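
Read literally against the v1 hunk quoted above, the suggestion would look
roughly like the diff-style sketch below (hypothetical, not a posted patch;
@nchunks could presumably be dropped along with it, and whether the
resulting check covers every error case is what the rest of the thread
debates):

@@ in the pinning loop
 		nchunks++;
+		npages -= pinned;
@@ at the end of the loop body
 		cur_base += pinned << PAGE_SHIFT;
-		npages -= pinned;
 	}
@@ in the error path
 out:
 	if (ret) {
-		if (nchunks && last_pfn) {
+		if (npages && last_pfn) {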


>
> BTW, while reviewing it I noticed an error in my code. There may still 
> be a page pinning leak from the point where the vhost_vdpa_map() error 
> occurs through the end of page_list. I will post a v2 to fix this.
>

Sure, will review.

Thanks


> Regards,
> -Siwei
>
> --- a/drivers/vhost/vdpa.c
> +++ b/drivers/vhost/vdpa.c
> @@ -656,8 +656,19 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>                                 ret = vhost_vdpa_map(v, iova, csize,
>                                                      map_pfn << PAGE_SHIFT,
>                                                      msg->perm);
> -                               if (ret)
> +                               if (ret) {
> +                                       /*
> +                                        * Unpin the pages that are left unmapped
> +                                        * from this point on in the current
> +                                        * page_list. The remaining outstanding
> +                                        * ones which may stride across several
> +                                        * chunks will be covered in the common
> +                                        * error path subsequently.
> +                                        */
> +                                       unpin_user_pages(&page_list[i],
> +                                                        pinned - i);
>                                         goto out;
> +                               }
>
>                                 map_pfn = this_pfn;
>                                 iova += csize;
>
>
>
>>
>> Thanks
>>
>>
>>> +            unsigned long pfn;
>>> +
>>> +            /*
>>> +             * Unpin the outstanding pages which are unmapped.
>>> +             * Mapped pages are accounted in vdpa_map(), thus
>>> +             * will be handled by vdpa_unmap().
>>> +             */
>>> +            for (pfn = map_pfn; pfn <= last_pfn; pfn++)
>>> +                unpin_user_page(pfn_to_page(pfn));
>>> +        }
>>>           vhost_vdpa_unmap(v, msg->iova, msg->size);
>>> -        atomic64_sub(npages, &dev->mm->pinned_vm);
>>>       }
>>> +unlock:
>>>       mmap_read_unlock(dev->mm);
>>> +free:
>>>       free_page((unsigned long)page_list);
>>>       return ret;
>>>   }
>>
>


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] vhost-vdpa: fix page pinning leakage in error path (rework)
  2020-11-05  3:12       ` Jason Wang
@ 2020-11-05 22:40         ` si-wei liu
  0 siblings, 0 replies; 12+ messages in thread
From: si-wei liu @ 2020-11-05 22:40 UTC (permalink / raw)
  To: Jason Wang, mst, lingshan.zhu
  Cc: joao.m.martins, boris.ostrovsky, linux-kernel, virtualization


On 11/4/2020 7:12 PM, Jason Wang wrote:
>
> On 2020/11/5 上午7:40, si-wei liu wrote:
>>
>> On 11/3/2020 6:42 PM, Jason Wang wrote:
>>>
>>> On 2020/10/30 下午3:45, Si-Wei Liu wrote:
>>>> Pinned pages are not properly accounted particularly when
>>>> mapping error occurs on IOTLB update. Clean up dangling
>>>> pinned pages for the error path.
>>>>
>>>> The memory usage for bookkeeping pinned pages is reverted
>>>> to what it was before: only one single free page is needed.
>>>> This helps reduce the host memory demand for VM with a large
>>>> amount of memory, or in the situation where host is running
>>>> short of free memory.
>>>>
>>>> Fixes: 4c8cf31885f6 ("vhost: introduce vDPA-based backend")
>>>> Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
>>>> ---
>>>>   drivers/vhost/vdpa.c | 64 
>>>> +++++++++++++++++++++++++++++++++++++---------------
>>>>   1 file changed, 46 insertions(+), 18 deletions(-)
>>>>
>>>> diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
>>>> index b6d9016..8da8558 100644
>>>> --- a/drivers/vhost/vdpa.c
>>>> +++ b/drivers/vhost/vdpa.c
>>>> @@ -560,6 +560,8 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
>>>>         if (r)
>>>>           vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
>>>> +    else
>>>> +        atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);
>>>>         return r;
>>>>   }
>>>> @@ -591,14 +593,16 @@ static int 
>>>> vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>>>>       unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
>>>>       unsigned int gup_flags = FOLL_LONGTERM;
>>>>       unsigned long npages, cur_base, map_pfn, last_pfn = 0;
>>>> -    unsigned long locked, lock_limit, pinned, i;
>>>> +    unsigned long lock_limit, sz2pin, nchunks, i;
>>>>       u64 iova = msg->iova;
>>>> +    long pinned;
>>>>       int ret = 0;
>>>>         if (vhost_iotlb_itree_first(iotlb, msg->iova,
>>>>                       msg->iova + msg->size - 1))
>>>>           return -EEXIST;
>>>>   +    /* Limit the use of memory for bookkeeping */
>>>>       page_list = (struct page **) __get_free_page(GFP_KERNEL);
>>>>       if (!page_list)
>>>>           return -ENOMEM;
>>>> @@ -607,52 +611,64 @@ static int 
>>>> vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>>>>           gup_flags |= FOLL_WRITE;
>>>>         npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> 
>>>> PAGE_SHIFT;
>>>> -    if (!npages)
>>>> -        return -EINVAL;
>>>> +    if (!npages) {
>>>> +        ret = -EINVAL;
>>>> +        goto free;
>>>> +    }
>>>>         mmap_read_lock(dev->mm);
>>>>   -    locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
>>>>       lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
>>>> -
>>>> -    if (locked > lock_limit) {
>>>> +    if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
>>>>           ret = -ENOMEM;
>>>> -        goto out;
>>>> +        goto unlock;
>>>>       }
>>>>         cur_base = msg->uaddr & PAGE_MASK;
>>>>       iova &= PAGE_MASK;
>>>> +    nchunks = 0;
>>>>         while (npages) {
>>>> -        pinned = min_t(unsigned long, npages, list_size);
>>>> -        ret = pin_user_pages(cur_base, pinned,
>>>> -                     gup_flags, page_list, NULL);
>>>> -        if (ret != pinned)
>>>> +        sz2pin = min_t(unsigned long, npages, list_size);
>>>> +        pinned = pin_user_pages(cur_base, sz2pin,
>>>> +                    gup_flags, page_list, NULL);
>>>> +        if (sz2pin != pinned) {
>>>> +            if (pinned < 0) {
>>>> +                ret = pinned;
>>>> +            } else {
>>>> +                unpin_user_pages(page_list, pinned);
>>>> +                ret = -ENOMEM;
>>>> +            }
>>>>               goto out;
>>>> +        }
>>>> +        nchunks++;
>>>>             if (!last_pfn)
>>>>               map_pfn = page_to_pfn(page_list[0]);
>>>>   -        for (i = 0; i < ret; i++) {
>>>> +        for (i = 0; i < pinned; i++) {
>>>>               unsigned long this_pfn = page_to_pfn(page_list[i]);
>>>>               u64 csize;
>>>>                 if (last_pfn && (this_pfn != last_pfn + 1)) {
>>>>                   /* Pin a contiguous chunk of memory */
>>>>                   csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
>>>> -                if (vhost_vdpa_map(v, iova, csize,
>>>> -                           map_pfn << PAGE_SHIFT,
>>>> -                           msg->perm))
>>>> +                ret = vhost_vdpa_map(v, iova, csize,
>>>> +                             map_pfn << PAGE_SHIFT,
>>>> +                             msg->perm);
>>>> +                if (ret)
>>>>                       goto out;
>>>> +
>>>>                   map_pfn = this_pfn;
>>>>                   iova += csize;
>>>> +                nchunks = 0;
>>>>               }
>>>>                 last_pfn = this_pfn;
>>>>           }
>>>>   -        cur_base += ret << PAGE_SHIFT;
>>>> -        npages -= ret;
>>>> +        cur_base += pinned << PAGE_SHIFT;
>>>> +        npages -= pinned;
>>>>       }
>>>>         /* Pin the rest chunk */
>>>> @@ -660,10 +676,22 @@ static int 
>>>> vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>>>>                    map_pfn << PAGE_SHIFT, msg->perm);
>>>>   out:
>>>>       if (ret) {
>>>> +        if (nchunks && last_pfn) {
>>>
>>>
>>> Can we decrease npages where you did "nchunks++" then we can check 
>>> npages here instead?
>> Hmmm, I am not sure I get what you want... @nchunks gets reset to 0 
>> whenever a certain range of pinned pages is successfully mapped. The 
>> conditional (when nchunks is non-zero) here indicates whether there is 
>> any _outstanding_ pinned page that has to be cleaned up in the error 
>> handling path. The decrement of @npages, however, does not necessarily 
>> happen at the point where the @nchunks counter is reset, so checking 
>> @npages would lead to incorrect cleanup in the error path.
>
>
> Yes, I meant we can decrease npages where you did "nchunks++". Anyhow, 
> it's just an optimization to avoid a local variable, which is not a must.

To me that opportunity doesn't exist. @nchunks and @npages track 
different kinds of things, and @npages is not interchangeable with 
@nchunks when it comes to covering all possible error cases.
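
To make the distinction concrete, here is one failure scenario, sketched
as a small standalone userspace program (not kernel code, and only one of
several possible cases): when every page is pinned in a single batch and
the final map of the remaining run fails, a check on the remaining @npages
would see zero and skip the cleanup, while @nchunks still records the
outstanding batch.

#include <stdio.h>

int main(void)
{
	unsigned long npages = 64;	/* pages left to pin */
	unsigned long nchunks = 0;	/* pin batches since the last successful map */

	/* One pin batch covers the whole request. */
	unsigned long pinned = npages;
	nchunks++;
	npages -= pinned;		/* all pages are now pinned */

	/*
	 * The pages form one contiguous run, so no map happens inside the
	 * loop; the final map of the remaining run then fails.
	 */
	int ret = -1;			/* stand-in for a vhost_vdpa_map() error */

	if (ret) {
		printf("npages  = %lu -> an npages-based check would skip cleanup\n",
		       npages);
		printf("nchunks = %lu -> the posted check still unpins the run\n",
		       nchunks);
	}
	return 0;
}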

-Siwei

>
>
>>
>> BTW, while reviewing it I noticed an error in my code. There may still 
>> be a page pinning leak from the point where the vhost_vdpa_map() error 
>> occurs through the end of page_list. I will post a v2 to fix this.
>>
>
> Sure, will review.
>
> Thanks
>
>
>> Regards,
>> -Siwei
>>
>> --- a/drivers/vhost/vdpa.c
>> +++ b/drivers/vhost/vdpa.c
>> @@ -656,8 +656,19 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>>                                 ret = vhost_vdpa_map(v, iova, csize,
>>                                                      map_pfn << PAGE_SHIFT,
>>                                                      msg->perm);
>> -                               if (ret)
>> +                               if (ret) {
>> +                                       /*
>> +                                        * Unpin the pages that are left unmapped
>> +                                        * from this point on in the current
>> +                                        * page_list. The remaining outstanding
>> +                                        * ones which may stride across several
>> +                                        * chunks will be covered in the common
>> +                                        * error path subsequently.
>> +                                        */
>> +                                       unpin_user_pages(&page_list[i],
>> +                                                        pinned - i);
>>                                         goto out;
>> +                               }
>>
>>                                 map_pfn = this_pfn;
>>                                 iova += csize;
>>
>>
>>
>>>
>>> Thanks
>>>
>>>
>>>> +            unsigned long pfn;
>>>> +
>>>> +            /*
>>>> +             * Unpin the outstanding pages which are unmapped.
>>>> +             * Mapped pages are accounted in vdpa_map(), thus
>>>> +             * will be handled by vdpa_unmap().
>>>> +             */
>>>> +            for (pfn = map_pfn; pfn <= last_pfn; pfn++)
>>>> +                unpin_user_page(pfn_to_page(pfn));
>>>> +        }
>>>>           vhost_vdpa_unmap(v, msg->iova, msg->size);
>>>> -        atomic64_sub(npages, &dev->mm->pinned_vm);
>>>>       }
>>>> +unlock:
>>>>       mmap_read_unlock(dev->mm);
>>>> +free:
>>>>       free_page((unsigned long)page_list);
>>>>       return ret;
>>>>   }
>>>
>>
>


^ permalink raw reply	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2020-11-05 22:40 UTC | newest]

Thread overview: 12+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-10-30  7:45 [PATCH 1/2] Revert "vhost-vdpa: fix page pinning leakage in error path" Si-Wei Liu
2020-10-30  7:45 ` [PATCH 2/2] vhost-vdpa: fix page pinning leakage in error path (rework) Si-Wei Liu
2020-11-03 13:00   ` Jason Wang
2020-11-04  1:06     ` si-wei liu
2020-11-04  1:08       ` si-wei liu
2020-11-04  1:58         ` Jason Wang
2020-11-04  2:14           ` si-wei liu
2020-11-04  2:42   ` Jason Wang
2020-11-04 23:40     ` si-wei liu
2020-11-05  3:12       ` Jason Wang
2020-11-05 22:40         ` si-wei liu
2020-11-03  8:53 ` [PATCH 1/2] Revert "vhost-vdpa: fix page pinning leakage in error path" Jason Wang
