* [PATCH] vdpa: Make use of PFN_PHYS/PFN_UP/PFN_DOWN helper macro
@ 2021-08-02 1:37 ` Cai Huoqing
0 siblings, 0 replies; 4+ messages in thread
From: Cai Huoqing @ 2021-08-02 1:37 UTC (permalink / raw)
To: mst, jasowang; +Cc: kvm, virtualization, Cai Huoqing
It is a nice refactor to make use of the
PFN_PHYS/PFN_UP/PFN_DOWN helper macros.
Signed-off-by: Cai Huoqing <caihuoqing@baidu.com>
---
drivers/vhost/vdpa.c | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 210ab35a7ebf..1f6dd6ad0f8b 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -507,15 +507,15 @@ static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
unsigned long pfn, pinned;
while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
- pinned = map->size >> PAGE_SHIFT;
- for (pfn = map->addr >> PAGE_SHIFT;
+ pinned = PFN_DOWN(map->size);
+ for (pfn = PFN_DOWN(map->addr);
pinned > 0; pfn++, pinned--) {
page = pfn_to_page(pfn);
if (map->perm & VHOST_ACCESS_WO)
set_page_dirty_lock(page);
unpin_user_page(page);
}
- atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
+ atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
vhost_iotlb_map_free(iotlb, map);
}
}
@@ -577,7 +577,7 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
if (r)
vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
else
- atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);
+ atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);
return r;
}
@@ -630,7 +630,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
if (msg->perm & VHOST_ACCESS_WO)
gup_flags |= FOLL_WRITE;
- npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
+ npages = PFN_UP(msg->size + (iova & ~PAGE_MASK));
if (!npages) {
ret = -EINVAL;
goto free;
@@ -638,7 +638,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
mmap_read_lock(dev->mm);
- lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
ret = -ENOMEM;
goto unlock;
@@ -672,9 +672,9 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
if (last_pfn && (this_pfn != last_pfn + 1)) {
/* Pin a contiguous chunk of memory */
- csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
+ csize = PFN_PHYS(last_pfn - map_pfn + 1);
ret = vhost_vdpa_map(v, iova, csize,
- map_pfn << PAGE_SHIFT,
+ PFN_PHYS(map_pfn),
msg->perm);
if (ret) {
/*
@@ -698,13 +698,13 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
last_pfn = this_pfn;
}
- cur_base += pinned << PAGE_SHIFT;
+ cur_base += PFN_PHYS(pinned);
npages -= pinned;
}
/* Pin the rest chunk */
- ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
- map_pfn << PAGE_SHIFT, msg->perm);
+ ret = vhost_vdpa_map(v, iova, PFN_PHYS(last_pfn - map_pfn + 1),
+ PFN_PHYS(map_pfn), msg->perm);
out:
if (ret) {
if (nchunks) {
@@ -944,7 +944,7 @@ static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
- notify.addr >> PAGE_SHIFT, PAGE_SIZE,
+ PFN_DOWN(notify.addr), PAGE_SIZE,
vma->vm_page_prot))
return VM_FAULT_SIGBUS;
--
2.25.1
^ permalink raw reply related [flat|nested] 4+ messages in thread
* [PATCH] vdpa: Make use of PFN_PHYS/PFN_UP/PFN_DOWN helper macro
@ 2021-08-02 1:37 ` Cai Huoqing
0 siblings, 0 replies; 4+ messages in thread
From: Cai Huoqing @ 2021-08-02 1:37 UTC (permalink / raw)
To: mst, jasowang; +Cc: kvm, virtualization
It is a nice refactor to make use of the
PFN_PHYS/PFN_UP/PFN_DOWN helper macros.
Signed-off-by: Cai Huoqing <caihuoqing@baidu.com>
---
drivers/vhost/vdpa.c | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 210ab35a7ebf..1f6dd6ad0f8b 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -507,15 +507,15 @@ static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
unsigned long pfn, pinned;
while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
- pinned = map->size >> PAGE_SHIFT;
- for (pfn = map->addr >> PAGE_SHIFT;
+ pinned = PFN_DOWN(map->size);
+ for (pfn = PFN_DOWN(map->addr);
pinned > 0; pfn++, pinned--) {
page = pfn_to_page(pfn);
if (map->perm & VHOST_ACCESS_WO)
set_page_dirty_lock(page);
unpin_user_page(page);
}
- atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
+ atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
vhost_iotlb_map_free(iotlb, map);
}
}
@@ -577,7 +577,7 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
if (r)
vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
else
- atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);
+ atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);
return r;
}
@@ -630,7 +630,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
if (msg->perm & VHOST_ACCESS_WO)
gup_flags |= FOLL_WRITE;
- npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
+ npages = PFN_UP(msg->size + (iova & ~PAGE_MASK));
if (!npages) {
ret = -EINVAL;
goto free;
@@ -638,7 +638,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
mmap_read_lock(dev->mm);
- lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
ret = -ENOMEM;
goto unlock;
@@ -672,9 +672,9 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
if (last_pfn && (this_pfn != last_pfn + 1)) {
/* Pin a contiguous chunk of memory */
- csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
+ csize = PFN_PHYS(last_pfn - map_pfn + 1);
ret = vhost_vdpa_map(v, iova, csize,
- map_pfn << PAGE_SHIFT,
+ PFN_PHYS(map_pfn),
msg->perm);
if (ret) {
/*
@@ -698,13 +698,13 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
last_pfn = this_pfn;
}
- cur_base += pinned << PAGE_SHIFT;
+ cur_base += PFN_PHYS(pinned);
npages -= pinned;
}
/* Pin the rest chunk */
- ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
- map_pfn << PAGE_SHIFT, msg->perm);
+ ret = vhost_vdpa_map(v, iova, PFN_PHYS(last_pfn - map_pfn + 1),
+ PFN_PHYS(map_pfn), msg->perm);
out:
if (ret) {
if (nchunks) {
@@ -944,7 +944,7 @@ static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
- notify.addr >> PAGE_SHIFT, PAGE_SIZE,
+ PFN_DOWN(notify.addr), PAGE_SIZE,
vma->vm_page_prot))
return VM_FAULT_SIGBUS;
--
2.25.1
_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [PATCH] vdpa: Make use of PFN_PHYS/PFN_UP/PFN_DOWN helper macro
2021-08-02 1:37 ` Cai Huoqing
@ 2021-08-02 5:55 ` Jason Wang
-1 siblings, 0 replies; 4+ messages in thread
From: Jason Wang @ 2021-08-02 5:55 UTC (permalink / raw)
To: Cai Huoqing, mst; +Cc: kvm, virtualization
在 2021/8/2 上午9:37, Cai Huoqing 写道:
> it's a nice refactor to make use of
> PFN_PHYS/PFN_UP/PFN_DOWN helper macro
>
> Signed-off-by: Cai Huoqing <caihuoqing@baidu.com>
Acked-by: Jason Wang <jasowang@redhat.com>
> ---
> drivers/vhost/vdpa.c | 24 ++++++++++++------------
> 1 file changed, 12 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
> index 210ab35a7ebf..1f6dd6ad0f8b 100644
> --- a/drivers/vhost/vdpa.c
> +++ b/drivers/vhost/vdpa.c
> @@ -507,15 +507,15 @@ static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
> unsigned long pfn, pinned;
>
> while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
> - pinned = map->size >> PAGE_SHIFT;
> - for (pfn = map->addr >> PAGE_SHIFT;
> + pinned = PFN_DOWN(map->size);
> + for (pfn = PFN_DOWN(map->addr);
> pinned > 0; pfn++, pinned--) {
> page = pfn_to_page(pfn);
> if (map->perm & VHOST_ACCESS_WO)
> set_page_dirty_lock(page);
> unpin_user_page(page);
> }
> - atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
> + atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
> vhost_iotlb_map_free(iotlb, map);
> }
> }
> @@ -577,7 +577,7 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
> if (r)
> vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
> else
> - atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);
> + atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);
>
> return r;
> }
> @@ -630,7 +630,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
> if (msg->perm & VHOST_ACCESS_WO)
> gup_flags |= FOLL_WRITE;
>
> - npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
> + npages = PFN_UP(msg->size + (iova & ~PAGE_MASK));
> if (!npages) {
> ret = -EINVAL;
> goto free;
> @@ -638,7 +638,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>
> mmap_read_lock(dev->mm);
>
> - lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
> + lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
> if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
> ret = -ENOMEM;
> goto unlock;
> @@ -672,9 +672,9 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>
> if (last_pfn && (this_pfn != last_pfn + 1)) {
> /* Pin a contiguous chunk of memory */
> - csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
> + csize = PFN_PHYS(last_pfn - map_pfn + 1);
> ret = vhost_vdpa_map(v, iova, csize,
> - map_pfn << PAGE_SHIFT,
> + PFN_PHYS(map_pfn),
> msg->perm);
> if (ret) {
> /*
> @@ -698,13 +698,13 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
> last_pfn = this_pfn;
> }
>
> - cur_base += pinned << PAGE_SHIFT;
> + cur_base += PFN_PHYS(pinned);
> npages -= pinned;
> }
>
> /* Pin the rest chunk */
> - ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
> - map_pfn << PAGE_SHIFT, msg->perm);
> + ret = vhost_vdpa_map(v, iova, PFN_PHYS(last_pfn - map_pfn + 1),
> + PFN_PHYS(map_pfn), msg->perm);
> out:
> if (ret) {
> if (nchunks) {
> @@ -944,7 +944,7 @@ static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
>
> vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
> if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
> - notify.addr >> PAGE_SHIFT, PAGE_SIZE,
> + PFN_DOWN(notify.addr), PAGE_SIZE,
> vma->vm_page_prot))
> return VM_FAULT_SIGBUS;
>
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH] vdpa: Make use of PFN_PHYS/PFN_UP/PFN_DOWN helper macro
@ 2021-08-02 5:55 ` Jason Wang
0 siblings, 0 replies; 4+ messages in thread
From: Jason Wang @ 2021-08-02 5:55 UTC (permalink / raw)
To: Cai Huoqing, mst; +Cc: kvm, virtualization
在 2021/8/2 上午9:37, Cai Huoqing 写道:
> it's a nice refactor to make use of
> PFN_PHYS/PFN_UP/PFN_DOWN helper macro
>
> Signed-off-by: Cai Huoqing <caihuoqing@baidu.com>
Acked-by: Jason Wang <jasowang@redhat.com>
> ---
> drivers/vhost/vdpa.c | 24 ++++++++++++------------
> 1 file changed, 12 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
> index 210ab35a7ebf..1f6dd6ad0f8b 100644
> --- a/drivers/vhost/vdpa.c
> +++ b/drivers/vhost/vdpa.c
> @@ -507,15 +507,15 @@ static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
> unsigned long pfn, pinned;
>
> while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
> - pinned = map->size >> PAGE_SHIFT;
> - for (pfn = map->addr >> PAGE_SHIFT;
> + pinned = PFN_DOWN(map->size);
> + for (pfn = PFN_DOWN(map->addr);
> pinned > 0; pfn++, pinned--) {
> page = pfn_to_page(pfn);
> if (map->perm & VHOST_ACCESS_WO)
> set_page_dirty_lock(page);
> unpin_user_page(page);
> }
> - atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
> + atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
> vhost_iotlb_map_free(iotlb, map);
> }
> }
> @@ -577,7 +577,7 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
> if (r)
> vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
> else
> - atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);
> + atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);
>
> return r;
> }
> @@ -630,7 +630,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
> if (msg->perm & VHOST_ACCESS_WO)
> gup_flags |= FOLL_WRITE;
>
> - npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
> + npages = PFN_UP(msg->size + (iova & ~PAGE_MASK));
> if (!npages) {
> ret = -EINVAL;
> goto free;
> @@ -638,7 +638,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>
> mmap_read_lock(dev->mm);
>
> - lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
> + lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
> if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
> ret = -ENOMEM;
> goto unlock;
> @@ -672,9 +672,9 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>
> if (last_pfn && (this_pfn != last_pfn + 1)) {
> /* Pin a contiguous chunk of memory */
> - csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
> + csize = PFN_PHYS(last_pfn - map_pfn + 1);
> ret = vhost_vdpa_map(v, iova, csize,
> - map_pfn << PAGE_SHIFT,
> + PFN_PHYS(map_pfn),
> msg->perm);
> if (ret) {
> /*
> @@ -698,13 +698,13 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
> last_pfn = this_pfn;
> }
>
> - cur_base += pinned << PAGE_SHIFT;
> + cur_base += PFN_PHYS(pinned);
> npages -= pinned;
> }
>
> /* Pin the rest chunk */
> - ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
> - map_pfn << PAGE_SHIFT, msg->perm);
> + ret = vhost_vdpa_map(v, iova, PFN_PHYS(last_pfn - map_pfn + 1),
> + PFN_PHYS(map_pfn), msg->perm);
> out:
> if (ret) {
> if (nchunks) {
> @@ -944,7 +944,7 @@ static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
>
> vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
> if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
> - notify.addr >> PAGE_SHIFT, PAGE_SIZE,
> + PFN_DOWN(notify.addr), PAGE_SIZE,
> vma->vm_page_prot))
> return VM_FAULT_SIGBUS;
>
_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2021-08-02 5:55 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-08-02 1:37 [PATCH] vdpa: Make use of PFN_PHYS/PFN_UP/PFN_DOWN helper macro Cai Huoqing
2021-08-02 1:37 ` Cai Huoqing
2021-08-02 5:55 ` Jason Wang
2021-08-02 5:55 ` Jason Wang
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.