From: Jason Wang <jasowang@redhat.com>
To: Stefano Garzarella <sgarzare@redhat.com>,
	virtualization@lists.linux-foundation.org
Cc: linux-kernel@vger.kernel.org, Max Gurtovoy <mgurtovoy@nvidia.com>,
	Laurent Vivier <lvivier@redhat.com>,
	"Michael S. Tsirkin" <mst@redhat.com>
Subject: Re: [PATCH] vdpa_sim: use iova module to allocate IOVA addresses
Date: Wed, 23 Dec 2020 11:43:40 +0800
Message-ID: <cb5a0fac-25c2-b684-5ab6-b3339c13fe32@redhat.com>
In-Reply-To: <20201222174551.133678-1-sgarzare@redhat.com>


On 2020/12/23 1:45 AM, Stefano Garzarella wrote:
> The identical mapping used until now created issues when mapping
> different virtual pages with the same physical address.
> To solve this issue, we can use the iova module, to handle the IOVA
> allocation.
> For semplicity we use an IOVA allocator with byte granularity.


Should be "simplicity"; the same typo appears in a code comment below.


>
> We add two new functions, vdpasim_map_range() and vdpasim_unmap_range(),
> to handle the IOVA allocation and the registration into the IOMMU/IOTLB.
> These functions are used by dma_map_ops callbacks.
>
> Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>


A few nits, but:

Acked-by: Jason Wang <jasowang@redhat.com>


> ---
>   drivers/vdpa/vdpa_sim/vdpa_sim.h |   2 +
>   drivers/vdpa/vdpa_sim/vdpa_sim.c | 108 +++++++++++++++++++------------
>   drivers/vdpa/Kconfig             |   1 +
>   3 files changed, 69 insertions(+), 42 deletions(-)
>
> diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.h b/drivers/vdpa/vdpa_sim/vdpa_sim.h
> index b02142293d5b..6efe205e583e 100644
> --- a/drivers/vdpa/vdpa_sim/vdpa_sim.h
> +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.h
> @@ -6,6 +6,7 @@
>   #ifndef _VDPA_SIM_H
>   #define _VDPA_SIM_H
>   
> +#include <linux/iova.h>
>   #include <linux/vringh.h>
>   #include <linux/vdpa.h>
>   #include <linux/virtio_byteorder.h>
> @@ -55,6 +56,7 @@ struct vdpasim {
>   	/* virtio config according to device type */
>   	void *config;
>   	struct vhost_iotlb *iommu;
> +	struct iova_domain iova;
>   	void *buffer;
>   	u32 status;
>   	u32 generation;
> diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
> index b3fcc67bfdf0..341b9daf2ea4 100644
> --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
> +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
> @@ -17,6 +17,7 @@
>   #include <linux/vringh.h>
>   #include <linux/vdpa.h>
>   #include <linux/vhost_iotlb.h>
> +#include <linux/iova.h>
>   
>   #include "vdpa_sim.h"
>   
> @@ -128,30 +129,57 @@ static int dir_to_perm(enum dma_data_direction dir)
>   	return perm;
>   }
>   
> +static dma_addr_t vdpasim_map_range(struct vdpasim *vdpasim, phys_addr_t paddr,
> +				    size_t size, unsigned int perm)
> +{
> +	struct iova *iova;
> +	dma_addr_t dma_addr;
> +	int ret;
> +
> +	/* We set the limit_pfn to the maximum (~0UL - 1) */
> +	iova = alloc_iova(&vdpasim->iova, size, ~0UL - 1, true);


Let's use ULONG_MAX?
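
E.g. something like this (an untested sketch; the value is unchanged since ULONG_MAX == ~0UL, it just reads better):

	/* We set the limit_pfn to the maximum (ULONG_MAX - 1) */
	iova = alloc_iova(&vdpasim->iova, size, ULONG_MAX - 1, true);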


> +	if (!iova)
> +		return DMA_MAPPING_ERROR;
> +
> +	dma_addr = iova_dma_addr(&vdpasim->iova, iova);
> +
> +	spin_lock(&vdpasim->iommu_lock);
> +	ret = vhost_iotlb_add_range(vdpasim->iommu, (u64)dma_addr,
> +				    (u64)dma_addr + size - 1, (u64)paddr, perm);
> +	spin_unlock(&vdpasim->iommu_lock);
> +
> +	if (ret) {
> +		__free_iova(&vdpasim->iova, iova);
> +		return DMA_MAPPING_ERROR;
> +	}
> +
> +	return dma_addr;
> +}
> +
> +static void vdpasim_unmap_range(struct vdpasim *vdpasim, dma_addr_t dma_addr,
> +				size_t size)
> +{
> +	spin_lock(&vdpasim->iommu_lock);
> +	vhost_iotlb_del_range(vdpasim->iommu, (u64)dma_addr,
> +			      (u64)dma_addr + size - 1);
> +	spin_unlock(&vdpasim->iommu_lock);
> +
> +	free_iova(&vdpasim->iova, iova_pfn(&vdpasim->iova, dma_addr));
> +}
> +
>   static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
>   				   unsigned long offset, size_t size,
>   				   enum dma_data_direction dir,
>   				   unsigned long attrs)
>   {
>   	struct vdpasim *vdpasim = dev_to_sim(dev);
> -	struct vhost_iotlb *iommu = vdpasim->iommu;
> -	u64 pa = (page_to_pfn(page) << PAGE_SHIFT) + offset;
> -	int ret, perm = dir_to_perm(dir);
> +	phys_addr_t paddr = page_to_phys(page) + offset;
> +	int perm = dir_to_perm(dir);
>   
>   	if (perm < 0)
>   		return DMA_MAPPING_ERROR;
>   
> -	/* For simplicity, use identical mapping to avoid e.g iova
> -	 * allocator.
> -	 */
> -	spin_lock(&vdpasim->iommu_lock);
> -	ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1,
> -				    pa, dir_to_perm(dir));
> -	spin_unlock(&vdpasim->iommu_lock);
> -	if (ret)
> -		return DMA_MAPPING_ERROR;
> -
> -	return (dma_addr_t)(pa);
> +	return vdpasim_map_range(vdpasim, paddr, size, perm);
>   }
>   
>   static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
> @@ -159,12 +187,8 @@ static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
>   			       unsigned long attrs)
>   {
>   	struct vdpasim *vdpasim = dev_to_sim(dev);
> -	struct vhost_iotlb *iommu = vdpasim->iommu;
>   
> -	spin_lock(&vdpasim->iommu_lock);
> -	vhost_iotlb_del_range(iommu, (u64)dma_addr,
> -			      (u64)dma_addr + size - 1);
> -	spin_unlock(&vdpasim->iommu_lock);
> +	vdpasim_unmap_range(vdpasim, dma_addr, size);
>   }
>   
>   static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
> @@ -172,27 +196,22 @@ static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
>   				    unsigned long attrs)
>   {
>   	struct vdpasim *vdpasim = dev_to_sim(dev);
> -	struct vhost_iotlb *iommu = vdpasim->iommu;
> -	void *addr = kmalloc(size, flag);
> -	int ret;
> +	phys_addr_t paddr;
> +	void *addr;
>   
> -	spin_lock(&vdpasim->iommu_lock);
> +	addr = kmalloc(size, flag);
>   	if (!addr) {
>   		*dma_addr = DMA_MAPPING_ERROR;
> -	} else {
> -		u64 pa = virt_to_phys(addr);
> -
> -		ret = vhost_iotlb_add_range(iommu, (u64)pa,
> -					    (u64)pa + size - 1,
> -					    pa, VHOST_MAP_RW);
> -		if (ret) {
> -			*dma_addr = DMA_MAPPING_ERROR;
> -			kfree(addr);
> -			addr = NULL;
> -		} else
> -			*dma_addr = (dma_addr_t)pa;
> +		return NULL;
> +	}
> +
> +	paddr = virt_to_phys(addr);
> +
> +	*dma_addr = vdpasim_map_range(vdpasim, paddr, size, VHOST_MAP_RW);
> +	if (*dma_addr == DMA_MAPPING_ERROR) {
> +		kfree(addr);
> +		return NULL;
>   	}
> -	spin_unlock(&vdpasim->iommu_lock);
>   
>   	return addr;
>   }
> @@ -202,14 +221,10 @@ static void vdpasim_free_coherent(struct device *dev, size_t size,
>   				  unsigned long attrs)
>   {
>   	struct vdpasim *vdpasim = dev_to_sim(dev);
> -	struct vhost_iotlb *iommu = vdpasim->iommu;
>   
> -	spin_lock(&vdpasim->iommu_lock);
> -	vhost_iotlb_del_range(iommu, (u64)dma_addr,
> -			      (u64)dma_addr + size - 1);
> -	spin_unlock(&vdpasim->iommu_lock);
> +	vdpasim_unmap_range(vdpasim, dma_addr, size);
>   
> -	kfree(phys_to_virt((uintptr_t)dma_addr));
> +	kfree(vaddr);
>   }
>   
>   static const struct dma_map_ops vdpasim_dma_ops = {
> @@ -270,6 +285,13 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
>   	for (i = 0; i < dev_attr->nvqs; i++)
>   		vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu);
>   
> +	ret = iova_cache_get();
> +	if (ret)
> +		goto err_iommu;
> +
> +	/* For semplicity we use an IOVA allocator with byte granularity */


Should be simplicity.

Thanks


> +	init_iova_domain(&vdpasim->iova, 1, 0);
> +
>   	vdpasim->vdpa.dma_dev = dev;
>   
>   	return vdpasim;
> @@ -540,6 +562,8 @@ static void vdpasim_free(struct vdpa_device *vdpa)
>   	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
>   
>   	cancel_work_sync(&vdpasim->work);
> +	put_iova_domain(&vdpasim->iova);
> +	iova_cache_put();
>   	kvfree(vdpasim->buffer);
>   	if (vdpasim->iommu)
>   		vhost_iotlb_free(vdpasim->iommu);
> diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
> index 92a6396f8a73..8965e3717231 100644
> --- a/drivers/vdpa/Kconfig
> +++ b/drivers/vdpa/Kconfig
> @@ -13,6 +13,7 @@ config VDPA_SIM
>   	depends on RUNTIME_TESTING_MENU && HAS_DMA
>   	select DMA_OPS
>   	select VHOST_RING
> +	select IOMMU_IOVA
>   	help
>   	  Enable this module to support vDPA device simulators. These devices
>   	  are used for testing, prototyping and development of vDPA.


