From: Stefano Stabellini <sstabellini@kernel.org>
To: Oleksandr Tyshchenko <olekstysh@gmail.com>
Cc: xen-devel@lists.xenproject.org, linux-kernel@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org,
	Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>,
	Stefano Stabellini <sstabellini@kernel.org>,
	Russell King <linux@armlinux.org.uk>,
	Boris Ostrovsky <boris.ostrovsky@oracle.com>,
	Juergen Gross <jgross@suse.com>, Julien Grall <julien@xen.org>
Subject: Re: [RFC PATCH 4/6] virtio: Various updates to xen-virtio DMA ops layer
Date: Fri, 15 Apr 2022 15:02:23 -0700 (PDT)
Message-ID: <alpine.DEB.2.22.394.2204151302350.915916@ubuntu-linux-20-04-desktop>
In-Reply-To: <1649963973-22879-5-git-send-email-olekstysh@gmail.com>

On Thu, 14 Apr 2022, Oleksandr Tyshchenko wrote:
> From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
> 
> In the context of the current patch, do the following:
> 1. Update code to support virtio-mmio devices
> 2. Introduce struct xen_virtio_data and track passed virtio devices
>    (using a list), as we need to store some per-device data
> 3. Add multi-page support for xen_virtio_dma_map(unmap)_page callbacks
> 4. Harden code against malicious backend
> 5. Change to use alloc_pages_exact() instead of __get_free_pages()
> 6. Introduce a locking scheme to protect mappings (I am not 100% sure
>    whether a per-device lock is really needed)
> 7. Handle virtio device's DMA mask
> 8. Retrieve the ID of the backend domain from DT for virtio-mmio devices
>    instead of hardcoding it.
> 
> Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
> ---
>  arch/arm/xen/enlighten.c |  11 +++
>  drivers/xen/Kconfig      |   2 +-
>  drivers/xen/xen-virtio.c | 200 ++++++++++++++++++++++++++++++++++++++++++-----
>  include/xen/xen-ops.h    |   5 ++
>  4 files changed, 196 insertions(+), 22 deletions(-)
> 
> diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
> index ec5b082..870d92f 100644
> --- a/arch/arm/xen/enlighten.c
> +++ b/arch/arm/xen/enlighten.c
> @@ -409,6 +409,17 @@ int __init arch_xen_unpopulated_init(struct resource **res)
>  }
>  #endif
>  
> +#ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
> +int arch_has_restricted_virtio_memory_access(void)
> +{
> +	if (IS_ENABLED(CONFIG_XEN_HVM_VIRTIO_GRANT) && xen_hvm_domain())
> +		return 1;

Instead of xen_hvm_domain(), you can just use xen_domain(). Also,
there is no need for the #ifdef
CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS, given that
CONFIG_XEN_HVM_VIRTIO_GRANT depends on XEN_VIRTIO, which selects
ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS.
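
Something like this (untested sketch, folding both suggestions
together):

int arch_has_restricted_virtio_memory_access(void)
{
	return IS_ENABLED(CONFIG_XEN_HVM_VIRTIO_GRANT) && xen_domain();
}
EXPORT_SYMBOL_GPL(arch_has_restricted_virtio_memory_access);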


> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(arch_has_restricted_virtio_memory_access);
> +#endif
> +
>  static void __init xen_dt_guest_init(void)
>  {
>  	struct device_node *xen_node;
> diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
> index fc61f7a..56afe6a 100644
> --- a/drivers/xen/Kconfig
> +++ b/drivers/xen/Kconfig
> @@ -347,7 +347,7 @@ config XEN_VIRTIO
>  
>  config XEN_HVM_VIRTIO_GRANT
>  	bool "Require virtio for fully virtualized guests to use grant mappings"
> -	depends on XEN_VIRTIO && X86_64
> +	depends on XEN_VIRTIO && (X86_64 || ARM || ARM64)

you can remove the architectural dependencies
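
I.e. (sketch, assuming no per-architecture requirement remains):

config XEN_HVM_VIRTIO_GRANT
	bool "Require virtio for fully virtualized guests to use grant mappings"
	depends on XEN_VIRTIO
	default y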


>  	default y
>  	help
>  	  Require virtio for fully virtualized guests to use grant mappings.
> diff --git a/drivers/xen/xen-virtio.c b/drivers/xen/xen-virtio.c
> index cfd5eda..c5b2ec9 100644
> --- a/drivers/xen/xen-virtio.c
> +++ b/drivers/xen/xen-virtio.c
> @@ -7,12 +7,26 @@
>  
>  #include <linux/module.h>
>  #include <linux/dma-map-ops.h>
> +#include <linux/of.h>
>  #include <linux/pci.h>
>  #include <linux/pfn.h>
>  #include <linux/virtio_config.h>
>  #include <xen/xen.h>
>  #include <xen/grant_table.h>
>  
> +struct xen_virtio_data {
> +	/* The ID of backend domain */
> +	domid_t dev_domid;
> +	struct device *dev;
> +	struct list_head list;
> +	spinlock_t lock;
> +	/* Is device behaving sane? */
> +	bool broken;

If you moved "broken" after "dev_domid", we would save a few bytes on
every allocation due to padding.
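
For reference, the reordered layout would be (just a sketch):

struct xen_virtio_data {
	/* The ID of the backend domain */
	domid_t dev_domid;
	/* Is the device behaving sanely? */
	bool broken;
	struct device *dev;
	struct list_head list;
	spinlock_t lock;
};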

Is data->lock only there to protect accesses to "broken"? If so, we
might not need it, but I am not sure.
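
A lockless flag could be enough in that case, e.g. (only a sketch):

	/* on the failure path */
	WRITE_ONCE(data->broken, true);

	/* at the top of each DMA op */
	if (unlikely(READ_ONCE(data->broken)))
		return;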


> +};
> +
> +static LIST_HEAD(xen_virtio_devices);
> +static DEFINE_SPINLOCK(xen_virtio_lock);
> +
>  #define XEN_GRANT_ADDR_OFF	0x8000000000000000ULL
>  
>  static inline dma_addr_t grant_to_dma(grant_ref_t grant)
> @@ -25,6 +39,25 @@ static inline grant_ref_t dma_to_grant(dma_addr_t dma)
>  	return (grant_ref_t)((dma & ~XEN_GRANT_ADDR_OFF) >> PAGE_SHIFT);
>  }
>  
> +static struct xen_virtio_data *find_xen_virtio_data(struct device *dev)
> +{
> +	struct xen_virtio_data *data = NULL;
> +	bool found = false;
> +
> +	spin_lock(&xen_virtio_lock);
> +
> +	list_for_each_entry(data, &xen_virtio_devices, list) {
> +		if (data->dev == dev) {
> +			found = true;
> +			break;
> +		}
> +	}
> +
> +	spin_unlock(&xen_virtio_lock);
> +
> +	return found ? data : NULL;
> +}
> +
>  /*
>   * DMA ops for Xen virtio frontends.
>   *
> @@ -43,48 +76,78 @@ static void *xen_virtio_dma_alloc(struct device *dev, size_t size,
>  				  dma_addr_t *dma_handle, gfp_t gfp,
>  				  unsigned long attrs)
>  {
> -	unsigned int n_pages = PFN_UP(size);
> -	unsigned int i;
> +	struct xen_virtio_data *data;
> +	unsigned int i, n_pages = PFN_UP(size);
>  	unsigned long pfn;
>  	grant_ref_t grant;
> -	void *ret;
> +	void *ret = NULL;
>  
> -	ret = (void *)__get_free_pages(gfp, get_order(size));
> -	if (!ret)
> +	data = find_xen_virtio_data(dev);
> +	if (!data)
>  		return NULL;
>  
> +	spin_lock(&data->lock);
> +
> +	if (unlikely(data->broken))
> +		goto out;
> +
> +	ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp);
> +	if (!ret)
> +		goto out;
> +
>  	pfn = virt_to_pfn(ret);
>  
>  	if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) {
> -		free_pages((unsigned long)ret, get_order(size));
> -		return NULL;
> +		free_pages_exact(ret, n_pages * PAGE_SIZE);
> +		ret = NULL;
> +		goto out;
>  	}
>  
>  	for (i = 0; i < n_pages; i++) {
> -		gnttab_grant_foreign_access_ref(grant + i, 0,
> +		gnttab_grant_foreign_access_ref(grant + i, data->dev_domid,
>  						pfn_to_gfn(pfn + i), 0);
>  	}
>  
>  	*dma_handle = grant_to_dma(grant);
>  
> +out:
> +	spin_unlock(&data->lock);
> +
>  	return ret;
>  }
>  
>  static void xen_virtio_dma_free(struct device *dev, size_t size, void *vaddr,
>  				dma_addr_t dma_handle, unsigned long attrs)
>  {
> -	unsigned int n_pages = PFN_UP(size);
> -	unsigned int i;
> +	struct xen_virtio_data *data;
> +	unsigned int i, n_pages = PFN_UP(size);
>  	grant_ref_t grant;
>  
> +	data = find_xen_virtio_data(dev);
> +	if (!data)
> +		return;
> +
> +	spin_lock(&data->lock);
> +
> +	if (unlikely(data->broken))
> +		goto out;
> +
>  	grant = dma_to_grant(dma_handle);
>  
> -	for (i = 0; i < n_pages; i++)
> -		gnttab_end_foreign_access_ref(grant + i);
> +	for (i = 0; i < n_pages; i++) {
> +		if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
> +			dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
> +			data->broken = true;
> +			goto out;
> +		}
> +	}
>  
>  	gnttab_free_grant_reference_seq(grant, n_pages);
>  
> -	free_pages((unsigned long)vaddr, get_order(size));
> +	free_pages_exact(vaddr, n_pages * PAGE_SIZE);
> +
> +out:
> +	spin_unlock(&data->lock);
>  }
>  
>  static struct page *xen_virtio_dma_alloc_pages(struct device *dev, size_t size,
> @@ -108,28 +171,71 @@ static dma_addr_t xen_virtio_dma_map_page(struct device *dev, struct page *page,
>  					  enum dma_data_direction dir,
>  					  unsigned long attrs)
>  {
> +	struct xen_virtio_data *data;
> +	unsigned int i, n_pages = PFN_UP(size);
>  	grant_ref_t grant;
> +	dma_addr_t dma_handle = DMA_MAPPING_ERROR;
> +
> +	BUG_ON(dir == DMA_NONE);
> +
> +	data = find_xen_virtio_data(dev);
> +	if (!data)
> +		return DMA_MAPPING_ERROR;
> +
> +	spin_lock(&data->lock);
>  
> -	if (gnttab_alloc_grant_references(1, &grant))
> -		return 0;
> +	if (unlikely(data->broken))
> +		goto out;
>  
> -	gnttab_grant_foreign_access_ref(grant, 0, xen_page_to_gfn(page),
> -					dir == DMA_TO_DEVICE);
> +	if (gnttab_alloc_grant_reference_seq(n_pages, &grant))
> +		goto out;
>  
> -	return grant_to_dma(grant) + offset;
> +	for (i = 0; i < n_pages; i++) {
> +		gnttab_grant_foreign_access_ref(grant + i, data->dev_domid,
> +				xen_page_to_gfn(page) + i, dir == DMA_TO_DEVICE);
> +	}
> +
> +	dma_handle = grant_to_dma(grant) + offset;
> +
> +out:
> +	spin_unlock(&data->lock);
> +
> +	return dma_handle;
>  }
>  
>  static void xen_virtio_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
>  				      size_t size, enum dma_data_direction dir,
>  				      unsigned long attrs)
>  {
> +	struct xen_virtio_data *data;
> +	unsigned int i, n_pages = PFN_UP(size);
>  	grant_ref_t grant;
>  
> +	BUG_ON(dir == DMA_NONE);
> +
> +	data = find_xen_virtio_data(dev);
> +	if (!data)
> +		return;
> +
> +	spin_lock(&data->lock);
> +
> +	if (unlikely(data->broken))
> +		goto out;
> +
>  	grant = dma_to_grant(dma_handle);
>  
> -	gnttab_end_foreign_access_ref(grant);
> +	for (i = 0; i < n_pages; i++) {
> +		if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
> +			dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
> +			data->broken = true;
> +			goto out;
> +		}
> +	}
> +
> +	gnttab_free_grant_reference_seq(grant, n_pages);
>  
> -	gnttab_free_grant_reference(grant);
> +out:
> +	spin_unlock(&data->lock);
>  }
>  
>  static int xen_virtio_dma_map_sg(struct device *dev, struct scatterlist *sg,
> @@ -149,7 +255,7 @@ static void xen_virtio_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
>  
>  static int xen_virtio_dma_dma_supported(struct device *dev, u64 mask)
>  {
> -	return 1;
> +	return mask == DMA_BIT_MASK(64);
>  }
>  
>  static const struct dma_map_ops xen_virtio_dma_ops = {
> @@ -166,9 +272,61 @@ static const struct dma_map_ops xen_virtio_dma_ops = {
>  	.dma_supported = xen_virtio_dma_dma_supported,
>  };
>  
> +bool xen_is_virtio_device(struct device *dev)
> +{
> +	/* XXX Handle only DT devices for now */
> +	if (!dev->of_node)
> +		return false;
> +
> +	if (!of_device_is_compatible(dev->of_node, "virtio,mmio"))
> +		return false;
> +
> +	return of_property_read_bool(dev->of_node, "xen,dev-domid");
> +}
> +EXPORT_SYMBOL_GPL(xen_is_virtio_device);
> +
>  void xen_virtio_setup_dma_ops(struct device *dev)
>  {
> +	struct xen_virtio_data *data;
> +	uint32_t dev_domid;
> +
> +	data = find_xen_virtio_data(dev);
> +	if (data) {
> +		dev_err(dev, "xen_virtio data is already created\n");
> +		return;
> +	}
> +
> +	if (dev_is_pci(dev)) {
> +		/* XXX Leave it hard wired to dom0 for now */
> +		dev_domid = 0;
> +	} else if (dev->of_node) {
> +		if (of_property_read_u32(dev->of_node, "xen,dev-domid", &dev_domid)) {
> +			dev_err(dev, "xen,dev-domid property is not present\n");
> +			goto err;
> +		}
> +	} else
> +		/* The ACPI case is not supported */
> +		goto err;

If we get here, it means that xen_is_virtio_device returned true, so the
PCI case is actually impossible?

I would rewrite these checks like this:

/* XXX: ACPI and PCI unsupported for now */
if (dev_is_pci(dev) || !dev->of_node) {
	goto err;
}
if (of_property_read_u32(dev->of_node, "xen,dev-domid", &dev_domid)) {
	dev_err(dev, "xen,dev-domid property is not present\n");
	goto err;
}



> +	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
> +	if (!data) {
> +		dev_err(dev, "Cannot allocate xen_virtio data\n");
> +		goto err;
> +	}
> +	data->dev_domid = dev_domid;
> +	data->dev = dev;
> +	spin_lock_init(&data->lock);
> +
> +	spin_lock(&xen_virtio_lock);
> +	list_add(&data->list, &xen_virtio_devices);
> +	spin_unlock(&xen_virtio_lock);
> +
>  	dev->dma_ops = &xen_virtio_dma_ops;
> +
> +	return;
> +
> +err:
> +	dev_err(dev, "Cannot set up xen_virtio DMA ops, retain platform DMA ops\n");
>  }
>  EXPORT_SYMBOL_GPL(xen_virtio_setup_dma_ops);
>  
> diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
> index ae3c1bc..fdbcb99 100644
> --- a/include/xen/xen-ops.h
> +++ b/include/xen/xen-ops.h
> @@ -223,10 +223,15 @@ static inline void xen_preemptible_hcall_end(void) { }
>  
>  #ifdef CONFIG_XEN_VIRTIO
>  void xen_virtio_setup_dma_ops(struct device *dev);
> +bool xen_is_virtio_device(struct device *dev);
>  #else
>  static inline void xen_virtio_setup_dma_ops(struct device *dev)
>  {
>  }
> +static inline bool xen_is_virtio_device(struct device *dev)
> +{
> +	return false;
> +}
>  #endif /* CONFIG_XEN_VIRTIO */
>  
>  #endif /* INCLUDE_XEN_OPS_H */
