From: Sinan Kaya <okaya@codeaurora.org>
To: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>,
	linux-arm-kernel@lists.infradead.org, linux-pci@vger.kernel.org,
	linux-acpi@vger.kernel.org, devicetree@vger.kernel.org,
	iommu@lists.linux-foundation.org, kvm@vger.kernel.org
Cc: mark.rutland@arm.com, ilias.apalodimas@linaro.org,
	catalin.marinas@arm.com, xuzaibo@huawei.com, will.deacon@arm.com,
	ashok.raj@intel.com, bharatku@xilinx.com, rfranz@cavium.com,
	lenb@kernel.org, robh+dt@kernel.org, bhelgaas@google.com,
	dwmw2@infradead.org, rjw@rjwysocki.net, sudeep.holla@arm.com,
	christian.koenig@amd.com
Subject: Re: [PATCH 37/37] vfio: Add support for Shared Virtual Addressing
Date: Tue, 27 Feb 2018 20:26:39 -0500	[thread overview]
Message-ID: <1e76c66c-952e-71bd-d831-d3a1ded9559c@codeaurora.org> (raw)
In-Reply-To: <20180212183352.22730-38-jean-philippe.brucker@arm.com>

On 2/12/2018 1:33 PM, Jean-Philippe Brucker wrote:
> Add two new ioctl for VFIO containers. VFIO_IOMMU_BIND_PROCESS creates a
> bond between a container and a process address space, identified by a
> device-specific ID named PASID. This allows the device to target DMA
> transactions at the process virtual addresses without a need for mapping
> and unmapping buffers explicitly in the IOMMU. The process page tables are
> shared with the IOMMU, and mechanisms such as PCI ATS/PRI are used to
> handle faults. VFIO_IOMMU_UNBIND_PROCESS removes a bond created with
> VFIO_IOMMU_BIND_PROCESS.
> 
> Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
> ---
>  drivers/vfio/vfio_iommu_type1.c | 399 ++++++++++++++++++++++++++++++++++++++++
>  include/uapi/linux/vfio.h       |  76 ++++++++
>  2 files changed, 475 insertions(+)
> 
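Just to make sure I understand the proposed uAPI, here is a rough userspace
sketch of the flow described in the commit message. The exact definitions
live in the include/uapi/linux/vfio.h hunk that I haven't quoted below, so
the ioctl name follows the commit message and the layout follows the type1
code (a vfio_iommu_type1_bind header immediately followed by a
vfio_iommu_type1_bind_process payload) -- illustrative only:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/vfio.h>	/* with this patch applied */

	struct bind_process_req {
		struct vfio_iommu_type1_bind		hdr;
		struct vfio_iommu_type1_bind_process	proc;
	};

	/* Bind the calling process; returns the PASID the device should use */
	static int bind_self(int container_fd)
	{
		struct bind_process_req req;

		memset(&req, 0, sizeof(req));
		req.hdr.argsz = sizeof(req);
		/* no VFIO_IOMMU_BIND_PID flag: bind current, not a foreign PID */

		if (ioctl(container_fd, VFIO_IOMMU_BIND_PROCESS, &req))
			return -1;

		return req.proc.pasid;
	}
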
> diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
> index e30e29ae4819..cac066f0026b 100644
> --- a/drivers/vfio/vfio_iommu_type1.c
> +++ b/drivers/vfio/vfio_iommu_type1.c
> @@ -30,6 +30,7 @@
>  #include <linux/iommu.h>
>  #include <linux/module.h>
>  #include <linux/mm.h>
> +#include <linux/ptrace.h>
>  #include <linux/rbtree.h>
>  #include <linux/sched/signal.h>
>  #include <linux/sched/mm.h>
> @@ -60,6 +61,7 @@ MODULE_PARM_DESC(disable_hugepages,
>  
>  struct vfio_iommu {
>  	struct list_head	domain_list;
> +	struct list_head	mm_list;
>  	struct vfio_domain	*external_domain; /* domain for external user */
>  	struct mutex		lock;
>  	struct rb_root		dma_list;
> @@ -90,6 +92,15 @@ struct vfio_dma {
>  struct vfio_group {
>  	struct iommu_group	*iommu_group;
>  	struct list_head	next;
> +	bool			sva_enabled;
> +};
> +
> +struct vfio_mm {
> +#define VFIO_PASID_INVALID	(-1)
> +	spinlock_t		lock;
> +	int			pasid;
> +	struct mm_struct	*mm;
> +	struct list_head	next;
>  };
>  
>  /*
> @@ -1117,6 +1128,157 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
>  	return 0;
>  }
>  
> +static int vfio_iommu_mm_exit(struct device *dev, int pasid, void *data)
> +{
> +	struct vfio_mm *vfio_mm = data;
> +
> +	/*
> +	 * The mm_exit callback cannot block, so we can't take the iommu mutex
> +	 * and remove this vfio_mm from the list. Hopefully the SVA code will
> +	 * relax its locking requirement in the future.
> +	 *
> +	 * We mostly care about attach_group, which will attempt to replay all
> +	 * binds in this container. Ensure that it doesn't touch this defunct mm
> +	 * struct, by clearing the pointer. The structure will be freed when the
> +	 * group is removed from the container.
> +	 */
> +	spin_lock(&vfio_mm->lock);
> +	vfio_mm->mm = NULL;
> +	spin_unlock(&vfio_mm->lock);
> +
> +	return 0;
> +}
> +
> +static int vfio_iommu_sva_init(struct device *dev, void *data)
> +{

The data argument is not being used.

> +
> +	int ret;
> +
> +	ret = iommu_sva_device_init(dev, IOMMU_SVA_FEAT_PASID |
> +				    IOMMU_SVA_FEAT_IOPF, 0);
> +	if (ret)
> +		return ret;
> +
> +	return iommu_register_mm_exit_handler(dev, vfio_iommu_mm_exit);
> +}
> +
> +static int vfio_iommu_sva_shutdown(struct device *dev, void *data)
> +{
> +	iommu_sva_device_shutdown(dev);
> +	iommu_unregister_mm_exit_handler(dev);
> +
> +	return 0;
> +}
> +
> +static int vfio_iommu_bind_group(struct vfio_iommu *iommu,
> +				 struct vfio_group *group,
> +				 struct vfio_mm *vfio_mm)
> +{
> +	int ret;
> +	int pasid;
> +
> +	if (!group->sva_enabled) {
> +		ret = iommu_group_for_each_dev(group->iommu_group, NULL,
> +					       vfio_iommu_sva_init);
> +		if (ret)
> +			return ret;
> +
> +		group->sva_enabled = true;
> +	}
> +
> +	ret = iommu_sva_bind_group(group->iommu_group, vfio_mm->mm, &pasid,
> +				   IOMMU_SVA_FEAT_PASID | IOMMU_SVA_FEAT_IOPF,
> +				   vfio_mm);
> +	if (ret)
> +		return ret;

Don't you need to clean up the work done by vfio_iommu_sva_init() here?
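
Something like the below (untested) is what I have in mind, reusing
vfio_iommu_sva_shutdown() and only undoing the init that this call did
(enabled_here is a new local, just for illustration):

	bool enabled_here = false;
	...
	if (!group->sva_enabled) {
		ret = iommu_group_for_each_dev(group->iommu_group, NULL,
					       vfio_iommu_sva_init);
		if (ret)
			return ret;

		group->sva_enabled = enabled_here = true;
	}

	ret = iommu_sva_bind_group(group->iommu_group, vfio_mm->mm, &pasid,
				   IOMMU_SVA_FEAT_PASID | IOMMU_SVA_FEAT_IOPF,
				   vfio_mm);
	if (ret)
		goto out_shutdown;

	...

	return 0;

out_shutdown:
	/* don't tear down SVA that was already enabled by an earlier bind */
	if (enabled_here) {
		iommu_group_for_each_dev(group->iommu_group, NULL,
					 vfio_iommu_sva_shutdown);
		group->sva_enabled = false;
	}
	return ret;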

> +
> +	if (WARN_ON(vfio_mm->pasid != VFIO_PASID_INVALID && pasid !=
> +		    vfio_mm->pasid))
> +		return -EFAULT;
> +
> +	vfio_mm->pasid = pasid;
> +
> +	return 0;
> +}
> +
> +static void vfio_iommu_unbind_group(struct vfio_group *group,
> +				    struct vfio_mm *vfio_mm)
> +{
> +	iommu_sva_unbind_group(group->iommu_group, vfio_mm->pasid);
> +}
> +
> +static void vfio_iommu_unbind(struct vfio_iommu *iommu,
> +			      struct vfio_mm *vfio_mm)
> +{
> +	struct vfio_group *group;
> +	struct vfio_domain *domain;
> +
> +	list_for_each_entry(domain, &iommu->domain_list, next)
> +		list_for_each_entry(group, &domain->group_list, next)
> +			vfio_iommu_unbind_group(group, vfio_mm);
> +}
> +
> +static bool vfio_mm_get(struct vfio_mm *vfio_mm)
> +{
> +	bool ret;
> +
> +	spin_lock(&vfio_mm->lock);
> +	ret = vfio_mm->mm && mmget_not_zero(vfio_mm->mm);
> +	spin_unlock(&vfio_mm->lock);
> +
> +	return ret;
> +}
> +
> +static void vfio_mm_put(struct vfio_mm *vfio_mm)
> +{
> +	mmput(vfio_mm->mm);
> +}
> +
> +static int vfio_iommu_replay_bind(struct vfio_iommu *iommu, struct vfio_group *group)
> +{
> +	int ret = 0;
> +	struct vfio_mm *vfio_mm;
> +
> +	list_for_each_entry(vfio_mm, &iommu->mm_list, next) {
> +		/*
> +		 * Ensure mm doesn't exit while we're binding it to the new
> +		 * group.
> +		 */
> +		if (!vfio_mm_get(vfio_mm))
> +			continue;
> +		ret = vfio_iommu_bind_group(iommu, group, vfio_mm);
> +		vfio_mm_put(vfio_mm);
> +
> +		if (ret)
> +			goto out_unbind;
> +	}
> +
> +	return 0;
> +
> +out_unbind:
> +	list_for_each_entry_continue_reverse(vfio_mm, &iommu->mm_list, next) {
> +		if (!vfio_mm_get(vfio_mm))
> +			continue;
> +		iommu_sva_unbind_group(group->iommu_group, vfio_mm->pasid);
> +		vfio_mm_put(vfio_mm);
> +	}
> +
> +	return ret;
> +}
> +
> +static void vfio_iommu_free_all_mm(struct vfio_iommu *iommu)
> +{
> +	struct vfio_mm *vfio_mm, *tmp;
> +
> +	/*
> +	 * No need for unbind() here. Since all groups are detached from this
> +	 * iommu, bonds have been removed.
> +	 */
> +	list_for_each_entry_safe(vfio_mm, tmp, &iommu->mm_list, next)
> +		kfree(vfio_mm);
> +	INIT_LIST_HEAD(&iommu->mm_list);
> +}
> +
>  /*
>   * We change our unmap behavior slightly depending on whether the IOMMU
>   * supports fine-grained superpages.  IOMMUs like AMD-Vi will use a superpage
> @@ -1301,6 +1463,15 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
>  		    d->prot == domain->prot) {
>  			iommu_detach_group(domain->domain, iommu_group);
>  			if (!iommu_attach_group(d->domain, iommu_group)) {
> +				if (vfio_iommu_replay_bind(iommu, group)) {
> +					iommu_detach_group(d->domain, iommu_group);
> +					ret = iommu_attach_group(domain->domain,
> +								 iommu_group);
> +					if (ret)
> +						goto out_domain;
> +					continue;
> +				}
> +
>  				list_add(&group->next, &d->group_list);
>  				iommu_domain_free(domain->domain);
>  				kfree(domain);
> @@ -1321,6 +1492,10 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
>  	if (ret)
>  		goto out_detach;
>  
> +	ret = vfio_iommu_replay_bind(iommu, group);
> +	if (ret)
> +		goto out_detach;
> +
>  	if (resv_msi) {
>  		ret = iommu_get_msi_cookie(domain->domain, resv_msi_base);
>  		if (ret)
> @@ -1426,6 +1601,11 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
>  			continue;
>  
>  		iommu_detach_group(domain->domain, iommu_group);
> +		if (group->sva_enabled) {
> +			iommu_group_for_each_dev(iommu_group, NULL,
> +						 vfio_iommu_sva_shutdown);
> +			group->sva_enabled = false;
> +		}
>  		list_del(&group->next);
>  		kfree(group);
>  		/*
> @@ -1441,6 +1621,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
>  					vfio_iommu_unmap_unpin_all(iommu);
>  				else
>  					vfio_iommu_unmap_unpin_reaccount(iommu);
> +				vfio_iommu_free_all_mm(iommu);
>  			}
>  			iommu_domain_free(domain->domain);
>  			list_del(&domain->next);
> @@ -1475,6 +1656,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
>  	}
>  
>  	INIT_LIST_HEAD(&iommu->domain_list);
> +	INIT_LIST_HEAD(&iommu->mm_list);
>  	iommu->dma_list = RB_ROOT;
>  	mutex_init(&iommu->lock);
>  	BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
> @@ -1509,6 +1691,7 @@ static void vfio_iommu_type1_release(void *iommu_data)
>  		kfree(iommu->external_domain);
>  	}
>  
> +	vfio_iommu_free_all_mm(iommu);
>  	vfio_iommu_unmap_unpin_all(iommu);
>  
>  	list_for_each_entry_safe(domain, domain_tmp,
> @@ -1537,6 +1720,184 @@ static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
>  	return ret;
>  }
>  
> +static struct mm_struct *vfio_iommu_get_mm_by_vpid(pid_t vpid)
> +{
> +	struct mm_struct *mm;
> +	struct task_struct *task;
> +
> +	rcu_read_lock();
> +	task = find_task_by_vpid(vpid);
> +	if (task)
> +		get_task_struct(task);
> +	rcu_read_unlock();
> +	if (!task)
> +		return ERR_PTR(-ESRCH);
> +
> +	/* Ensure that current has RW access on the mm */
> +	mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);
> +	put_task_struct(task);
> +
> +	if (!mm)
> +		return ERR_PTR(-ESRCH);
> +
> +	return mm;
> +}
> +
> +static long vfio_iommu_type1_bind_process(struct vfio_iommu *iommu,
> +					  void __user *arg,
> +					  struct vfio_iommu_type1_bind *bind)
> +{
> +	struct vfio_iommu_type1_bind_process params;
> +	struct vfio_domain *domain;
> +	struct vfio_group *group;
> +	struct vfio_mm *vfio_mm;
> +	struct mm_struct *mm;
> +	unsigned long minsz;
> +	int ret = 0;
> +
> +	minsz = sizeof(*bind) + sizeof(params);
> +	if (bind->argsz < minsz)
> +		return -EINVAL;
> +
> +	arg += sizeof(*bind);
> +	if (copy_from_user(&params, arg, sizeof(params)))
> +		return -EFAULT;
> +
> +	if (params.flags & ~VFIO_IOMMU_BIND_PID)
> +		return -EINVAL;
> +
> +	if (params.flags & VFIO_IOMMU_BIND_PID) {
> +		mm = vfio_iommu_get_mm_by_vpid(params.pid);
> +		if (IS_ERR(mm))
> +			return PTR_ERR(mm);
> +	} else {
> +		mm = get_task_mm(current);
> +		if (!mm)
> +			return -EINVAL;
> +	}

I think you can merge the mm failure handling for both cases.
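
For example, with a small helper (the name is just for illustration) that
always returns ERR_PTR() on failure:

	static struct mm_struct *
	vfio_iommu_get_mm(struct vfio_iommu_type1_bind_process *params)
	{
		if (params->flags & VFIO_IOMMU_BIND_PID)
			return vfio_iommu_get_mm_by_vpid(params->pid);

		/* keep the original -EINVAL for the current-task case */
		return get_task_mm(current) ?: ERR_PTR(-EINVAL);
	}

so that both bind and unbind become:

	mm = vfio_iommu_get_mm(&params);
	if (IS_ERR(mm))
		return PTR_ERR(mm);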

> +
> +	mutex_lock(&iommu->lock);
> +	if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) {
> +		ret = -EINVAL;
> +		goto out_put_mm;
> +	}
> +
> +	list_for_each_entry(vfio_mm, &iommu->mm_list, next) {
> +		if (vfio_mm->mm != mm)
> +			continue;
> +
> +		params.pasid = vfio_mm->pasid;
> +
> +		ret = copy_to_user(arg, &params, sizeof(params)) ? -EFAULT : 0;
> +		goto out_put_mm;
> +	}
> +
> +	vfio_mm = kzalloc(sizeof(*vfio_mm), GFP_KERNEL);
> +	if (!vfio_mm) {
> +		ret = -ENOMEM;
> +		goto out_put_mm;
> +	}
> +
> +	vfio_mm->mm = mm;
> +	vfio_mm->pasid = VFIO_PASID_INVALID;
> +	spin_lock_init(&vfio_mm->lock);
> +
> +	list_for_each_entry(domain, &iommu->domain_list, next) {
> +		list_for_each_entry(group, &domain->group_list, next) {
> +			ret = vfio_iommu_bind_group(iommu, group, vfio_mm);
> +			if (ret)
> +				break;
> +		}
> +		if (ret)
> +			break;
> +	}
> +
> +	if (ret) {
> +		/* Undo all binds that already succeeded */
> +		list_for_each_entry_continue_reverse(group, &domain->group_list,
> +						     next)
> +			vfio_iommu_unbind_group(group, vfio_mm);
> +		list_for_each_entry_continue_reverse(domain, &iommu->domain_list,
> +						     next)
> +			list_for_each_entry(group, &domain->group_list, next)
> +				vfio_iommu_unbind_group(group, vfio_mm);
> +		kfree(vfio_mm);
> +	} else {
> +		list_add(&vfio_mm->next, &iommu->mm_list);
> +
> +		params.pasid = vfio_mm->pasid;
> +		ret = copy_to_user(arg, &params, sizeof(params)) ? -EFAULT : 0;
> +		if (ret) {
> +			vfio_iommu_unbind(iommu, vfio_mm);
> +			kfree(vfio_mm);
> +		}
> +	}
> +
> +out_put_mm:
> +	mutex_unlock(&iommu->lock);
> +	mmput(mm);
> +
> +	return ret;
> +}
> +
> +static long vfio_iommu_type1_unbind_process(struct vfio_iommu *iommu,
> +					    void __user *arg,
> +					    struct vfio_iommu_type1_bind *bind)
> +{
> +	int ret = -EINVAL;
> +	unsigned long minsz;
> +	struct mm_struct *mm;
> +	struct vfio_mm *vfio_mm;
> +	struct vfio_iommu_type1_bind_process params;
> +
> +	minsz = sizeof(*bind) + sizeof(params);
> +	if (bind->argsz < minsz)
> +		return -EINVAL;
> +
> +	arg += sizeof(*bind);
> +	if (copy_from_user(&params, arg, sizeof(params)))
> +		return -EFAULT;
> +
> +	if (params.flags & ~VFIO_IOMMU_BIND_PID)
> +		return -EINVAL;
> +
> +	/*
> +	 * We can't simply unbind a foreign process by PASID, because the
> +	 * process might have died and the PASID might have been reallocated to
> +	 * another process. Instead we need to fetch that process mm by PID
> +	 * again to make sure we remove the right vfio_mm. In addition, holding
> +	 * the mm guarantees that mm_users isn't dropped while we unbind and the
> +	 * exit_mm handler doesn't fire. While not strictly necessary, not
> +	 * having to care about that race simplifies everyone's life.
> +	 */
> +	if (params.flags & VFIO_IOMMU_BIND_PID) {
> +		mm = vfio_iommu_get_mm_by_vpid(params.pid);
> +		if (IS_ERR(mm))
> +			return PTR_ERR(mm);
> +	} else {
> +		mm = get_task_mm(current);
> +		if (!mm)
> +			return -EINVAL;
> +	}
> +

Same comment as in the bind path: I think you can merge the mm failure
handling for both cases.

> +	ret = -ESRCH;
> +	mutex_lock(&iommu->lock);
> +	list_for_each_entry(vfio_mm, &iommu->mm_list, next) {
> +		if (vfio_mm->mm != mm)
> +			continue;
> +

These loops look weird:
1. for loop + break
2. for loop + goto

How about closing the for loop here, and then returning if no matching
vfio_mm was found?
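Roughly (untested):

	struct vfio_mm *vfio_mm, *found = NULL;
	...
	mutex_lock(&iommu->lock);
	list_for_each_entry(vfio_mm, &iommu->mm_list, next) {
		if (vfio_mm->mm == mm) {
			found = vfio_mm;
			break;
		}
	}

	if (found) {
		vfio_iommu_unbind(iommu, found);
		list_del(&found->next);
		kfree(found);
	}
	mutex_unlock(&iommu->lock);
	mmput(mm);

	return found ? 0 : -ESRCH;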


> +		vfio_iommu_unbind(iommu, vfio_mm);
> +		list_del(&vfio_mm->next);
> +		kfree(vfio_mm);
> +		ret = 0;
> +		break;
> +	}
> +	mutex_unlock(&iommu->lock);
> +	mmput(mm);
> +
> +	return ret;
> +}
> +

-- 
Sinan Kaya
Qualcomm Datacenter Technologies, Inc. as an affiliate of Qualcomm Technologies, Inc.
Qualcomm Technologies, Inc. is a member of the Code Aurora Forum, a Linux Foundation Collaborative Project.

2018-03-08 17:34       ` Jonathan Cameron
2018-03-08 17:34         ` Jonathan Cameron
2018-03-08 17:34         ` Jonathan Cameron
     [not found]         ` <20180308183431.00005f86-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
2018-03-14 13:09           ` Jean-Philippe Brucker
2018-03-14 13:09             ` Jean-Philippe Brucker
2018-03-14 13:09             ` Jean-Philippe Brucker
2018-02-12 18:33   ` [PATCH 29/37] iommu/arm-smmu-v3: Add stall support for platform devices Jean-Philippe Brucker
2018-02-12 18:33     ` Jean-Philippe Brucker
2018-02-12 18:33     ` Jean-Philippe Brucker
2018-02-13  1:46     ` Xu Zaibo
2018-02-13  1:46       ` Xu Zaibo
2018-02-13  1:46       ` Xu Zaibo
2018-02-13  1:46       ` Xu Zaibo
     [not found]       ` <5A824359.1080005-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
2018-02-13 12:58         ` Jean-Philippe Brucker
2018-02-13 12:58           ` Jean-Philippe Brucker
2018-02-13 12:58           ` Jean-Philippe Brucker
2018-02-12 18:33   ` [PATCH 30/37] ACPI/IORT: Check ATS capability in root complex nodes Jean-Philippe Brucker
2018-02-12 18:33     ` Jean-Philippe Brucker
2018-02-12 18:33     ` Jean-Philippe Brucker
2018-02-12 18:33   ` [PATCH 31/37] iommu/arm-smmu-v3: Add support for PCI ATS Jean-Philippe Brucker
2018-02-12 18:33     ` Jean-Philippe Brucker
2018-02-12 18:33     ` Jean-Philippe Brucker
     [not found]     ` <20180212183352.22730-32-jean-philippe.brucker-5wv7dgnIgG8@public.gmane.org>
2018-03-08 16:17       ` Jonathan Cameron
2018-03-08 16:17         ` Jonathan Cameron
2018-03-08 16:17         ` Jonathan Cameron
     [not found]         ` <20180308171725.0000763c-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
2018-03-14 13:09           ` Jean-Philippe Brucker
2018-03-14 13:09             ` Jean-Philippe Brucker
2018-03-14 13:09             ` Jean-Philippe Brucker
2018-02-12 18:33   ` [PATCH 32/37] iommu/arm-smmu-v3: Hook up ATC invalidation to mm ops Jean-Philippe Brucker
2018-02-12 18:33     ` Jean-Philippe Brucker
2018-02-12 18:33     ` Jean-Philippe Brucker
2018-02-12 18:33   ` [PATCH 33/37] iommu/arm-smmu-v3: Disable tagged pointers Jean-Philippe Brucker
2018-02-12 18:33     ` Jean-Philippe Brucker
2018-02-12 18:33     ` Jean-Philippe Brucker
2018-02-12 18:33   ` [PATCH 35/37] iommu/arm-smmu-v3: Add support for PRI Jean-Philippe Brucker
2018-02-12 18:33     ` Jean-Philippe Brucker
2018-02-12 18:33     ` Jean-Philippe Brucker
     [not found]     ` <20180212183352.22730-36-jean-philippe.brucker-5wv7dgnIgG8@public.gmane.org>
2018-03-05 12:29       ` Dongdong Liu
2018-03-05 12:29         ` Dongdong Liu
2018-03-05 12:29         ` Dongdong Liu
2018-03-05 12:29         ` Dongdong Liu
     [not found]         ` <6f55afcf-04b0-0dc4-6c75-064b70e6851c-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
2018-03-05 13:09           ` Jean-Philippe Brucker
2018-03-05 13:09             ` Jean-Philippe Brucker
2018-03-05 13:09             ` Jean-Philippe Brucker
2018-03-08 16:24       ` Jonathan Cameron
2018-03-08 16:24         ` Jonathan Cameron
2018-03-08 16:24         ` Jonathan Cameron
     [not found]         ` <20180308172436.00006554-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
2018-03-14 13:10           ` Jean-Philippe Brucker
2018-03-14 13:10             ` Jean-Philippe Brucker
2018-03-14 13:10             ` Jean-Philippe Brucker
2018-02-12 18:33 ` [PATCH 34/37] PCI: Make "PRG Response PASID Required" handling common Jean-Philippe Brucker
2018-02-12 18:33   ` Jean-Philippe Brucker
2018-02-12 18:33 ` [PATCH 36/37] iommu/arm-smmu-v3: Add support for PCI PASID Jean-Philippe Brucker
2018-02-12 18:33   ` Jean-Philippe Brucker
2018-02-12 18:33 ` [PATCH 37/37] vfio: Add support for Shared Virtual Addressing Jean-Philippe Brucker
2018-02-12 18:33   ` Jean-Philippe Brucker
     [not found]   ` <20180212183352.22730-38-jean-philippe.brucker-5wv7dgnIgG8@public.gmane.org>
2018-02-16 19:33     ` Alex Williamson
2018-02-16 19:33       ` Alex Williamson
2018-02-16 19:33       ` Alex Williamson
     [not found]       ` <20180216123329.10f6dc23-DGNDKt5SQtizQB+pC5nmwQ@public.gmane.org>
2018-02-20 11:26         ` Jean-Philippe Brucker
2018-02-20 11:26           ` Jean-Philippe Brucker
2018-02-20 11:26           ` Jean-Philippe Brucker
2018-02-28  1:26     ` Sinan Kaya [this message]
2018-02-28  1:26       ` Sinan Kaya
2018-02-28  1:26       ` Sinan Kaya
     [not found]       ` <1e76c66c-952e-71bd-d831-d3a1ded9559c-sgV2jX0FEOL9JmXXK+q4OQ@public.gmane.org>
2018-02-28 16:25         ` Jean-Philippe Brucker
2018-02-28 16:25           ` Jean-Philippe Brucker
2018-02-28 16:25           ` Jean-Philippe Brucker
     [not found] <1519280641-30258-1-git-send-email-xieyisheng1@huawei.com>
     [not found] ` <1519280641-30258-37-git-send-email-xieyisheng1@huawei.com>
     [not found]   ` <1519280641-30258-37-git-send-email-xieyisheng1-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
2018-03-19  9:47     ` Yisheng Xie
2018-03-19  9:47       ` Yisheng Xie
2018-03-19  9:47       ` Yisheng Xie
     [not found]       ` <e3ae1693-0ed8-65e1-352a-07f79038dcbd-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
2018-03-21 13:40         ` Jean-Philippe Brucker
2018-03-21 13:40           ` Jean-Philippe Brucker
2018-03-21 13:40           ` Jean-Philippe Brucker
