From: Kirti Wankhede <kwankhede@nvidia.com>
To: Lu Baolu <baolu.lu@linux.intel.com>,
Joerg Roedel <joro@8bytes.org>,
David Woodhouse <dwmw2@infradead.org>,
Alex Williamson <alex.williamson@redhat.com>
Cc: <ashok.raj@intel.com>, <sanjay.k.kumar@intel.com>,
<jacob.jun.pan@intel.com>, <kevin.tian@intel.com>,
Jean-Philippe Brucker <jean-philippe.brucker@arm.com>,
<yi.l.liu@intel.com>, <yi.y.sun@intel.com>, <peterx@redhat.com>,
<tiwei.bie@intel.com>, <xin.zeng@intel.com>,
<iommu@lists.linux-foundation.org>, <kvm@vger.kernel.org>,
<linux-kernel@vger.kernel.org>,
Jacob Pan <jacob.jun.pan@linux.intel.com>,
Neo Jia <cjia@nvidia.com>
Subject: Re: [PATCH v7 8/9] vfio/type1: Add domain at(de)taching group helpers
Date: Tue, 26 Mar 2019 15:03:06 +0530 [thread overview]
Message-ID: <4ed10087-1e96-2aea-91a5-93ded001cb0b@nvidia.com> (raw)
In-Reply-To: <20190222021927.13132-9-baolu.lu@linux.intel.com>
On 2/22/2019 7:49 AM, Lu Baolu wrote:
> This adds helpers to attach a domain to, or detach it
> from, a group. They will replace iommu_attach_group(),
> which only works for non-mdev devices.
>
> If a domain is being attached to a group which includes
> mediated devices, it should be attached to the iommu device
> (a pci device which represents the mdev in iommu scope)
> instead. The added helper supports attaching a domain to
> groups for both pci and mdev devices.
>
> Cc: Ashok Raj <ashok.raj@intel.com>
> Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
> Cc: Kevin Tian <kevin.tian@intel.com>
> Signed-off-by: Sanjay Kumar <sanjay.k.kumar@intel.com>
> Signed-off-by: Liu Yi L <yi.l.liu@intel.com>
> Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
> Reviewed-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
Reviewed-by: Kirti Wankhede <kwankhede@nvidia.com>
Thanks,
Kirti
> ---
> drivers/vfio/vfio_iommu_type1.c | 84 ++++++++++++++++++++++++++++++---
> 1 file changed, 77 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
> index 73652e21efec..ccc4165474aa 100644
> --- a/drivers/vfio/vfio_iommu_type1.c
> +++ b/drivers/vfio/vfio_iommu_type1.c
> @@ -91,6 +91,7 @@ struct vfio_dma {
> struct vfio_group {
> struct iommu_group *iommu_group;
> struct list_head next;
> + bool mdev_group; /* An mdev group */
> };
>
> /*
> @@ -1298,6 +1299,75 @@ static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base)
> return ret;
> }
>
> +static struct device *vfio_mdev_get_iommu_device(struct device *dev)
> +{
> + struct device *(*fn)(struct device *dev);
> + struct device *iommu_device;
> +
> + fn = symbol_get(mdev_get_iommu_device);
> + if (fn) {
> + iommu_device = fn(dev);
> + symbol_put(mdev_get_iommu_device);
> +
> + return iommu_device;
> + }
> +
> + return NULL;
> +}
> +
> +static int vfio_mdev_attach_domain(struct device *dev, void *data)
> +{
> + struct iommu_domain *domain = data;
> + struct device *iommu_device;
> +
> + iommu_device = vfio_mdev_get_iommu_device(dev);
> + if (iommu_device) {
> + if (iommu_dev_feature_enabled(iommu_device, IOMMU_DEV_FEAT_AUX))
> + return iommu_aux_attach_device(domain, iommu_device);
> + else
> + return iommu_attach_device(domain, iommu_device);
> + }
> +
> + return -EINVAL;
> +}
> +
> +static int vfio_mdev_detach_domain(struct device *dev, void *data)
> +{
> + struct iommu_domain *domain = data;
> + struct device *iommu_device;
> +
> + iommu_device = vfio_mdev_get_iommu_device(dev);
> + if (iommu_device) {
> + if (iommu_dev_feature_enabled(iommu_device, IOMMU_DEV_FEAT_AUX))
> + iommu_aux_detach_device(domain, iommu_device);
> + else
> + iommu_detach_device(domain, iommu_device);
> + }
> +
> + return 0;
> +}
> +
> +static int vfio_iommu_attach_group(struct vfio_domain *domain,
> + struct vfio_group *group)
> +{
> + if (group->mdev_group)
> + return iommu_group_for_each_dev(group->iommu_group,
> + domain->domain,
> + vfio_mdev_attach_domain);
> + else
> + return iommu_attach_group(domain->domain, group->iommu_group);
> +}
> +
> +static void vfio_iommu_detach_group(struct vfio_domain *domain,
> + struct vfio_group *group)
> +{
> + if (group->mdev_group)
> + iommu_group_for_each_dev(group->iommu_group, domain->domain,
> + vfio_mdev_detach_domain);
> + else
> + iommu_detach_group(domain->domain, group->iommu_group);
> +}
> +
> static int vfio_iommu_type1_attach_group(void *iommu_data,
> struct iommu_group *iommu_group)
> {
> @@ -1373,7 +1443,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
> goto out_domain;
> }
>
> - ret = iommu_attach_group(domain->domain, iommu_group);
> + ret = vfio_iommu_attach_group(domain, group);
> if (ret)
> goto out_domain;
>
> @@ -1405,8 +1475,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
> list_for_each_entry(d, &iommu->domain_list, next) {
> if (d->domain->ops == domain->domain->ops &&
> d->prot == domain->prot) {
> - iommu_detach_group(domain->domain, iommu_group);
> - if (!iommu_attach_group(d->domain, iommu_group)) {
> + vfio_iommu_detach_group(domain, group);
> + if (!vfio_iommu_attach_group(d, group)) {
> list_add(&group->next, &d->group_list);
> iommu_domain_free(domain->domain);
> kfree(domain);
> @@ -1414,7 +1484,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
> return 0;
> }
>
> - ret = iommu_attach_group(domain->domain, iommu_group);
> + ret = vfio_iommu_attach_group(domain, group);
> if (ret)
> goto out_domain;
> }
> @@ -1440,7 +1510,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
> return 0;
>
> out_detach:
> - iommu_detach_group(domain->domain, iommu_group);
> + vfio_iommu_detach_group(domain, group);
> out_domain:
> iommu_domain_free(domain->domain);
> out_free:
> @@ -1531,7 +1601,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
> if (!group)
> continue;
>
> - iommu_detach_group(domain->domain, iommu_group);
> + vfio_iommu_detach_group(domain, group);
> list_del(&group->next);
> kfree(group);
> /*
> @@ -1596,7 +1666,7 @@ static void vfio_release_domain(struct vfio_domain *domain, bool external)
> list_for_each_entry_safe(group, group_tmp,
> &domain->group_list, next) {
> if (!external)
> - iommu_detach_group(domain->domain, group->iommu_group);
> + vfio_iommu_detach_group(domain, group);
> list_del(&group->next);
> kfree(group);
> }
>
next prev parent reply other threads:[~2019-03-26 9:33 UTC|newest]
Thread overview: 18+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-02-22 2:19 [PATCH v7 0/9] vfio/mdev: IOMMU aware mediated device Lu Baolu
2019-02-22 2:19 ` [PATCH v7 1/9] iommu: Add APIs for multiple domains per device Lu Baolu
2019-02-22 2:19 ` [PATCH v7 2/9] iommu/vt-d: Move enable pasid out of CONFIG_INTEL_IOMMU_SVM Lu Baolu
2019-02-22 2:19 ` [PATCH v7 3/9] iommu/vt-d: Add per-device IOMMU feature ops entries Lu Baolu
2019-02-22 2:19 ` [PATCH v7 4/9] iommu/vt-d: Move common code out of iommu_attch_device() Lu Baolu
2019-02-22 2:19 ` [PATCH v7 5/9] iommu/vt-d: Aux-domain specific domain attach/detach Lu Baolu
2019-02-22 2:19 ` [PATCH v7 6/9] iommu/vt-d: Return ID associated with an auxiliary domain Lu Baolu
2019-02-22 2:19 ` [PATCH v7 7/9] vfio/mdev: Add iommu related member in mdev_device Lu Baolu
2019-02-22 14:34 ` Christoph Hellwig
2019-02-22 15:31 ` Alex Williamson
2019-02-22 2:19 ` [PATCH v7 8/9] vfio/type1: Add domain at(de)taching group helpers Lu Baolu
2019-03-26 9:33 ` Kirti Wankhede [this message]
2019-02-22 2:19 ` [PATCH v7 9/9] vfio/type1: Handle different mdev isolation type Lu Baolu
2019-03-07 8:44 ` Neo Jia
2019-03-07 23:56 ` Alex Williamson
2019-03-08 18:03 ` Neo Jia
2019-03-20 5:52 ` Lu Baolu
2019-02-26 10:37 ` [PATCH v7 0/9] vfio/mdev: IOMMU aware mediated device Joerg Roedel
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=4ed10087-1e96-2aea-91a5-93ded001cb0b@nvidia.com \
--to=kwankhede@nvidia.com \
--cc=alex.williamson@redhat.com \
--cc=ashok.raj@intel.com \
--cc=baolu.lu@linux.intel.com \
--cc=cjia@nvidia.com \
--cc=dwmw2@infradead.org \
--cc=iommu@lists.linux-foundation.org \
--cc=jacob.jun.pan@intel.com \
--cc=jacob.jun.pan@linux.intel.com \
--cc=jean-philippe.brucker@arm.com \
--cc=joro@8bytes.org \
--cc=kevin.tian@intel.com \
--cc=kvm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=peterx@redhat.com \
--cc=sanjay.k.kumar@intel.com \
--cc=tiwei.bie@intel.com \
--cc=xin.zeng@intel.com \
--cc=yi.l.liu@intel.com \
--cc=yi.y.sun@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).