From: Liu Yi L <yi.l.liu@intel.com>
To: alex.williamson@redhat.com, eric.auger@redhat.com,
baolu.lu@linux.intel.com, joro@8bytes.org
Cc: jean-philippe@linaro.org, kevin.tian@intel.com,
ashok.raj@intel.com, kvm@vger.kernel.org, jun.j.tian@intel.com,
iommu@lists.linux-foundation.org, linux-kernel@vger.kernel.org,
yi.y.sun@intel.com, hao.wu@intel.com
Subject: [PATCH v3 09/14] vfio/type1: Support binding guest page tables to PASID
Date: Wed, 24 Jun 2020 01:55:22 -0700
Message-ID: <1592988927-48009-10-git-send-email-yi.l.liu@intel.com>
In-Reply-To: <1592988927-48009-1-git-send-email-yi.l.liu@intel.com>
Nesting translation allows two levels/stages of page tables, with the 1st
level/stage used for guest translations (e.g. GVA->GPA) and the 2nd
level/stage used for host translations (e.g. GPA->HPA). This patch adds an
interface for binding guest page tables to a PASID. The PASID must have
been allocated to user space before the binding request.
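For reference, a rough userspace sketch of the intended bind flow follows.
It is illustrative only: the fd and variable names are placeholders, and
the iommu_gpasid_bind_data field names follow the companion IOMMU uapi
series, so they may differ on other branches.

  /*
   * Illustrative sketch, not part of this patch. Assumes a container
   * opened with a nesting-capable IOMMU type and a host PASID already
   * allocated via VFIO_IOMMU_PASID_REQUEST.
   */
  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/iommu.h>
  #include <linux/vfio.h>

  static int bind_guest_pgtbl(int container_fd, __u64 guest_pgd_gpa,
                              __u32 host_pasid, __u32 guest_pasid)
  {
          struct {
                  struct vfio_iommu_type1_nesting_op op;
                  struct iommu_gpasid_bind_data bind;
          } req;

          memset(&req, 0, sizeof(req));
          req.op.argsz = sizeof(req);
          req.op.flags = VFIO_IOMMU_NESTING_OP_BIND_PGTBL;

          req.bind.version    = IOMMU_GPASID_BIND_VERSION_1;
          req.bind.format     = IOMMU_PASID_FORMAT_INTEL_VTD;
          req.bind.gpgd       = guest_pgd_gpa; /* guest 1st-level page table */
          req.bind.hpasid     = host_pasid;    /* PASID allocated by the host */
          req.bind.gpasid     = guest_pasid;   /* PASID used by the guest */
          req.bind.addr_width = 48;            /* placeholder address width */

          return ioctl(container_fd, VFIO_IOMMU_NESTING_OP, &req);
  }

The unbind path uses the same layout with VFIO_IOMMU_NESTING_OP_UNBIND_PGTBL
in op.flags; the kernel reads the gpasid bind data placed right after the
vfio_iommu_type1_nesting_op header.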
Cc: Kevin Tian <kevin.tian@intel.com>
CC: Jacob Pan <jacob.jun.pan@linux.intel.com>
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Eric Auger <eric.auger@redhat.com>
Cc: Jean-Philippe Brucker <jean-philippe@linaro.org>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Signed-off-by: Liu Yi L <yi.l.liu@intel.com>
Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
---
v2 -> v3:
*) use __iommu_sva_unbind_gpasid() for unbind call issued by VFIO
https://lore.kernel.org/linux-iommu/1592931837-58223-6-git-send-email-jacob.jun.pan@linux.intel.com/
v1 -> v2:
*) rename subject from "vfio/type1: Bind guest page tables to host"
*) remove VFIO_IOMMU_BIND, introduce VFIO_IOMMU_NESTING_OP to support bind/
   unbind guest page table
*) replace vfio_iommu_for_each_dev() with a group-level loop since this
   series enforces one group per container with nesting type as a start.
*) rename vfio_bind/unbind_gpasid_fn() to vfio_dev_bind/unbind_gpasid_fn()
*) make vfio_dev_unbind_gpasid() always return success
*) use vfio_mm->pasid_lock to avoid race between PASID free and page table
bind/unbind
---
drivers/vfio/vfio_iommu_type1.c | 169 ++++++++++++++++++++++++++++++++++++++++
drivers/vfio/vfio_pasid.c | 30 +++++++
include/linux/vfio.h | 20 +++++
include/uapi/linux/vfio.h | 30 +++++++
4 files changed, 249 insertions(+)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index d0891c5..5926533 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -148,6 +148,33 @@ struct vfio_regions {
#define DIRTY_BITMAP_PAGES_MAX ((u64)INT_MAX)
#define DIRTY_BITMAP_SIZE_MAX DIRTY_BITMAP_BYTES(DIRTY_BITMAP_PAGES_MAX)
+struct domain_capsule {
+ struct vfio_group *group;
+ struct iommu_domain *domain;
+ void *data;
+};
+
+/* iommu->lock must be held */
+static struct vfio_group *vfio_find_nesting_group(struct vfio_iommu *iommu)
+{
+ struct vfio_domain *d;
+ struct vfio_group *g, *group = NULL;
+
+ if (!iommu->nesting_info)
+ return NULL;
+
+ /* only support singleton container with nesting type */
+ list_for_each_entry(d, &iommu->domain_list, next) {
+ list_for_each_entry(g, &d->group_list, next) {
+ if (!group) {
+ group = g;
+ break;
+ }
+ }
+ }
+ return group;
+}
+
static int put_pfn(unsigned long pfn, int prot);
static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
@@ -2351,6 +2378,48 @@ static int vfio_iommu_resv_refresh(struct vfio_iommu *iommu,
return ret;
}
+static int vfio_dev_bind_gpasid_fn(struct device *dev, void *data)
+{
+ struct domain_capsule *dc = (struct domain_capsule *)data;
+ unsigned long arg = *(unsigned long *) dc->data;
+
+ return iommu_sva_bind_gpasid(dc->domain, dev, (void __user *) arg);
+}
+
+static int vfio_dev_unbind_gpasid_fn(struct device *dev, void *data)
+{
+ struct domain_capsule *dc = (struct domain_capsule *)data;
+ unsigned long arg = *(unsigned long *) dc->data;
+
+ iommu_sva_unbind_gpasid(dc->domain, dev, (void __user *) arg);
+ return 0;
+}
+
+static int __vfio_dev_unbind_gpasid_fn(struct device *dev, void *data)
+{
+ struct domain_capsule *dc = (struct domain_capsule *)data;
+ struct iommu_gpasid_bind_data *unbind_data =
+ (struct iommu_gpasid_bind_data *) dc->data;
+
+ __iommu_sva_unbind_gpasid(dc->domain, dev, unbind_data);
+ return 0;
+}
+
+static void vfio_group_unbind_gpasid_fn(ioasid_t pasid, void *data)
+{
+ struct domain_capsule *dc = (struct domain_capsule *) data;
+ struct iommu_gpasid_bind_data unbind_data;
+
+ unbind_data.argsz = offsetof(struct iommu_gpasid_bind_data, vendor);
+ unbind_data.flags = 0;
+ unbind_data.hpasid = pasid;
+
+ dc->data = &unbind_data;
+
+ iommu_group_for_each_dev(dc->group->iommu_group,
+ dc, __vfio_dev_unbind_gpasid_fn);
+}
+
static void vfio_iommu_type1_detach_group(void *iommu_data,
struct iommu_group *iommu_group)
{
@@ -2394,6 +2463,21 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
if (!group)
continue;
+ if (iommu->nesting_info && iommu->vmm &&
+ (iommu->nesting_info->features &
+ IOMMU_NESTING_FEAT_BIND_PGTBL)) {
+ struct domain_capsule dc = { .group = group,
+ .domain = domain->domain,
+ .data = NULL };
+
+ /*
+ * Unbind page tables bound with system wide PASIDs
+ * which are allocated to user space.
+ */
+ vfio_mm_for_each_pasid(iommu->vmm, &dc,
+ vfio_group_unbind_gpasid_fn);
+ }
+
vfio_iommu_detach_group(domain, group);
update_dirty_scope = !group->pinned_page_dirty_scope;
list_del(&group->next);
@@ -2942,6 +3026,89 @@ static int vfio_iommu_type1_pasid_request(struct vfio_iommu *iommu,
}
}
+static long vfio_iommu_handle_pgtbl_op(struct vfio_iommu *iommu,
+ bool is_bind, unsigned long arg)
+{
+ struct iommu_nesting_info *info;
+ struct domain_capsule dc = { .data = &arg };
+ struct vfio_group *group;
+ struct vfio_domain *domain;
+ int ret;
+
+ mutex_lock(&iommu->lock);
+
+ info = iommu->nesting_info;
+ if (!info || !(info->features & IOMMU_NESTING_FEAT_BIND_PGTBL)) {
+ ret = -ENOTSUPP;
+ goto out_unlock_iommu;
+ }
+
+ if (!iommu->vmm) {
+ ret = -EINVAL;
+ goto out_unlock_iommu;
+ }
+
+ group = vfio_find_nesting_group(iommu);
+ if (!group) {
+ ret = -EINVAL;
+ goto out_unlock_iommu;
+ }
+
+ domain = list_first_entry(&iommu->domain_list,
+ struct vfio_domain, next);
+ dc.group = group;
+ dc.domain = domain->domain;
+
+ /* Avoid race with other containers within the same process */
+ vfio_mm_pasid_lock(iommu->vmm);
+
+ if (is_bind) {
+ ret = iommu_group_for_each_dev(group->iommu_group, &dc,
+ vfio_dev_bind_gpasid_fn);
+ if (ret)
+ iommu_group_for_each_dev(group->iommu_group, &dc,
+ vfio_dev_unbind_gpasid_fn);
+ } else {
+ iommu_group_for_each_dev(group->iommu_group,
+ &dc, vfio_dev_unbind_gpasid_fn);
+ ret = 0;
+ }
+
+ vfio_mm_pasid_unlock(iommu->vmm);
+out_unlock_iommu:
+ mutex_unlock(&iommu->lock);
+ return ret;
+}
+
+static long vfio_iommu_type1_nesting_op(struct vfio_iommu *iommu,
+ unsigned long arg)
+{
+ struct vfio_iommu_type1_nesting_op hdr;
+ unsigned int minsz;
+ int ret;
+
+ minsz = offsetofend(struct vfio_iommu_type1_nesting_op, flags);
+
+ if (copy_from_user(&hdr, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (hdr.argsz < minsz || hdr.flags & ~VFIO_NESTING_OP_MASK)
+ return -EINVAL;
+
+ switch (hdr.flags & VFIO_NESTING_OP_MASK) {
+ case VFIO_IOMMU_NESTING_OP_BIND_PGTBL:
+ ret = vfio_iommu_handle_pgtbl_op(iommu, true, arg + minsz);
+ break;
+ case VFIO_IOMMU_NESTING_OP_UNBIND_PGTBL:
+ ret = vfio_iommu_handle_pgtbl_op(iommu, false, arg + minsz);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
static long vfio_iommu_type1_ioctl(void *iommu_data,
unsigned int cmd, unsigned long arg)
{
@@ -2960,6 +3127,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
return vfio_iommu_type1_dirty_pages(iommu, arg);
case VFIO_IOMMU_PASID_REQUEST:
return vfio_iommu_type1_pasid_request(iommu, arg);
+ case VFIO_IOMMU_NESTING_OP:
+ return vfio_iommu_type1_nesting_op(iommu, arg);
}
return -ENOTTY;
diff --git a/drivers/vfio/vfio_pasid.c b/drivers/vfio/vfio_pasid.c
index 2ea9f1a..20f1e72 100644
--- a/drivers/vfio/vfio_pasid.c
+++ b/drivers/vfio/vfio_pasid.c
@@ -30,6 +30,7 @@ struct vfio_mm {
struct kref kref;
struct vfio_mm_token token;
int ioasid_sid;
+ struct mutex pasid_lock;
int pasid_quota;
struct list_head next;
};
@@ -97,6 +98,7 @@ struct vfio_mm *vfio_mm_get_from_task(struct task_struct *task)
kref_init(&vmm->kref);
vmm->token.val = (unsigned long long) mm;
vmm->pasid_quota = pasid_quota;
+ mutex_init(&vmm->pasid_lock);
list_add(&vmm->next, &vfio_pasid.vfio_mm_list);
out:
@@ -134,12 +136,40 @@ void vfio_pasid_free_range(struct vfio_mm *vmm,
* IOASID core will notify PASID users (e.g. IOMMU driver) to
* teardown necessary structures depending on the to-be-freed
* PASID.
+ * Hold pasid_lock to avoid racing with PASID users, e.g. page
+ * table bind/unbind on the to-be-freed PASIDs.
*/
+ mutex_lock(&vmm->pasid_lock);
for (; pasid <= max; pasid++)
ioasid_free(pasid);
+ mutex_unlock(&vmm->pasid_lock);
}
EXPORT_SYMBOL_GPL(vfio_pasid_free_range);
+int vfio_mm_for_each_pasid(struct vfio_mm *vmm, void *data,
+ void (*fn)(ioasid_t id, void *data))
+{
+ int ret;
+
+ mutex_lock(&vmm->pasid_lock);
+ ret = ioasid_set_for_each_ioasid(vmm->ioasid_sid, fn, data);
+ mutex_unlock(&vmm->pasid_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vfio_mm_for_each_pasid);
+
+void vfio_mm_pasid_lock(struct vfio_mm *vmm)
+{
+ mutex_lock(&vmm->pasid_lock);
+}
+EXPORT_SYMBOL_GPL(vfio_mm_pasid_lock);
+
+void vfio_mm_pasid_unlock(struct vfio_mm *vmm)
+{
+ mutex_unlock(&vmm->pasid_lock);
+}
+EXPORT_SYMBOL_GPL(vfio_mm_pasid_unlock);
+
static int __init vfio_pasid_init(void)
{
mutex_init(&vfio_pasid.vfio_mm_lock);
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 8e60a32..9028a09 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -105,6 +105,11 @@ int vfio_mm_ioasid_sid(struct vfio_mm *vmm);
extern int vfio_pasid_alloc(struct vfio_mm *vmm, int min, int max);
extern void vfio_pasid_free_range(struct vfio_mm *vmm,
ioasid_t min, ioasid_t max);
+extern int vfio_mm_for_each_pasid(struct vfio_mm *vmm, void *data,
+ void (*fn)(ioasid_t id, void *data));
+extern void vfio_mm_pasid_lock(struct vfio_mm *vmm);
+extern void vfio_mm_pasid_unlock(struct vfio_mm *vmm);
+
#else
static inline struct vfio_mm *vfio_mm_get_from_task(struct task_struct *task)
{
@@ -129,6 +134,21 @@ static inline void vfio_pasid_free_range(struct vfio_mm *vmm,
ioasid_t min, ioasid_t max)
{
}
+
+static inline int vfio_mm_for_each_pasid(struct vfio_mm *vmm, void *data,
+ void (*fn)(ioasid_t id, void *data))
+{
+ return -ENOTTY;
+}
+
+static inline void vfio_mm_pasid_lock(struct vfio_mm *vmm)
+{
+}
+
+static inline void vfio_mm_pasid_unlock(struct vfio_mm *vmm)
+{
+}
+
#endif /* CONFIG_VFIO_PASID */
/*
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 657b2db..2c9def8 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -1198,6 +1198,36 @@ struct vfio_iommu_type1_pasid_request {
#define VFIO_IOMMU_PASID_REQUEST _IO(VFIO_TYPE, VFIO_BASE + 18)
+/**
+ * VFIO_IOMMU_NESTING_OP - _IOW(VFIO_TYPE, VFIO_BASE + 19,
+ * struct vfio_iommu_type1_nesting_op)
+ *
+ * This interface allows user space to utilize the nesting IOMMU
+ * capabilities as reported through VFIO_IOMMU_GET_INFO.
+ *
+ * @data[] types defined for each op:
+ * +=================+===============================================+
+ * | NESTING OP | @data[] |
+ * +=================+===============================================+
+ * | BIND_PGTBL | struct iommu_gpasid_bind_data |
+ * +-----------------+-----------------------------------------------+
+ * | UNBIND_PGTBL | struct iommu_gpasid_bind_data |
+ * +-----------------+-----------------------------------------------+
+ *
+ * returns: 0 on success, -errno on failure.
+ */
+struct vfio_iommu_type1_nesting_op {
+ __u32 argsz;
+ __u32 flags;
+#define VFIO_NESTING_OP_MASK (0xffff) /* lower 16-bits for op */
+ __u8 data[];
+};
+
+#define VFIO_IOMMU_NESTING_OP_BIND_PGTBL (0)
+#define VFIO_IOMMU_NESTING_OP_UNBIND_PGTBL (1)
+
+#define VFIO_IOMMU_NESTING_OP _IO(VFIO_TYPE, VFIO_BASE + 19)
+
/* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */
/*
--
2.7.4