From: Shenming Lu <lushenming@huawei.com>
To: Alex Williamson <alex.williamson@redhat.com>,
Cornelia Huck <cohuck@redhat.com>, Will Deacon <will@kernel.org>,
Robin Murphy <robin.murphy@arm.com>,
Joerg Roedel <joro@8bytes.org>,
Jean-Philippe Brucker <jean-philippe@linaro.org>,
Eric Auger <eric.auger@redhat.com>, <kvm@vger.kernel.org>,
<linux-kernel@vger.kernel.org>,
<linux-arm-kernel@lists.infradead.org>,
<iommu@lists.linux-foundation.org>, <linux-api@vger.kernel.org>
Cc: Kevin Tian <kevin.tian@intel.com>,
Lu Baolu <baolu.lu@linux.intel.com>, <yi.l.liu@intel.com>,
Christoph Hellwig <hch@infradead.org>,
Jonathan Cameron <Jonathan.Cameron@huawei.com>,
Barry Song <song.bao.hua@hisilicon.com>,
<wanghaibin.wang@huawei.com>, <yuzenghui@huawei.com>,
<lushenming@huawei.com>
Subject: [RFC PATCH v3 4/8] vfio/type1: Pre-map more pages than requested in the IOPF handling
Date: Fri, 9 Apr 2021 11:44:16 +0800
Message-ID: <20210409034420.1799-5-lushenming@huawei.com>
In-Reply-To: <20210409034420.1799-1-lushenming@huawei.com>
To reduce the number of page fault handler invocations, we can pre-map
more pages than requested when handling a single fault.
Note that IOPF_PREMAP_LEN is an arbitrary value for now (512 pages,
i.e. a 2MB window with 4KB pages) and could be tuned further.
Signed-off-by: Shenming Lu <lushenming@huawei.com>
---
drivers/vfio/vfio_iommu_type1.c | 131 ++++++++++++++++++++++++++++++--
1 file changed, 123 insertions(+), 8 deletions(-)
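[ For review convenience, here is a minimal standalone sketch (not part
of this patch) of the pre-map window computation used in the fault
handler below. premap_window() is a hypothetical helper name; it
assumes the iopf_mapped_bitmap introduced earlier in this series, and
it works in pages where the patch itself works in bytes (map_len): ]

  /*
   * Sketch only: starting at the faulting page (bit_offset), grow the
   * window one page at a time, stopping at IOPF_PREMAP_LEN pages or at
   * the first page that is already mapped.  Returns the window length
   * in pages, including the faulting page itself.
   */
  static unsigned long premap_window(const unsigned long *mapped_bitmap,
                                     unsigned long bit_offset,
                                     unsigned long total_pages)
  {
          unsigned long len = 1, i;

          for (i = bit_offset + 1; i < total_pages; i++) {
                  if (len >= IOPF_PREMAP_LEN ||
                      test_bit(i, mapped_bitmap))
                          break;
                  len++;
          }
          return len;
  }

[ The handler then pins and maps this window in physically contiguous
runs, so a single fault can map up to IOPF_PREMAP_LEN pages. ]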
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 1cb9d1f2717b..01e296c6dc9e 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -3217,6 +3217,91 @@ static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
return -EINVAL;
}
+/*
+ * To reduce the number of page faults handled, try to
+ * pre-map more pages than requested.
+ */
+#define IOPF_PREMAP_LEN 512
+
+/*
+ * Return 0 on success or a negative error code; the
+ * number of pages contiguously pinned is returned in @pinned.
+ */
+static int pin_pages_iopf(struct vfio_dma *dma, unsigned long vaddr,
+ unsigned long npages, unsigned long *pfn_base,
+ unsigned long *pinned, struct vfio_batch *batch)
+{
+ struct mm_struct *mm;
+ unsigned long pfn;
+ int ret = 0;
+ *pinned = 0;
+
+ mm = get_task_mm(dma->task);
+ if (!mm)
+ return -ENODEV;
+
+ if (batch->size) {
+ *pfn_base = page_to_pfn(batch->pages[batch->offset]);
+ pfn = *pfn_base;
+ } else {
+ *pfn_base = 0;
+ }
+
+ while (npages) {
+ if (!batch->size) {
+ unsigned long req_pages = min_t(unsigned long, npages,
+ batch->capacity);
+
+ ret = vaddr_get_pfns(mm, vaddr, req_pages, dma->prot,
+ &pfn, batch->pages);
+ if (ret < 0)
+ goto out;
+
+ batch->size = ret;
+ batch->offset = 0;
+ ret = 0;
+
+ if (!*pfn_base)
+ *pfn_base = pfn;
+ }
+
+ while (true) {
+ if (pfn != *pfn_base + *pinned)
+ goto out;
+
+ (*pinned)++;
+ npages--;
+ vaddr += PAGE_SIZE;
+ batch->offset++;
+ batch->size--;
+
+ if (!batch->size)
+ break;
+
+ pfn = page_to_pfn(batch->pages[batch->offset]);
+ }
+
+ if (unlikely(disable_hugepages))
+ break;
+ }
+
+out:
+ if (batch->size == 1 && !batch->offset) {
+ put_pfn(pfn, dma->prot);
+ batch->size = 0;
+ }
+
+ mmput(mm);
+ return ret;
+}
+
+static void unpin_pages_iopf(struct vfio_dma *dma,
+ unsigned long pfn, unsigned long npages)
+{
+ while (npages--)
+ put_pfn(pfn++, dma->prot);
+}
+
/* VFIO I/O Page Fault handler */
static int vfio_iommu_type1_dma_map_iopf(struct iommu_fault *fault, void *data)
{
@@ -3225,9 +3310,11 @@ static int vfio_iommu_type1_dma_map_iopf(struct iommu_fault *fault, void *data)
struct vfio_iopf_group *iopf_group;
struct vfio_iommu *iommu;
struct vfio_dma *dma;
+ struct vfio_batch batch;
dma_addr_t iova = ALIGN_DOWN(fault->prm.addr, PAGE_SIZE);
int access_flags = 0;
- unsigned long bit_offset, vaddr, pfn;
+ size_t premap_len, map_len, mapped_len = 0;
+ unsigned long bit_offset, vaddr, pfn, i, npages;
int ret;
enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;
struct iommu_page_response resp = {0};
@@ -3263,19 +3350,47 @@ static int vfio_iommu_type1_dma_map_iopf(struct iommu_fault *fault, void *data)
if (IOPF_MAPPED_BITMAP_GET(dma, bit_offset))
goto out_success;
+ premap_len = IOPF_PREMAP_LEN << PAGE_SHIFT;
+ npages = dma->size >> PAGE_SHIFT;
+ map_len = PAGE_SIZE;
+ for (i = bit_offset + 1; i < npages; i++) {
+ if (map_len >= premap_len || IOPF_MAPPED_BITMAP_GET(dma, i))
+ break;
+ map_len += PAGE_SIZE;
+ }
vaddr = iova - dma->iova + dma->vaddr;
+ vfio_batch_init(&batch);
- if (vfio_pin_page_external(dma, vaddr, &pfn, false))
- goto out_invalid;
+ while (map_len) {
+ ret = pin_pages_iopf(dma, vaddr + mapped_len,
+ map_len >> PAGE_SHIFT, &pfn,
+ &npages, &batch);
+ if (!npages)
+ break;
- if (vfio_iommu_map(iommu, iova, pfn, 1, dma->prot)) {
- put_pfn(pfn, dma->prot);
- goto out_invalid;
+ if (vfio_iommu_map(iommu, iova + mapped_len, pfn,
+ npages, dma->prot)) {
+ unpin_pages_iopf(dma, pfn, npages);
+ vfio_batch_unpin(&batch, dma);
+ break;
+ }
+
+ bitmap_set(dma->iopf_mapped_bitmap,
+ bit_offset + (mapped_len >> PAGE_SHIFT), npages);
+
+ unpin_pages_iopf(dma, pfn, npages);
+
+ map_len -= npages << PAGE_SHIFT;
+ mapped_len += npages << PAGE_SHIFT;
+
+ if (ret)
+ break;
}
- bitmap_set(dma->iopf_mapped_bitmap, bit_offset, 1);
+ vfio_batch_fini(&batch);
- put_pfn(pfn, dma->prot);
+ if (!mapped_len)
+ goto out_invalid;
out_success:
status = IOMMU_PAGE_RESP_SUCCESS;
--
2.19.1