From: Praveen Kumar <kumarpraveen@linux.microsoft.com>
To: Wei Liu <wei.liu@kernel.org>,
Linux on Hyper-V List <linux-hyperv@vger.kernel.org>
Cc: virtualization@lists.linux-foundation.org,
Linux Kernel List <linux-kernel@vger.kernel.org>,
Michael Kelley <mikelley@microsoft.com>,
Vineeth Pillai <viremana@linux.microsoft.com>,
Sunil Muthuswamy <sunilmut@microsoft.com>,
Nuno Das Neves <nunodasneves@linux.microsoft.com>,
pasha.tatashin@soleen.com, Joerg Roedel <joro@8bytes.org>,
Will Deacon <will@kernel.org>,
"K. Y. Srinivasan" <kys@microsoft.com>,
Haiyang Zhang <haiyangz@microsoft.com>,
Stephen Hemminger <sthemmin@microsoft.com>,
Dexuan Cui <decui@microsoft.com>,
"open list:IOMMU DRIVERS" <iommu@lists.linux-foundation.org>
Subject: Re: [RFC v1 5/8] mshv: add paravirtualized IOMMU support
Date: Wed, 4 Aug 2021 00:10:45 +0530 [thread overview]
Message-ID: <77670985-2a1b-7bbd-2ede-4b7810c3e220@linux.microsoft.com> (raw)
In-Reply-To: <20210709114339.3467637-6-wei.liu@kernel.org>
On 09-07-2021 17:13, Wei Liu wrote:
> +static void hv_iommu_domain_free(struct iommu_domain *d)
> +{
> + struct hv_iommu_domain *domain = to_hv_iommu_domain(d);
> + unsigned long flags;
> + u64 status;
> + struct hv_input_delete_device_domain *input;
> +
> + if (is_identity_domain(domain) || is_null_domain(domain))
> + return;
> +
> + local_irq_save(flags);
> + input = *this_cpu_ptr(hyperv_pcpu_input_arg);
> + memset(input, 0, sizeof(*input));
> +
> + input->device_domain= domain->device_domain;
> +
> + status = hv_do_hypercall(HVCALL_DELETE_DEVICE_DOMAIN, input, NULL);
> +
> + local_irq_restore(flags);
> +
> + if (!hv_result_success(status))
> + pr_err("%s: hypercall failed, status %lld\n", __func__, status);
Is it OK to deallocate the resources if the hypercall has failed?
Is there a specific error code (e.g. EBUSY) that we would need to wait on and retry?
> +
> + ida_free(&domain->hv_iommu->domain_ids, domain->device_domain.domain_id.id);
> +
> + iommu_put_dma_cookie(d);
> +
> + kfree(domain);
> +}
> +
> +static int hv_iommu_attach_dev(struct iommu_domain *d, struct device *dev)
> +{
> + struct hv_iommu_domain *domain = to_hv_iommu_domain(d);
> + u64 status;
> + unsigned long flags;
> + struct hv_input_attach_device_domain *input;
> + struct pci_dev *pdev;
> + struct hv_iommu_endpoint *vdev = dev_iommu_priv_get(dev);
> +
> + /* Only allow PCI devices for now */
> + if (!dev_is_pci(dev))
> + return -EINVAL;
> +
> + pdev = to_pci_dev(dev);
> +
> + dev_dbg(dev, "Attaching (%strusted) to %d\n", pdev->untrusted ? "un" : "",
> + domain->device_domain.domain_id.id);
> +
> + local_irq_save(flags);
> + input = *this_cpu_ptr(hyperv_pcpu_input_arg);
> + memset(input, 0, sizeof(*input));
> +
> + input->device_domain = domain->device_domain;
> + input->device_id = hv_build_pci_dev_id(pdev);
> +
> + status = hv_do_hypercall(HVCALL_ATTACH_DEVICE_DOMAIN, input, NULL);
> + local_irq_restore(flags);
> +
> + if (!hv_result_success(status))
> + pr_err("%s: hypercall failed, status %lld\n", __func__, status);
Does it make sense to set vdev->domain = NULL in the failure case?
> + else
> + vdev->domain = domain;
> +
> + return hv_status_to_errno(status);
> +}
> +
> +static void hv_iommu_detach_dev(struct iommu_domain *d, struct device *dev)
> +{
> + u64 status;
> + unsigned long flags;
> + struct hv_input_detach_device_domain *input;
> + struct pci_dev *pdev;
> + struct hv_iommu_domain *domain = to_hv_iommu_domain(d);
> + struct hv_iommu_endpoint *vdev = dev_iommu_priv_get(dev);
> +
> + /* See the attach function, only PCI devices for now */
> + if (!dev_is_pci(dev))
> + return;
> +
> + pdev = to_pci_dev(dev);
> +
> + dev_dbg(dev, "Detaching from %d\n", domain->device_domain.domain_id.id);
> +
> + local_irq_save(flags);
> + input = *this_cpu_ptr(hyperv_pcpu_input_arg);
> + memset(input, 0, sizeof(*input));
> +
> + input->partition_id = HV_PARTITION_ID_SELF;
> + input->device_id = hv_build_pci_dev_id(pdev);
> +
> + status = hv_do_hypercall(HVCALL_DETACH_DEVICE_DOMAIN, input, NULL);
> + local_irq_restore(flags);
> +
> + if (!hv_result_success(status))
> + pr_err("%s: hypercall failed, status %lld\n", __func__, status);
> +
> + vdev->domain = NULL;
> +}
> +
> +static int hv_iommu_add_mapping(struct hv_iommu_domain *domain, unsigned long iova,
> + phys_addr_t paddr, size_t size, u32 flags)
> +{
> + unsigned long irqflags;
> + struct hv_iommu_mapping *mapping;
> +
> + mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC);
> + if (!mapping)
> + return -ENOMEM;
> +
> + mapping->paddr = paddr;
> + mapping->iova.start = iova;
> + mapping->iova.last = iova + size - 1;
> + mapping->flags = flags;
> +
> + spin_lock_irqsave(&domain->mappings_lock, irqflags);
> + interval_tree_insert(&mapping->iova, &domain->mappings);
> + spin_unlock_irqrestore(&domain->mappings_lock, irqflags);
> +
> + return 0;
> +}
> +
> +static size_t hv_iommu_del_mappings(struct hv_iommu_domain *domain,
> + unsigned long iova, size_t size)
> +{
> + unsigned long flags;
> + size_t unmapped = 0;
> + unsigned long last = iova + size - 1;
> + struct hv_iommu_mapping *mapping = NULL;
> + struct interval_tree_node *node, *next;
> +
> + spin_lock_irqsave(&domain->mappings_lock, flags);
> + next = interval_tree_iter_first(&domain->mappings, iova, last);
> + while (next) {
> + node = next;
> + mapping = container_of(node, struct hv_iommu_mapping, iova);
> + next = interval_tree_iter_next(node, iova, last);
> +
> + /* Trying to split a mapping? Not supported for now. */
> + if (mapping->iova.start < iova)
> + break;
> +
> + unmapped += mapping->iova.last - mapping->iova.start + 1;
> +
> + interval_tree_remove(node, &domain->mappings);
> + kfree(mapping);
> + }
> + spin_unlock_irqrestore(&domain->mappings_lock, flags);
> +
> + return unmapped;
> +}
> +
> +static int hv_iommu_map(struct iommu_domain *d, unsigned long iova,
> + phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
> +{
> + u32 map_flags;
> + unsigned long flags, pfn, npages;
> + int ret, i;
> + struct hv_iommu_domain *domain = to_hv_iommu_domain(d);
> + struct hv_input_map_device_gpa_pages *input;
> + u64 status;
> +
> + /* Reject size that's not a whole page */
> + if (size & ~HV_HYP_PAGE_MASK)
> + return -EINVAL;
> +
> + map_flags = HV_MAP_GPA_READABLE; /* Always required */
> + map_flags |= prot & IOMMU_WRITE ? HV_MAP_GPA_WRITABLE : 0;
> +
> + ret = hv_iommu_add_mapping(domain, iova, paddr, size, flags);
> + if (ret)
> + return ret;
> +
> + npages = size >> HV_HYP_PAGE_SHIFT;
> +
> + local_irq_save(flags);
> + input = *this_cpu_ptr(hyperv_pcpu_input_arg);
> + memset(input, 0, sizeof(*input));
> +
> + input->device_domain = domain->device_domain;
> + input->map_flags = map_flags;
> + input->target_device_va_base = iova;
> +
> + pfn = paddr >> HV_HYP_PAGE_SHIFT;
> + for (i = 0; i < npages; i++) {
> + input->gpa_page_list[i] = pfn;
> + pfn += 1;
> + }
> +
> + status = hv_do_rep_hypercall(HVCALL_MAP_DEVICE_GPA_PAGES, npages, 0,
> + input, NULL);
> +
> + local_irq_restore(flags);
> +
> + if (!hv_result_success(status)) {
> + pr_err("%s: hypercall failed, status %lld\n", __func__, status);
> + hv_iommu_del_mappings(domain, iova, size);
> + }
> +
> + return hv_status_to_errno(status);
> +}
> +
> +static size_t hv_iommu_unmap(struct iommu_domain *d, unsigned long iova,
> + size_t size, struct iommu_iotlb_gather *gather)
> +{
> + size_t unmapped;
> + struct hv_iommu_domain *domain = to_hv_iommu_domain(d);
> + unsigned long flags, npages;
> + struct hv_input_unmap_device_gpa_pages *input;
> + u64 status;
> +
> + unmapped = hv_iommu_del_mappings(domain, iova, size);
> + if (unmapped < size)
> + return 0;
Is there a case where unmapped > 0 && unmapped < size?
> +
> + npages = size >> HV_HYP_PAGE_SHIFT;
> +
> + local_irq_save(flags);
> + input = *this_cpu_ptr(hyperv_pcpu_input_arg);
> + memset(input, 0, sizeof(*input));
> +
> + input->device_domain = domain->device_domain;
> + input->target_device_va_base = iova;
> +
> + /* Unmap `npages` pages starting from VA base */
> + status = hv_do_rep_hypercall(HVCALL_UNMAP_DEVICE_GPA_PAGES, npages,
> + 0, input, NULL);
> +
> + local_irq_restore(flags);
> +
> + if (!hv_result_success(status))
> + pr_err("%s: hypercall failed, status %lld\n", __func__, status);
> +
> + return hv_result_success(status) ? unmapped : 0;
> +}
> +
Regards,
~Praveen.
next prev parent reply other threads:[~2021-08-03 18:40 UTC|newest]
Thread overview: 35+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-07-09 11:43 [RFC v1 0/8] MSHV: add PV-IOMMU driver Wei Liu
2021-07-09 11:43 ` [RFC v1 1/8] x86/hyperv: export hv_build_pci_dev_id Wei Liu
2021-07-09 11:43 ` [RFC v1 2/8] asm-generic/hyperv: add device domain definitions Wei Liu
2021-07-09 11:43 ` [RFC v1 3/8] intel/vt-d: make DMAR table parsing code more flexible Wei Liu
2021-07-09 12:56 ` Robin Murphy
2021-07-09 13:42 ` Wei Liu
2021-07-09 11:43 ` [RFC v1 4/8] intel/vt-d: export intel_iommu_get_resv_regions Wei Liu
2021-07-09 14:17 ` Lu Baolu
2021-07-09 14:21 ` Wei Liu
2021-07-09 11:43 ` [RFC v1 5/8] mshv: add paravirtualized IOMMU support Wei Liu
2021-08-03 18:40 ` Praveen Kumar [this message]
2021-08-03 21:47 ` Wei Liu
2021-08-04 6:43 ` Praveen Kumar
2021-08-10 10:46 ` Wei Liu
2021-07-09 11:43 ` [RFC v1 6/8] mshv: command line option to skip devices in PV-IOMMU Wei Liu
2021-07-09 12:46 ` Robin Murphy
2021-07-09 13:34 ` Wei Liu
2021-08-03 18:50 ` Praveen Kumar
2021-08-03 21:56 ` Wei Liu
2021-08-04 7:03 ` Praveen Kumar
2021-08-10 10:04 ` Wei Liu
2021-07-09 11:43 ` [RFC v1 7/8] mshv: implement in-kernel device framework Wei Liu
2021-07-09 13:02 ` Matthew Wilcox
2021-07-09 13:50 ` Wei Liu
2021-07-09 15:32 ` Matthew Wilcox
2021-07-09 16:27 ` Wei Liu
2021-07-09 16:38 ` Matthew Wilcox
2021-07-09 19:14 ` Wei Liu
2021-07-09 19:48 ` Matthew Wilcox
2021-07-09 20:11 ` Wei Liu
2021-08-03 19:12 ` Praveen Kumar
2021-08-03 22:04 ` Wei Liu
2021-07-09 11:43 ` [RFC v1 8/8] mshv: add vfio bridge device Wei Liu
2021-08-03 19:27 ` Praveen Kumar
2021-08-10 10:52 ` Wei Liu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=77670985-2a1b-7bbd-2ede-4b7810c3e220@linux.microsoft.com \
--to=kumarpraveen@linux.microsoft.com \
--cc=decui@microsoft.com \
--cc=haiyangz@microsoft.com \
--cc=iommu@lists.linux-foundation.org \
--cc=joro@8bytes.org \
--cc=kys@microsoft.com \
--cc=linux-hyperv@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=mikelley@microsoft.com \
--cc=nunodasneves@linux.microsoft.com \
--cc=pasha.tatashin@soleen.com \
--cc=sthemmin@microsoft.com \
--cc=sunilmut@microsoft.com \
--cc=viremana@linux.microsoft.com \
--cc=virtualization@lists.linux-foundation.org \
--cc=wei.liu@kernel.org \
--cc=will@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).