From: Jean-Philippe Brucker <jean-philippe@linaro.org>
To: maz@kernel.org, catalin.marinas@arm.com, will@kernel.org,
	joro@8bytes.org
Cc: robin.murphy@arm.com, james.morse@arm.com,
	suzuki.poulose@arm.com, oliver.upton@linux.dev,
	yuzenghui@huawei.com, smostafa@google.com, dbrazdil@google.com,
	ryan.roberts@arm.com, linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.linux.dev, iommu@lists.linux.dev,
	Jean-Philippe Brucker <jean-philippe@linaro.org>
Subject: [RFC PATCH 40/45] iommu/arm-smmu-v3-kvm: Add IOMMU ops
Date: Wed,  1 Feb 2023 12:53:24 +0000
Message-ID: <20230201125328.2186498-41-jean-philippe@linaro.org>
In-Reply-To: <20230201125328.2186498-1-jean-philippe@linaro.org>

Forward alloc_domain(), attach_dev(), map_pages() and the other IOMMU
ops to the hypervisor.

Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
---
 .../iommu/arm/arm-smmu-v3/arm-smmu-v3-kvm.c   | 330 +++++++++++++++++-
 1 file changed, 328 insertions(+), 2 deletions(-)
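
Not part of the patch: a minimal sketch, for illustration only, of how a
host device driver ends up exercising these ops once the SMMU is managed
by the hypervisor. The helper and names below are hypothetical; the point
is that, with a DMA domain, an ordinary DMA API call resolves to
kvm_arm_smmu_map_pages(), which forwards the request to the hypervisor via
the __pkvm_host_iommu_map_pages hypercall instead of the host writing the
page tables itself.

  #include <linux/device.h>
  #include <linux/dma-mapping.h>

  /*
   * Hypothetical consumer: map a kernel buffer for device DMA.  With a
   * DMA domain attached through this driver, the mapping is installed
   * by the hypervisor on the host's behalf.
   */
  static int example_map_for_dma(struct device *dev, void *buf, size_t len,
                                 dma_addr_t *iova)
  {
          *iova = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
          if (dma_mapping_error(dev, *iova))
                  return -ENOMEM;
          return 0;
  }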

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-kvm.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-kvm.c
index 55489d56fb5b..930d78f6e29f 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-kvm.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-kvm.c
@@ -22,10 +22,28 @@ struct host_arm_smmu_device {
 #define smmu_to_host(_smmu) \
 	container_of(_smmu, struct host_arm_smmu_device, smmu);
 
+struct kvm_arm_smmu_master {
+	struct arm_smmu_device		*smmu;
+	struct device			*dev;
+	struct kvm_arm_smmu_domain	*domain;
+};
+
+struct kvm_arm_smmu_domain {
+	struct iommu_domain		domain;
+	struct arm_smmu_device		*smmu;
+	struct mutex			init_mutex;
+	unsigned long			pgd;
+	pkvm_handle_t			id;
+};
+
+#define to_kvm_smmu_domain(_domain) \
+	container_of(_domain, struct kvm_arm_smmu_domain, domain)
+
 static size_t				kvm_arm_smmu_cur;
 static size_t				kvm_arm_smmu_count;
 static struct hyp_arm_smmu_v3_device	*kvm_arm_smmu_array;
 static struct kvm_hyp_iommu_memcache	*kvm_arm_smmu_memcache;
+static DEFINE_IDA(kvm_arm_smmu_domain_ida);
 
 static DEFINE_PER_CPU(local_lock_t, memcache_lock) =
 				INIT_LOCAL_LOCK(memcache_lock);
@@ -57,7 +75,6 @@ static void *kvm_arm_smmu_host_va(phys_addr_t pa)
 	return __va(pa);
 }
 
-__maybe_unused
 static int kvm_arm_smmu_topup_memcache(struct arm_smmu_device *smmu)
 {
 	struct kvm_hyp_memcache *mc;
@@ -74,7 +91,6 @@ static int kvm_arm_smmu_topup_memcache(struct arm_smmu_device *smmu)
 				     kvm_arm_smmu_host_pa, smmu);
 }
 
-__maybe_unused
 static void kvm_arm_smmu_reclaim_memcache(void)
 {
 	struct kvm_hyp_memcache *mc;
@@ -101,6 +117,299 @@ static void kvm_arm_smmu_reclaim_memcache(void)
 	__ret;							\
 })
 
+static struct platform_driver kvm_arm_smmu_driver;
+
+static struct arm_smmu_device *
+kvm_arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
+{
+	struct device *dev;
+
+	dev = driver_find_device_by_fwnode(&kvm_arm_smmu_driver.driver, fwnode);
+	put_device(dev);
+	return dev ? dev_get_drvdata(dev) : NULL;
+}
+
+static struct iommu_ops kvm_arm_smmu_ops;
+
+static struct iommu_device *kvm_arm_smmu_probe_device(struct device *dev)
+{
+	struct arm_smmu_device *smmu;
+	struct kvm_arm_smmu_master *master;
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+	if (!fwspec || fwspec->ops != &kvm_arm_smmu_ops)
+		return ERR_PTR(-ENODEV);
+
+	if (WARN_ON_ONCE(dev_iommu_priv_get(dev)))
+		return ERR_PTR(-EBUSY);
+
+	smmu = kvm_arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
+	if (!smmu)
+		return ERR_PTR(-ENODEV);
+
+	master = kzalloc(sizeof(*master), GFP_KERNEL);
+	if (!master)
+		return ERR_PTR(-ENOMEM);
+
+	master->dev = dev;
+	master->smmu = smmu;
+	dev_iommu_priv_set(dev, master);
+
+	return &smmu->iommu;
+}
+
+static void kvm_arm_smmu_release_device(struct device *dev)
+{
+	struct kvm_arm_smmu_master *master = dev_iommu_priv_get(dev);
+
+	kfree(master);
+	iommu_fwspec_free(dev);
+}
+
+static struct iommu_domain *kvm_arm_smmu_domain_alloc(unsigned type)
+{
+	struct kvm_arm_smmu_domain *kvm_smmu_domain;
+
+	/*
+	 * We don't support
+	 * - IOMMU_DOMAIN_IDENTITY because we rely on the host telling the
+	 *   hypervisor which pages are used for DMA.
+	 * - IOMMU_DOMAIN_DMA_FQ because lazy unmap would clash with memory
+	 *   donation to guests.
+	 */
+	if (type != IOMMU_DOMAIN_DMA &&
+	    type != IOMMU_DOMAIN_UNMANAGED)
+		return NULL;
+
+	kvm_smmu_domain = kzalloc(sizeof(*kvm_smmu_domain), GFP_KERNEL);
+	if (!kvm_smmu_domain)
+		return NULL;
+
+	mutex_init(&kvm_smmu_domain->init_mutex);
+
+	return &kvm_smmu_domain->domain;
+}
+
+static int kvm_arm_smmu_domain_finalize(struct kvm_arm_smmu_domain *kvm_smmu_domain,
+					struct kvm_arm_smmu_master *master)
+{
+	int ret = 0;
+	struct page *p;
+	unsigned long pgd;
+	struct arm_smmu_device *smmu = master->smmu;
+	struct host_arm_smmu_device *host_smmu = smmu_to_host(smmu);
+
+	if (kvm_smmu_domain->smmu) {
+		if (kvm_smmu_domain->smmu != smmu)
+			return -EINVAL;
+		return 0;
+	}
+
+	ret = ida_alloc_range(&kvm_arm_smmu_domain_ida, 0, 1 << smmu->vmid_bits,
+			      GFP_KERNEL);
+	if (ret < 0)
+		return ret;
+	kvm_smmu_domain->id = ret;
+
+	/*
+	 * PGD allocation does not use the memcache because it may be of higher
+	 * order when concatenated.
+	 */
+	p = alloc_pages_node(dev_to_node(smmu->dev), GFP_KERNEL | __GFP_ZERO,
+			     host_smmu->pgd_order);
+	if (!p)
+		return -ENOMEM;
+
+	pgd = (unsigned long)page_to_virt(p);
+
+	local_lock_irq(&memcache_lock);
+	ret = kvm_call_hyp_nvhe_mc(smmu, __pkvm_host_iommu_alloc_domain,
+				   host_smmu->id, kvm_smmu_domain->id, pgd);
+	local_unlock_irq(&memcache_lock);
+	if (ret)
+		goto err_free;
+
+	kvm_smmu_domain->domain.pgsize_bitmap = smmu->pgsize_bitmap;
+	kvm_smmu_domain->domain.geometry.aperture_end = (1UL << smmu->ias) - 1;
+	kvm_smmu_domain->domain.geometry.force_aperture = true;
+	kvm_smmu_domain->smmu = smmu;
+	kvm_smmu_domain->pgd = pgd;
+
+	return 0;
+
+err_free:
+	free_pages(pgd, host_smmu->pgd_order);
+	ida_free(&kvm_arm_smmu_domain_ida, kvm_smmu_domain->id);
+	return ret;
+}
+
+static void kvm_arm_smmu_domain_free(struct iommu_domain *domain)
+{
+	int ret;
+	struct kvm_arm_smmu_domain *kvm_smmu_domain = to_kvm_smmu_domain(domain);
+	struct arm_smmu_device *smmu = kvm_smmu_domain->smmu;
+
+	if (smmu) {
+		struct host_arm_smmu_device *host_smmu = smmu_to_host(smmu);
+
+		ret = kvm_call_hyp_nvhe(__pkvm_host_iommu_free_domain,
+					host_smmu->id, kvm_smmu_domain->id);
+		/*
+		 * On failure, leak the pgd because it probably hasn't been
+		 * reclaimed by the host.
+		 */
+		if (!WARN_ON(ret))
+			free_pages(kvm_smmu_domain->pgd, host_smmu->pgd_order);
+		ida_free(&kvm_arm_smmu_domain_ida, kvm_smmu_domain->id);
+	}
+	kfree(kvm_smmu_domain);
+}
+
+static int kvm_arm_smmu_detach_dev(struct host_arm_smmu_device *host_smmu,
+				   struct kvm_arm_smmu_master *master)
+{
+	int i, ret;
+	struct arm_smmu_device *smmu = &host_smmu->smmu;
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
+
+	if (!master->domain)
+		return 0;
+
+	for (i = 0; i < fwspec->num_ids; i++) {
+		int sid = fwspec->ids[i];
+
+		ret = kvm_call_hyp_nvhe(__pkvm_host_iommu_detach_dev,
+					host_smmu->id, master->domain->id, sid);
+		if (ret) {
+			dev_err(smmu->dev, "cannot detach device %s (0x%x): %d\n",
+				dev_name(master->dev), sid, ret);
+			break;
+		}
+	}
+
+	master->domain = NULL;
+
+	return ret;
+}
+
+static int kvm_arm_smmu_attach_dev(struct iommu_domain *domain,
+				   struct device *dev)
+{
+	int i, ret;
+	struct arm_smmu_device *smmu;
+	struct host_arm_smmu_device *host_smmu;
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+	struct kvm_arm_smmu_master *master = dev_iommu_priv_get(dev);
+	struct kvm_arm_smmu_domain *kvm_smmu_domain = to_kvm_smmu_domain(domain);
+
+	if (!master)
+		return -ENODEV;
+
+	smmu = master->smmu;
+	host_smmu = smmu_to_host(smmu);
+
+	ret = kvm_arm_smmu_detach_dev(host_smmu, master);
+	if (ret)
+		return ret;
+
+	mutex_lock(&kvm_smmu_domain->init_mutex);
+	ret = kvm_arm_smmu_domain_finalize(kvm_smmu_domain, master);
+	mutex_unlock(&kvm_smmu_domain->init_mutex);
+	if (ret)
+		return ret;
+
+	local_lock_irq(&memcache_lock);
+	for (i = 0; i < fwspec->num_ids; i++) {
+		int sid = fwspec->ids[i];
+
+		ret = kvm_call_hyp_nvhe_mc(smmu, __pkvm_host_iommu_attach_dev,
+					   host_smmu->id, kvm_smmu_domain->id,
+					   sid);
+		if (ret) {
+			dev_err(smmu->dev, "cannot attach device %s (0x%x): %d\n",
+				dev_name(dev), sid, ret);
+			goto out_unlock;
+		}
+	}
+	master->domain = kvm_smmu_domain;
+
+out_unlock:
+	if (ret)
+		kvm_arm_smmu_detach_dev(host_smmu, master);
+	local_unlock_irq(&memcache_lock);
+	return ret;
+}
+
+static int kvm_arm_smmu_map_pages(struct iommu_domain *domain,
+				  unsigned long iova, phys_addr_t paddr,
+				  size_t pgsize, size_t pgcount, int prot,
+				  gfp_t gfp, size_t *mapped)
+{
+	int ret;
+	unsigned long irqflags;
+	struct kvm_arm_smmu_domain *kvm_smmu_domain = to_kvm_smmu_domain(domain);
+	struct arm_smmu_device *smmu = kvm_smmu_domain->smmu;
+	struct host_arm_smmu_device *host_smmu = smmu_to_host(smmu);
+
+	local_lock_irqsave(&memcache_lock, irqflags);
+	ret = kvm_call_hyp_nvhe_mc(smmu, __pkvm_host_iommu_map_pages,
+				   host_smmu->id, kvm_smmu_domain->id, iova,
+				   paddr, pgsize, pgcount, prot);
+	local_unlock_irqrestore(&memcache_lock, irqflags);
+	if (ret)
+		return ret;
+
+	*mapped = pgsize * pgcount;
+	return 0;
+}
+
+static size_t kvm_arm_smmu_unmap_pages(struct iommu_domain *domain,
+				       unsigned long iova, size_t pgsize,
+				       size_t pgcount,
+				       struct iommu_iotlb_gather *iotlb_gather)
+{
+	int ret;
+	unsigned long irqflags;
+	struct kvm_arm_smmu_domain *kvm_smmu_domain = to_kvm_smmu_domain(domain);
+	struct arm_smmu_device *smmu = kvm_smmu_domain->smmu;
+	struct host_arm_smmu_device *host_smmu = smmu_to_host(smmu);
+
+	local_lock_irqsave(&memcache_lock, irqflags);
+	ret = kvm_call_hyp_nvhe_mc(smmu, __pkvm_host_iommu_unmap_pages,
+				   host_smmu->id, kvm_smmu_domain->id, iova,
+				   pgsize, pgcount);
+	local_unlock_irqrestore(&memcache_lock, irqflags);
+
+	return ret ? 0 : pgsize * pgcount;
+}
+
+static phys_addr_t kvm_arm_smmu_iova_to_phys(struct iommu_domain *domain,
+					     dma_addr_t iova)
+{
+	struct kvm_arm_smmu_domain *kvm_smmu_domain = to_kvm_smmu_domain(domain);
+	struct host_arm_smmu_device *host_smmu = smmu_to_host(kvm_smmu_domain->smmu);
+
+	return kvm_call_hyp_nvhe(__pkvm_host_iommu_iova_to_phys, host_smmu->id,
+				 kvm_smmu_domain->id, iova);
+}
+
+static struct iommu_ops kvm_arm_smmu_ops = {
+	.capable		= arm_smmu_capable,
+	.device_group		= arm_smmu_device_group,
+	.of_xlate		= arm_smmu_of_xlate,
+	.probe_device		= kvm_arm_smmu_probe_device,
+	.release_device		= kvm_arm_smmu_release_device,
+	.domain_alloc		= kvm_arm_smmu_domain_alloc,
+	.owner			= THIS_MODULE,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev	= kvm_arm_smmu_attach_dev,
+		.free		= kvm_arm_smmu_domain_free,
+		.map_pages	= kvm_arm_smmu_map_pages,
+		.unmap_pages	= kvm_arm_smmu_unmap_pages,
+		.iova_to_phys	= kvm_arm_smmu_iova_to_phys,
+	}
+};
+
 static bool kvm_arm_smmu_validate_features(struct arm_smmu_device *smmu)
 {
 	unsigned long oas;
@@ -186,6 +495,12 @@ static int kvm_arm_smmu_device_reset(struct host_arm_smmu_device *host_smmu)
 	return 0;
 }
 
+static void *kvm_arm_smmu_alloc_domains(struct arm_smmu_device *smmu)
+{
+	return (void *)devm_get_free_pages(smmu->dev, GFP_KERNEL | __GFP_ZERO,
+					   get_order(KVM_IOMMU_DOMAINS_ROOT_SIZE));
+}
+
 static int kvm_arm_smmu_probe(struct platform_device *pdev)
 {
 	int ret;
@@ -274,6 +589,16 @@ static int kvm_arm_smmu_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
+	hyp_smmu->iommu.domains = kvm_arm_smmu_alloc_domains(smmu);
+	if (!hyp_smmu->iommu.domains)
+		return -ENOMEM;
+
+	hyp_smmu->iommu.nr_domains = 1 << smmu->vmid_bits;
+
+	ret = arm_smmu_register_iommu(smmu, &kvm_arm_smmu_ops, ioaddr);
+	if (ret)
+		return ret;
+
 	platform_set_drvdata(pdev, host_smmu);
 
 	/* Hypervisor parameters */
@@ -296,6 +621,7 @@ static int kvm_arm_smmu_remove(struct platform_device *pdev)
 	 * There was an error during hypervisor setup. The hyp driver may
 	 * have already enabled the device, so disable it.
 	 */
+	arm_smmu_unregister_iommu(smmu);
 	arm_smmu_device_disable(smmu);
 	arm_smmu_update_gbpa(smmu, host_smmu->boot_gbpa, GBPA_ABORT);
 	return 0;
-- 
2.39.0

