From: Stefano Stabellini <sstabellini@kernel.org>
To: Rahul Singh <rahul.singh@arm.com>
Cc: xen-devel@lists.xenproject.org, bertrand.marquis@arm.com,
	 Stefano Stabellini <sstabellini@kernel.org>,
	Julien Grall <julien@xen.org>,
	 Volodymyr Babchuk <Volodymyr_Babchuk@epam.com>
Subject: Re: [PATCH v2 7/8] xen/arm: Remove Linux specific code that is not usable in XEN
Date: Tue, 1 Dec 2020 17:48:00 -0800 (PST)
Message-ID: <alpine.DEB.2.21.2012011724350.1100@sstabellini-ThinkPad-T480s>
In-Reply-To: <1d9da8ed4845aeb9e86a5ce6750b811bd7e2020e.1606406359.git.rahul.singh@arm.com>

On Thu, 26 Nov 2020, Rahul Singh wrote:
> struct io_pgtable_ops, struct io_pgtable_cfg, struct iommu_flush_ops,
> and struct iommu_ops related code is Linux-specific.
> 
> Remove the code related to the above structs, as it is dead code in XEN.

There are still instances of struct io_pgtable_cfg after applying this
patch in the following functions:
- arm_smmu_domain_finalise_s2
- arm_smmu_domain_finalise
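
For instance, piecing the context lines of the hunk below back together,
arm_smmu_domain_finalise would still read roughly as follows after this
patch (a sketch reconstructed from the quoted hunks, not the resulting
file; the second parameter is assumed from the call site):

    static int arm_smmu_domain_finalise(struct iommu_domain *domain,
                                        struct arm_smmu_master *master)
    {
        int ret;
        unsigned long ias, oas;
        /* The prototype still names the Linux-only struct io_pgtable_cfg */
        int (*finalise_stage_fn)(struct arm_smmu_domain *,
                                 struct arm_smmu_master *,
                                 struct io_pgtable_cfg *);
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_device *smmu = smmu_domain->smmu;

        /* Restrict the stage to what we can actually support */
        smmu_domain->stage = ARM_SMMU_DOMAIN_S2;

        switch (smmu_domain->stage) {
        case ARM_SMMU_DOMAIN_S2:
            /* ias/oas are set here but no longer consumed in the quoted hunks */
            ias = smmu->ias;
            oas = smmu->oas;
            finalise_stage_fn = arm_smmu_domain_finalise_s2;
            break;
        default:
            return -EINVAL;
        }

        /*
         * pgtbl_cfg is still passed here although its declaration was
         * removed, and arm_smmu_domain_finalise_s2 still takes a
         * struct io_pgtable_cfg * argument.
         */
        ret = finalise_stage_fn(smmu_domain, master, &pgtbl_cfg);
        if (ret < 0) {
            return ret;
        }

        return 0;
    }

In particular, the pgtbl_cfg declaration is gone while the call site
still takes its address, which does not look like it would build as
quoted (unless pgtbl_cfg is still declared somewhere outside these
hunks).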



> Signed-off-by: Rahul Singh <rahul.singh@arm.com>
> ---
>  xen/drivers/passthrough/arm/smmu-v3.c | 457 --------------------------
>  1 file changed, 457 deletions(-)
> 
> diff --git a/xen/drivers/passthrough/arm/smmu-v3.c b/xen/drivers/passthrough/arm/smmu-v3.c
> index 40e3890a58..55d1cba194 100644
> --- a/xen/drivers/passthrough/arm/smmu-v3.c
> +++ b/xen/drivers/passthrough/arm/smmu-v3.c
> @@ -599,7 +593,6 @@ struct arm_smmu_domain {
>  	struct arm_smmu_device		*smmu;
>  	struct mutex			init_mutex; /* Protects smmu pointer */
>  
> -	struct io_pgtable_ops		*pgtbl_ops;
>  	bool				non_strict;
>  
>  	enum arm_smmu_domain_stage	stage;
> @@ -1297,74 +1290,6 @@ static void arm_smmu_tlb_inv_context(void *cookie)
>  	arm_smmu_cmdq_issue_sync(smmu);
>  }
>  
> -static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
> -					  size_t granule, bool leaf, void *cookie)
> -{
> -	struct arm_smmu_domain *smmu_domain = cookie;
> -	struct arm_smmu_device *smmu = smmu_domain->smmu;
> -	struct arm_smmu_cmdq_ent cmd = {
> -		.tlbi = {
> -			.leaf	= leaf,
> -			.addr	= iova,
> -		},
> -	};
> -
> -	cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
> -	cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
> -
> -	do {
> -		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
> -		cmd.tlbi.addr += granule;
> -	} while (size -= granule);
> -}
> -
> -static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather,
> -					 unsigned long iova, size_t granule,
> -					 void *cookie)
> -{
> -	arm_smmu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
> -}
> -
> -static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
> -				  size_t granule, void *cookie)
> -{
> -	struct arm_smmu_domain *smmu_domain = cookie;
> -	struct arm_smmu_device *smmu = smmu_domain->smmu;
> -
> -	arm_smmu_tlb_inv_range_nosync(iova, size, granule, false, cookie);
> -	arm_smmu_cmdq_issue_sync(smmu);
> -}
> -
> -static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
> -				  size_t granule, void *cookie)
> -{
> -	struct arm_smmu_domain *smmu_domain = cookie;
> -	struct arm_smmu_device *smmu = smmu_domain->smmu;
> -
> -	arm_smmu_tlb_inv_range_nosync(iova, size, granule, true, cookie);
> -	arm_smmu_cmdq_issue_sync(smmu);
> -}
> -
> -static const struct iommu_flush_ops arm_smmu_flush_ops = {
> -	.tlb_flush_all	= arm_smmu_tlb_inv_context,
> -	.tlb_flush_walk = arm_smmu_tlb_inv_walk,
> -	.tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
> -	.tlb_add_page	= arm_smmu_tlb_inv_page_nosync,
> -};
> -
> -/* IOMMU API */
> -static bool arm_smmu_capable(enum iommu_cap cap)
> -{
> -	switch (cap) {
> -	case IOMMU_CAP_CACHE_COHERENCY:
> -		return true;
> -	case IOMMU_CAP_NOEXEC:
> -		return true;
> -	default:
> -		return false;
> -	}
> -}
> -
>  static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
>  {
>  	struct arm_smmu_domain *smmu_domain;
> @@ -1421,7 +1346,6 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
>  	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
>  
>  	iommu_put_dma_cookie(domain);
> -	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
>  
>  	if (cfg->vmid)
>  		arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
> @@ -1429,7 +1353,6 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
>  	kfree(smmu_domain);
>  }
>  
> -
>  static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
>  				       struct arm_smmu_master *master,
>  				       struct io_pgtable_cfg *pgtbl_cfg)
> @@ -1437,7 +1360,6 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
>  	int vmid;
>  	struct arm_smmu_device *smmu = smmu_domain->smmu;
>  	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
> -	typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr;
>  
>  	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
>  	if (vmid < 0)
> @@ -1461,20 +1383,12 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain,
>  {
>  	int ret;
>  	unsigned long ias, oas;
> -	enum io_pgtable_fmt fmt;
> -	struct io_pgtable_cfg pgtbl_cfg;
> -	struct io_pgtable_ops *pgtbl_ops;
>  	int (*finalise_stage_fn)(struct arm_smmu_domain *,
>  				 struct arm_smmu_master *,
>  				 struct io_pgtable_cfg *);
>  	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
>  	struct arm_smmu_device *smmu = smmu_domain->smmu;
>  
> -	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
> -		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
> -		return 0;
> -	}
> -
>  	/* Restrict the stage to what we can actually support */
>  	smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
>  
> @@ -1483,40 +1397,17 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain,
>  	case ARM_SMMU_DOMAIN_S2:
>  		ias = smmu->ias;
>  		oas = smmu->oas;
> -		fmt = ARM_64_LPAE_S2;
>  		finalise_stage_fn = arm_smmu_domain_finalise_s2;
>  		break;
>  	default:
>  		return -EINVAL;
>  	}
>  
> -	pgtbl_cfg = (struct io_pgtable_cfg) {
> -		.pgsize_bitmap	= smmu->pgsize_bitmap,
> -		.ias		= ias,
> -		.oas		= oas,
> -		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENCY,
> -		.tlb		= &arm_smmu_flush_ops,
> -		.iommu_dev	= smmu->dev,
> -	};
> -
> -	if (smmu_domain->non_strict)
> -		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
> -
> -	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
> -	if (!pgtbl_ops)
> -		return -ENOMEM;
> -
> -	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
> -	domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
> -	domain->geometry.force_aperture = true;
> -
>  	ret = finalise_stage_fn(smmu_domain, master, &pgtbl_cfg);
>  	if (ret < 0) {
> -		free_io_pgtable_ops(pgtbl_ops);
>  		return ret;
>  	}
>  
> -	smmu_domain->pgtbl_ops = pgtbl_ops;
>  	return 0;
>  }
>  
> @@ -1626,71 +1517,6 @@ out_unlock:
>  	return ret;
>  }
>  
> -static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
> -			phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
> -{
> -	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
> -
> -	if (!ops)
> -		return -ENODEV;
> -
> -	return ops->map(ops, iova, paddr, size, prot, gfp);
> -}
> -
> -static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
> -			     size_t size, struct iommu_iotlb_gather *gather)
> -{
> -	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
> -	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
> -
> -	if (!ops)
> -		return 0;
> -
> -	return ops->unmap(ops, iova, size, gather);
> -}
> -
> -static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
> -{
> -	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
> -
> -	if (smmu_domain->smmu)
> -		arm_smmu_tlb_inv_context(smmu_domain);
> -}
> -
> -static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
> -				struct iommu_iotlb_gather *gather)
> -{
> -	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
> -
> -	if (smmu)
> -		arm_smmu_cmdq_issue_sync(smmu);
> -}
> -
> -static phys_addr_t
> -arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
> -{
> -	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
> -
> -	if (domain->type == IOMMU_DOMAIN_IDENTITY)
> -		return iova;
> -
> -	if (!ops)
> -		return 0;
> -
> -	return ops->iova_to_phys(ops, iova);
> -}
> -
> -static struct platform_driver arm_smmu_driver;
> -
> -static
> -struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
> -{
> -	struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
> -							  fwnode);
> -	put_device(dev);
> -	return dev ? dev_get_drvdata(dev) : NULL;
> -}
> -
>  static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
>  {
>  	unsigned long limit = smmu->strtab_cfg.num_l1_ents;
> @@ -1701,206 +1527,6 @@ static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
>  	return sid < limit;
>  }
>  
> -static struct iommu_ops arm_smmu_ops;
> -
> -static struct iommu_device *arm_smmu_probe_device(struct device *dev)
> -{
> -	int i, ret;
> -	struct arm_smmu_device *smmu;
> -	struct arm_smmu_master *master;
> -	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
> -
> -	if (!fwspec || fwspec->ops != &arm_smmu_ops)
> -		return ERR_PTR(-ENODEV);
> -
> -	if (WARN_ON_ONCE(dev_iommu_priv_get(dev)))
> -		return ERR_PTR(-EBUSY);
> -
> -	smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
> -	if (!smmu)
> -		return ERR_PTR(-ENODEV);
> -
> -	master = kzalloc(sizeof(*master), GFP_KERNEL);
> -	if (!master)
> -		return ERR_PTR(-ENOMEM);
> -
> -	master->dev = dev;
> -	master->smmu = smmu;
> -	master->sids = fwspec->ids;
> -	master->num_sids = fwspec->num_ids;
> -	dev_iommu_priv_set(dev, master);
> -
> -	/* Check the SIDs are in range of the SMMU and our stream table */
> -	for (i = 0; i < master->num_sids; i++) {
> -		u32 sid = master->sids[i];
> -
> -		if (!arm_smmu_sid_in_range(smmu, sid)) {
> -			ret = -ERANGE;
> -			goto err_free_master;
> -		}
> -
> -		/* Ensure l2 strtab is initialised */
> -		if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
> -			ret = arm_smmu_init_l2_strtab(smmu, sid);
> -			if (ret)
> -				goto err_free_master;
> -		}
> -	}
> -
> -	return &smmu->iommu;
> -
> -err_free_master:
> -	kfree(master);
> -	dev_iommu_priv_set(dev, NULL);
> -	return ERR_PTR(ret);
> -}
> -
> -static void arm_smmu_release_device(struct device *dev)
> -{
> -	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
> -	struct arm_smmu_master *master;
> -
> -	if (!fwspec || fwspec->ops != &arm_smmu_ops)
> -		return;
> -
> -	master = dev_iommu_priv_get(dev);
> -	arm_smmu_detach_dev(master);
> -	kfree(master);
> -	iommu_fwspec_free(dev);
> -}
> -
> -static struct iommu_group *arm_smmu_device_group(struct device *dev)
> -{
> -	struct iommu_group *group;
> -
> -	/*
> -	 * We don't support devices sharing stream IDs other than PCI RID
> -	 * aliases, since the necessary ID-to-device lookup becomes rather
> -	 * impractical given a potential sparse 32-bit stream ID space.
> -	 */
> -	if (dev_is_pci(dev))
> -		group = pci_device_group(dev);
> -	else
> -		group = generic_device_group(dev);
> -
> -	return group;
> -}
> -
> -static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
> -				    enum iommu_attr attr, void *data)
> -{
> -	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
> -
> -	switch (domain->type) {
> -	case IOMMU_DOMAIN_UNMANAGED:
> -		switch (attr) {
> -		case DOMAIN_ATTR_NESTING:
> -			*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
> -			return 0;
> -		default:
> -			return -ENODEV;
> -		}
> -		break;
> -	case IOMMU_DOMAIN_DMA:
> -		switch (attr) {
> -		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
> -			*(int *)data = smmu_domain->non_strict;
> -			return 0;
> -		default:
> -			return -ENODEV;
> -		}
> -		break;
> -	default:
> -		return -EINVAL;
> -	}
> -}
> -
> -static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
> -				    enum iommu_attr attr, void *data)
> -{
> -	int ret = 0;
> -	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
> -
> -	mutex_lock(&smmu_domain->init_mutex);
> -
> -	switch (domain->type) {
> -	case IOMMU_DOMAIN_UNMANAGED:
> -		switch (attr) {
> -		case DOMAIN_ATTR_NESTING:
> -			if (smmu_domain->smmu) {
> -				ret = -EPERM;
> -				goto out_unlock;
> -			}
> -
> -			if (*(int *)data)
> -				smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
> -			else
> -				smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
> -			break;
> -		default:
> -			ret = -ENODEV;
> -		}
> -		break;
> -	case IOMMU_DOMAIN_DMA:
> -		switch(attr) {
> -		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
> -			smmu_domain->non_strict = *(int *)data;
> -			break;
> -		default:
> -			ret = -ENODEV;
> -		}
> -		break;
> -	default:
> -		ret = -EINVAL;
> -	}
> -
> -out_unlock:
> -	mutex_unlock(&smmu_domain->init_mutex);
> -	return ret;
> -}
> -
> -static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
> -{
> -	return iommu_fwspec_add_ids(dev, args->args, 1);
> -}
> -
> -static void arm_smmu_get_resv_regions(struct device *dev,
> -				      struct list_head *head)
> -{
> -	struct iommu_resv_region *region;
> -	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
> -
> -	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
> -					 prot, IOMMU_RESV_SW_MSI);
> -	if (!region)
> -		return;
> -
> -	list_add_tail(&region->list, head);
> -
> -	iommu_dma_get_resv_regions(dev, head);
> -}

Arguably this could have been removed previously as part of the MSI
patch, but that's OK either way.


> -static struct iommu_ops arm_smmu_ops = {
> -	.capable		= arm_smmu_capable,
> -	.domain_alloc		= arm_smmu_domain_alloc,
> -	.domain_free		= arm_smmu_domain_free,
> -	.attach_dev		= arm_smmu_attach_dev,
> -	.map			= arm_smmu_map,
> -	.unmap			= arm_smmu_unmap,
> -	.flush_iotlb_all	= arm_smmu_flush_iotlb_all,
> -	.iotlb_sync		= arm_smmu_iotlb_sync,
> -	.iova_to_phys		= arm_smmu_iova_to_phys,
> -	.probe_device		= arm_smmu_probe_device,
> -	.release_device		= arm_smmu_release_device,
> -	.device_group		= arm_smmu_device_group,
> -	.domain_get_attr	= arm_smmu_domain_get_attr,
> -	.domain_set_attr	= arm_smmu_domain_set_attr,
> -	.of_xlate		= arm_smmu_of_xlate,
> -	.get_resv_regions	= arm_smmu_get_resv_regions,
> -	.put_resv_regions	= generic_iommu_put_resv_regions,
> -	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
> -};
> -
>  /* Probing and initialisation functions */
>  static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
>  				   struct arm_smmu_queue *q,
> @@ -2515,21 +2139,10 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
>  	default:
>  		dev_info(smmu->dev,
>  			"unknown output address size. Truncating to 48-bit\n");
> -		fallthrough;
>  	case IDR5_OAS_48_BIT:
>  		smmu->oas = 48;
>  	}
>  
> -	if (arm_smmu_ops.pgsize_bitmap == -1UL)
> -		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
> -	else
> -		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
> -
> -	/* Set the DMA mask for our table walker */
> -	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
> -		dev_warn(smmu->dev,
> -			 "failed to set DMA mask for table walker\n");
> -
>  	smmu->ias = max(smmu->ias, smmu->oas);
>  
>  	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
> @@ -2595,9 +2208,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
>  
>  	parse_driver_options(smmu);
>  
> -	if (of_dma_is_coherent(dev->of_node))
> -		smmu->features |= ARM_SMMU_FEAT_COHERENCY;
> -

Why this change? The ARM_SMMU_FEAT_COHERENCY flag is still used in
arm_smmu_device_hw_probe.
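
For reference, in the Linux driver this file was imported from,
arm_smmu_device_hw_probe reads that flag back to cross-check it against
what the hardware advertises in IDR0 (a rough sketch from the upstream
code; the exact lines in the Xen copy may differ):

    static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
    {
        u32 reg;
        /* Set by the DT probe (of_dma_is_coherent) before we get here */
        bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;

        /* IDR0 */
        reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);

        /* ... */

        if (!!(reg & IDR0_COHACC) != coherent)
            dev_warn(smmu->dev,
                     "IDR0.COHACC overridden by FW configuration (%s)\n",
                     coherent ? "true" : "false");

        /* ... */
    }

With the of_dma_is_coherent() check gone, the DT path never sets the
flag, so "coherent" would presumably always be false here and the
cross-check above would warn on any SMMU whose hardware advertises
coherent table walks.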



Thread overview: 53+ messages
2020-11-26 17:01 [PATCH v2 0/8] xen/arm: Add support for SMMUv3 driver Rahul Singh
2020-11-26 17:02 ` [PATCH v2 1/8] xen/arm: Import the SMMUv3 driver from Linux Rahul Singh
2020-12-01 22:01   ` Stefano Stabellini
2020-11-26 17:02 ` [PATCH v2 2/8] xen/arm: revert atomic operation related command-queue insertion patch Rahul Singh
2020-12-01 22:23   ` Stefano Stabellini
2020-12-02 13:05     ` Rahul Singh
2020-12-02 13:44   ` Julien Grall
2020-12-03 11:49     ` Rahul Singh
2020-11-26 17:02 ` [PATCH v2 3/8] xen/arm: revert patch related to XArray Rahul Singh
2020-12-02  0:20   ` Stefano Stabellini
2020-12-02 13:46   ` Julien Grall
2020-12-03 12:57     ` Rahul Singh
2020-12-04  8:52       ` Julien Grall
2020-11-26 17:02 ` [PATCH v2 4/8] xen/arm: Remove support for MSI on SMMUv3 Rahul Singh
2020-12-02  0:33   ` Stefano Stabellini
2020-12-02  0:40     ` Stefano Stabellini
2020-12-02 13:12       ` Rahul Singh
2020-12-02 14:11         ` Julien Grall
2020-12-03 12:59           ` Rahul Singh
2020-11-26 17:02 ` [PATCH v2 5/8] xen/arm: Remove support for PCI ATS " Rahul Singh
2020-12-02  0:39   ` Stefano Stabellini
2020-12-02 13:07     ` Rahul Singh
2020-12-02 13:57   ` Julien Grall
2020-11-26 17:02 ` [PATCH v2 6/8] xen/arm: Remove support for Stage-1 translation " Rahul Singh
2020-12-02  0:53   ` Stefano Stabellini
2020-12-02 13:13     ` Rahul Singh
2020-11-26 17:02 ` [PATCH v2 7/8] xen/arm: Remove Linux specific code that is not usable in XEN Rahul Singh
2020-12-02  1:48   ` Stefano Stabellini [this message]
2020-12-02 14:34     ` Rahul Singh
2020-12-02 14:39       ` Julien Grall
2020-12-02 14:45   ` Julien Grall
2020-12-03 14:33     ` Rahul Singh
2020-12-04  9:05       ` Julien Grall
2020-12-07 10:36         ` Rahul Singh
2020-12-07 10:55           ` Julien Grall
2020-11-26 17:02 ` [PATCH v2 8/8] xen/arm: Add support for SMMUv3 driver Rahul Singh
2020-12-02  2:51   ` Stefano Stabellini
2020-12-02 16:27     ` Rahul Singh
2020-12-02 19:26       ` Rahul Singh
2020-12-02 16:47     ` Julien Grall
2020-12-03  4:13       ` Stefano Stabellini
2020-12-03 14:40         ` Rahul Singh
2020-12-03 18:47           ` Stefano Stabellini
2020-12-07  8:33             ` Rahul Singh
2020-12-02 16:22   ` Julien Grall
2020-12-07 12:12     ` Rahul Singh
2020-12-07 17:39       ` Julien Grall
2020-12-07 18:42         ` Rahul Singh
2020-12-08 19:05           ` Julien Grall
2020-12-09  1:19             ` Stefano Stabellini
2020-12-09  7:55               ` Bertrand Marquis
2020-12-09  9:18                 ` Julien Grall
2020-12-09 18:37                   ` Rahul Singh
