From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path: <linux-kernel-owner@vger.kernel.org>
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S1752362AbcBKOfU (ORCPT ); Thu, 11 Feb 2016 09:35:20 -0500
Received: from mail-wm0-f52.google.com ([74.125.82.52]:36471 "EHLO
	mail-wm0-f52.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
	with ESMTP id S1752287AbcBKOfN (ORCPT );
	Thu, 11 Feb 2016 09:35:13 -0500
From: Eric Auger <eric.auger@linaro.org>
To: eric.auger@st.com, eric.auger@linaro.org, alex.williamson@redhat.com,
	will.deacon@arm.com, joro@8bytes.org, tglx@linutronix.de,
	jason@lakedaemon.net, marc.zyngier@arm.com, christoffer.dall@linaro.org,
	linux-arm-kernel@lists.infradead.org, kvmarm@lists.cs.columbia.edu,
	kvm@vger.kernel.org
Cc: suravee.suthikulpanit@amd.com, patches@linaro.org,
	linux-kernel@vger.kernel.org, Manish.Jaggi@caviumnetworks.com,
	Bharat.Bhushan@freescale.com, pranav.sawargaonkar@gmail.com,
	p.fedin@samsung.com, iommu@lists.linux-foundation.org,
	sherry.hurwitz@amd.com, brijesh.singh@amd.com, leo.duran@amd.com,
	Thomas.Lendacky@amd.com
Subject: [RFC v2 05/15] iommu/arm-smmu: implement alloc/free_reserved_iova_domain
Date: Thu, 11 Feb 2016 14:34:12 +0000
Message-Id: <1455201262-5259-6-git-send-email-eric.auger@linaro.org>
X-Mailer: git-send-email 1.9.1
In-Reply-To: <1455201262-5259-1-git-send-email-eric.auger@linaro.org>
References: <1455201262-5259-1-git-send-email-eric.auger@linaro.org>
Sender: linux-kernel-owner@vger.kernel.org
List-ID: <linux-kernel.vger.kernel.org>
X-Mailing-List: linux-kernel@vger.kernel.org

Implement alloc/free_reserved_iova_domain for arm-smmu. We use the iova
allocator (iova.c). The iova_domain is attached to the arm_smmu_domain
struct. A mutex is introduced to protect it.

Signed-off-by: Eric Auger <eric.auger@linaro.org>

---
v1 -> v2:
- formerly implemented in vfio_iommu_type1
---
 drivers/iommu/arm-smmu.c | 87 +++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 72 insertions(+), 15 deletions(-)

diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index c8b7e71..f42341d 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -42,6 +42,7 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/iova.h>
 
 #include <linux/amba/bus.h>
 
@@ -347,6 +348,9 @@ struct arm_smmu_domain {
 	enum arm_smmu_domain_stage	stage;
 	struct mutex			init_mutex; /* Protects smmu pointer */
 	struct iommu_domain		domain;
+	struct iova_domain		*reserved_iova_domain;
+	/* protects reserved domain manipulation */
+	struct mutex			reserved_mutex;
 };
 
 static struct iommu_ops arm_smmu_ops;
@@ -975,6 +979,7 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
 		return NULL;
 
 	mutex_init(&smmu_domain->init_mutex);
+	mutex_init(&smmu_domain->reserved_mutex);
 	spin_lock_init(&smmu_domain->pgtbl_lock);
 
 	return &smmu_domain->domain;
@@ -1446,22 +1451,74 @@ out_unlock:
 	return ret;
 }
 
+static int arm_smmu_alloc_reserved_iova_domain(struct iommu_domain *domain,
+					       dma_addr_t iova, size_t size,
+					       unsigned long order)
+{
+	unsigned long granule, mask;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	int ret = 0;
+
+	granule = 1UL << order;
+	mask = granule - 1;
+	if (iova & mask || (!size) || (size & mask))
+		return -EINVAL;
+
+	if (smmu_domain->reserved_iova_domain)
+		return -EEXIST;
+
+	mutex_lock(&smmu_domain->reserved_mutex);
+
+	smmu_domain->reserved_iova_domain =
+		kzalloc(sizeof(struct iova_domain), GFP_KERNEL);
+	if (!smmu_domain->reserved_iova_domain) {
+		ret = -ENOMEM;
+		goto unlock;
+	}
+
+	init_iova_domain(smmu_domain->reserved_iova_domain,
+			 granule, iova >> order, (iova + size - 1) >> order);
+
+unlock:
+	mutex_unlock(&smmu_domain->reserved_mutex);
+	return ret;
+}
+
+static void arm_smmu_free_reserved_iova_domain(struct iommu_domain *domain)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct iova_domain *iovad = smmu_domain->reserved_iova_domain;
+
+	if (!iovad)
+		return;
+
+	mutex_lock(&smmu_domain->reserved_mutex);
+
+	put_iova_domain(iovad);
+	kfree(iovad);
+
+	mutex_unlock(&smmu_domain->reserved_mutex);
+}
+
 static struct iommu_ops arm_smmu_ops = {
-	.capable		= arm_smmu_capable,
-	.domain_alloc		= arm_smmu_domain_alloc,
-	.domain_free		= arm_smmu_domain_free,
-	.attach_dev		= arm_smmu_attach_dev,
-	.detach_dev		= arm_smmu_detach_dev,
-	.map			= arm_smmu_map,
-	.unmap			= arm_smmu_unmap,
-	.map_sg			= default_iommu_map_sg,
-	.iova_to_phys		= arm_smmu_iova_to_phys,
-	.add_device		= arm_smmu_add_device,
-	.remove_device		= arm_smmu_remove_device,
-	.device_group		= arm_smmu_device_group,
-	.domain_get_attr	= arm_smmu_domain_get_attr,
-	.domain_set_attr	= arm_smmu_domain_set_attr,
-	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
+	.capable			= arm_smmu_capable,
+	.domain_alloc			= arm_smmu_domain_alloc,
+	.domain_free			= arm_smmu_domain_free,
+	.attach_dev			= arm_smmu_attach_dev,
+	.detach_dev			= arm_smmu_detach_dev,
+	.map				= arm_smmu_map,
+	.unmap				= arm_smmu_unmap,
+	.map_sg				= default_iommu_map_sg,
+	.iova_to_phys			= arm_smmu_iova_to_phys,
+	.add_device			= arm_smmu_add_device,
+	.remove_device			= arm_smmu_remove_device,
+	.device_group			= arm_smmu_device_group,
+	.domain_get_attr		= arm_smmu_domain_get_attr,
+	.domain_set_attr		= arm_smmu_domain_set_attr,
+	.alloc_reserved_iova_domain	= arm_smmu_alloc_reserved_iova_domain,
+	.free_reserved_iova_domain	= arm_smmu_free_reserved_iova_domain,
+	/* Page size bitmap, restricted during device attach */
+	.pgsize_bitmap			= -1UL,
 };
 
 static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
-- 
1.9.1
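
The patch above only sets up and tears down the reserved iova_domain; nothing in
it allocates from that window yet. The sketch below illustrates how a
driver-internal consumer could carve one granule out of the reserved range and
map it onto a physical address (for instance an MSI doorbell) through the
regular iommu_map() path. It is a minimal sketch under assumptions: the helper
name arm_smmu_map_reserved_page(), its prot/out_iova parameters and the doorbell
use case are illustrative only and are not part of this series.

/*
 * Illustrative sketch, not part of this patch: allocate one granule from the
 * reserved IOVA window previously set up by arm_smmu_alloc_reserved_iova_domain()
 * and map it onto @paddr. Relies on <linux/iova.h> and <linux/iommu.h>, which
 * arm-smmu.c already pulls in, plus the fields this patch adds to
 * struct arm_smmu_domain.
 */
static int arm_smmu_map_reserved_page(struct arm_smmu_domain *smmu_domain,
				      phys_addr_t paddr, int prot,
				      dma_addr_t *out_iova)
{
	struct iova_domain *iovad = smmu_domain->reserved_iova_domain;
	unsigned long shift, page_mask;
	struct iova *p;
	dma_addr_t dma;
	int ret;

	if (!iovad)
		return -ENODEV;

	shift = iova_shift(iovad);		/* log2 of the window granule */
	page_mask = (1UL << shift) - 1;

	mutex_lock(&smmu_domain->reserved_mutex);

	/*
	 * One granule, size-aligned, anywhere up to the top pfn the window was
	 * initialized with (init_iova_domain() recorded it as dma_32bit_pfn).
	 */
	p = alloc_iova(iovad, 1, iovad->dma_32bit_pfn, true);
	if (!p) {
		ret = -ENOMEM;
		goto out_unlock;
	}
	dma = iova_dma_addr(iovad, p);

	/* Map the whole granule containing @paddr into the domain. */
	ret = iommu_map(&smmu_domain->domain, dma, paddr & ~page_mask,
			1UL << shift, prot);
	if (ret) {
		__free_iova(iovad, p);
		goto out_unlock;
	}

	*out_iova = dma | (paddr & page_mask);
out_unlock:
	mutex_unlock(&smmu_domain->reserved_mutex);
	return ret;
}

A caller would first establish the window through the new callback, e.g.
domain->ops->alloc_reserved_iova_domain(domain, base, size, order), and could
then invoke the helper above with something like prot = IOMMU_READ | IOMMU_WRITE.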