From mboxrd@z Thu Jan 1 00:00:00 1970
Date: Thu, 30 Mar 2023 18:14:04 +0000
From: Mostafa Saleh
To: Jean-Philippe Brucker
Cc: maz@kernel.org, catalin.marinas@arm.com, will@kernel.org,
	joro@8bytes.org, robin.murphy@arm.com, james.morse@arm.com,
	suzuki.poulose@arm.com, oliver.upton@linux.dev, yuzenghui@huawei.com,
	dbrazdil@google.com, ryan.roberts@arm.com,
	linux-arm-kernel@lists.infradead.org, kvmarm@lists.linux.dev,
	iommu@lists.linux.dev
Subject: Re: [RFC PATCH 20/45] KVM: arm64: iommu: Add map() and unmap() operations
References: <20230201125328.2186498-1-jean-philippe@linaro.org>
	<20230201125328.2186498-21-jean-philippe@linaro.org>
In-Reply-To: <20230201125328.2186498-21-jean-philippe@linaro.org>

Hi Jean,

On Wed, Feb 01, 2023 at 12:53:04PM +0000, Jean-Philippe Brucker wrote:
> Handle map() and unmap() hypercalls by calling the io-pgtable library.
>
> Signed-off-by: Jean-Philippe Brucker
> ---
>  arch/arm64/kvm/hyp/nvhe/iommu/iommu.c | 144 ++++++++++++++++++++++++++
>  1 file changed, 144 insertions(+)
>
> diff --git a/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c b/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c
> index 7404ea77ed9f..0550e7bdf179 100644
> --- a/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c
> +++ b/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c
> @@ -183,6 +183,150 @@ int kvm_iommu_detach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
>  	return ret;
>  }
>
> +static int __kvm_iommu_unmap_pages(struct io_pgtable *iopt, unsigned long iova,
> +				   size_t pgsize, size_t pgcount)
> +{
> +	int ret;
> +	size_t unmapped;
> +	phys_addr_t paddr;
> +	size_t total_unmapped = 0;
> +	size_t size = pgsize * pgcount;
> +
> +	while (total_unmapped < size) {
> +		paddr = iopt_iova_to_phys(iopt, iova);
> +		if (paddr == 0)
> +			return -EINVAL;
> +
> +		/*
> +		 * One page/block at a time, because the range provided may not
> +		 * be physically contiguous, and we need to unshare all physical
> +		 * pages.
> +		 */
> +		unmapped = iopt_unmap_pages(iopt, iova, pgsize, 1, NULL);
> +		if (!unmapped)
> +			return -EINVAL;
> +
> +		ret = __pkvm_host_unshare_dma(paddr, pgsize);
> +		if (ret)
> +			return ret;
> +
> +		iova += unmapped;
> +		pgcount -= unmapped / pgsize;
> +		total_unmapped += unmapped;
> +	}
> +
> +	return 0;
> +}
> +
> +#define IOMMU_PROT_MASK (IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE |\
> +			 IOMMU_NOEXEC | IOMMU_MMIO)
> +
> +int kvm_iommu_map_pages(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
> +			unsigned long iova, phys_addr_t paddr, size_t pgsize,
> +			size_t pgcount, int prot)
> +{
> +	size_t size;
> +	size_t granule;
> +	int ret = -EINVAL;
> +	size_t mapped = 0;
> +	struct io_pgtable iopt;
> +	struct kvm_hyp_iommu *iommu;
> +	size_t pgcount_orig = pgcount;
> +	unsigned long iova_orig = iova;
> +	struct kvm_hyp_iommu_domain *domain;
> +
> +	if (prot & ~IOMMU_PROT_MASK)
> +		return -EINVAL;
> +
> +	if (__builtin_mul_overflow(pgsize, pgcount, &size) ||
> +	    iova + size < iova || paddr + size < paddr)
> +		return -EOVERFLOW;
> +
> +	hyp_spin_lock(&iommu_lock);
> +
> +	domain = handle_to_domain(iommu_id, domain_id, &iommu);
> +	if (!domain)
> +		goto err_unlock;
> +
> +	granule = 1 << __ffs(iommu->pgtable->cfg.pgsize_bitmap);
> +	if (!IS_ALIGNED(iova | paddr | pgsize, granule))
> +		goto err_unlock;
> +
> +	ret = __pkvm_host_share_dma(paddr, size, !(prot & IOMMU_MMIO));
> +	if (ret)
> +		goto err_unlock;
> +
> +	iopt = domain_to_iopt(iommu, domain, domain_id);
> +	while (pgcount) {
> +		ret = iopt_map_pages(&iopt, iova, paddr, pgsize, pgcount, prot,
> +				     0, &mapped);
> +		WARN_ON(!IS_ALIGNED(mapped, pgsize));
> +		pgcount -= mapped / pgsize;
> +		if (ret)
> +			goto err_unmap;
> +		iova += mapped;
> +		paddr += mapped;
> +	}
> +
> +	hyp_spin_unlock(&iommu_lock);
> +	return 0;
> +
> +err_unmap:
> +	__kvm_iommu_unmap_pages(&iopt, iova_orig, pgsize, pgcount_orig - pgcount);

On error here, this unmaps (and unshares) only the pages that have been
mapped. But all pages were shared with the IOMMU beforehand (via
__pkvm_host_share_dma), so this corrupts the state of the remaining
pages: they are marked as shared while they are not.

I see we can add a "bool unshare" arg to __kvm_iommu_unmap_pages, which
would be called with false on error from here, after calling
__pkvm_host_unshare_dma for the whole range, and with true from
kvm_iommu_unmap_pages.
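Something like the below, as an untested sketch of the idea (paddr_orig
is a new variable that kvm_iommu_map_pages() would have to save before
the mapping loop advances paddr):

static int __kvm_iommu_unmap_pages(struct io_pgtable *iopt, unsigned long iova,
				   size_t pgsize, size_t pgcount, bool unshare)
{
	int ret;
	size_t unmapped;
	phys_addr_t paddr;
	size_t total_unmapped = 0;
	size_t size = pgsize * pgcount;

	while (total_unmapped < size) {
		paddr = iopt_iova_to_phys(iopt, iova);
		if (paddr == 0)
			return -EINVAL;

		/* Still one page/block at a time, as in the patch */
		unmapped = iopt_unmap_pages(iopt, iova, pgsize, 1, NULL);
		if (!unmapped)
			return -EINVAL;

		/*
		 * On the map() error path the caller has already unshared
		 * the whole range, so skip unsharing here.
		 */
		if (unshare) {
			ret = __pkvm_host_unshare_dma(paddr, pgsize);
			if (ret)
				return ret;
		}

		iova += unmapped;
		pgcount -= unmapped / pgsize;
		total_unmapped += unmapped;
	}

	return 0;
}

and in kvm_iommu_map_pages():

err_unmap:
	/* The whole range was shared above, so unshare all of it... */
	__pkvm_host_unshare_dma(paddr_orig, size);
	/* ...but only unmap the pages that were actually mapped */
	__kvm_iommu_unmap_pages(&iopt, iova_orig, pgsize,
				pgcount_orig - pgcount, false);

kvm_iommu_unmap_pages() would then pass unshare = true.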
> +err_unlock:
> +	hyp_spin_unlock(&iommu_lock);
> +	return ret;
> +}
> +
> +int kvm_iommu_unmap_pages(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
> +			  unsigned long iova, size_t pgsize, size_t pgcount)
> +{
> +	size_t size;
> +	size_t granule;
> +	int ret = -EINVAL;
> +	struct io_pgtable iopt;
> +	struct kvm_hyp_iommu *iommu;
> +	struct kvm_hyp_iommu_domain *domain;
> +
> +	if (__builtin_mul_overflow(pgsize, pgcount, &size) ||
> +	    iova + size < iova)
> +		return -EOVERFLOW;
> +
> +	hyp_spin_lock(&iommu_lock);
> +	domain = handle_to_domain(iommu_id, domain_id, &iommu);
> +	if (!domain)
> +		goto out_unlock;
> +
> +	granule = 1 << __ffs(iommu->pgtable->cfg.pgsize_bitmap);
> +	if (!IS_ALIGNED(iova | pgsize, granule))
> +		goto out_unlock;
> +
> +	iopt = domain_to_iopt(iommu, domain, domain_id);
> +	ret = __kvm_iommu_unmap_pages(&iopt, iova, pgsize, pgcount);
> +out_unlock:
> +	hyp_spin_unlock(&iommu_lock);
> +	return ret;
> +}
> +
> +phys_addr_t kvm_iommu_iova_to_phys(pkvm_handle_t iommu_id,
> +				   pkvm_handle_t domain_id, unsigned long iova)
> +{
> +	phys_addr_t phys = 0;
> +	struct io_pgtable iopt;
> +	struct kvm_hyp_iommu *iommu;
> +	struct kvm_hyp_iommu_domain *domain;
> +
> +	hyp_spin_lock(&iommu_lock);
> +	domain = handle_to_domain(iommu_id, domain_id, &iommu);
> +	if (domain) {
> +		iopt = domain_to_iopt(iommu, domain, domain_id);
> +
> +		phys = iopt_iova_to_phys(&iopt, iova);
> +	}
> +	hyp_spin_unlock(&iommu_lock);
> +	return phys;
> +}
> +
>  int kvm_iommu_init_device(struct kvm_hyp_iommu *iommu)
>  {
>  	void *domains;
> --
> 2.39.0

Thanks,
Mostafa