From: Joerg Roedel
To: iommu@lists.linux-foundation.org
Cc: linux-kernel@vger.kernel.org, linux-arm-msm@vger.kernel.org,
    linux-mediatek@lists.infradead.org,
    virtualization@lists.linux-foundation.org, Lorenzo Pieralisi,
    Hanjun Guo, Sudeep Holla, Rob Clark, Sean Paul, Will Deacon,
    Robin Murphy, Joerg Roedel, Matthias Brugger, Thierry Reding,
    Jean-Philippe Brucker, Andy Gross, Bjorn Andersson, Joerg Roedel
Subject: [PATCH 13/15] iommu/qcom: Use accessor functions for iommu private data
Date: Tue, 10 Mar 2020 10:12:27 +0100
Message-Id: <20200310091229.29830-14-joro@8bytes.org>
In-Reply-To: <20200310091229.29830-1-joro@8bytes.org>
References: <20200310091229.29830-1-joro@8bytes.org>
X-Mailer: git-send-email 2.17.1

From: Joerg Roedel

Make use of dev_iommu_priv_set/get() functions.
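
For readers who have not seen the new helpers yet, the sketch below
illustrates the accessor pattern this driver is converted to: the
driver-private pointer is reached through per-device IOMMU data via
dedicated get/set helpers instead of being read straight out of
fwspec->iommu_priv. The type and function names here (dev_iommu_sketch,
dev_iommu_priv_get_sketch, ...) are made up for illustration; the real
definitions live in include/linux/iommu.h and may differ in detail.

    /*
     * Illustrative sketch only -- not the kernel's actual definitions.
     * It models the shape of the accessor pattern: driver-private data
     * hangs off per-device IOMMU state and is reached via helpers.
     */
    #include <stddef.h>

    struct dev_iommu_sketch {
    	void *priv;		/* driver-private data, e.g. a qcom_iommu_dev pointer */
    };

    struct device_sketch {
    	struct dev_iommu_sketch *iommu;
    };

    static inline void *dev_iommu_priv_get_sketch(struct device_sketch *dev)
    {
    	/* NULL-safe read of the per-device private pointer */
    	return dev->iommu ? dev->iommu->priv : NULL;
    }

    static inline void dev_iommu_priv_set_sketch(struct device_sketch *dev,
    					     void *priv)
    {
    	dev->iommu->priv = priv;
    }

Hiding the storage location behind accessors means the core code can
later relocate the private data without touching each driver again.
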
Signed-off-by: Joerg Roedel
---
 drivers/iommu/qcom_iommu.c | 61 ++++++++++++++++++++++----------------
 1 file changed, 36 insertions(+), 25 deletions(-)

diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index 4328da0b0a9f..80147f82d427 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -74,16 +74,19 @@ static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
 
 static const struct iommu_ops qcom_iommu_ops;
 
-static struct qcom_iommu_dev * to_iommu(struct iommu_fwspec *fwspec)
+static struct qcom_iommu_dev * to_iommu(struct device *dev)
 {
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
 	if (!fwspec || fwspec->ops != &qcom_iommu_ops)
 		return NULL;
-	return fwspec->iommu_priv;
+
+	return dev_iommu_priv_get(dev);
 }
 
-static struct qcom_iommu_ctx * to_ctx(struct iommu_fwspec *fwspec, unsigned asid)
+static struct qcom_iommu_ctx * to_ctx(struct device *dev, unsigned asid)
 {
-	struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
+	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
 	if (!qcom_iommu)
 		return NULL;
 	return qcom_iommu->ctxs[asid - 1];
@@ -115,11 +118,14 @@ iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
 
 static void qcom_iommu_tlb_sync(void *cookie)
 {
-	struct iommu_fwspec *fwspec = cookie;
+	struct iommu_fwspec *fwspec;
+	struct device *dev = cookie;
 	unsigned i;
 
+	fwspec = dev_iommu_fwspec_get(dev);
+
 	for (i = 0; i < fwspec->num_ids; i++) {
-		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
+		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
 		unsigned int val, ret;
 
 		iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);
@@ -133,11 +139,14 @@ static void qcom_iommu_tlb_sync(void *cookie)
 
 static void qcom_iommu_tlb_inv_context(void *cookie)
 {
-	struct iommu_fwspec *fwspec = cookie;
+	struct device *dev = cookie;
+	struct iommu_fwspec *fwspec;
 	unsigned i;
 
+	fwspec = dev_iommu_fwspec_get(dev);
+
 	for (i = 0; i < fwspec->num_ids; i++) {
-		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
+		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
 		iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
 	}
 
@@ -147,13 +156,16 @@ static void qcom_iommu_tlb_inv_context(void *cookie)
 static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 					    size_t granule, bool leaf, void *cookie)
 {
-	struct iommu_fwspec *fwspec = cookie;
+	struct device *dev = cookie;
+	struct iommu_fwspec *fwspec;
 	unsigned i, reg;
 
 	reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
 
+	fwspec = dev_iommu_fwspec_get(dev);
+
 	for (i = 0; i < fwspec->num_ids; i++) {
-		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
+		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
 		size_t s = size;
 
 		iova = (iova >> 12) << 12;
@@ -222,9 +234,10 @@ static irqreturn_t qcom_iommu_fault(int irq, void *dev)
 
 static int qcom_iommu_init_domain(struct iommu_domain *domain,
 				  struct qcom_iommu_dev *qcom_iommu,
-				  struct iommu_fwspec *fwspec)
+				  struct device *dev)
 {
 	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 	struct io_pgtable_ops *pgtbl_ops;
 	struct io_pgtable_cfg pgtbl_cfg;
 	int i, ret = 0;
@@ -243,7 +256,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
 	};
 
 	qcom_domain->iommu = qcom_iommu;
-	pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, fwspec);
+	pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, dev);
 	if (!pgtbl_ops) {
 		dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
 		ret = -ENOMEM;
@@ -256,7 +269,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
 	domain->geometry.force_aperture = true;
 
 	for (i = 0; i < fwspec->num_ids; i++) {
-		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
+		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
 
 		if (!ctx->secure_init) {
 			ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
@@ -363,8 +376,7 @@ static void qcom_iommu_domain_free(struct iommu_domain *domain)
 
 static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
-	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-	struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
+	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
 	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
 	int ret;
 
@@ -375,7 +387,7 @@ static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev
 
 	/* Ensure that the domain is finalized */
 	pm_runtime_get_sync(qcom_iommu->dev);
-	ret = qcom_iommu_init_domain(domain, qcom_iommu, fwspec);
+	ret = qcom_iommu_init_domain(domain, qcom_iommu, dev);
 	pm_runtime_put_sync(qcom_iommu->dev);
 	if (ret < 0)
 		return ret;
@@ -397,9 +409,9 @@ static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev
 
 static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *dev)
 {
-	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-	struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
 	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
 	unsigned i;
 
 	if (WARN_ON(!qcom_domain->iommu))
@@ -407,7 +419,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
 
 	pm_runtime_get_sync(qcom_iommu->dev);
 	for (i = 0; i < fwspec->num_ids; i++) {
-		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
+		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
 
 		/* Disable the context bank: */
 		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
@@ -514,7 +526,7 @@ static bool qcom_iommu_capable(enum iommu_cap cap)
 
 static int qcom_iommu_add_device(struct device *dev)
 {
-	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev_iommu_fwspec_get(dev));
+	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
 	struct iommu_group *group;
 	struct device_link *link;
 
@@ -545,7 +557,7 @@ static int qcom_iommu_add_device(struct device *dev)
 
 static void qcom_iommu_remove_device(struct device *dev)
 {
-	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev_iommu_fwspec_get(dev));
+	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
 
 	if (!qcom_iommu)
 		return;
@@ -557,7 +569,6 @@ static void qcom_iommu_remove_device(struct device *dev)
 
 static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
 {
-	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 	struct qcom_iommu_dev *qcom_iommu;
 	struct platform_device *iommu_pdev;
 	unsigned asid = args->args[0];
@@ -583,14 +594,14 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
 	    WARN_ON(asid > qcom_iommu->num_ctxs))
 		return -EINVAL;
 
-	if (!fwspec->iommu_priv) {
-		fwspec->iommu_priv = qcom_iommu;
+	if (!dev_iommu_priv_get(dev)) {
+		dev_iommu_priv_set(dev, qcom_iommu);
 	} else {
 		/* make sure devices iommus dt node isn't referring to
 		 * multiple different iommu devices.  Multiple context
 		 * banks are ok, but multiple devices are not:
 		 */
-		if (WARN_ON(qcom_iommu != fwspec->iommu_priv))
+		if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev)))
 			return -EINVAL;
 	}
 
-- 
2.17.1
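
A note on the of_xlate hunk above: the conversion keeps the driver's
original set-once/verify behaviour. The first successful translation
installs the owning IOMMU as the device's private data, every later
translation for the same device must resolve to that same IOMMU, and
additional context banks on that IOMMU remain allowed. The stand-alone
program below restates that pattern with hypothetical stand-ins
(my_dev, iommu_priv_get/iommu_priv_set) so it can be compiled and run
outside the kernel; it is an illustration of the logic, not driver code.

    /*
     * Illustration of the of_xlate private-data pattern with made-up
     * helpers standing in for the kernel accessors.
     */
    #include <stdio.h>

    struct my_iommu { int id; };
    struct my_dev   { void *iommu_priv; };

    static void *iommu_priv_get(struct my_dev *dev)             { return dev->iommu_priv; }
    static void  iommu_priv_set(struct my_dev *dev, void *priv) { dev->iommu_priv = priv; }

    static int xlate_check(struct my_dev *dev, struct my_iommu *iommu)
    {
    	if (!iommu_priv_get(dev)) {
    		iommu_priv_set(dev, iommu);	/* first context bank: adopt this IOMMU */
    	} else if (iommu_priv_get(dev) != iommu) {
    		return -1;			/* one device must not span two IOMMUs */
    	}
    	return 0;				/* further banks on the same IOMMU are fine */
    }

    int main(void)
    {
    	struct my_iommu a = { 1 }, b = { 2 };
    	struct my_dev dev = { 0 };
    	int r1, r2, r3;

    	r1 = xlate_check(&dev, &a);	/* 0: first bank, adopts IOMMU a */
    	r2 = xlate_check(&dev, &a);	/* 0: second bank, same IOMMU */
    	r3 = xlate_check(&dev, &b);	/* -1: different IOMMU rejected */
    	printf("%d %d %d\n", r1, r2, r3);
    	return 0;
    }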