All of lore.kernel.org
 help / color / mirror / Atom feed
From: Andrii Tseglytskyi <andrii.tseglytskyi@globallogic.com>
To: Ian Campbell <ian.campbell@citrix.com>,
	Stefano Stabellini <stefano.stabellini@citrix.com>,
	Julien Grall <julien.grall@linaro.org>,
	xen-devel@lists.xen.org
Subject: [PATCH v03 06/10] arm: omap: introduce iommu translation for GPU remoteproc
Date: Tue,  2 Sep 2014 18:46:06 +0300	[thread overview]
Message-ID: <1409672770-23164-7-git-send-email-andrii.tseglytskyi@globallogic.com> (raw)
In-Reply-To: <1409672770-23164-1-git-send-email-andrii.tseglytskyi@globallogic.com>

The following patch introduces platform-specific MMU data
definitions and a pagetable translation function for the OMAP5 GPU
remoteproc. Typically the GPU MMU uses two-level address
translation, so the algorithm is quite straightforward here -
pagetables are enumerated and all pfns are updated with
corresponding mfns.

The current patch adds the functionality needed for proper handling
of the GPU MMU, which is very similar to the existing IPU/DSP MMUs.

Signed-off-by: Andrii Tseglytskyi <andrii.tseglytskyi@globallogic.com>
---
 xen/arch/arm/remoteproc/omap_iommu.c       | 107 +++++++++++++++++++++++++++++
 xen/arch/arm/remoteproc/remoteproc_iommu.c |   1 +
 xen/include/asm-arm/remoteproc_iommu.h     |   1 +
 3 files changed, 109 insertions(+)

diff --git a/xen/arch/arm/remoteproc/omap_iommu.c b/xen/arch/arm/remoteproc/omap_iommu.c
index 8ed6d0b..f00bfc6 100644
--- a/xen/arch/arm/remoteproc/omap_iommu.c
+++ b/xen/arch/arm/remoteproc/omap_iommu.c
@@ -32,12 +32,23 @@
 /* register where address of pagetable is stored */
 #define MMU_IPU_TTB_OFFSET          0x4c
 
+#define MMU_GPU_TTB_OFFSET_00		0xc84
+#define MMU_GPU_TTB_OFFSET_01		0xc38
+#define MMU_GPU_TTB_OFFSET_02		0xc3c
+#define MMU_GPU_TTB_OFFSET_03		0xc40
+#define MMU_GPU_TTB_OFFSET_04		0xc44
+#define MMU_GPU_TTB_OFFSET_05		0xc48
+#define MMU_GPU_TTB_OFFSET_06		0xc4c
+#define MMU_GPU_TTB_OFFSET_07		0xc50
+
 /* 1st level translation */
 #define MMU_OMAP_PGD_SHIFT          20
 #define MMU_OMAP_SUPER_SHIFT        24	/* "supersection" - 16 Mb */
 #define MMU_OMAP_SECTION_SHIFT      20	/* "section"  - 1 Mb */
 #define MMU_OMAP_SECOND_LEVEL_SHIFT 10
 
+#define MMU_GPU_PGD_SHIFT			22	/* SGX section */
+
 /* 2nd level translation */
 #define MMU_OMAP_PTE_SMALL_SHIFT    12	/* "small page" - 4Kb */
 #define MMU_OMAP_PTE_LARGE_SHIFT    16	/* "large page" - 64 Kb */
@@ -57,15 +68,28 @@
 #define IPU_PTE_LARGE       (1 << 0)
 
 #define	OMAP_IPU_MMU_MEM_BASE   0x55082000
+#define	OMAP_GPU_MMU_MEM_BASE	0x56000000
 
 static int mmu_omap_copy_pagetable(struct mmu_info *mmu, struct mmu_pagetable *pgt);
 
 static paddr_t mmu_ipu_translate_pagetable(struct mmu_info *mmu, struct mmu_pagetable *pgt);
+static paddr_t mmu_gpu_translate_pagetable(struct mmu_info *mmu, struct mmu_pagetable *pgt);
 
 static u32 ipu_trap_offsets[] = {
     MMU_IPU_TTB_OFFSET,
 };
 
+static u32 sgx_trap_offsets[] = {
+    MMU_GPU_TTB_OFFSET_00,
+    MMU_GPU_TTB_OFFSET_01,
+    MMU_GPU_TTB_OFFSET_02,
+    MMU_GPU_TTB_OFFSET_03,
+    MMU_GPU_TTB_OFFSET_04,
+    MMU_GPU_TTB_OFFSET_05,
+    MMU_GPU_TTB_OFFSET_06,
+    MMU_GPU_TTB_OFFSET_07,
+};
+
 static const struct pagetable_data pagetable_ipu_data = {
     .pgd_shift          = MMU_OMAP_PGD_SHIFT,
     .super_shift        = MMU_OMAP_SUPER_SHIFT,
@@ -85,6 +109,24 @@ struct mmu_info omap_ipu_mmu = {
     .translate_pfunc	= mmu_ipu_translate_pagetable,
 };
 
+static const struct pagetable_data pagetable_gpu_data = {
+    .pgd_shift      = MMU_GPU_PGD_SHIFT,
+    .super_shift    = MMU_GPU_PGD_SHIFT,
+    .section_shift  = MMU_GPU_PGD_SHIFT,
+    .pte_shift      = MMU_OMAP_PTE_SMALL_SHIFT,	/* the same as IPU */
+};
+
+struct mmu_info omap_gpu_mmu = {
+    .name           = "SGX_L2_MMU",
+    .pg_data        = &pagetable_gpu_data,
+    .trap_offsets   = sgx_trap_offsets,
+    .mem_start      = OMAP_GPU_MMU_MEM_BASE,
+    .mem_size       = 0x1000,
+    .num_traps      = ARRAY_SIZE(sgx_trap_offsets),
+    .copy_pagetable_pfunc	= mmu_omap_copy_pagetable,
+    .translate_pfunc    = mmu_gpu_translate_pagetable,
+};
+
 static bool translate_supersections_to_pages = true;
 static bool translate_sections_to_pages = true;
 
@@ -315,6 +357,71 @@ static paddr_t mmu_ipu_translate_pagetable(struct mmu_info *mmu, struct mmu_page
     return __pa(hyp_pgt);
 }
 
+static paddr_t mmu_gpu_translate_pagetable(struct mmu_info *mmu, struct mmu_pagetable *pgt)
+{
+    /* GPU pagetable consists of set of 32 bit pointers */
+    u32 *kern_pgt, *hyp_pgt;
+    u32 i;
+
+    ASSERT(mmu);
+    ASSERT(pgt);
+
+    kern_pgt = pgt->kern_pagetable;
+    hyp_pgt = pgt->hyp_pagetable;
+    pgt->page_counter = 0;
+
+    /* 1-st level translation */
+    for ( i = 0; i < MMU_PTRS_PER_PGD(mmu); i++ )
+    {
+        paddr_t pd_maddr, pd_paddr, pd_flags, pgd;
+        u32 pd_mask = MMU_SECTION_MASK(mmu->pg_data->pte_shift);
+        int res;
+
+        pgd = kern_pgt[i];
+        if ( !pgd )
+        {
+            /* handle the case when second level translation table
+             * was removed from kernel */
+            if ( unlikely(hyp_pgt[i]) )
+            {
+                guest_physmap_unpin_range(current->domain,
+                            (hyp_pgt[i] & pd_mask) >> PAGE_SHIFT, 0);
+                xfree(__va(hyp_pgt[i] & pd_mask));
+                hyp_pgt[i] = 0;
+            }
+            continue;
+        }
+
+        pd_paddr = pgd & pd_mask;
+        pd_flags = pgd & ~pd_mask;
+        pd_maddr = p2m_lookup(current->domain, pd_paddr, NULL);
+
+        if ( INVALID_PADDR == pd_maddr )
+        {
+            pr_mmu(mmu, "failed to lookup paddr 0x%"PRIpaddr"", pd_paddr);
+            return INVALID_PADDR;
+        }
+
+        if ( !guest_physmap_pinned_range(current->domain, pd_maddr >> PAGE_SHIFT, 0) )
+        {
+            res = guest_physmap_pin_range(current->domain, pd_maddr >> PAGE_SHIFT, 0);
+            if ( res )
+            {
+                pr_mmu(mmu, "can't pin page pfn 0x%"PRIpaddr" mfn 0x%"PRIpaddr" res %d",
+                       pd_paddr, pd_maddr, res);
+                return INVALID_PADDR;
+            }
+        }
+
+        /* 2-nd level translation */
+        hyp_pgt[i] = remoteproc_iommu_translate_second_level(mmu, pgt, pd_maddr, hyp_pgt[i]);
+        hyp_pgt[i] |= pd_flags;
+    }
+
+    clean_and_invalidate_xen_dcache_va_range(hyp_pgt, MMU_PGD_TABLE_SIZE(mmu));
+    return __pa(hyp_pgt);
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/arm/remoteproc/remoteproc_iommu.c b/xen/arch/arm/remoteproc/remoteproc_iommu.c
index a2cae25..c691619 100644
--- a/xen/arch/arm/remoteproc/remoteproc_iommu.c
+++ b/xen/arch/arm/remoteproc/remoteproc_iommu.c
@@ -33,6 +33,7 @@
 
 static struct mmu_info *mmu_list[] = {
     &omap_ipu_mmu,
+    &omap_gpu_mmu,
 };
 
 #define mmu_for_each(pfunc, data)                       \
diff --git a/xen/include/asm-arm/remoteproc_iommu.h b/xen/include/asm-arm/remoteproc_iommu.h
index e581fc3..4983505 100644
--- a/xen/include/asm-arm/remoteproc_iommu.h
+++ b/xen/include/asm-arm/remoteproc_iommu.h
@@ -80,5 +80,6 @@ paddr_t remoteproc_iommu_translate_second_level(struct mmu_info *mmu,
                                                  paddr_t maddr, paddr_t hyp_addr);
 
 extern struct mmu_info omap_ipu_mmu;
+extern struct mmu_info omap_gpu_mmu;
 
 #endif /* _REMOTEPROC_IOMMU_H_ */
-- 
1.9.1

  parent reply	other threads:[~2014-09-02 15:46 UTC|newest]

Thread overview: 18+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2014-09-02 15:46 [PATCH v03 00/10] arm: introduce remoteprocessor iommu module Andrii Tseglytskyi
2014-09-02 15:46 ` [PATCH v03 01/10] xen: implement guest_physmap_pin_range Andrii Tseglytskyi
2014-09-03  9:43   ` Jan Beulich
2014-09-11  1:12   ` Julien Grall
2014-09-02 15:46 ` [PATCH v03 02/10] domctl: introduce access_remote_pagetable call Andrii Tseglytskyi
2014-09-03  9:46   ` Jan Beulich
2014-09-02 15:46 ` [PATCH v03 03/10] xsm: arm: create domU_rpc_t security label Andrii Tseglytskyi
2014-09-02 15:46 ` [PATCH v03 04/10] arm: introduce remoteprocessor iommu module Andrii Tseglytskyi
2014-09-11  0:41   ` Julien Grall
2014-09-02 15:46 ` [PATCH v03 05/10] arm: omap: introduce iommu translation for IPU remoteproc Andrii Tseglytskyi
2014-09-02 15:46 ` Andrii Tseglytskyi [this message]
2014-09-02 15:46 ` [PATCH v03 07/10] arm: introduce remoteproc_mmu_translate_pagetable mem subops call Andrii Tseglytskyi
2014-09-03  9:48   ` Jan Beulich
2014-09-13  0:04   ` Stefano Stabellini
2014-09-02 15:46 ` [PATCH v03 08/10] arm: add trap for remoteproc mmio accesses Andrii Tseglytskyi
2014-09-03  9:52   ` Jan Beulich
2014-09-02 15:46 ` [PATCH v03 09/10] arm: omap: introduce print pagetable function for IPU remoteproc Andrii Tseglytskyi
2014-09-02 15:46 ` [PATCH v03 10/10] arm: omap: introduce print pagetable function for GPU remoteproc Andrii Tseglytskyi

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1409672770-23164-7-git-send-email-andrii.tseglytskyi@globallogic.com \
    --to=andrii.tseglytskyi@globallogic.com \
    --cc=ian.campbell@citrix.com \
    --cc=julien.grall@linaro.org \
    --cc=stefano.stabellini@citrix.com \
    --cc=xen-devel@lists.xen.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.