* [PATCH V2 01/46] xen: arm32: Don't bother with the bootloader provided ARM-Linux machine type
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-15 13:36   ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 02/46] xen: arm: rename atag_paddr argument fdt_paddr Ian Campbell
                   ` (46 subsequent siblings)
  47 siblings, 1 reply; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Everything is DTB-based, and on 64-bit there is no such concept even in
Linux.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
v2: Update start_secondary too.
---
 xen/arch/arm/arm32/head.S |    7 +++----
 xen/arch/arm/setup.c      |    1 -
 xen/arch/arm/smpboot.c    |    1 -
 3 files changed, 3 insertions(+), 6 deletions(-)

diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
index 20e9da6..92fc36c 100644
--- a/xen/arch/arm/arm32/head.S
+++ b/xen/arch/arm/arm32/head.S
@@ -72,7 +72,7 @@ past_zImage:
         cpsid aif                    /* Disable all interrupts */
 
         /* Save the bootloader arguments in less-clobberable registers */
-        mov   r7, r1                 /* r7 := ARM-linux machine type */
+        /* No need to save r1 == Unused ARM-linux machine type */
         mov   r8, r2                 /* r8 := ATAG base address */
 
         /* Find out where we are */
@@ -334,9 +334,8 @@ launch:
         add   sp, #STACK_SIZE        /* (which grows down from the top). */
         sub   sp, #CPUINFO_sizeof    /* Make room for CPU save record */
         mov   r0, r10                /* Marshal args: - phys_offset */
-        mov   r1, r7                 /*               - machine type */
-        mov   r2, r8                 /*               - ATAG address */
-        movs  r3, r12                /*               - CPU ID */
+        mov   r1, r8                 /*               - ATAG address */
+        movs  r2, r12                /*               - CPU ID */
         beq   start_xen              /* and disappear into the land of C */
         b     start_secondary        /* (to the appropriate entry point) */
 
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index acb7abb..782d252 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -329,7 +329,6 @@ void __init setup_cache(void)
 
 /* C entry point for boot CPU */
 void __init start_xen(unsigned long boot_phys_offset,
-                      unsigned long arm_type,
                       unsigned long atag_paddr,
                       unsigned long cpuid)
 {
diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c
index c7a586b..da4880c 100644
--- a/xen/arch/arm/smpboot.c
+++ b/xen/arch/arm/smpboot.c
@@ -132,7 +132,6 @@ make_cpus_ready(unsigned int max_cpus, unsigned long boot_phys_offset)
 
 /* Boot the current CPU */
 void __cpuinit start_secondary(unsigned long boot_phys_offset,
-                               unsigned long arm_type,
                                unsigned long atag_paddr,
                                unsigned long cpuid)
 {
-- 
1.7.2.5
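
For reference, the net effect on the boot interface is easiest to see from
the C entry point: after this change head.S marshals only three arguments
before branching into C. A minimal sketch of the resulting prototype (taken
from the hunks above; the register comments are illustrative, not quoted
from the headers):

    /* Boot-CPU entry point after dropping the machine type (sketch).
     * r0: boot_phys_offset -- paddr(start) - vaddr(start)
     * r1: atag_paddr       -- ATAG/DTB physical address
     * r2: cpuid            -- 0 on the boot CPU */
    void __init start_xen(unsigned long boot_phys_offset,
                          unsigned long atag_paddr,
                          unsigned long cpuid);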


* [PATCH V2 02/46] xen: arm: rename atag_paddr argument fdt_paddr
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 01/46] xen: arm32: Don't bother with the bootloader provided ARM-Linux machine type Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-15 13:36   ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 03/46] xen: arm: do not pass a machine ID to dom0 Ian Campbell
                   ` (45 subsequent siblings)
  47 siblings, 1 reply; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

We don't support ATAGs, and this argument is always actually an FDT
address.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
v2: Update start_secondary too, s/ATAG/DTB in head.S comments
---
 xen/arch/arm/arm32/head.S |    4 ++--
 xen/arch/arm/setup.c      |    6 +++---
 xen/arch/arm/smpboot.c    |    2 +-
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
index 92fc36c..5ec46c3 100644
--- a/xen/arch/arm/arm32/head.S
+++ b/xen/arch/arm/arm32/head.S
@@ -73,7 +73,7 @@ past_zImage:
 
         /* Save the bootloader arguments in less-clobberable registers */
         /* No need to save r1 == Unused ARM-linux machine type */
-        mov   r8, r2                 /* r8 := ATAG base address */
+        mov   r8, r2                 /* r8 := DTB base address */
 
         /* Find out where we are */
         ldr   r0, =start
@@ -334,7 +334,7 @@ launch:
         add   sp, #STACK_SIZE        /* (which grows down from the top). */
         sub   sp, #CPUINFO_sizeof    /* Make room for CPU save record */
         mov   r0, r10                /* Marshal args: - phys_offset */
-        mov   r1, r8                 /*               - ATAG address */
+        mov   r1, r8                 /*               - DTB address */
         movs  r2, r12                /*               - CPU ID */
         beq   start_xen              /* and disappear into the land of C */
         b     start_secondary        /* (to the appropriate entry point) */
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index 782d252..4e50b2b 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -329,7 +329,7 @@ void __init setup_cache(void)
 
 /* C entry point for boot CPU */
 void __init start_xen(unsigned long boot_phys_offset,
-                      unsigned long atag_paddr,
+                      unsigned long fdt_paddr,
                       unsigned long cpuid)
 {
     void *fdt;
@@ -341,7 +341,7 @@ void __init start_xen(unsigned long boot_phys_offset,
     smp_clear_cpu_maps();
 
     fdt = (void *)BOOT_MISC_VIRT_START
-        + (atag_paddr & ((1 << SECOND_SHIFT) - 1));
+        + (fdt_paddr & ((1 << SECOND_SHIFT) - 1));
     fdt_size = device_tree_early_init(fdt);
 
     cpus = smp_get_max_cpus();
@@ -365,7 +365,7 @@ void __init start_xen(unsigned long boot_phys_offset,
     set_current((struct vcpu *)0xfffff000); /* debug sanity */
     idle_vcpu[0] = current;
 
-    setup_mm(atag_paddr, fdt_size);
+    setup_mm(fdt_paddr, fdt_size);
 
     /* Setup Hyp vector base */
     WRITE_CP32((uint32_t) hyp_traps_vector, HVBAR);
diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c
index da4880c..60be1a4 100644
--- a/xen/arch/arm/smpboot.c
+++ b/xen/arch/arm/smpboot.c
@@ -132,7 +132,7 @@ make_cpus_ready(unsigned int max_cpus, unsigned long boot_phys_offset)
 
 /* Boot the current CPU */
 void __cpuinit start_secondary(unsigned long boot_phys_offset,
-                               unsigned long atag_paddr,
+                               unsigned long fdt_paddr,
                                unsigned long cpuid)
 {
     memset(get_cpu_info(), 0, sizeof (struct cpu_info));
-- 
1.7.2.5
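
The only non-mechanical part of the rename is the pointer computation in
start_xen(): the boot pagetables map the 2MB superpage containing the DTB
at BOOT_MISC_VIRT_START, so only the offset within that 2MB slot is taken
from fdt_paddr. A hedged sketch of that computation (illustrative helper
name, and assuming SECOND_SHIFT is the 2MB superpage shift, i.e. 21):

    /* Illustrative only: early virtual address of the FDT before the
     * real memory map is up.  Mirrors the setup.c hunk above. */
    static void *early_fdt_va(unsigned long fdt_paddr)
    {
        return (void *)BOOT_MISC_VIRT_START
            + (fdt_paddr & ((1 << SECOND_SHIFT) - 1)); /* offset within 2MB */
    }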


* [PATCH V2 03/46] xen: arm: do not pass a machine ID to dom0
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 01/46] xen: arm32: Don't bother with the bootloader provided ARM-Linux machine type Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 02/46] xen: arm: rename atag_paddr argument fdt_paddr Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-15 13:37   ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 04/46] arm: avoid inline asm for dsb, isb, wfi and sev Ian Campbell
                   ` (44 subsequent siblings)
  47 siblings, 1 reply; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Xen relies on DTB and we pass in a suitable device tree, so we don't need
to (and shouldn't) pretend to be a Versatile Express here.

In the same way, we already don't pass a machine ID to domU.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
 xen/arch/arm/domain_build.c |    2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 6abbb03..7403f1a 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -349,7 +349,7 @@ int construct_dom0(struct domain *d)
  */
 
     regs->r0 = 0; /* SBZ */
-    regs->r1 = 2272; /* Machine NR: Versatile Express */
+    regs->r1 = 0xffffffff; /* We use DTB therefore no machine id */
     regs->r2 = kinfo.dtb_paddr;
 
     WRITE_CP32(SCTLR_BASE, SCTLR);
-- 
1.7.2.5
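
For context, the registers set up above follow the ARM Linux boot
convention: r0 must be zero, r1 carries the machine number (now ~0, i.e.
"use the DTB"), and r2 points at the device tree blob. A small sketch with
an illustrative helper name, restating the hunk above:

    /* Sketch: initial register state handed to dom0 after this change. */
    static void set_dom0_boot_regs(struct cpu_user_regs *regs, paddr_t dtb_paddr)
    {
        regs->r0 = 0;           /* SBZ per the boot protocol */
        regs->r1 = 0xffffffff;  /* no machine number: use the DTB */
        regs->r2 = dtb_paddr;   /* physical address of the device tree */
    }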


* [PATCH V2 04/46] arm: avoid inline asm for dsb, isb, wfi and sev.
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (2 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 03/46] xen: arm: do not pass a machine ID to dom0 Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-21 14:51   ` Tim Deegan
  2013-02-14 16:47 ` [PATCH V2 05/46] xen: arm64: initial build + config changes, start of day code Ian Campbell
                   ` (43 subsequent siblings)
  47 siblings, 1 reply; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

"dsb" must be written "dsb sy" on arm64. "dsb sy" is also valid (and
synonymous) on arm32, but we have a macro, so let's use it.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
 xen/arch/arm/domain.c  |    5 ++++-
 xen/arch/arm/smpboot.c |   10 ++++++----
 2 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index e37ec54..e7d3ec6 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -29,7 +29,10 @@ void idle_loop(void)
 
         local_irq_disable();
         if ( cpu_is_haltable(smp_processor_id()) )
-            asm volatile ("dsb; wfi");
+        {
+            dsb();
+            wfi();
+        }
         local_irq_enable();
 
         do_tasklet();
diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c
index 60be1a4..86379b7 100644
--- a/xen/arch/arm/smpboot.c
+++ b/xen/arch/arm/smpboot.c
@@ -122,7 +122,8 @@ make_cpus_ready(unsigned int max_cpus, unsigned long boot_phys_offset)
         /* TODO: handle boards where CPUIDs are not contiguous */
         *gate = i;
         flush_xen_dcache(*gate);
-        asm volatile("isb; sev");
+        isb();
+        sev();
         /* And wait for it to respond */
         while ( ready_cpus < i )
             smp_rmb();
@@ -204,8 +205,8 @@ void stop_cpu(void)
     /* Make sure the write happens before we sleep forever */
     dsb();
     isb();
-    while ( 1 ) 
-        asm volatile("wfi");
+    while ( 1 )
+        wfi();
 }
 
 /* Bring up a remote CPU */
@@ -220,7 +221,8 @@ int __cpu_up(unsigned int cpu)
     /* we need to make sure that the change to smp_up_cpu is visible to
      * secondary cpus with D-cache off */
     flush_xen_dcache(smp_up_cpu);
-    asm volatile("isb; sev");
+    isb();
+    sev();
 
     while ( !cpu_online(cpu) )
     {
-- 
1.7.2.5
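
The macros themselves are not shown in this patch; as a rough sketch of
their shape (an assumption about the asm-arm headers, not a quote of them),
each wraps a single instruction in asm volatile with a "memory" clobber so
the compiler cannot reorder memory accesses across it:

    /* Hedged sketch only -- see the asm-arm headers for the real
     * definitions.  "dsb" with no operand is not accepted by arm64
     * assemblers, hence "dsb sy", which arm32 also accepts. */
    #define dsb() asm volatile ("dsb sy" : : : "memory")
    #define isb() asm volatile ("isb"    : : : "memory")
    #define wfi() asm volatile ("wfi"    : : : "memory")
    #define sev() asm volatile ("sev"    : : : "memory")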


* [PATCH V2 05/46] xen: arm64: initial build + config changes, start of day code
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (3 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 04/46] arm: avoid inline asm for dsb, isb, wfi and sev Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-21 14:56   ` Tim Deegan
  2013-02-14 16:47 ` [PATCH V2 06/46] xen: arm64: basic config and types headers Ian Campbell
                   ` (42 subsequent siblings)
  47 siblings, 1 reply; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
v2: - Add PSR_MODE definitions for 64-bit to arch-arm.h and use instead of
      defining in head.S
    - Nuke hard tabs in head.S and mode_switch.S with expand(1)
---
 Config.mk                        |    2 +-
 config/arm64.mk                  |   12 ++
 xen/arch/arm/Makefile            |    1 +
 xen/arch/arm/Rules.mk            |    6 +
 xen/arch/arm/arm64/Makefile      |    1 +
 xen/arch/arm/arm64/head.S        |  394 ++++++++++++++++++++++++++++++++++++++
 xen/arch/arm/arm64/mode_switch.S |   83 ++++++++
 xen/arch/arm/xen.lds.S           |    8 +-
 xen/include/asm-arm/page.h       |    1 +
 xen/include/public/arch-arm.h    |   14 ++
 xen/include/public/hvm/save.h    |    2 +-
 xen/include/public/xen.h         |    2 +-
 xen/include/xen/libelf.h         |    2 +-
 13 files changed, 523 insertions(+), 5 deletions(-)
 create mode 100644 config/arm64.mk
 create mode 100644 xen/arch/arm/arm64/Makefile
 create mode 100644 xen/arch/arm/arm64/head.S
 create mode 100644 xen/arch/arm/arm64/mode_switch.S

diff --git a/Config.mk b/Config.mk
index 64541c8..ea64925 100644
--- a/Config.mk
+++ b/Config.mk
@@ -15,7 +15,7 @@ debug_symbols ?= $(debug)
 
 XEN_COMPILE_ARCH    ?= $(shell uname -m | sed -e s/i.86/x86_32/ \
                          -e s/i86pc/x86_32/ -e s/amd64/x86_64/ \
-                         -e s/armv7.*/arm32/)
+                         -e s/armv7.*/arm32/ -e s/armv8.*/arm64/)
 
 XEN_TARGET_ARCH     ?= $(XEN_COMPILE_ARCH)
 XEN_OS              ?= $(shell uname -s)
diff --git a/config/arm64.mk b/config/arm64.mk
new file mode 100644
index 0000000..b2457eb
--- /dev/null
+++ b/config/arm64.mk
@@ -0,0 +1,12 @@
+CONFIG_ARM := y
+CONFIG_ARM_64 := y
+CONFIG_ARM_$(XEN_OS) := y
+
+CFLAGS += #-marm -march= -mcpu= etc
+
+HAS_PL011 := y
+
+# Use only if calling $(LD) directly.
+LDFLAGS_DIRECT += -maarch64elf
+
+CONFIG_LOAD_ADDRESS ?= 0x80000000
diff --git a/xen/arch/arm/Makefile b/xen/arch/arm/Makefile
index f2822f2..7ff67c7 100644
--- a/xen/arch/arm/Makefile
+++ b/xen/arch/arm/Makefile
@@ -1,4 +1,5 @@
 subdir-$(arm32) += arm32
+subdir-$(arm64) += arm64
 
 obj-y += early_printk.o
 obj-y += domain.o
diff --git a/xen/arch/arm/Rules.mk b/xen/arch/arm/Rules.mk
index 5b5768a..29b605d 100644
--- a/xen/arch/arm/Rules.mk
+++ b/xen/arch/arm/Rules.mk
@@ -26,6 +26,12 @@ arm32 := y
 arm64 := n
 endif
 
+ifeq ($(TARGET_SUBARCH),arm64)
+CFLAGS += -mcpu=generic
+arm32 := n
+arm64 := y
+endif
+
 ifneq ($(call cc-option,$(CC),-fvisibility=hidden,n),n)
 CFLAGS += -DGCC_HAS_VISIBILITY_ATTRIBUTE
 endif
diff --git a/xen/arch/arm/arm64/Makefile b/xen/arch/arm/arm64/Makefile
new file mode 100644
index 0000000..dffbeb1
--- /dev/null
+++ b/xen/arch/arm/arm64/Makefile
@@ -0,0 +1 @@
+obj-y += mode_switch.o
diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
new file mode 100644
index 0000000..847043b
--- /dev/null
+++ b/xen/arch/arm/arm64/head.S
@@ -0,0 +1,394 @@
+/*
+ * xen/arch/arm/head.S
+ *
+ * Start-of-day code for an ARMv8.
+ *
+ * Ian Campbell <ian.campbell@citrix.com>
+ * Copyright (c) 2012 Citrix Systems.
+ *
+ * Based on ARMv7-A head.S by
+ * Tim Deegan <tim@xen.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/config.h>
+#include <asm/page.h>
+#include <asm/asm_defns.h>
+
+#define PT_PT     0xe7f /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=111 T=1 P=1 */
+#define PT_MEM    0xe7d /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=111 T=0 P=1 */
+#define PT_DEV    0xe71 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=0 P=1 */
+#define PT_DEV_L3 0xe73 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=1 P=1 */
+
+/* Macro to print a string to the UART, if there is one.
+ * Clobbers r0-r3. */
+#ifdef EARLY_UART_ADDRESS
+#define PRINT(_s)       \
+        adr   x0, 98f ; \
+        bl    puts    ; \
+        b     99f     ; \
+98:     .asciz _s     ; \
+        .align 2      ; \
+99:
+#else
+#define PRINT(s)
+#endif
+
+        /*.aarch64*/
+
+        /*
+         * Kernel startup entry point.
+         * ---------------------------
+         *
+         * The requirements are:
+         *   MMU = off, D-cache = off, I-cache = on or off,
+         *   x0 = physical address to the FDT blob.
+         *
+         * This must be the very first address in the loaded image.
+         * It should be linked at XEN_VIRT_START, and loaded at any
+         * 2MB-aligned address.  All of text+data+bss must fit in 2MB,
+         * or the initial pagetable code below will need adjustment.
+         */
+
+        .global start
+start:
+        /*
+         * DO NOT MODIFY. Image header expected by Linux boot-loaders.
+         */
+        b       real_start           /* branch to kernel start, magic */
+        .long   0                    /* reserved */
+        .quad   0                    /* Image load offset from start of RAM */
+        .quad   0                    /* reserved */
+        .quad   0                    /* reserved */
+
+real_start:
+        msr   DAIFSet, 0xf           /* Disable all interrupts */
+
+        /* Save the bootloader arguments in less-clobberable registers */
+        mov   x21, x0                /* x21 := DTB, physical address  */
+
+        /* Find out where we are */
+        ldr   x0, =start
+        adr   x19, start             /* x19 := paddr (start) */
+        sub   x20, x19, x0           /* x20 := phys-offset */
+
+        /* Using the DTB in the .dtb section? */
+#ifdef CONFIG_DTB_FILE
+        ldr   x21, =_sdtb
+        add   x21, x21, x20          /* x21 := paddr(DTB) */
+#endif
+
+        /* Are we the boot CPU? */
+        mov   x22, #0                /* x22 := CPU ID */
+        mrs   x0, mpidr_el1
+        tbz   x0, 31, boot_cpu       /* Multiprocessor extension supported? */
+        tbnz  x0, 30, boot_cpu       /* Uniprocessor system? */
+
+        mov   x13, #(0xff << 24)
+        bics  x22, x0, x13           /* Mask out flags to get CPU ID */
+        b.eq  boot_cpu               /* If we're CPU 0, boot now */
+
+        /* Non-boot CPUs wait here to be woken up one at a time. */
+1:      dsb   sy
+        ldr   x0, =smp_up_cpu        /* VA of gate */
+        add   x0, x0, x20            /* PA of gate */
+        ldr   x1, [x0]               /* Which CPU is being booted? */
+        cmp   x1, x22                /* Is it us? */
+        b.eq  2f
+        wfe
+        b     1b
+2:
+
+boot_cpu:
+#ifdef EARLY_UART_ADDRESS
+        ldr   x23, =EARLY_UART_ADDRESS  /* x23 := UART base address */
+        cbnz  x22, 1f
+        bl    init_uart                 /* CPU 0 sets up the UART too */
+1:      PRINT("- CPU ")
+        mov   x0, x22
+        bl    putn
+        PRINT(" booting -\r\n")
+#endif
+
+        PRINT("- Current EL ")
+        mrs   x0, CurrentEL
+        bl    putn
+        PRINT(" -\r\n")
+
+        /* Are we in EL3 */
+        mrs   x0, CurrentEL
+        cmp   x0, #PSR_MODE_EL3t
+        ccmp  x0, #PSR_MODE_EL3h, #0x4, ne
+        b.eq  1f /* Yes */
+
+        /* Are we in EL2 */
+        cmp   x0, #PSR_MODE_EL2t
+        ccmp  x0, #PSR_MODE_EL2h, #0x4, ne
+        b.eq  2f /* Yes */
+
+        /* Otherwise, it must have been EL0 or EL1 */
+        PRINT("- CPU is not in EL3 or EL2 -\r\n")
+        b     fail
+
+1:      PRINT("- Started in EL3 -\r\n- Entering EL2 -\r\n")
+        ldr   x1, =enter_el2_mode    /* VA of function */
+        add   x1, x1, x20            /* PA of function */
+        adr   x30, hyp               /* Set return address for call */
+        br    x1                     /* Call function */
+
+2:      PRINT("- Started in Hyp mode -\r\n")
+
+hyp:
+        /* Zero BSS On the boot CPU to avoid nasty surprises */
+        cbnz  x22, skip_bss
+
+        PRINT("- Zero BSS -\r\n")
+        ldr   x0, =__bss_start       /* Load start & end of bss */
+        ldr   x1, =__bss_end
+        add   x0, x0, x20            /* Apply physical offset */
+        add   x1, x1, x20
+
+1:      str   xzr, [x0], #8
+        cmp   x0, x1
+        b.lo  1b
+
+skip_bss:
+
+        PRINT("- Setting up control registers -\r\n")
+
+        /* Set up memory attribute type tables */
+        ldr   x0, =MAIRVAL
+        msr   mair_el2, x0
+
+        /* Set up the HTCR:
+         * PASize -- 4G
+         * Top byte is used
+         * PT walks use Outer-Shareable accesses,
+         * PT walks are write-back, no-write-allocate in both cache levels,
+         * Full 64-bit address space goes through this table. */
+        ldr   x0, =0x80802500
+        msr   tcr_el2, x0
+
+        /* Set up the HSCTLR:
+         * Exceptions in LE ARM,
+         * Low-latency IRQs disabled,
+         * Write-implies-XN disabled (for now),
+         * D-cache disabled (for now),
+         * I-cache enabled,
+         * Alignment checking enabled,
+         * MMU translation disabled (for now). */
+        ldr   x0, =(HSCTLR_BASE|SCTLR_A)
+        msr   SCTLR_EL2, x0
+
+        /* Write Xen's PT's paddr into the HTTBR */
+        ldr   x4, =xen_pgtable
+        add   x4, x4, x20            /* x4 := paddr (xen_pagetable) */
+        msr   TTBR0_EL2, x4
+
+        /* Non-boot CPUs don't need to rebuild the pagetable */
+        cbnz  x22, pt_ready
+
+        ldr   x1, =xen_first
+        add   x1, x1, x20            /* x1 := paddr (xen_first) */
+        mov   x3, #PT_PT             /* x2 := table map of xen_first */
+        orr   x2, x1, x3             /* (+ rights for linear PT) */
+        str   x2, [x4, #0]           /* Map it in slot 0 */
+
+        mov   x4, x1                 /* Next level into xen_first */
+
+       /* console fixmap */
+#ifdef EARLY_UART_ADDRESS
+        ldr   x1, =xen_fixmap
+        add   x1, x1, x20            /* x1 := paddr (xen_fixmap) */
+        lsr   x2, x23, #12
+        lsl   x2, x2, #12            /* 4K aligned paddr of UART */
+        mov   x3, #PT_DEV_L3
+        orr   x2, x2, x3             /* x2 := 4K dev map including UART */
+        str   x2, [x1, #(FIXMAP_CONSOLE*8)] /* Map it in the first fixmap's slot */
+#endif
+
+        /* Build the baseline idle pagetable's first-level entries */
+        ldr   x1, =xen_second
+        add   x1, x1, x20            /* x1 := paddr (xen_second) */
+        mov   x3, #PT_PT             /* x2 := table map of xen_second */
+        orr   x2, x1, x3             /* (+ rights for linear PT) */
+        str   x2, [x4, #0]           /* Map it in slot 0 */
+        add   x2, x2, #0x1000
+        str   x2, [x4, #8]           /* Map 2nd page in slot 1 */
+        add   x2, x2, #0x1000
+        str   x2, [x4, #16]          /* Map 3rd page in slot 2 */
+        add   x2, x2, #0x1000
+        str   x2, [x4, #24]          /* Map 4th page in slot 3 */
+
+        /* Now set up the second-level entries */
+        mov   x3, #PT_MEM
+        orr   x2, x19, x3            /* x2 := 2MB normal map of Xen */
+        orr   x4, xzr, x19, lsr #18
+        str   x2, [x1, x4]           /* Map Xen there */
+        ldr   x4, =start
+        lsr   x4, x4, #18            /* Slot for vaddr(start) */
+        str   x2, [x1, x4]           /* Map Xen there too */
+
+        /* xen_fixmap pagetable */
+        ldr   x2, =xen_fixmap
+        add   x2, x2, x20            /* x2 := paddr (xen_fixmap) */
+        mov   x3, #PT_PT
+        orr   x2, x2, x3             /* x2 := table map of xen_fixmap */
+        add   x4, x4, #8
+        str   x2, [x1, x4]           /* Map it in the fixmap's slot */
+
+        lsr   x2, x21, #21
+        lsl   x2, x2, #21            /* 2MB-aligned paddr of DTB */
+        mov   x3, #PT_MEM            /* x2 := 2MB RAM incl. DTB */
+        orr   x2, x2, x3
+        add   x4, x4, #8
+        str   x2, [x1, x4]           /* Map it in the early boot slot */
+
+pt_ready:
+        PRINT("- Turning on paging -\r\n")
+
+        ldr   x1, =paging            /* Explicit vaddr, not RIP-relative */
+        mrs   x0, SCTLR_EL2
+        orr   x0, x0, #SCTLR_M       /* Enable MMU */
+        orr   x0, x0, #SCTLR_C       /* Enable D-cache */
+        dsb   sy                     /* Flush PTE writes and finish reads */
+        msr   SCTLR_EL2, x0          /* now paging is enabled */
+        isb                          /* Now, flush the icache */
+        br    x1                     /* Get a proper vaddr into PC */
+paging:
+
+#ifdef EARLY_UART_ADDRESS
+        /* Use a virtual address to access the UART. */
+        ldr   x23, =FIXMAP_ADDR(FIXMAP_CONSOLE)
+#endif
+
+        PRINT("- Ready -\r\n")
+
+        /* The boot CPU should go straight into C now */
+        cbz   x22, launch
+
+        /* Non-boot CPUs need to move on to the relocated pagetables */
+        //mov   x0, #0
+        ldr   x4, =boot_ttbr         /* VA of TTBR0_EL2 stashed by CPU 0 */
+        add   x4, x4, x20            /* PA of it */
+        ldr   x4, [x4]               /* Actual value */
+        dsb   sy
+        msr   TTBR0_EL2, x4
+        dsb   sy
+        isb
+        tlbi  alle2
+        dsb   sy                     /* Ensure completion of TLB flush */
+        isb
+
+        /* Non-boot CPUs report that they've got this far */
+        ldr   x0, =ready_cpus
+1:      ldaxr x1, [x0]               /*            { read # of ready CPUs } */
+        add   x1, x1, #1             /* Atomically { ++                   } */
+        stlxr w2, x1, [x0]           /*            { writeback            } */
+        cbnz  w2, 1b
+        dsb   sy
+        dc    cvac, x0               /* Flush D-Cache */
+        dsb   sy
+
+        /* Here, the non-boot CPUs must wait again -- they're now running on
+         * the boot CPU's pagetables so it's safe for the boot CPU to
+         * overwrite the non-relocated copy of Xen.  Once it's done that,
+         * and brought up the memory allocator, non-boot CPUs can get their
+         * own stacks and enter C. */
+1:      wfe
+        dsb   sy
+        ldr   x0, =smp_up_cpu
+        ldr   x1, [x0]               /* Which CPU is being booted? */
+        cmp   x1, x12                /* Is it us? */
+        b.ne  1b
+
+launch:
+        ldr   x0, =init_stack        /* Find the boot-time stack */
+        ldr   x0, [x0]
+        add   x0, x0, #STACK_SIZE    /* (which grows down from the top). */
+        sub   x0, x0, #CPUINFO_sizeof /* Make room for CPU save record */
+        mov   sp, x0
+
+        mov   x0, x20                /* Marshal args: - phys_offset */
+        mov   x1, x21                /*               - FDT */
+        mov   x2, x22                /*               - CPU ID */
+        cbz   x22, start_xen         /* and disappear into the land of C */
+        b     start_secondary        /* (to the appropriate entry point) */
+
+/* Fail-stop
+ * r0: string explaining why */
+fail:   PRINT("- Boot failed -\r\n")
+1:      wfe
+        b     1b
+
+#ifdef EARLY_UART_ADDRESS
+
+/* Bring up the UART. Specific to the PL011 UART.
+ * Clobbers r0-r2 */
+init_uart:
+        mov   x1, #0x0
+        strh  w1, [x23, #0x24]       /* -> UARTIBRD (Baud divisor fraction) */
+        mov   x1, #0x4               /* 7.3728MHz / 0x4 == 16 * 115200 */
+        strh  w1, [x23, #0x24]       /* -> UARTIBRD (Baud divisor integer) */
+        mov   x1, #0x60              /* 8n1 */
+        strh  w1, [x23, #0x24]       /* -> UARTLCR_H (Line control) */
+        ldr   x1, =0x00000301        /* RXE | TXE | UARTEN */
+        strh  w1, [x23, #0x30]       /* -> UARTCR (Control Register) */
+        adr   x0, 1f
+        b     puts
+1:      .asciz "- UART enabled -\r\n"
+        .align 4
+
+/* Print early debug messages.  Specific to the PL011 UART.
+ * r0: Nul-terminated string to print.
+ * Clobbers r0-r2 */
+puts:
+        ldrh  w2, [x23, #0x18]       /* <- UARTFR (Flag register) */
+        tst   w2, #0x8               /* Check BUSY bit */
+        b.ne  puts                   /* Wait for the UART to be ready */
+        ldrb  w2, [x0], #1           /* Load next char */
+        cbz   w2, 1f                 /* Exit on nul */
+        str   w2, [x23]              /* -> UARTDR (Data Register) */
+        b     puts
+1:
+        ret
+
+/* Print a 32-bit number in hex.  Specific to the PL011 UART.
+ * r0: Number to print.
+ * clobbers r0-r3 */
+putn:
+        adr   x1, hex
+        mov   x3, #8
+1:      ldrh  w2, [x23, #0x18]       /* <- UARTFR (Flag register) */
+        tst   w2, #0x8               /* Check BUSY bit */
+        b.ne  1b                     /* Wait for the UART to be ready */
+        and   x2, x0, #0xf0000000    /* Mask off the top nybble */
+        lsr   x2, x2, #28
+        ldrb  w2, [x1, x2]           /* Convert to a char */
+        strb  w2, [x23]              /* -> UARTDR (Data Register) */
+        lsl   x0, x0, #4             /* Roll it through one nybble at a time */
+        subs  x3, x3, #1
+        b.ne  1b
+        ret
+
+hex:    .ascii "0123456789abcdef"
+        .align 2
+
+#else  /* EARLY_UART_ADDRESS */
+
+init_uart:
+.global early_puts
+early_puts:
+puts:
+putn:   mov   pc, lr
+
+#endif /* EARLY_UART_ADDRESS */
diff --git a/xen/arch/arm/arm64/mode_switch.S b/xen/arch/arm/arm64/mode_switch.S
new file mode 100644
index 0000000..d1f66e5
--- /dev/null
+++ b/xen/arch/arm/arm64/mode_switch.S
@@ -0,0 +1,83 @@
+/*
+ * xen/arch/arm/arm64/mode_switch.S
+ *
+ * Start-of-day code to take a CPU from EL3 to EL2. Largely taken from
+ * bootwrapper.
+ *
+ * Ian Campbell <ian.campbell@citrix.com>
+ * Copyright (c) 2012 Citrix Systems.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/config.h>
+#include <asm/page.h>
+#include <asm/asm_defns.h>
+
+/* Get up a CPU into EL2.  Clobbers x0-x3.
+ *
+ * Expects x22 == CPU number
+ * Expects x30  == EL2 entry point
+ *
+ * This code is specific to the VE model, and not intended to be used
+ * on production systems.  As such it's a bit hackier than the main
+ * boot code in head.S.  In future it will be replaced by better
+ * integration with the bootloader/firmware so that Xen always starts
+ * at EL2.
+ */
+
+.globl enter_el2_mode
+enter_el2_mode:
+        mov     x0, #0x30                       // RES1
+        orr     x0, x0, #(1 << 0)               // Non-secure EL1
+        orr     x0, x0, #(1 << 8)               // HVC enable
+        orr     x0, x0, #(1 << 10)              // 64-bit EL2
+        msr     scr_el3, x0
+
+        msr     cptr_el3, xzr                   // Disable copro. traps to EL3
+
+        ldr     x0, =0x01800000                 // 24Mhz
+        msr     cntfrq_el0, x0
+
+        /*
+         * Check for the primary CPU to avoid a race on the distributor
+         * registers.
+         */
+        cbnz    x22, 1f
+
+        ldr     x1, =(GIC_BASE_ADDRESS+GIC_DR_OFFSET) // GICD_CTLR
+        mov     w0, #3                          // EnableGrp0 | EnableGrp1
+        str     w0, [x1]
+
+1:      ldr     x1, =(GIC_BASE_ADDRESS+GIC_DR_OFFSET+0x80) // GICD_IGROUPR
+        mov     w0, #~0                         // Grp1 interrupts
+        str     w0, [x1], #4
+        b.ne    2f                              // Only local interrupts for secondary CPUs
+        str     w0, [x1], #4
+        str     w0, [x1], #4
+
+2:      ldr     x1, =(GIC_BASE_ADDRESS+GIC_CR_OFFSET) // GICC_CTLR
+        ldr     w0, [x1]
+        mov     w0, #3                          // EnableGrp0 | EnableGrp1
+        str     w0, [x1]
+
+        mov     w0, #1 << 7                     // allow NS access to GICC_PMR
+        str     w0, [x1, #4]                    // GICC_PMR
+
+        msr     sctlr_el2, xzr
+
+        /*
+         * Prepare the switch to the EL2_SP1 mode from EL3
+         */
+        msr     elr_el3, x30                    // Return to desired function
+        mov     x1, #0x3c9                      // EL2_SP1 | D | A | I | F
+        msr     spsr_el3, x1
+        eret
diff --git a/xen/arch/arm/xen.lds.S b/xen/arch/arm/xen.lds.S
index 410d7db..b1f0a78 100644
--- a/xen/arch/arm/xen.lds.S
+++ b/xen/arch/arm/xen.lds.S
@@ -11,7 +11,13 @@
 
 ENTRY(start)
 
-OUTPUT_ARCH(arm)
+#if defined(__arm__)
+#define FORMAT arm
+#elif defined(__aarch64__)
+#define FORMAT aarch64
+#endif
+
+OUTPUT_ARCH(FORMAT)
 
 PHDRS
 {
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index 9acd0af..e0a636f 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -38,6 +38,7 @@
  */
 #define MAIR0VAL 0xeeaa4400
 #define MAIR1VAL 0xff000004
+#define MAIRVAL (MAIR0VAL|MAIR1VAL<<32)
 
 /*
  * Attribute Indexes.
diff --git a/xen/include/public/arch-arm.h b/xen/include/public/arch-arm.h
index 8dd9062..dc12524 100644
--- a/xen/include/public/arch-arm.h
+++ b/xen/include/public/arch-arm.h
@@ -174,6 +174,8 @@ typedef uint64_t xen_callback_t;
 
 /* 0-4: Mode */
 #define PSR_MODE_MASK 0x1f
+
+/* 32 bit modes */
 #define PSR_MODE_USR 0x10
 #define PSR_MODE_FIQ 0x11
 #define PSR_MODE_IRQ 0x12
@@ -184,6 +186,18 @@ typedef uint64_t xen_callback_t;
 #define PSR_MODE_UND 0x1b
 #define PSR_MODE_SYS 0x1f
 
+/* 64 bit modes */
+#ifdef CONFIG_ARM_64
+#define PSR_MODE_BIT  0x10 /* Set iff AArch32 */
+#define PSR_MODE_EL3h 0x0d
+#define PSR_MODE_EL3t 0x0c
+#define PSR_MODE_EL2h 0x09
+#define PSR_MODE_EL2t 0x08
+#define PSR_MODE_EL1h 0x05
+#define PSR_MODE_EL1t 0x04
+#define PSR_MODE_EL0t 0x00
+#endif
+
 #define PSR_THUMB       (1<<5)        /* Thumb Mode enable */
 #define PSR_FIQ_MASK    (1<<6)        /* Fast Interrupt mask */
 #define PSR_IRQ_MASK    (1<<7)        /* Interrupt mask */
diff --git a/xen/include/public/hvm/save.h b/xen/include/public/hvm/save.h
index 5538d8e..cc8b5fd 100644
--- a/xen/include/public/hvm/save.h
+++ b/xen/include/public/hvm/save.h
@@ -102,7 +102,7 @@ DECLARE_HVM_SAVE_TYPE(END, 0, struct hvm_save_end);
 
 #if defined(__i386__) || defined(__x86_64__)
 #include "../arch-x86/hvm/save.h"
-#elif defined(__arm__)
+#elif defined(__arm__) || defined(__aarch64__)
 #include "../arch-arm/hvm/save.h"
 #else
 #error "unsupported architecture"
diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h
index 846f446..a1927c0 100644
--- a/xen/include/public/xen.h
+++ b/xen/include/public/xen.h
@@ -31,7 +31,7 @@
 
 #if defined(__i386__) || defined(__x86_64__)
 #include "arch-x86/xen.h"
-#elif defined(__arm__)
+#elif defined(__arm__) || defined (__aarch64__)
 #include "arch-arm.h"
 #else
 #error "Unsupported architecture"
diff --git a/xen/include/xen/libelf.h b/xen/include/xen/libelf.h
index e8f6508..218bb18 100644
--- a/xen/include/xen/libelf.h
+++ b/xen/include/xen/libelf.h
@@ -23,7 +23,7 @@
 #ifndef __XEN_LIBELF_H__
 #define __XEN_LIBELF_H__
 
-#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
 #define XEN_ELF_LITTLE_ENDIAN
 #else
 #error define architectural endianness
-- 
1.7.2.5
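
One detail of head.S worth spelling out: the first few words of the image
double as the header that Linux-aware boot-loaders expect, so Xen can be
loaded in the same way as an arm64 kernel Image. Expressed as a C struct
(field names are illustrative; the sizes and order match the .long/.quad
directives at start: above):

    #include <stdint.h>

    /* Sketch of the 32-byte header emitted at 'start:' in arm64/head.S. */
    struct arm64_image_header {
        uint32_t code0;        /* 'b real_start' -- executable branch */
        uint32_t reserved0;    /* .long 0 */
        uint64_t text_offset;  /* image load offset from start of RAM */
        uint64_t reserved1;
        uint64_t reserved2;
    };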


* [PATCH V2 06/46] xen: arm64: basic config and types headers
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (4 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 05/46] xen: arm64: initial build + config changes, start of day code Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 07/46] xen: arm64: spinlocks Ian Campbell
                   ` (41 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

The 64-bit bitops are taken from the Linux asm-generic implementations. They
should be replaced with optimised versions from the Linux arm64 port when they
become available.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
v2: mention bitops heritage.
---
 xen/arch/arm/arm64/Makefile            |    2 +
 xen/arch/arm/arm64/lib/Makefile        |    1 +
 xen/arch/arm/arm64/lib/bitops.c        |   22 +++
 xen/arch/arm/arm64/lib/find_next_bit.c |  284 ++++++++++++++++++++++++++++++++
 xen/include/asm-arm/arm32/bitops.h     |   54 ++++++
 xen/include/asm-arm/arm64/bitops.h     |  283 +++++++++++++++++++++++++++++++
 xen/include/asm-arm/bitops.h           |   65 ++------
 xen/include/asm-arm/config.h           |   15 ++
 xen/include/asm-arm/types.h            |   17 ++-
 9 files changed, 686 insertions(+), 57 deletions(-)
 create mode 100644 xen/arch/arm/arm64/lib/Makefile
 create mode 100644 xen/arch/arm/arm64/lib/bitops.c
 create mode 100644 xen/arch/arm/arm64/lib/find_next_bit.c
 create mode 100644 xen/include/asm-arm/arm32/bitops.h
 create mode 100644 xen/include/asm-arm/arm64/bitops.h

diff --git a/xen/arch/arm/arm64/Makefile b/xen/arch/arm/arm64/Makefile
index dffbeb1..c447eaa 100644
--- a/xen/arch/arm/arm64/Makefile
+++ b/xen/arch/arm/arm64/Makefile
@@ -1 +1,3 @@
+subdir-y += lib
+
 obj-y += mode_switch.o
diff --git a/xen/arch/arm/arm64/lib/Makefile b/xen/arch/arm/arm64/lib/Makefile
new file mode 100644
index 0000000..32c02c4
--- /dev/null
+++ b/xen/arch/arm/arm64/lib/Makefile
@@ -0,0 +1 @@
+obj-y += bitops.o find_next_bit.o
diff --git a/xen/arch/arm/arm64/lib/bitops.c b/xen/arch/arm/arm64/lib/bitops.c
new file mode 100644
index 0000000..02d8d78
--- /dev/null
+++ b/xen/arch/arm/arm64/lib/bitops.c
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2012 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <xen/spinlock.h>
+#include <xen/bitops.h>
+
+spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] /*__lock_aligned*/ = {
+       [0 ... (ATOMIC_HASH_SIZE-1)]  = SPIN_LOCK_UNLOCKED
+};
diff --git a/xen/arch/arm/arm64/lib/find_next_bit.c b/xen/arch/arm/arm64/lib/find_next_bit.c
new file mode 100644
index 0000000..aea69c2
--- /dev/null
+++ b/xen/arch/arm/arm64/lib/find_next_bit.c
@@ -0,0 +1,284 @@
+/* find_next_bit.c: fallback find next bit implementation
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <xen/config.h>
+#include <xen/bitops.h>
+#include <asm/types.h>
+#include <asm/byteorder.h>
+
+#define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)
+
+#ifndef find_next_bit
+/*
+ * Find the next set bit in a memory region.
+ */
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+			    unsigned long offset)
+{
+	const unsigned long *p = addr + BITOP_WORD(offset);
+	unsigned long result = offset & ~(BITS_PER_LONG-1);
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset %= BITS_PER_LONG;
+	if (offset) {
+		tmp = *(p++);
+		tmp &= (~0UL << offset);
+		if (size < BITS_PER_LONG)
+			goto found_first;
+		if (tmp)
+			goto found_middle;
+		size -= BITS_PER_LONG;
+		result += BITS_PER_LONG;
+	}
+	while (size & ~(BITS_PER_LONG-1)) {
+		if ((tmp = *(p++)))
+			goto found_middle;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
+	}
+	if (!size)
+		return result;
+	tmp = *p;
+
+found_first:
+	tmp &= (~0UL >> (BITS_PER_LONG - size));
+	if (tmp == 0UL)		/* Are any bits set? */
+		return result + size;	/* Nope. */
+found_middle:
+	return result + __ffs(tmp);
+}
+EXPORT_SYMBOL(find_next_bit);
+#endif
+
+#ifndef find_next_zero_bit
+/*
+ * This implementation of find_{first,next}_zero_bit was stolen from
+ * Linus' asm-alpha/bitops.h.
+ */
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+				 unsigned long offset)
+{
+	const unsigned long *p = addr + BITOP_WORD(offset);
+	unsigned long result = offset & ~(BITS_PER_LONG-1);
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset %= BITS_PER_LONG;
+	if (offset) {
+		tmp = *(p++);
+		tmp |= ~0UL >> (BITS_PER_LONG - offset);
+		if (size < BITS_PER_LONG)
+			goto found_first;
+		if (~tmp)
+			goto found_middle;
+		size -= BITS_PER_LONG;
+		result += BITS_PER_LONG;
+	}
+	while (size & ~(BITS_PER_LONG-1)) {
+		if (~(tmp = *(p++)))
+			goto found_middle;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
+	}
+	if (!size)
+		return result;
+	tmp = *p;
+
+found_first:
+	tmp |= ~0UL << size;
+	if (tmp == ~0UL)	/* Are any bits zero? */
+		return result + size;	/* Nope. */
+found_middle:
+	return result + ffz(tmp);
+}
+EXPORT_SYMBOL(find_next_zero_bit);
+#endif
+
+#ifndef find_first_bit
+/*
+ * Find the first set bit in a memory region.
+ */
+unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
+{
+	const unsigned long *p = addr;
+	unsigned long result = 0;
+	unsigned long tmp;
+
+	while (size & ~(BITS_PER_LONG-1)) {
+		if ((tmp = *(p++)))
+			goto found;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
+	}
+	if (!size)
+		return result;
+
+	tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
+	if (tmp == 0UL)		/* Are any bits set? */
+		return result + size;	/* Nope. */
+found:
+	return result + __ffs(tmp);
+}
+EXPORT_SYMBOL(find_first_bit);
+#endif
+
+#ifndef find_first_zero_bit
+/*
+ * Find the first cleared bit in a memory region.
+ */
+unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
+{
+	const unsigned long *p = addr;
+	unsigned long result = 0;
+	unsigned long tmp;
+
+	while (size & ~(BITS_PER_LONG-1)) {
+		if (~(tmp = *(p++)))
+			goto found;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
+	}
+	if (!size)
+		return result;
+
+	tmp = (*p) | (~0UL << size);
+	if (tmp == ~0UL)	/* Are any bits zero? */
+		return result + size;	/* Nope. */
+found:
+	return result + ffz(tmp);
+}
+EXPORT_SYMBOL(find_first_zero_bit);
+#endif
+
+#ifdef __BIG_ENDIAN
+
+/* include/linux/byteorder does not support "unsigned long" type */
+static inline unsigned long ext2_swabp(const unsigned long * x)
+{
+#if BITS_PER_LONG == 64
+	return (unsigned long) __swab64p((u64 *) x);
+#elif BITS_PER_LONG == 32
+	return (unsigned long) __swab32p((u32 *) x);
+#else
+#error BITS_PER_LONG not defined
+#endif
+}
+
+/* include/linux/byteorder doesn't support "unsigned long" type */
+static inline unsigned long ext2_swab(const unsigned long y)
+{
+#if BITS_PER_LONG == 64
+	return (unsigned long) __swab64((u64) y);
+#elif BITS_PER_LONG == 32
+	return (unsigned long) __swab32((u32) y);
+#else
+#error BITS_PER_LONG not defined
+#endif
+}
+
+#ifndef find_next_zero_bit_le
+unsigned long find_next_zero_bit_le(const void *addr, unsigned
+		long size, unsigned long offset)
+{
+	const unsigned long *p = addr;
+	unsigned long result = offset & ~(BITS_PER_LONG - 1);
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	p += BITOP_WORD(offset);
+	size -= result;
+	offset &= (BITS_PER_LONG - 1UL);
+	if (offset) {
+		tmp = ext2_swabp(p++);
+		tmp |= (~0UL >> (BITS_PER_LONG - offset));
+		if (size < BITS_PER_LONG)
+			goto found_first;
+		if (~tmp)
+			goto found_middle;
+		size -= BITS_PER_LONG;
+		result += BITS_PER_LONG;
+	}
+
+	while (size & ~(BITS_PER_LONG - 1)) {
+		if (~(tmp = *(p++)))
+			goto found_middle_swap;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
+	}
+	if (!size)
+		return result;
+	tmp = ext2_swabp(p);
+found_first:
+	tmp |= ~0UL << size;
+	if (tmp == ~0UL)	/* Are any bits zero? */
+		return result + size; /* Nope. Skip ffz */
+found_middle:
+	return result + ffz(tmp);
+
+found_middle_swap:
+	return result + ffz(ext2_swab(tmp));
+}
+EXPORT_SYMBOL(find_next_zero_bit_le);
+#endif
+
+#ifndef find_next_bit_le
+unsigned long find_next_bit_le(const void *addr, unsigned
+		long size, unsigned long offset)
+{
+	const unsigned long *p = addr;
+	unsigned long result = offset & ~(BITS_PER_LONG - 1);
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	p += BITOP_WORD(offset);
+	size -= result;
+	offset &= (BITS_PER_LONG - 1UL);
+	if (offset) {
+		tmp = ext2_swabp(p++);
+		tmp &= (~0UL << offset);
+		if (size < BITS_PER_LONG)
+			goto found_first;
+		if (tmp)
+			goto found_middle;
+		size -= BITS_PER_LONG;
+		result += BITS_PER_LONG;
+	}
+
+	while (size & ~(BITS_PER_LONG - 1)) {
+		tmp = *(p++);
+		if (tmp)
+			goto found_middle_swap;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
+	}
+	if (!size)
+		return result;
+	tmp = ext2_swabp(p);
+found_first:
+	tmp &= (~0UL >> (BITS_PER_LONG - size));
+	if (tmp == 0UL)		/* Are any bits set? */
+		return result + size; /* Nope. */
+found_middle:
+	return result + __ffs(tmp);
+
+found_middle_swap:
+	return result + __ffs(ext2_swab(tmp));
+}
+EXPORT_SYMBOL(find_next_bit_le);
+#endif
+
+#endif /* __BIG_ENDIAN */
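
These generic find_* helpers are normally consumed through a "walk all the
set bits" loop rather than called one-off; a small illustrative usage (not
part of the patch, and assuming a word-aligned bitmap plus the helpers
implemented in this file):

    /* Illustrative only: visit every set bit in a bitmap of 'nbits' bits. */
    static void walk_set_bits(const unsigned long *bitmap, unsigned long nbits)
    {
        unsigned long bit;

        for ( bit = find_first_bit(bitmap, nbits);
              bit < nbits;
              bit = find_next_bit(bitmap, nbits, bit + 1) )
            printk("bit %lu set\n", bit);
    }
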
diff --git a/xen/include/asm-arm/arm32/bitops.h b/xen/include/asm-arm/arm32/bitops.h
new file mode 100644
index 0000000..0d05258
--- /dev/null
+++ b/xen/include/asm-arm/arm32/bitops.h
@@ -0,0 +1,54 @@
+#ifndef _ARM_ARM32_BITOPS_H
+#define _ARM_ARM32_BITOPS_H
+
+extern void _set_bit(int nr, volatile void * p);
+extern void _clear_bit(int nr, volatile void * p);
+extern void _change_bit(int nr, volatile void * p);
+extern int _test_and_set_bit(int nr, volatile void * p);
+extern int _test_and_clear_bit(int nr, volatile void * p);
+extern int _test_and_change_bit(int nr, volatile void * p);
+
+#define set_bit(n,p)              _set_bit(n,p)
+#define clear_bit(n,p)            _clear_bit(n,p)
+#define change_bit(n,p)           _change_bit(n,p)
+#define test_and_set_bit(n,p)     _test_and_set_bit(n,p)
+#define test_and_clear_bit(n,p)   _test_and_clear_bit(n,p)
+#define test_and_change_bit(n,p)  _test_and_change_bit(n,p)
+
+/*
+ * Little endian assembly bitops.  nr = 0 -> byte 0 bit 0.
+ */
+extern int _find_first_zero_bit_le(const void * p, unsigned size);
+extern int _find_next_zero_bit_le(const void * p, int size, int offset);
+extern int _find_first_bit_le(const unsigned long *p, unsigned size);
+extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
+
+/*
+ * Big endian assembly bitops.  nr = 0 -> byte 3 bit 0.
+ */
+extern int _find_first_zero_bit_be(const void * p, unsigned size);
+extern int _find_next_zero_bit_be(const void * p, int size, int offset);
+extern int _find_first_bit_be(const unsigned long *p, unsigned size);
+extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
+
+#ifndef __ARMEB__
+/*
+ * These are the little endian, atomic definitions.
+ */
+#define find_first_zero_bit(p,sz)	_find_first_zero_bit_le(p,sz)
+#define find_next_zero_bit(p,sz,off)	_find_next_zero_bit_le(p,sz,off)
+#define find_first_bit(p,sz)		_find_first_bit_le(p,sz)
+#define find_next_bit(p,sz,off)		_find_next_bit_le(p,sz,off)
+
+#else
+/*
+ * These are the big endian, atomic definitions.
+ */
+#define find_first_zero_bit(p,sz)	_find_first_zero_bit_be(p,sz)
+#define find_next_zero_bit(p,sz,off)	_find_next_zero_bit_be(p,sz,off)
+#define find_first_bit(p,sz)		_find_first_bit_be(p,sz)
+#define find_next_bit(p,sz,off)		_find_next_bit_be(p,sz,off)
+
+#endif
+
+#endif /* _ARM_ARM32_BITOPS_H */
diff --git a/xen/include/asm-arm/arm64/bitops.h b/xen/include/asm-arm/arm64/bitops.h
new file mode 100644
index 0000000..847d65c
--- /dev/null
+++ b/xen/include/asm-arm/arm64/bitops.h
@@ -0,0 +1,283 @@
+#ifndef _ARM_ARM64_BITOPS_H
+#define _ARM_ARM64_BITOPS_H
+
+/* Generic bitop support. Based on linux/include/asm-generic/bitops/atomic.h */
+
+#include <xen/spinlock.h>
+#include <xen/cache.h>          /* we use L1_CACHE_BYTES */
+
+/* Use an array of spinlocks for our atomic_ts.
+ * Hash function to index into a different SPINLOCK.
+ * Since "a" is usually an address, use one spinlock per cacheline.
+ */
+#  define ATOMIC_HASH_SIZE 4
+#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
+
+extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE]/* __lock_aligned*/;
+
+#define _atomic_spin_lock_irqsave(l,f) do {     \
+       spinlock_t *s = ATOMIC_HASH(l);          \
+       spin_lock_irqsave(s, f);\
+} while(0)
+
+#define _atomic_spin_unlock_irqrestore(l,f) do {\
+        spinlock_t *s = ATOMIC_HASH(l);         \
+        spin_unlock_irqrestore(s,f);		\
+} while(0)
+
+#define FIXUP(_p, _mask)                        \
+    {                                           \
+        unsigned long __p = (unsigned long)_p;  \
+        if (__p & 0x7) {                        \
+            if (_mask > 0xffffffff) {           \
+             __p = (__p+32)&~0x7; _mask >>=32;  \
+            } else {                            \
+                __p &= ~0x7; _mask <<= 32;      \
+            }                                   \
+            if (0)printk("BITOPS: Fixup misaligned ptr %p => %#lx\n", _p, __p); \
+            _p = (void *)__p;                   \
+        }                                       \
+    }
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered.  See __set_bit()
+ * if you do not require the atomic guarantees.
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+
+static inline void set_bit(int nr, volatile void *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long flags;
+
+        //printk("set_bit: nr %d addr %p mask %#lx p %p lock %p\n",
+        //       nr, addr, mask, p, ATOMIC_HASH(p));
+        FIXUP(p, mask);
+        //printk("set_bit: nr %d addr %p mask %#lx p %p lock %p\n",
+        //       nr, addr, mask, p, ATOMIC_HASH(p));
+        //printk("before *p is %#lx\n", *p);
+	_atomic_spin_lock_irqsave(p, flags);
+	*p  |= mask;
+	_atomic_spin_unlock_irqrestore(p, flags);
+        //printk(" after *p is %#lx\n", *p);
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered.  However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+static inline void clear_bit(int nr, volatile void *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long flags;
+
+        FIXUP(p, mask);
+
+	_atomic_spin_lock_irqsave(p, flags);
+	*p &= ~mask;
+	_atomic_spin_unlock_irqrestore(p, flags);
+}
+
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * change_bit() is atomic and may not be reordered. It may be
+ * reordered on other architectures than x86.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void change_bit(int nr, volatile void *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long flags;
+
+        FIXUP(p, mask);
+
+	_atomic_spin_lock_irqsave(p, flags);
+	*p ^= mask;
+	_atomic_spin_unlock_irqrestore(p, flags);
+}
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It may be reordered on other architectures than x86.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_set_bit(int nr, volatile void *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old;
+	unsigned long flags;
+
+        FIXUP(p, mask);
+
+	_atomic_spin_lock_irqsave(p, flags);
+	old = *p;
+	*p = old | mask;
+	_atomic_spin_unlock_irqrestore(p, flags);
+
+	return (old & mask) != 0;
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It can be reorderdered on other architectures other than x86.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_clear_bit(int nr, volatile void *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old;
+	unsigned long flags;
+
+        FIXUP(p, mask);
+
+	_atomic_spin_lock_irqsave(p, flags);
+	old = *p;
+	*p = old & ~mask;
+	_atomic_spin_unlock_irqrestore(p, flags);
+
+	return (old & mask) != 0;
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_change_bit(int nr, volatile void *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old;
+	unsigned long flags;
+
+        FIXUP(p, mask);
+
+	_atomic_spin_lock_irqsave(p, flags);
+	old = *p;
+	*p = old ^ mask;
+	_atomic_spin_unlock_irqrestore(p, flags);
+
+	return (old & mask) != 0;
+}
+
+/* Based on linux/include/asm-generic/bitops/builtin-__ffs.h */
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static /*__*/always_inline unsigned long __ffs(unsigned long word)
+{
+        return __builtin_ctzl(word);
+}
+
+/* Based on linux/include/asm-generic/bitops/ffz.h */
+/*
+ * ffz - find first zero in word.
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+#define ffz(x)  __ffs(~(x))
+
+
+
+/* Based on linux/include/asm-generic/bitops/find.h */
+
+#ifndef find_next_bit
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The bitmap size in bits
+ */
+extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
+		size, unsigned long offset);
+#endif
+
+#ifndef find_next_zero_bit
+/**
+ * find_next_zero_bit - find the next cleared bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The bitmap size in bits
+ */
+extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
+		long size, unsigned long offset);
+#endif
+
+#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
+
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit number of the first set bit.
+ */
+extern unsigned long find_first_bit(const unsigned long *addr,
+				    unsigned long size);
+
+/**
+ * find_first_zero_bit - find the first cleared bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit number of the first cleared bit.
+ */
+extern unsigned long find_first_zero_bit(const unsigned long *addr,
+					 unsigned long size);
+#else /* CONFIG_GENERIC_FIND_FIRST_BIT */
+
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
+
+#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
+
+
+#endif /* _ARM_ARM64_BITOPS_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/bitops.h b/xen/include/asm-arm/bitops.h
index 87de5db..563b4be 100644
--- a/xen/include/asm-arm/bitops.h
+++ b/xen/include/asm-arm/bitops.h
@@ -9,28 +9,14 @@
 #ifndef _ARM_BITOPS_H
 #define _ARM_BITOPS_H
 
-extern void _set_bit(int nr, volatile void * p);
-extern void _clear_bit(int nr, volatile void * p);
-extern void _change_bit(int nr, volatile void * p);
-extern int _test_and_set_bit(int nr, volatile void * p);
-extern int _test_and_clear_bit(int nr, volatile void * p);
-extern int _test_and_change_bit(int nr, volatile void * p);
-
-#define set_bit(n,p)              _set_bit(n,p)
-#define clear_bit(n,p)            _clear_bit(n,p)
-#define change_bit(n,p)           _change_bit(n,p)
-#define test_and_set_bit(n,p)     _test_and_set_bit(n,p)
-#define test_and_clear_bit(n,p)   _test_and_clear_bit(n,p)
-#define test_and_change_bit(n,p)  _test_and_change_bit(n,p)
-
 /*
  * Non-atomic bit manipulation.
  *
  * Implemented using atomics to be interrupt safe. Could alternatively
  * implement with local interrupt masking.
  */
-#define __set_bit(n,p)            _set_bit(n,p)
-#define __clear_bit(n,p)          _clear_bit(n,p)
+#define __set_bit(n,p)            set_bit(n,p)
+#define __clear_bit(n,p)          clear_bit(n,p)
 
 #define BIT(nr)                 (1UL << (nr))
 #define BIT_MASK(nr)            (1UL << ((nr) % BITS_PER_LONG))
@@ -40,6 +26,14 @@ extern int _test_and_change_bit(int nr, volatile void * p);
 #define ADDR (*(volatile long *) addr)
 #define CONST_ADDR (*(const volatile long *) addr)
 
+#if defined(CONFIG_ARM_32)
+# include <asm/arm32/bitops.h>
+#elif defined(CONFIG_ARM_64)
+# include <asm/arm64/bitops.h>
+#else
+# error "unknown ARM variant"
+#endif
+
 /**
  * __test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
@@ -104,42 +98,6 @@ static inline int test_bit(int nr, const volatile void *addr)
         return 1UL & (p[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }
 
-/*
- * Little endian assembly bitops.  nr = 0 -> byte 0 bit 0.
- */
-extern int _find_first_zero_bit_le(const void * p, unsigned size);
-extern int _find_next_zero_bit_le(const void * p, int size, int offset);
-extern int _find_first_bit_le(const unsigned long *p, unsigned size);
-extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
-
-/*
- * Big endian assembly bitops.  nr = 0 -> byte 3 bit 0.
- */
-extern int _find_first_zero_bit_be(const void * p, unsigned size);
-extern int _find_next_zero_bit_be(const void * p, int size, int offset);
-extern int _find_first_bit_be(const unsigned long *p, unsigned size);
-extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
-
-#ifndef __ARMEB__
-/*
- * These are the little endian, atomic definitions.
- */
-#define find_first_zero_bit(p,sz)	_find_first_zero_bit_le(p,sz)
-#define find_next_zero_bit(p,sz,off)	_find_next_zero_bit_le(p,sz,off)
-#define find_first_bit(p,sz)		_find_first_bit_le(p,sz)
-#define find_next_bit(p,sz,off)		_find_next_bit_le(p,sz,off)
-
-#else
-/*
- * These are the big endian, atomic definitions.
- */
-#define find_first_zero_bit(p,sz)	_find_first_zero_bit_be(p,sz)
-#define find_next_zero_bit(p,sz,off)	_find_next_zero_bit_be(p,sz,off)
-#define find_first_bit(p,sz)		_find_first_bit_be(p,sz)
-#define find_next_bit(p,sz,off)		_find_next_bit_be(p,sz,off)
-
-#endif
-
 static inline int constant_fls(int x)
 {
         int r = 32;
@@ -182,10 +140,11 @@ static inline int fls(int x)
                return constant_fls(x);
 
         asm("clz\t%0, %1" : "=r" (ret) : "r" (x));
-        ret = 32 - ret;
+        ret = BITS_PER_LONG - ret;
         return ret;
 }
 
+
 #define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
 
 /**
diff --git a/xen/include/asm-arm/config.h b/xen/include/asm-arm/config.h
index e5dce5e..add70bd 100644
--- a/xen/include/asm-arm/config.h
+++ b/xen/include/asm-arm/config.h
@@ -7,6 +7,21 @@
 #ifndef __ARM_CONFIG_H__
 #define __ARM_CONFIG_H__
 
+#if defined(__aarch64__)
+# define CONFIG_ARM_64 1
+#elif defined(__arm__)
+# define CONFIG_ARM_32 1
+#endif
+
+#if defined(CONFIG_ARM_64)
+# define LONG_BYTEORDER 3
+#else
+# define LONG_BYTEORDER 2
+#endif
+
+#define BYTES_PER_LONG (1 << LONG_BYTEORDER)
+#define BITS_PER_LONG (BYTES_PER_LONG << 3)
+
 #define CONFIG_PAGING_ASSISTANCE 1
 
 #define CONFIG_PAGING_LEVELS 3
diff --git a/xen/include/asm-arm/types.h b/xen/include/asm-arm/types.h
index 48864f9..07f7898 100644
--- a/xen/include/asm-arm/types.h
+++ b/xen/include/asm-arm/types.h
@@ -15,8 +15,13 @@ typedef __signed__ int __s32;
 typedef unsigned int __u32;
 
 #if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+#if defined(CONFIG_ARM_32)
 typedef __signed__ long long __s64;
 typedef unsigned long long __u64;
+#elif defined (CONFIG_ARM_64)
+typedef __signed__ long __s64;
+typedef unsigned long __u64;
+#endif
 #endif
 
 typedef signed char s8;
@@ -28,11 +33,19 @@ typedef unsigned short u16;
 typedef signed int s32;
 typedef unsigned int u32;
 
+#if defined(CONFIG_ARM_32)
 typedef signed long long s64;
 typedef unsigned long long u64;
 typedef u64 paddr_t;
 #define INVALID_PADDR (~0ULL)
 #define PRIpaddr "016llx"
+#elif defined (CONFIG_ARM_64)
+typedef signed long s64;
+typedef unsigned long u64;
+typedef u64 paddr_t;
+#define INVALID_PADDR (~0UL)
+#define PRIpaddr "016lx"
+#endif
 
 typedef unsigned long size_t;
 
@@ -42,10 +55,6 @@ typedef char bool_t;
 
 #endif /* __ASSEMBLY__ */
 
-#define BITS_PER_LONG 32
-#define BYTES_PER_LONG 4
-#define LONG_BYTEORDER 2
-
 #endif /* __ARM_TYPES_H__ */
 /*
  * Local variables:
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 07/46] xen: arm64: spinlocks
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (5 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 06/46] xen: arm64: basic config and types headers Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 08/46] xen: arm64: atomics Ian Campbell
                   ` (40 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
v2: no change, but these need to be revisited considering the interaction
of SEV/WFE etc. The generic code may need reworking in order to make best
use of WFE (on 32-bit ARM too).
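
Below is a rough, hypothetical sketch (not part of this patch;
example_spin_lock() is an invented name) of what a WFE-aware generic lock
loop built on these trylock primitives might look like. It relies on the
unlocker's SEV (which the arm32 _raw_spin_unlock does issue); on arm64 it
would need more care with the exclusive monitor, which is part of why this
needs revisiting:

    /* Hypothetical sketch only: spin with WFE between lock attempts. */
    static inline void example_spin_lock(raw_spinlock_t *lock)
    {
        while ( !_raw_spin_trylock(lock) )
        {
            /* Sleep until an event (e.g. the unlocker's SEV) arrives,
             * then re-check whether the lock has been released. */
            while ( _raw_spin_is_locked(lock) )
                asm volatile("wfe" ::: "memory");
        }
    }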
---
 xen/include/asm-arm/arm32/spinlock.h |  141 ++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/arm64/spinlock.h |  125 ++++++++++++++++++++++++++++++
 xen/include/asm-arm/spinlock.h       |  135 ++------------------------------
 3 files changed, 273 insertions(+), 128 deletions(-)
 create mode 100644 xen/include/asm-arm/arm32/spinlock.h
 create mode 100644 xen/include/asm-arm/arm64/spinlock.h

diff --git a/xen/include/asm-arm/arm32/spinlock.h b/xen/include/asm-arm/arm32/spinlock.h
new file mode 100644
index 0000000..a7bcdbf
--- /dev/null
+++ b/xen/include/asm-arm/arm32/spinlock.h
@@ -0,0 +1,141 @@
+#ifndef __ASM_ARM32_SPINLOCK_H
+#define __ASM_ARM32_SPINLOCK_H
+
+static inline void dsb_sev(void)
+{
+    __asm__ __volatile__ (
+        "dsb\n"
+        "sev\n"
+        );
+}
+
+typedef struct {
+    volatile unsigned int lock;
+} raw_spinlock_t;
+
+#define _RAW_SPIN_LOCK_UNLOCKED { 0 }
+
+#define _raw_spin_is_locked(x)          ((x)->lock != 0)
+
+static always_inline void _raw_spin_unlock(raw_spinlock_t *lock)
+{
+    ASSERT(_raw_spin_is_locked(lock));
+
+    smp_mb();
+
+    __asm__ __volatile__(
+"   str     %1, [%0]\n"
+    :
+    : "r" (&lock->lock), "r" (0)
+    : "cc");
+
+    dsb_sev();
+}
+
+static always_inline int _raw_spin_trylock(raw_spinlock_t *lock)
+{
+    unsigned long tmp;
+
+    __asm__ __volatile__(
+"   ldrex   %0, [%1]\n"
+"   teq     %0, #0\n"
+"   strexeq %0, %2, [%1]"
+    : "=&r" (tmp)
+    : "r" (&lock->lock), "r" (1)
+    : "cc");
+
+    if (tmp == 0) {
+        smp_mb();
+        return 1;
+    } else {
+        return 0;
+    }
+}
+
+typedef struct {
+    volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define _RAW_RW_LOCK_UNLOCKED { 0 }
+
+static always_inline int _raw_read_trylock(raw_rwlock_t *rw)
+{
+    unsigned long tmp, tmp2 = 1;
+
+    __asm__ __volatile__(
+"1: ldrex   %0, [%2]\n"
+"   adds    %0, %0, #1\n"
+"   strexpl %1, %0, [%2]\n"
+    : "=&r" (tmp), "+r" (tmp2)
+    : "r" (&rw->lock)
+    : "cc");
+
+    smp_mb();
+    return tmp2 == 0;
+}
+
+static always_inline int _raw_write_trylock(raw_rwlock_t *rw)
+{
+    unsigned long tmp;
+
+    __asm__ __volatile__(
+"1: ldrex   %0, [%1]\n"
+"   teq     %0, #0\n"
+"   strexeq %0, %2, [%1]"
+    : "=&r" (tmp)
+    : "r" (&rw->lock), "r" (0x80000000)
+    : "cc");
+
+    if (tmp == 0) {
+        smp_mb();
+        return 1;
+    } else {
+        return 0;
+    }
+}
+
+static inline void _raw_read_unlock(raw_rwlock_t *rw)
+{
+    unsigned long tmp, tmp2;
+
+    smp_mb();
+
+    __asm__ __volatile__(
+"1: ldrex   %0, [%2]\n"
+"   sub     %0, %0, #1\n"
+"   strex   %1, %0, [%2]\n"
+"   teq     %1, #0\n"
+"   bne     1b"
+    : "=&r" (tmp), "=&r" (tmp2)
+    : "r" (&rw->lock)
+    : "cc");
+
+    if (tmp == 0)
+        dsb_sev();
+}
+
+static inline void _raw_write_unlock(raw_rwlock_t *rw)
+{
+    smp_mb();
+
+    __asm__ __volatile__(
+    "str    %1, [%0]\n"
+    :
+    : "r" (&rw->lock), "r" (0)
+    : "cc");
+
+    dsb_sev();
+}
+
+#define _raw_rw_is_locked(x) ((x)->lock != 0)
+#define _raw_rw_is_write_locked(x) ((x)->lock == 0x80000000)
+
+#endif /* __ASM_ARM32_SPINLOCK_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/arm64/spinlock.h b/xen/include/asm-arm/arm64/spinlock.h
new file mode 100644
index 0000000..52ad688
--- /dev/null
+++ b/xen/include/asm-arm/arm64/spinlock.h
@@ -0,0 +1,125 @@
+/*
+ * Derived from Linux arm64 spinlock.h which is:
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_ARM64_SPINLOCK_H
+#define __ASM_ARM64_SPINLOCK_H
+
+typedef struct {
+    volatile unsigned int lock;
+} raw_spinlock_t;
+
+#define _RAW_SPIN_LOCK_UNLOCKED { 0 }
+
+#define _raw_spin_is_locked(x)          ((x)->lock != 0)
+
+static always_inline void _raw_spin_unlock(raw_spinlock_t *lock)
+{
+    ASSERT(_raw_spin_is_locked(lock));
+
+    asm volatile(
+        "       stlr    %w1, [%0]\n"
+        : : "r" (&lock->lock), "r" (0) : "memory");
+}
+
+static always_inline int _raw_spin_trylock(raw_spinlock_t *lock)
+{
+    unsigned int tmp;
+
+    asm volatile(
+        "       ldaxr   %w0, [%1]\n"
+        "       cbnz    %w0, 1f\n"
+        "       stxr    %w0, %w2, [%1]\n"
+        "1:\n"
+        : "=&r" (tmp)
+        : "r" (&lock->lock), "r" (1)
+        : "memory");
+
+    return !tmp;
+}
+
+typedef struct {
+    volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define _RAW_RW_LOCK_UNLOCKED { 0 }
+
+static always_inline int _raw_read_trylock(raw_rwlock_t *rw)
+{
+    unsigned int tmp, tmp2 = 1;
+
+    asm volatile(
+        "       ldaxr   %w0, [%2]\n"
+        "       add     %w0, %w0, #1\n"
+        "       tbnz    %w0, #31, 1f\n"
+        "       stxr    %w1, %w0, [%2]\n"
+        "1:\n"
+        : "=&r" (tmp), "+r" (tmp2)
+        : "r" (&rw->lock)
+        : "memory");
+
+    return !tmp2;
+}
+
+static always_inline int _raw_write_trylock(raw_rwlock_t *rw)
+{
+    unsigned int tmp;
+
+    asm volatile(
+        "       ldaxr   %w0, [%1]\n"
+        "       cbnz    %w0, 1f\n"
+        "       stxr    %w0, %w2, [%1]\n"
+        "1:\n"
+        : "=&r" (tmp)
+        : "r" (&rw->lock), "r" (0x80000000)
+        : "memory");
+
+    return !tmp;
+}
+
+static inline void _raw_read_unlock(raw_rwlock_t *rw)
+{
+    unsigned int tmp, tmp2;
+
+    asm volatile(
+        "1:     ldxr    %w0, [%2]\n"
+        "       sub     %w0, %w0, #1\n"
+        "       stlxr   %w1, %w0, [%2]\n"
+        "       cbnz    %w1, 1b\n"
+        : "=&r" (tmp), "=&r" (tmp2)
+        : "r" (&rw->lock)
+        : "memory");
+}
+
+static inline void _raw_write_unlock(raw_rwlock_t *rw)
+{
+    asm volatile(
+        "       stlr    %w1, [%0]\n"
+        : : "r" (&rw->lock), "r" (0) : "memory");
+}
+
+#define _raw_rw_is_locked(x) ((x)->lock != 0)
+#define _raw_rw_is_write_locked(x) ((x)->lock == 0x80000000)
+
+#endif /* __ASM_ARM64_SPINLOCK_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/spinlock.h b/xen/include/asm-arm/spinlock.h
index b1825c9..d753210 100644
--- a/xen/include/asm-arm/spinlock.h
+++ b/xen/include/asm-arm/spinlock.h
@@ -4,134 +4,13 @@
 #include <xen/config.h>
 #include <xen/lib.h>
 
-static inline void dsb_sev(void)
-{
-    __asm__ __volatile__ (
-        "dsb\n"
-        "sev\n"
-        );
-}
-
-typedef struct {
-    volatile unsigned int lock;
-} raw_spinlock_t;
-
-#define _RAW_SPIN_LOCK_UNLOCKED { 0 }
-
-#define _raw_spin_is_locked(x)          ((x)->lock != 0)
-
-static always_inline void _raw_spin_unlock(raw_spinlock_t *lock)
-{
-    ASSERT(_raw_spin_is_locked(lock));
-
-    smp_mb();
-
-    __asm__ __volatile__(
-"   str     %1, [%0]\n"
-    :
-    : "r" (&lock->lock), "r" (0)
-    : "cc");
-
-    dsb_sev();
-}
-
-static always_inline int _raw_spin_trylock(raw_spinlock_t *lock)
-{
-    unsigned long tmp;
-
-    __asm__ __volatile__(
-"   ldrex   %0, [%1]\n"
-"   teq     %0, #0\n"
-"   strexeq %0, %2, [%1]"
-    : "=&r" (tmp)
-    : "r" (&lock->lock), "r" (1)
-    : "cc");
-
-    if (tmp == 0) {
-        smp_mb();
-        return 1;
-    } else {
-        return 0;
-    }
-}
-
-typedef struct {
-    volatile unsigned int lock;
-} raw_rwlock_t;
-
-#define _RAW_RW_LOCK_UNLOCKED { 0 }
-
-static always_inline int _raw_read_trylock(raw_rwlock_t *rw)
-{
-    unsigned long tmp, tmp2 = 1;
-
-    __asm__ __volatile__(
-"1: ldrex   %0, [%2]\n"
-"   adds    %0, %0, #1\n"
-"   strexpl %1, %0, [%2]\n"
-    : "=&r" (tmp), "+r" (tmp2)
-    : "r" (&rw->lock)
-    : "cc");
-
-    smp_mb();
-    return tmp2 == 0;
-}
-
-static always_inline int _raw_write_trylock(raw_rwlock_t *rw)
-{
-    unsigned long tmp;
-
-    __asm__ __volatile__(
-"1: ldrex   %0, [%1]\n"
-"   teq     %0, #0\n"
-"   strexeq %0, %2, [%1]"
-    : "=&r" (tmp)
-    : "r" (&rw->lock), "r" (0x80000000)
-    : "cc");
-
-    if (tmp == 0) {
-        smp_mb();
-        return 1;
-    } else {
-        return 0;
-    }
-}
-
-static inline void _raw_read_unlock(raw_rwlock_t *rw)
-{
-    unsigned long tmp, tmp2;
-
-    smp_mb();
-
-    __asm__ __volatile__(
-"1: ldrex   %0, [%2]\n"
-"   sub     %0, %0, #1\n"
-"   strex   %1, %0, [%2]\n"
-"   teq     %1, #0\n"
-"   bne     1b"
-    : "=&r" (tmp), "=&r" (tmp2)
-    : "r" (&rw->lock)
-    : "cc");
-
-    if (tmp == 0)
-        dsb_sev();
-}
-
-static inline void _raw_write_unlock(raw_rwlock_t *rw)
-{
-    smp_mb();
-
-    __asm__ __volatile__(
-    "str    %1, [%0]\n"
-    :
-    : "r" (&rw->lock), "r" (0)
-    : "cc");
-
-    dsb_sev();
-}
-
-#define _raw_rw_is_locked(x) ((x)->lock != 0)
-#define _raw_rw_is_write_locked(x) ((x)->lock == 0x80000000)
+#if defined(CONFIG_ARM_32)
+# include <asm/arm32/spinlock.h>
+#elif defined(CONFIG_ARM_64)
+# include <asm/arm64/spinlock.h>
+#else
+# error "unknown ARM variant"
+#endif
 
 #endif /* __ASM_SPINLOCK_H */
 /*
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 08/46] xen: arm64: atomics
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (6 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 07/46] xen: arm64: spinlocks Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-21 14:57   ` Tim Deegan
  2013-02-14 16:47 ` [PATCH V2 09/46] xen: arm: refactor co-pro and sysreg reg handling Ian Campbell
                   ` (39 subsequent siblings)
  47 siblings, 1 reply; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
v2: Remove unused, #if-0'd, 64-bit atomics.
---
 xen/include/asm-arm/arm32/atomic.h |  151 +++++++++++++++++++++++++++++
 xen/include/asm-arm/arm64/atomic.h |  163 +++++++++++++++++++++++++++++++
 xen/include/asm-arm/atomic.h       |  186 +++++++-----------------------------
 3 files changed, 347 insertions(+), 153 deletions(-)
 create mode 100644 xen/include/asm-arm/arm32/atomic.h
 create mode 100644 xen/include/asm-arm/arm64/atomic.h

diff --git a/xen/include/asm-arm/arm32/atomic.h b/xen/include/asm-arm/arm32/atomic.h
new file mode 100644
index 0000000..4ee6626
--- /dev/null
+++ b/xen/include/asm-arm/arm32/atomic.h
@@ -0,0 +1,151 @@
+/*
+ *  arch/arm/include/asm/atomic.h
+ *
+ *  Copyright (C) 1996 Russell King.
+ *  Copyright (C) 2002 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ARCH_ARM_ARM32_ATOMIC__
+#define __ARCH_ARM_ARM32_ATOMIC__
+
+/*
+ * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
+ * store exclusive to ensure that these are atomic.  We may loop
+ * to ensure that the update happens.
+ */
+static inline void atomic_add(int i, atomic_t *v)
+{
+        unsigned long tmp;
+        int result;
+
+        __asm__ __volatile__("@ atomic_add\n"
+"1:     ldrex   %0, [%3]\n"
+"       add     %0, %0, %4\n"
+"       strex   %1, %0, [%3]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+        : "r" (&v->counter), "Ir" (i)
+        : "cc");
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+        unsigned long tmp;
+        int result;
+
+        smp_mb();
+
+        __asm__ __volatile__("@ atomic_add_return\n"
+"1:     ldrex   %0, [%3]\n"
+"       add     %0, %0, %4\n"
+"       strex   %1, %0, [%3]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+        : "r" (&v->counter), "Ir" (i)
+        : "cc");
+
+        smp_mb();
+
+        return result;
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+        unsigned long tmp;
+        int result;
+
+        __asm__ __volatile__("@ atomic_sub\n"
+"1:     ldrex   %0, [%3]\n"
+"       sub     %0, %0, %4\n"
+"       strex   %1, %0, [%3]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+        : "r" (&v->counter), "Ir" (i)
+        : "cc");
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+        unsigned long tmp;
+        int result;
+
+        smp_mb();
+
+        __asm__ __volatile__("@ atomic_sub_return\n"
+"1:     ldrex   %0, [%3]\n"
+"       sub     %0, %0, %4\n"
+"       strex   %1, %0, [%3]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+        : "r" (&v->counter), "Ir" (i)
+        : "cc");
+
+        smp_mb();
+
+        return result;
+}
+
+static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+{
+        unsigned long oldval, res;
+
+        smp_mb();
+
+        do {
+                __asm__ __volatile__("@ atomic_cmpxchg\n"
+                "ldrex  %1, [%3]\n"
+                "mov    %0, #0\n"
+                "teq    %1, %4\n"
+                "strexeq %0, %5, [%3]\n"
+                    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+                    : "r" (&ptr->counter), "Ir" (old), "r" (new)
+                    : "cc");
+        } while (res);
+
+        smp_mb();
+
+        return oldval;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+        unsigned long tmp, tmp2;
+
+        __asm__ __volatile__("@ atomic_clear_mask\n"
+"1:     ldrex   %0, [%3]\n"
+"       bic     %0, %0, %4\n"
+"       strex   %1, %0, [%3]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+        : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
+        : "r" (addr), "Ir" (mask)
+        : "cc");
+}
+
+#define atomic_inc(v)           atomic_add(1, v)
+#define atomic_dec(v)           atomic_sub(1, v)
+
+#define atomic_inc_and_test(v)  (atomic_add_return(1, v) == 0)
+#define atomic_dec_and_test(v)  (atomic_sub_return(1, v) == 0)
+#define atomic_inc_return(v)    (atomic_add_return(1, v))
+#define atomic_dec_return(v)    (atomic_sub_return(1, v))
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+
+#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
+
+#endif /* __ARCH_ARM_ARM32_ATOMIC__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/arm64/atomic.h b/xen/include/asm-arm/arm64/atomic.h
new file mode 100644
index 0000000..972d50c
--- /dev/null
+++ b/xen/include/asm-arm/arm64/atomic.h
@@ -0,0 +1,163 @@
+/*
+ * Based on arch/arm64/include/asm/atomic.h
+ * which in turn is
+ * Based on arch/arm/include/asm/atomic.h
+ *
+ * Copyright (C) 1996 Russell King.
+ * Copyright (C) 2002 Deep Blue Solutions Ltd.
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ARCH_ARM_ARM64_ATOMIC
+#define __ARCH_ARM_ARM64_ATOMIC
+
+/*
+ * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
+ * store exclusive to ensure that these are atomic.  We may loop
+ * to ensure that the update happens.
+ */
+static inline void atomic_add(int i, atomic_t *v)
+{
+	unsigned long tmp;
+	int result;
+
+	asm volatile("// atomic_add\n"
+"1:	ldxr	%w0, [%3]\n"
+"	add	%w0, %w0, %w4\n"
+"	stxr	%w1, %w0, [%3]\n"
+"	cbnz	%w1, 1b"
+	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
+	: "r" (&v->counter), "Ir" (i)
+	: "cc");
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	unsigned long tmp;
+	int result;
+
+	asm volatile("// atomic_add_return\n"
+"1:	ldaxr	%w0, [%3]\n"
+"	add	%w0, %w0, %w4\n"
+"	stlxr	%w1, %w0, [%3]\n"
+"	cbnz	%w1, 1b"
+	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
+	: "r" (&v->counter), "Ir" (i)
+	: "cc");
+
+	return result;
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	unsigned long tmp;
+	int result;
+
+	asm volatile("// atomic_sub\n"
+"1:	ldxr	%w0, [%3]\n"
+"	sub	%w0, %w0, %w4\n"
+"	stxr	%w1, %w0, [%3]\n"
+"	cbnz	%w1, 1b"
+	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
+	: "r" (&v->counter), "Ir" (i)
+	: "cc");
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	unsigned long tmp;
+	int result;
+
+	asm volatile("// atomic_sub_return\n"
+"1:	ldaxr	%w0, [%3]\n"
+"	sub	%w0, %w0, %w4\n"
+"	stlxr	%w1, %w0, [%3]\n"
+"	cbnz	%w1, 1b"
+	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
+	: "r" (&v->counter), "Ir" (i)
+	: "cc");
+
+	return result;
+}
+
+static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+{
+	unsigned long tmp;
+	int oldval;
+
+	asm volatile("// atomic_cmpxchg\n"
+"1:	ldaxr	%w1, [%3]\n"
+"	cmp	%w1, %w4\n"
+"	b.ne	2f\n"
+"	stlxr	%w0, %w5, [%3]\n"
+"	cbnz	%w0, 1b\n"
+"2:"
+	: "=&r" (tmp), "=&r" (oldval), "+o" (ptr->counter)
+	: "r" (&ptr->counter), "Ir" (old), "r" (new)
+	: "cc");
+
+	return oldval;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+	unsigned long tmp, tmp2;
+
+	asm volatile("// atomic_clear_mask\n"
+"1:	ldxr	%0, [%3]\n"
+"	bic	%0, %0, %4\n"
+"	stxr	%w1, %0, [%3]\n"
+"	cbnz	%w1, 1b"
+	: "=&r" (tmp), "=&r" (tmp2), "+o" (*addr)
+	: "r" (addr), "Ir" (mask)
+	: "cc");
+}
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+
+	c = atomic_read(v);
+	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
+		c = old;
+	return c;
+}
+
+#define atomic_inc(v)		atomic_add(1, v)
+#define atomic_dec(v)		atomic_sub(1, v)
+
+#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
+#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
+#define atomic_inc_return(v)    (atomic_add_return(1, v))
+#define atomic_dec_return(v)    (atomic_sub_return(1, v))
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+
+#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
+
+#define smp_mb__before_atomic_dec()	smp_mb()
+#define smp_mb__after_atomic_dec()	smp_mb()
+#define smp_mb__before_atomic_inc()	smp_mb()
+#define smp_mb__after_atomic_inc()	smp_mb()
+
+#endif
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/atomic.h b/xen/include/asm-arm/atomic.h
index c7eadd6..b37b2d0 100644
--- a/xen/include/asm-arm/atomic.h
+++ b/xen/include/asm-arm/atomic.h
@@ -1,48 +1,49 @@
-/*
- *  arch/arm/include/asm/atomic.h
- *
- *  Copyright (C) 1996 Russell King.
- *  Copyright (C) 2002 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
 #ifndef __ARCH_ARM_ATOMIC__
 #define __ARCH_ARM_ATOMIC__
 
 #include <xen/config.h>
 #include <asm/system.h>
 
-#define build_atomic_read(name, size, type, reg)   \
+#define build_atomic_read(name, size, width, type, reg)\
 static inline type name(const volatile type *addr) \
 {                                                  \
     type ret;                                      \
-    asm volatile("ldr" size " %0,%1"               \
+    asm volatile("ldr" size " %" width "0,%1"      \
                  : reg (ret)                       \
                  : "m" (*(volatile type *)addr));  \
     return ret;                                    \
 }
 
-#define build_atomic_write(name, size, type, reg)      \
+#define build_atomic_write(name, size, width, type, reg) \
 static inline void name(volatile type *addr, type val) \
 {                                                      \
-    asm volatile("str" size " %1,%0"                   \
+    asm volatile("str" size " %"width"1,%0"            \
                  : "=m" (*(volatile type *)addr)       \
                  : reg (val));                         \
 }
 
-build_atomic_read(read_u8_atomic, "b", uint8_t, "=q")
-build_atomic_read(read_u16_atomic, "h", uint16_t, "=r")
-build_atomic_read(read_u32_atomic, "", uint32_t, "=r")
-//build_atomic_read(read_u64_atomic, "d", uint64_t, "=r")
-build_atomic_read(read_int_atomic, "", int, "=r")
-
-build_atomic_write(write_u8_atomic, "b", uint8_t, "q")
-build_atomic_write(write_u16_atomic, "h", uint16_t, "r")
-build_atomic_write(write_u32_atomic, "", uint32_t, "r")
-//build_atomic_write(write_u64_atomic, "d", uint64_t, "r")
-build_atomic_write(write_int_atomic, "", int, "r")
+#if defined (CONFIG_ARM_32)
+#define BYTE ""
+#define WORD ""
+#elif defined (CONFIG_ARM_64)
+#define BYTE "w"
+#define WORD "w"
+#endif
+
+build_atomic_read(read_u8_atomic,  "b", BYTE, uint8_t, "=r")
+build_atomic_read(read_u16_atomic, "h", WORD, uint16_t, "=r")
+build_atomic_read(read_u32_atomic, "",  WORD, uint32_t, "=r")
+build_atomic_read(read_int_atomic, "",  WORD, int, "=r")
+
+build_atomic_write(write_u8_atomic,  "b", BYTE, uint8_t, "r")
+build_atomic_write(write_u16_atomic, "h", WORD, uint16_t, "r")
+build_atomic_write(write_u32_atomic, "",  WORD, uint32_t, "r")
+build_atomic_write(write_int_atomic, "",  WORD, int, "r")
+
+#if 0 /* defined (CONFIG_ARM_64) */
+build_atomic_read(read_u64_atomic, "x", uint64_t, "=r")
+build_atomic_write(write_u64_atomic, "x", uint64_t, "r")
+#endif
 
 void __bad_atomic_size(void);
 
@@ -88,134 +89,13 @@ typedef struct { int counter; } atomic_t;
 #define _atomic_set(v,i) (((v).counter) = (i))
 #define atomic_set(v,i) (((v)->counter) = (i))
 
-/*
- * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
- * store exclusive to ensure that these are atomic.  We may loop
- * to ensure that the update happens.
- */
-static inline void atomic_add(int i, atomic_t *v)
-{
-        unsigned long tmp;
-        int result;
-
-        __asm__ __volatile__("@ atomic_add\n"
-"1:     ldrex   %0, [%3]\n"
-"       add     %0, %0, %4\n"
-"       strex   %1, %0, [%3]\n"
-"       teq     %1, #0\n"
-"       bne     1b"
-        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-        : "r" (&v->counter), "Ir" (i)
-        : "cc");
-}
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-        unsigned long tmp;
-        int result;
-
-        smp_mb();
-
-        __asm__ __volatile__("@ atomic_add_return\n"
-"1:     ldrex   %0, [%3]\n"
-"       add     %0, %0, %4\n"
-"       strex   %1, %0, [%3]\n"
-"       teq     %1, #0\n"
-"       bne     1b"
-        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-        : "r" (&v->counter), "Ir" (i)
-        : "cc");
-
-        smp_mb();
-
-        return result;
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-        unsigned long tmp;
-        int result;
-
-        __asm__ __volatile__("@ atomic_sub\n"
-"1:     ldrex   %0, [%3]\n"
-"       sub     %0, %0, %4\n"
-"       strex   %1, %0, [%3]\n"
-"       teq     %1, #0\n"
-"       bne     1b"
-        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-        : "r" (&v->counter), "Ir" (i)
-        : "cc");
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-        unsigned long tmp;
-        int result;
-
-        smp_mb();
-
-        __asm__ __volatile__("@ atomic_sub_return\n"
-"1:     ldrex   %0, [%3]\n"
-"       sub     %0, %0, %4\n"
-"       strex   %1, %0, [%3]\n"
-"       teq     %1, #0\n"
-"       bne     1b"
-        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-        : "r" (&v->counter), "Ir" (i)
-        : "cc");
-
-        smp_mb();
-
-        return result;
-}
-
-static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
-{
-        unsigned long oldval, res;
-
-        smp_mb();
-
-        do {
-                __asm__ __volatile__("@ atomic_cmpxchg\n"
-                "ldrex  %1, [%3]\n"
-                "mov    %0, #0\n"
-                "teq    %1, %4\n"
-                "strexeq %0, %5, [%3]\n"
-                    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
-                    : "r" (&ptr->counter), "Ir" (old), "r" (new)
-                    : "cc");
-        } while (res);
-
-        smp_mb();
-
-        return oldval;
-}
-
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-        unsigned long tmp, tmp2;
-
-        __asm__ __volatile__("@ atomic_clear_mask\n"
-"1:     ldrex   %0, [%3]\n"
-"       bic     %0, %0, %4\n"
-"       strex   %1, %0, [%3]\n"
-"       teq     %1, #0\n"
-"       bne     1b"
-        : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
-        : "r" (addr), "Ir" (mask)
-        : "cc");
-}
-
-#define atomic_inc(v)           atomic_add(1, v)
-#define atomic_dec(v)           atomic_sub(1, v)
-
-#define atomic_inc_and_test(v)  (atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v)  (atomic_sub_return(1, v) == 0)
-#define atomic_inc_return(v)    (atomic_add_return(1, v))
-#define atomic_dec_return(v)    (atomic_sub_return(1, v))
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
-#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
+#if defined(CONFIG_ARM_32)
+# include <asm/arm32/atomic.h>
+#elif defined(CONFIG_ARM_64)
+# include <asm/arm64/atomic.h>
+#else
+# error "unknown ARM variant"
+#endif
 
 static inline atomic_t atomic_compareandswap(
     atomic_t old, atomic_t new, atomic_t *v)
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 09/46] xen: arm: refactor co-pro and sysreg reg handling.
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (7 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 08/46] xen: arm64: atomics Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 10/46] xen: arm64: TLB flushes Ian Campbell
                   ` (38 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

AArch64 has removed the concept of co-processors, replacing them with a
combination of specific instructions (cache and TLB flushes etc) and
system registers (which are understood by name in the assembler).

However most system registers are equivalent to a particular AArch32
co-pro register and can be used by generic code in the same way. Note
that the names of the registers often differ, if only slightly.

For consistency it would be better to use only one set of names in the
common code. Therefore move the {READ,WRITE}_CP{32,64} accessors into
arm32/processor.h and provide {READ,WRITE}_SYSREG. Where the names
differ, #defines will be provided on 32-bit.

HSR_CPREG and friends are required even on 64-bit in order to decode
traps from 32-bit guests.
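
For illustration (this snippet is not part of the patch and assumes the
32-bit headers alias the AArch64 name, e.g. "#define CNTFRQ_EL0 CNTFRQ";
example_read_timer_freq() is an invented name), common code can then read
a register with a single spelling:

    /* Sketch: read the generic timer frequency from common code.
     * READ_SYSREG32 expands to an MRC on AArch32 and an MRS on AArch64. */
    static inline uint32_t example_read_timer_freq(void)
    {
        return READ_SYSREG32(CNTFRQ_EL0);
    }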

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
 xen/include/asm-arm/arm32/processor.h |   68 +++++++++++++++++++++++++++++++++
 xen/include/asm-arm/arm64/processor.h |   37 ++++++++++++++++++
 xen/include/asm-arm/cpregs.h          |   40 +++----------------
 xen/include/asm-arm/processor.h       |    9 +++-
 4 files changed, 118 insertions(+), 36 deletions(-)
 create mode 100644 xen/include/asm-arm/arm32/processor.h
 create mode 100644 xen/include/asm-arm/arm64/processor.h

diff --git a/xen/include/asm-arm/arm32/processor.h b/xen/include/asm-arm/arm32/processor.h
new file mode 100644
index 0000000..843fbd2
--- /dev/null
+++ b/xen/include/asm-arm/arm32/processor.h
@@ -0,0 +1,68 @@
+#ifndef __ASM_ARM_ARM32_PROCESSOR_H
+#define __ASM_ARM_ARM32_PROCESSOR_H
+
+/* Layout as used in assembly, with src/dest registers mixed in */
+#define __CP32(r, coproc, opc1, crn, crm, opc2) coproc, opc1, r, crn, crm, opc2
+#define __CP64(r1, r2, coproc, opc, crm) coproc, opc, r1, r2, crm
+#define CP32(r, name...) __CP32(r, name)
+#define CP64(r, name...) __CP64(r, name)
+
+/* Stringified for inline assembly */
+#define LOAD_CP32(r, name...)  "mrc " __stringify(CP32(%r, name)) ";"
+#define STORE_CP32(r, name...) "mcr " __stringify(CP32(%r, name)) ";"
+#define LOAD_CP64(r, name...)  "mrrc " __stringify(CP64(%r, %H##r, name)) ";"
+#define STORE_CP64(r, name...) "mcrr " __stringify(CP64(%r, %H##r, name)) ";"
+
+#ifndef __ASSEMBLY__
+
+/* C wrappers */
+#define READ_CP32(name...) ({                                   \
+    register uint32_t _r;                                       \
+    asm volatile(LOAD_CP32(0, name) : "=r" (_r));               \
+    _r; })
+
+#define WRITE_CP32(v, name...) do {                             \
+    register uint32_t _r = (v);                                 \
+    asm volatile(STORE_CP32(0, name) : : "r" (_r));             \
+} while (0)
+
+#define READ_CP64(name...) ({                                   \
+    register uint64_t _r;                                       \
+    asm volatile(LOAD_CP64(0, name) : "=r" (_r));               \
+    _r; })
+
+#define WRITE_CP64(v, name...) do {                             \
+    register uint64_t _r = (v);                                 \
+    asm volatile(STORE_CP64(0, name) : : "r" (_r));             \
+} while (0)
+
+/*
+ * C wrappers for accessing system registers.
+ *
+ * Registers come in 3 types:
+ * - those which are always 32-bit regardless of AArch32 vs AArch64
+ *   (use {READ,WRITE}_SYSREG32).
+ * - those which are always 64-bit regardless of AArch32 vs AArch64
+ *   (use {READ,WRITE}_SYSREG64).
+ * - those which vary between AArch32 and AArch64 (use {READ,WRITE}_SYSREG).
+ */
+#define READ_SYSREG32(R...)     READ_CP32(R)
+#define WRITE_SYSREG32(V, R...) WRITE_CP32(V, R)
+
+#define READ_SYSREG64(R...)     READ_CP64(R)
+#define WRITE_SYSREG64(V, R...) WRITE_CP64(V, R)
+
+#define READ_SYSREG(R...)       READ_SYSREG32(R)
+#define WRITE_SYSREG(V, R...)   WRITE_SYSREG32(V, R)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_ARM_ARM32_PROCESSOR_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/arm64/processor.h b/xen/include/asm-arm/arm64/processor.h
new file mode 100644
index 0000000..fdb0dab
--- /dev/null
+++ b/xen/include/asm-arm/arm64/processor.h
@@ -0,0 +1,37 @@
+#ifndef __ASM_ARM_ARM64_PROCESSOR_H
+#define __ASM_ARM_ARM64_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+#define READ_SYSREG32(name) ({                          \
+    uint32_t _r;                                        \
+    asm volatile("mrs  %0, "#name : "=r" (_r));         \
+    _r; })
+#define WRITE_SYSREG32(v, name) do {                    \
+    uint32_t _r = v;                                    \
+    asm volatile("msr "#name", %0" : : "r" (_r));       \
+} while (0)
+
+#define WRITE_SYSREG64(v, name) do {                    \
+    uint64_t _r = v;                                    \
+    asm volatile("msr "#name", %0" : : "r" (_r));       \
+} while (0)
+#define READ_SYSREG64(name) ({                          \
+    uint64_t _r;                                        \
+    asm volatile("mrs  %0, "#name : "=r" (_r));         \
+    _r; })
+
+#define READ_SYSREG(name)     READ_SYSREG64(name)
+#define WRITE_SYSREG(v, name) WRITE_SYSREG64(v, name)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_ARM_ARM64_PROCESSOR_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/cpregs.h b/xen/include/asm-arm/cpregs.h
index 3b51845..7eaa50f 100644
--- a/xen/include/asm-arm/cpregs.h
+++ b/xen/include/asm-arm/cpregs.h
@@ -3,40 +3,12 @@
 
 #include <xen/stringify.h>
 
-/* Co-processor registers */
-
-/* Layout as used in assembly, with src/dest registers mixed in */
-#define __CP32(r, coproc, opc1, crn, crm, opc2) coproc, opc1, r, crn, crm, opc2
-#define __CP64(r1, r2, coproc, opc, crm) coproc, opc, r1, r2, crm
-#define CP32(r, name...) __CP32(r, name)
-#define CP64(r, name...) __CP64(r, name)
-
-/* Stringified for inline assembly */
-#define LOAD_CP32(r, name...)  "mrc " __stringify(CP32(%r, name)) ";"
-#define STORE_CP32(r, name...) "mcr " __stringify(CP32(%r, name)) ";"
-#define LOAD_CP64(r, name...)  "mrrc " __stringify(CP64(%r, %H##r, name)) ";"
-#define STORE_CP64(r, name...) "mcrr " __stringify(CP64(%r, %H##r, name)) ";"
-
-/* C wrappers */
-#define READ_CP32(name...) ({                                   \
-    register uint32_t _r;                                       \
-    asm volatile(LOAD_CP32(0, name) : "=r" (_r));               \
-    _r; })
-
-#define WRITE_CP32(v, name...) do {                             \
-    register uint32_t _r = (v);                                 \
-    asm volatile(STORE_CP32(0, name) : : "r" (_r));             \
-} while (0)
-
-#define READ_CP64(name...) ({                                   \
-    register uint64_t _r;                                       \
-    asm volatile(LOAD_CP64(0, name) : "=r" (_r));               \
-    _r; })
-
-#define WRITE_CP64(v, name...) do {                             \
-    register uint64_t _r = (v);                                 \
-    asm volatile(STORE_CP64(0, name) : : "r" (_r));             \
-} while (0)
+/*
+ * AArch32 Co-processor registers.
+ *
+ * Note that AArch64 requires many of these definitions in order to
+ * support 32-bit guests.
+ */
 
 #define __HSR_CPREG_c0  0
 #define __HSR_CPREG_c1  1
diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h
index 0c94f6b..0768cd4 100644
--- a/xen/include/asm-arm/processor.h
+++ b/xen/include/asm-arm/processor.h
@@ -225,8 +225,13 @@ union hsr {
 #define ID_PFR1_GT_MASK  0x000F0000  /* Generic Timer interface support */
 #define ID_PFR1_GT_v1    0x00010000
 
-#define MSR(reg,val)        asm volatile ("msr "#reg", %0\n" : : "r" (val))
-#define MRS(val,reg)        asm volatile ("mrs %0,"#reg"\n" : "=r" (v))
+#if defined(CONFIG_ARM_32)
+# include <asm/arm32/processor.h>
+#elif defined(CONFIG_ARM_64)
+# include <asm/arm64/processor.h>
+#else
+# error "unknown ARM variant"
+#endif
 
 #ifndef __ASSEMBLY__
 extern uint32_t hyp_traps_vector[8];
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 10/46] xen: arm64: TLB flushes
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (8 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 09/46] xen: arm: refactor co-pro and sysreg reg handling Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-21 15:00   ` Tim Deegan
  2013-02-14 16:47 ` [PATCH V2 11/46] xen: arm64: PTE handling Ian Campbell
                   ` (37 subsequent siblings)
  47 siblings, 1 reply; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
v2: remove the comment wondering if these should be inner-shareable
    flushes; they shouldn't be, for now.

    combine with the other patch titled "TLB flushes.", which followed a
    couple of patches later.

    remove flush_guest_tlb(); nothing was calling it.

    remove the stray reference to flushing the branch predictor, which
    isn't necessary on 64-bit.
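
    As an illustrative sketch (not part of this patch; the function below
    is hypothetical), the range flush is meant for callers that have just
    rewritten a hypervisor mapping and only need the affected VAs dropped:

        /* Sketch: remap one hypervisor page and flush only its VA. */
        static void example_remap_xen_page(lpae_t *entry, lpae_t pte,
                                           unsigned long va)
        {
            write_pte(entry, pte);                      /* update the PTE */
            flush_xen_data_tlb_range_va(va, PAGE_SIZE); /* drop stale entries */
        }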
---
 xen/include/asm-arm/arm32/flushtlb.h |   34 +++++++++++++++++
 xen/include/asm-arm/arm32/page.h     |   69 ++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/arm64/flushtlb.h |   34 +++++++++++++++++
 xen/include/asm-arm/arm64/page.h     |   67 +++++++++++++++++++++++++++++++++
 xen/include/asm-arm/flushtlb.h       |   34 +++++------------
 xen/include/asm-arm/page.h           |   67 ++++-----------------------------
 6 files changed, 222 insertions(+), 83 deletions(-)
 create mode 100644 xen/include/asm-arm/arm32/flushtlb.h
 create mode 100644 xen/include/asm-arm/arm32/page.h
 create mode 100644 xen/include/asm-arm/arm64/flushtlb.h
 create mode 100644 xen/include/asm-arm/arm64/page.h

diff --git a/xen/include/asm-arm/arm32/flushtlb.h b/xen/include/asm-arm/arm32/flushtlb.h
new file mode 100644
index 0000000..3c2d5b6
--- /dev/null
+++ b/xen/include/asm-arm/arm32/flushtlb.h
@@ -0,0 +1,34 @@
+#ifndef __ASM_ARM_ARM32_FLUSHTLB_H__
+#define __ASM_ARM_ARM32_FLUSHTLB_H__
+
+/* Flush local TLBs, current VMID only */
+static inline void flush_tlb_local(void)
+{
+    dsb();
+
+    WRITE_CP32((uint32_t) 0, TLBIALLIS);
+
+    dsb();
+    isb();
+}
+
+/* Flush local TLBs, all VMIDs, non-hypervisor mode */
+static inline void flush_tlb_all_local(void)
+{
+    dsb();
+
+    WRITE_CP32((uint32_t) 0, TLBIALLNSNHIS);
+
+    dsb();
+    isb();
+}
+
+#endif /* __ASM_ARM_ARM32_FLUSHTLB_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/arm32/page.h b/xen/include/asm-arm/arm32/page.h
new file mode 100644
index 0000000..073b8d1
--- /dev/null
+++ b/xen/include/asm-arm/arm32/page.h
@@ -0,0 +1,69 @@
+#ifndef __ARM_ARM32_PAGE_H__
+#define __ARM_ARM32_PAGE_H__
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Flush all hypervisor mappings from the TLB and branch predictor.
+ * This is needed after changing Xen code mappings.
+ *
+ * The caller needs to issue the necessary DSB and D-cache flushes
+ * before calling flush_xen_text_tlb.
+ */
+static inline void flush_xen_text_tlb(void)
+{
+    register unsigned long r0 asm ("r0");
+    asm volatile (
+        "isb;"                        /* Ensure synchronization with previous changes to text */
+        STORE_CP32(0, TLBIALLH)       /* Flush hypervisor TLB */
+        STORE_CP32(0, ICIALLU)        /* Flush I-cache */
+        STORE_CP32(0, BPIALL)         /* Flush branch predictor */
+        "dsb;"                        /* Ensure completion of TLB+BP flush */
+        "isb;"
+        : : "r" (r0) /*dummy*/ : "memory");
+}
+
+/*
+ * Flush all hypervisor mappings from the data TLB. This is not
+ * sufficient when changing code mappings or for self modifying code.
+ */
+static inline void flush_xen_data_tlb(void)
+{
+    register unsigned long r0 asm ("r0");
+    asm volatile("dsb;" /* Ensure preceding are visible */
+                 STORE_CP32(0, TLBIALLH)
+                 "dsb;" /* Ensure completion of the TLB flush */
+                 "isb;"
+                 : : "r" (r0) /* dummy */: "memory");
+}
+
+/*
+ * Flush a range of VA's hypervisor mappings from the data TLB. This is not
+ * sufficient when changing code mappings or for self modifying code.
+ */
+static inline void flush_xen_data_tlb_range_va(unsigned long va, unsigned long size)
+{
+    unsigned long end = va + size;
+    dsb(); /* Ensure preceding are visible */
+    while ( va < end ) {
+        asm volatile(STORE_CP32(0, TLBIMVAH)
+                     : : "r" (va) : "memory");
+        va += PAGE_SIZE;
+    }
+    dsb(); /* Ensure completion of the TLB flush */
+    isb();
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ARM_ARM32_PAGE_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/arm64/flushtlb.h b/xen/include/asm-arm/arm64/flushtlb.h
new file mode 100644
index 0000000..ca74fe3
--- /dev/null
+++ b/xen/include/asm-arm/arm64/flushtlb.h
@@ -0,0 +1,34 @@
+#ifndef __ASM_ARM_ARM64_FLUSHTLB_H__
+#define __ASM_ARM_ARM64_FLUSHTLB_H__
+
+/* Flush local TLBs, current VMID only */
+static inline void flush_tlb_local(void)
+{
+    asm volatile(
+        "dsb sy;"
+        "tlbi vmalle1;"
+        "dsb sy;"
+        "isb;"
+        : : : "memory");
+}
+
+/* Flush local TLBs, all VMIDs, non-hypervisor mode */
+static inline void flush_tlb_all_local(void)
+{
+    asm volatile(
+        "dsb sy;"
+        "tlbi alle1;"
+        "dsb sy;"
+        "isb;"
+        : : : "memory");
+}
+
+#endif /* __ASM_ARM_ARM64_FLUSHTLB_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/arm64/page.h b/xen/include/asm-arm/arm64/page.h
new file mode 100644
index 0000000..636fb63
--- /dev/null
+++ b/xen/include/asm-arm/arm64/page.h
@@ -0,0 +1,67 @@
+#ifndef __ARM_ARM64_PAGE_H__
+#define __ARM_ARM64_PAGE_H__
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Flush all hypervisor mappings from the TLB
+ * This is needed after changing Xen code mappings.
+ *
+ * The caller needs to issue the necessary DSB and D-cache flushes
+ * before calling flush_xen_text_tlb.
+ */
+static inline void flush_xen_text_tlb(void)
+{
+    asm volatile (
+        "isb;"       /* Ensure synchronization with previous changes to text */
+        "tlbi   alle2;"                 /* Flush hypervisor TLB */
+        "ic     iallu;"                 /* Flush I-cache */
+        "dsb    sy;"                    /* Ensure completion of TLB flush */
+        "isb;"
+        : : : "memory");
+}
+
+/*
+ * Flush all hypervisor mappings from the data TLB. This is not
+ * sufficient when changing code mappings or for self modifying code.
+ */
+static inline void flush_xen_data_tlb(void)
+{
+    asm volatile (
+        "dsb    sy;"                    /* Ensure visibility of PTE writes */
+        "tlbi   alle2;"                 /* Flush hypervisor TLB */
+        "dsb    sy;"                    /* Ensure completion of TLB flush */
+        "isb;"
+        : : : "memory");
+}
+
+/*
+ * Flush a range of VA's hypervisor mappings from the data TLB. This is not
+ * sufficient when changing code mappings or for self modifying code.
+ */
+static inline void flush_xen_data_tlb_range_va(unsigned long va, unsigned long size)
+{
+    unsigned long end = va + size;
+    dsb(); /* Ensure preceding are visible */
+    while ( va < end ) {
+        asm volatile("tlbi vae2, %0;"
+                     : : "r" (va>>PAGE_SHIFT) : "memory");
+        va += PAGE_SIZE;
+    }
+    dsb(); /* Ensure completion of the TLB flush */
+    isb();
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ARM_ARM64_PAGE_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/flushtlb.h b/xen/include/asm-arm/flushtlb.h
index 210abfa..e7ce27b 100644
--- a/xen/include/asm-arm/flushtlb.h
+++ b/xen/include/asm-arm/flushtlb.h
@@ -1,5 +1,5 @@
-#ifndef __FLUSHTLB_H__
-#define __FLUSHTLB_H__
+#ifndef __ASM_ARM_FLUSHTLB_H__
+#define __ASM_ARM_FLUSHTLB_H__
 
 #include <xen/cpumask.h>
 
@@ -14,32 +14,18 @@ do {                                                                    \
 
 #define tlbflush_current_time()                 (0)
 
-/* Flush local TLBs, current VMID only */
-static inline void flush_tlb_local(void)
-{
-    dsb();
-
-    WRITE_CP32((uint32_t) 0, TLBIALLIS);
-
-    dsb();
-    isb();
-}
-
-/* Flush local TLBs, all VMIDs, non-hypervisor mode */
-static inline void flush_tlb_all_local(void)
-{
-    dsb();
-
-    WRITE_CP32((uint32_t) 0, TLBIALLNSNHIS);
-
-    dsb();
-    isb();
-}
+#if defined(CONFIG_ARM_32)
+# include <asm/arm32/flushtlb.h>
+#elif defined(CONFIG_ARM_64)
+# include <asm/arm64/flushtlb.h>
+#else
+# error "unknown ARM variant"
+#endif
 
 /* Flush specified CPUs' TLBs */
 void flush_tlb_mask(const cpumask_t *mask);
 
-#endif /* __FLUSHTLB_H__ */
+#endif /* __ASM_ARM_FLUSHTLB_H__ */
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index e0a636f..709a508 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -250,6 +250,14 @@ static inline void write_pte(lpae_t *p, lpae_t pte)
         : : "r" (pte.bits), "r" (p) : "memory");
 }
 
+#if defined(CONFIG_ARM_32)
+# include <asm/arm32/page.h>
+#elif defined(CONFIG_ARM_64)
+# include <asm/arm64/page.h>
+#else
+# error "unknown ARM variant"
+#endif
+
 /* Architectural minimum cacheline size is 4 32-bit words. */
 #define MIN_CACHELINE_BYTES 16
 /* Actual cacheline size on the boot CPU. */
@@ -282,65 +290,6 @@ static inline void flush_xen_dcache_va_range(void *p, unsigned long size)
             : : "r" (_p), "m" (*_p));                                   \
 } while (0)
 
-
-/*
- * Flush all hypervisor mappings from the TLB and branch predictor.
- * This is needed after changing Xen code mappings.
- *
- * The caller needs to issue the necessary DSB and D-cache flushes
- * before calling flush_xen_text_tlb.
- */
-static inline void flush_xen_text_tlb(void)
-{
-    register unsigned long r0 asm ("r0");
-    asm volatile (
-        "isb;"                        /* Ensure synchronization with previous changes to text */
-        STORE_CP32(0, TLBIALLH)       /* Flush hypervisor TLB */
-        STORE_CP32(0, ICIALLU)        /* Flush I-cache */
-        STORE_CP32(0, BPIALL)         /* Flush branch predictor */
-        "dsb;"                        /* Ensure completion of TLB+BP flush */
-        "isb;"
-        : : "r" (r0) /*dummy*/ : "memory");
-}
-
-/*
- * Flush all hypervisor mappings from the data TLB. This is not
- * sufficient when changing code mappings or for self modifying code.
- */
-static inline void flush_xen_data_tlb(void)
-{
-    register unsigned long r0 asm ("r0");
-    asm volatile("dsb;" /* Ensure preceding are visible */
-                 STORE_CP32(0, TLBIALLH)
-                 "dsb;" /* Ensure completion of the TLB flush */
-                 "isb;"
-                 : : "r" (r0) /* dummy */: "memory");
-}
-
-/*
- * Flush a range of VA's hypervisor mappings from the data TLB. This is not
- * sufficient when changing code mappings or for self modifying code.
- */
-static inline void flush_xen_data_tlb_range_va(unsigned long va, unsigned long size)
-{
-    unsigned long end = va + size;
-    dsb(); /* Ensure preceding are visible */
-    while ( va < end ) {
-        asm volatile(STORE_CP32(0, TLBIMVAH)
-                     : : "r" (va) : "memory");
-        va += PAGE_SIZE;
-    }
-    dsb(); /* Ensure completion of the TLB flush */
-    isb();
-}
-
-/* Flush all non-hypervisor mappings from the TLB */
-static inline void flush_guest_tlb(void)
-{
-    register unsigned long r0 asm ("r0");
-    WRITE_CP32(r0 /* dummy */, TLBIALLNSNH);
-}
-
 /* Print a walk of an arbitrary page table */
 void dump_pt_walk(lpae_t *table, paddr_t addr);
 
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 11/46] xen: arm64: PTE handling
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (9 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 10/46] xen: arm64: TLB flushes Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 12/46] xen: arm64: dcache flush Ian Campbell
                   ` (36 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
 xen/include/asm-arm/arm32/page.h |   20 ++++++++++++++++++++
 xen/include/asm-arm/arm64/page.h |   15 +++++++++++++++
 xen/include/asm-arm/page.h       |   20 --------------------
 3 files changed, 35 insertions(+), 20 deletions(-)

diff --git a/xen/include/asm-arm/arm32/page.h b/xen/include/asm-arm/arm32/page.h
index 073b8d1..a384f04 100644
--- a/xen/include/asm-arm/arm32/page.h
+++ b/xen/include/asm-arm/arm32/page.h
@@ -3,6 +3,26 @@
 
 #ifndef __ASSEMBLY__
 
+/* Write a pagetable entry.
+ *
+ * If the table entry is changing a text mapping, it is responsibility
+ * of the caller to issue an ISB after write_pte.
+ */
+static inline void write_pte(lpae_t *p, lpae_t pte)
+{
+    asm volatile (
+        /* Ensure any writes have completed with the old mappings. */
+        "dsb;"
+        /* Safely write the entry (STRD is atomic on CPUs that support LPAE) */
+        "strd %0, %H0, [%1];"
+        "dsb;"
+        /* Push this cacheline to the PoC so the rest of the system sees it. */
+        STORE_CP32(1, DCCMVAC)
+        /* Ensure that the data flush is completed before proceeding */
+        "dsb;"
+        : : "r" (pte.bits), "r" (p) : "memory");
+}
+
 /*
  * Flush all hypervisor mappings from the TLB and branch predictor.
  * This is needed after changing Xen code mappings.
diff --git a/xen/include/asm-arm/arm64/page.h b/xen/include/asm-arm/arm64/page.h
index 636fb63..99b7296 100644
--- a/xen/include/asm-arm/arm64/page.h
+++ b/xen/include/asm-arm/arm64/page.h
@@ -3,6 +3,21 @@
 
 #ifndef __ASSEMBLY__
 
+/* Write a pagetable entry */
+static inline void write_pte(lpae_t *p, lpae_t pte)
+{
+    asm volatile (
+        /* Ensure any writes have completed with the old mappings. */
+        "dsb sy;"
+        "str %0, [%1];"         /* Write the entry */
+        "dsb sy;"
+        /* Push this cacheline to the PoC so the rest of the system sees it. */
+        "dc cvac, %1;"
+        /* Ensure that the data flush is completed before proceeding */
+        "dsb sy;"
+        : : "r" (pte.bits), "r" (p) : "memory");
+}
+
 /*
  * Flush all hypervisor mappings from the TLB
  * This is needed after changing Xen code mappings.
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index 709a508..4e245a9 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -230,26 +230,6 @@ static inline lpae_t mfn_to_p2m_entry(unsigned long mfn, unsigned int mattr)
     return e;
 }
 
-/* Write a pagetable entry.
- *
- * If the table entry is changing a text mapping, it is responsibility
- * of the caller to issue an ISB after write_pte.
- */
-static inline void write_pte(lpae_t *p, lpae_t pte)
-{
-    asm volatile (
-        /* Ensure any writes have completed with the old mappings. */
-        "dsb;"
-        /* Safely write the entry (STRD is atomic on CPUs that support LPAE) */
-        "strd %0, %H0, [%1];"
-        "dsb;"
-        /* Push this cacheline to the PoC so the rest of the system sees it. */
-        STORE_CP32(1, DCCMVAC)
-        /* Ensure that the data flush is completed before proceeding */
-        "dsb;"
-        : : "r" (pte.bits), "r" (p) : "memory");
-}
-
 #if defined(CONFIG_ARM_32)
 # include <asm/arm32/page.h>
 #elif defined(CONFIG_ARM_64)
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread
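
For context, a minimal sketch of how the write_pte() helper being moved
here is typically used when installing an entry. mfn_to_p2m_entry() and
third_table_offset() exist elsewhere in this tree; the sketch_install()
wrapper, its arguments and the surrounding scenario are assumptions made
for illustration only.

    /* Illustrative only: install a mapping for machine frame 'mfn' at the
     * third-level slot covering 'addr'.  write_pte() ensures the walker
     * never sees a torn entry (single STRD/STR) and cleans the line to
     * the PoC so the table walker observes it. */
    static void sketch_install(lpae_t *third, paddr_t addr,
                               unsigned long mfn, unsigned int mattr)
    {
        lpae_t pte = mfn_to_p2m_entry(mfn, mattr);   /* build descriptor */

        write_pte(&third[third_table_offset(addr)], pte);

        /* Per the comment on write_pte(): if this changed a *text*
         * mapping, the caller would also need an isb() here. */
    }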

* [PATCH V2 12/46] xen: arm64: dcache flush
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (10 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 11/46] xen: arm64: PTE handling Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 13/46] xen: arm64: address translation Ian Campbell
                   ` (35 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Use "dsb sy" instead of bare "dsb", they mean the same on 32-bit but only the
former is valid on 64-bit.

Abstract the actual flush operation into a macro.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
v2: revert to inline asm
---
 xen/include/asm-arm/arm32/page.h |    3 +++
 xen/include/asm-arm/arm64/page.h |    3 +++
 xen/include/asm-arm/page.h       |    8 ++++----
 3 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/xen/include/asm-arm/arm32/page.h b/xen/include/asm-arm/arm32/page.h
index a384f04..2b15c22 100644
--- a/xen/include/asm-arm/arm32/page.h
+++ b/xen/include/asm-arm/arm32/page.h
@@ -23,6 +23,9 @@ static inline void write_pte(lpae_t *p, lpae_t pte)
         : : "r" (pte.bits), "r" (p) : "memory");
 }
 
+/* Inline ASM to flush dcache on register R (may be an inline asm operand) */
+#define __flush_xen_dcache_one(R) STORE_CP32(R, DCCMVAC)
+
 /*
  * Flush all hypervisor mappings from the TLB and branch predictor.
  * This is needed after changing Xen code mappings.
diff --git a/xen/include/asm-arm/arm64/page.h b/xen/include/asm-arm/arm64/page.h
index 99b7296..4911ba3 100644
--- a/xen/include/asm-arm/arm64/page.h
+++ b/xen/include/asm-arm/arm64/page.h
@@ -18,6 +18,9 @@ static inline void write_pte(lpae_t *p, lpae_t pte)
         : : "r" (pte.bits), "r" (p) : "memory");
 }
 
+/* Inline ASM to flush dcache on register R (may be an inline asm operand) */
+#define __flush_xen_dcache_one(R) "dc cvac, %" #R ";"
+
 /*
  * Flush all hypervisor mappings from the TLB
  * This is needed after changing Xen code mappings.
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index 4e245a9..b89238b 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -251,7 +251,7 @@ static inline void flush_xen_dcache_va_range(void *p, unsigned long size)
     void *end;
     dsb();           /* So the CPU issues all writes to the range */
     for ( end = p + size; p < end; p += cacheline_bytes )
-        WRITE_CP32((uint32_t) p, DCCMVAC);
+        asm volatile (__flush_xen_dcache_one(0) : : "r" (p));
     dsb();           /* So we know the flushes happen before continuing */
 }
 
@@ -264,9 +264,9 @@ static inline void flush_xen_dcache_va_range(void *p, unsigned long size)
         flush_xen_dcache_va_range(_p, sizeof(x));                       \
     else                                                                \
         asm volatile (                                                  \
-            "dsb;"   /* Finish all earlier writes */                    \
-            STORE_CP32(0, DCCMVAC)                                      \
-            "dsb;"   /* Finish flush before continuing */               \
+            "dsb sy;"   /* Finish all earlier writes */                 \
+            __flush_xen_dcache_one(0)                                   \
+            "dsb sy;"   /* Finish flush before continuing */            \
             : : "r" (_p), "m" (*_p));                                   \
 } while (0)
 
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread
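
A rough sketch of the two call sites this split serves. Only
flush_xen_dcache(), flush_xen_dcache_va_range() and
__flush_xen_dcache_one() come from these headers; the structure, the
sketch_publish() wrapper and the "handed to a non-coherent observer"
scenario are assumptions for illustration.

    /* Illustrative only: publish data that another observer (e.g. a
     * secondary CPU with its MMU and caches still off) reads straight
     * from memory, so it must be cleaned to the PoC first. */
    struct sketch_boot_info {
        uint64_t ttbr;
        uint64_t dtb_paddr;
    };

    static void sketch_publish(struct sketch_boot_info *info,
                               void *buf, size_t len)
    {
        /* Small object: flush_xen_dcache() uses the inlined
         * one-cacheline sequence when the object fits, otherwise it
         * falls back to the range version. */
        flush_xen_dcache(*info);

        /* Arbitrary buffer: flushed cacheline by cacheline via
         * __flush_xen_dcache_one(). */
        flush_xen_dcache_va_range(buf, len);
    }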

* [PATCH V2 13/46] xen: arm64: address translation
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (11 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 12/46] xen: arm64: dcache flush Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 14/46] xen: arm64: barriers and wait for interrupts/events Ian Campbell
                   ` (34 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
I'm torn between unsigned long and vaddr_t...
---
 xen/include/asm-arm/arm32/page.h |   34 ++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/arm64/page.h |   35 +++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/page.h       |   38 ++------------------------------------
 xen/include/asm-arm/types.h      |    4 ++++
 4 files changed, 75 insertions(+), 36 deletions(-)

diff --git a/xen/include/asm-arm/arm32/page.h b/xen/include/asm-arm/arm32/page.h
index 2b15c22..d295316 100644
--- a/xen/include/asm-arm/arm32/page.h
+++ b/xen/include/asm-arm/arm32/page.h
@@ -77,6 +77,40 @@ static inline void flush_xen_data_tlb_range_va(unsigned long va, unsigned long s
     isb();
 }
 
+/* Ask the MMU to translate a VA for us */
+static inline uint64_t __va_to_par(vaddr_t va)
+{
+    uint64_t par, tmp;
+    tmp = READ_CP64(PAR);
+    WRITE_CP32(va, ATS1HR);
+    isb(); /* Ensure result is available. */
+    par = READ_CP64(PAR);
+    WRITE_CP64(tmp, PAR);
+    return par;
+}
+
+/* Ask the MMU to translate a Guest VA for us */
+static inline uint64_t gva_to_ma_par(vaddr_t va)
+{
+    uint64_t par, tmp;
+    tmp = READ_CP64(PAR);
+    WRITE_CP32(va, ATS12NSOPR);
+    isb(); /* Ensure result is available. */
+    par = READ_CP64(PAR);
+    WRITE_CP64(tmp, PAR);
+    return par;
+}
+static inline uint64_t gva_to_ipa_par(vaddr_t va)
+{
+    uint64_t par, tmp;
+    tmp = READ_CP64(PAR);
+    WRITE_CP32(va, ATS1CPR);
+    isb(); /* Ensure result is available. */
+    par = READ_CP64(PAR);
+    WRITE_CP64(tmp, PAR);
+    return par;
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ARM_ARM32_PAGE_H__ */
diff --git a/xen/include/asm-arm/arm64/page.h b/xen/include/asm-arm/arm64/page.h
index 4911ba3..9bf41fb 100644
--- a/xen/include/asm-arm/arm64/page.h
+++ b/xen/include/asm-arm/arm64/page.h
@@ -70,6 +70,41 @@ static inline void flush_xen_data_tlb_range_va(unsigned long va, unsigned long s
     isb();
 }
 
+/* Ask the MMU to translate a VA for us */
+static inline uint64_t __va_to_par(vaddr_t va)
+{
+    uint64_t par, tmp = READ_SYSREG64(PAR_EL1);
+
+    asm volatile ("at s1e2r, %0;" : : "r" (va));
+    isb();
+    par = READ_SYSREG64(PAR_EL1);
+    WRITE_SYSREG64(tmp, PAR_EL1);
+    return par;
+}
+
+/* Ask the MMU to translate a Guest VA for us */
+static inline uint64_t gva_to_ma_par(vaddr_t va)
+{
+    uint64_t par, tmp = READ_SYSREG64(PAR_EL1);
+
+    asm volatile ("at s12e1r, %0;" : : "r" (va));
+    isb();
+    par = READ_SYSREG64(PAR_EL1);
+    WRITE_SYSREG64(tmp, PAR_EL1);
+    return par;
+}
+
+static inline uint64_t gva_to_ipa_par(vaddr_t va)
+{
+    uint64_t par, tmp = READ_SYSREG64(PAR_EL1);
+
+    asm volatile ("at s1e1r, %0;" : : "r" (va));
+    isb();
+    par = READ_SYSREG64(PAR_EL1);
+    WRITE_SYSREG64(tmp, PAR_EL1);
+    return par;
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ARM_ARM64_PAGE_H__ */
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index b89238b..ad52567 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -278,19 +278,7 @@ extern void dump_hyp_walk(uint32_t addr);
 /* Print a walk of the p2m for a domain for a physical address. */
 extern void dump_p2m_lookup(struct domain *d, paddr_t addr);
 
-/* Ask the MMU to translate a VA for us */
-static inline uint64_t __va_to_par(uint32_t va)
-{
-    uint64_t par, tmp;
-    tmp = READ_CP64(PAR);
-    WRITE_CP32(va, ATS1HR);
-    isb(); /* Ensure result is available. */
-    par = READ_CP64(PAR);
-    WRITE_CP64(tmp, PAR);
-    return par;
-}
-
-static inline uint64_t va_to_par(uint32_t va)
+static inline uint64_t va_to_par(vaddr_t va)
 {
     uint64_t par = __va_to_par(va);
     /* It is not OK to call this with an invalid VA */
@@ -302,29 +290,7 @@ static inline uint64_t va_to_par(uint32_t va)
     return par;
 }
 
-/* Ask the MMU to translate a Guest VA for us */
-static inline uint64_t gva_to_ma_par(uint32_t va)
-{
-    uint64_t par, tmp;
-    tmp = READ_CP64(PAR);
-    WRITE_CP32(va, ATS12NSOPR);
-    isb(); /* Ensure result is available. */
-    par = READ_CP64(PAR);
-    WRITE_CP64(tmp, PAR);
-    return par;
-}
-static inline uint64_t gva_to_ipa_par(uint32_t va)
-{
-    uint64_t par, tmp;
-    tmp = READ_CP64(PAR);
-    WRITE_CP32(va, ATS1CPR);
-    isb(); /* Ensure result is available. */
-    par = READ_CP64(PAR);
-    WRITE_CP64(tmp, PAR);
-    return par;
-}
-
-static inline int gva_to_ipa(uint32_t va, paddr_t *paddr)
+static inline int gva_to_ipa(vaddr_t va, paddr_t *paddr)
 {
     uint64_t par = gva_to_ipa_par(va);
     if ( par & PAR_F )
diff --git a/xen/include/asm-arm/types.h b/xen/include/asm-arm/types.h
index 07f7898..d3e16d8 100644
--- a/xen/include/asm-arm/types.h
+++ b/xen/include/asm-arm/types.h
@@ -36,12 +36,16 @@ typedef unsigned int u32;
 #if defined(CONFIG_ARM_32)
 typedef signed long long s64;
 typedef unsigned long long u64;
+typedef u32 vaddr_t;
+#define PRIvaddr PRIx32
 typedef u64 paddr_t;
 #define INVALID_PADDR (~0ULL)
 #define PRIpaddr "016llx"
 #elif defined (CONFIG_ARM_64)
 typedef signed long s64;
 typedef unsigned long u64;
+typedef u64 vaddr_t;
+#define PRIvaddr PRIx64
 typedef u64 paddr_t;
 #define INVALID_PADDR (~0UL)
 #define PRIpaddr "016lx"
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread
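
A short sketch of the consumer side of these helpers. gva_to_ipa() is the
wrapper whose prototype changes above; the sketch_resolve() function, the
-EFAULT return and the printk are illustrative assumptions rather than
anything visible in this hunk.

    /* Illustrative only: resolve a guest virtual address to an IPA and
     * bail out if the hardware walk faulted (PAR.F set in the underlying
     * gva_to_ipa_par() result). */
    static int sketch_resolve(vaddr_t gva)
    {
        paddr_t ipa;

        if ( gva_to_ipa(gva, &ipa) )
            return -EFAULT;                     /* assumed failure path */

        printk("guest VA 0x%"PRIvaddr" maps to IPA 0x%"PRIpaddr"\n",
               gva, ipa);
        return 0;
    }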

* [PATCH V2 14/46] xen: arm64: barriers and wait for interrupts/events
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (12 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 13/46] xen: arm64: address translation Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-21 15:01   ` Tim Deegan
  2013-02-14 16:47 ` [PATCH V2 15/46] xen: arm64: xchg and cmpxchg Ian Campbell
                   ` (33 subsequent siblings)
  47 siblings, 1 reply; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
 xen/include/asm-arm/arm32/system.h |   29 +++++++++++++++++++++++++++++
 xen/include/asm-arm/arm64/system.h |   28 ++++++++++++++++++++++++++++
 xen/include/asm-arm/system.h       |   20 ++++++++------------
 3 files changed, 65 insertions(+), 12 deletions(-)
 create mode 100644 xen/include/asm-arm/arm32/system.h
 create mode 100644 xen/include/asm-arm/arm64/system.h

diff --git a/xen/include/asm-arm/arm32/system.h b/xen/include/asm-arm/arm32/system.h
new file mode 100644
index 0000000..91098a0
--- /dev/null
+++ b/xen/include/asm-arm/arm32/system.h
@@ -0,0 +1,29 @@
+/* Portions taken from Linux arch arm */
+#ifndef __ASM_ARM32_SYSTEM_H
+#define __ASM_ARM32_SYSTEM_H
+
+#define sev() __asm__ __volatile__ ("sev" : : : "memory")
+#define wfe() __asm__ __volatile__ ("wfe" : : : "memory")
+#define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
+
+#define isb() __asm__ __volatile__ ("isb" : : : "memory")
+#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
+#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
+
+#define mb()            dsb()
+#define rmb()           dsb()
+#define wmb()           mb()
+
+#define smp_mb()        dmb()
+#define smp_rmb()       dmb()
+#define smp_wmb()       dmb()
+
+#endif
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/arm64/system.h b/xen/include/asm-arm/arm64/system.h
new file mode 100644
index 0000000..33c031d
--- /dev/null
+++ b/xen/include/asm-arm/arm64/system.h
@@ -0,0 +1,28 @@
+/* Portions taken from Linux arch arm64 */
+#ifndef __ASM_ARM64_SYSTEM_H
+#define __ASM_ARM64_SYSTEM_H
+
+#define sev()           asm volatile("sev" : : : "memory")
+#define wfe()           asm volatile("wfe" : : : "memory")
+#define wfi()           asm volatile("wfi" : : : "memory")
+
+#define isb()           asm volatile("isb" : : : "memory")
+#define dsb()           asm volatile("dsb sy" : : : "memory")
+
+#define mb()            dsb()
+#define rmb()           asm volatile("dsb ld" : : : "memory")
+#define wmb()           asm volatile("dsb st" : : : "memory")
+
+#define smp_mb()        asm volatile("dmb ish" : : : "memory")
+#define smp_rmb()       asm volatile("dmb ishld" : : : "memory")
+#define smp_wmb()       asm volatile("dmb ishst" : : : "memory")
+
+#endif
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/system.h b/xen/include/asm-arm/system.h
index 216ef1f..8b4c97a 100644
--- a/xen/include/asm-arm/system.h
+++ b/xen/include/asm-arm/system.h
@@ -11,18 +11,6 @@
 #define xchg(ptr,x) \
         ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
 
-#define isb() __asm__ __volatile__ ("isb" : : : "memory")
-#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
-#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
-
-#define mb()            dsb()
-#define rmb()           dsb()
-#define wmb()           mb()
-
-#define smp_mb()        dmb()
-#define smp_rmb()       dmb()
-#define smp_wmb()       dmb()
-
 /*
  * This is used to ensure the compiler did actually allocate the register we
  * asked it for some inline assembly sequences.  Apparently we can't trust
@@ -33,6 +21,14 @@
  */
 #define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
 
+#if defined(CONFIG_ARM_32)
+# include <asm/arm32/system.h>
+#elif defined(CONFIG_ARM_64)
+# include <asm/arm64/system.h>
+#else
+# error "unknown ARM variant"
+#endif
+
 extern void __bad_xchg(volatile void *, int);
 
 static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread
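
As a reminder of what the split buys, here is the textbook message-passing
pattern the smp_* flavours exist for, now expressible identically on both
architectures. The ring buffer, the producer/consumer functions and the
wfe()/sev() pairing are illustrative assumptions; only the barrier and
event macros come from these headers.

    /* Illustrative only: one CPU publishes a value, another consumes it. */
    static volatile unsigned int prod;
    static unsigned int ring[16];

    static void sketch_produce(unsigned int val)
    {
        ring[prod & 15] = val;   /* write the payload                     */
        smp_wmb();               /* order payload before the index update */
        prod++;                  /* publish                               */
        dsb();                   /* make the index visible before the event */
        sev();                   /* kick anyone sitting in wfe()          */
    }

    static unsigned int sketch_consume(unsigned int cons)
    {
        while ( cons == prod )
            wfe();               /* doze until an event arrives           */
        smp_rmb();               /* order index read before payload read  */
        return ring[cons & 15];
    }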

* [PATCH V2 15/46] xen: arm64: xchg and cmpxchg
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (13 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 14/46] xen: arm64: barriers and wait for interrupts/events Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 16/46] xen: arm64: interrupt/abort mask/unmask Ian Campbell
                   ` (32 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
 xen/include/asm-arm/arm32/system.h |  115 ++++++++++++++++++++++++++
 xen/include/asm-arm/arm64/system.h |  155 ++++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/system.h       |  114 --------------------------
 3 files changed, 270 insertions(+), 114 deletions(-)

diff --git a/xen/include/asm-arm/arm32/system.h b/xen/include/asm-arm/arm32/system.h
index 91098a0..9dbe8e3 100644
--- a/xen/include/asm-arm/arm32/system.h
+++ b/xen/include/asm-arm/arm32/system.h
@@ -18,6 +18,121 @@
 #define smp_rmb()       dmb()
 #define smp_wmb()       dmb()
 
+extern void __bad_xchg(volatile void *, int);
+
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+{
+        unsigned long ret;
+        unsigned int tmp;
+
+        smp_mb();
+
+        switch (size) {
+        case 1:
+                asm volatile("@ __xchg1\n"
+                "1:     ldrexb  %0, [%3]\n"
+                "       strexb  %1, %2, [%3]\n"
+                "       teq     %1, #0\n"
+                "       bne     1b"
+                        : "=&r" (ret), "=&r" (tmp)
+                        : "r" (x), "r" (ptr)
+                        : "memory", "cc");
+                break;
+        case 4:
+                asm volatile("@ __xchg4\n"
+                "1:     ldrex   %0, [%3]\n"
+                "       strex   %1, %2, [%3]\n"
+                "       teq     %1, #0\n"
+                "       bne     1b"
+                        : "=&r" (ret), "=&r" (tmp)
+                        : "r" (x), "r" (ptr)
+                        : "memory", "cc");
+                break;
+        default:
+                __bad_xchg(ptr, size), ret = 0;
+                break;
+        }
+        smp_mb();
+
+        return ret;
+}
+
+/*
+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
+ * store NEW in MEM.  Return the initial value in MEM.  Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+extern void __bad_cmpxchg(volatile void *ptr, int size);
+
+static always_inline unsigned long __cmpxchg(
+    volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+    unsigned long /*long*/ oldval, res;
+
+    switch (size) {
+    case 1:
+        do {
+            asm volatile("@ __cmpxchg1\n"
+                         "       ldrexb  %1, [%2]\n"
+                         "       mov     %0, #0\n"
+                         "       teq     %1, %3\n"
+                         "       strexbeq %0, %4, [%2]\n"
+                         : "=&r" (res), "=&r" (oldval)
+                         : "r" (ptr), "Ir" (old), "r" (new)
+                         : "memory", "cc");
+        } while (res);
+        break;
+    case 2:
+        do {
+            asm volatile("@ __cmpxchg2\n"
+                         "       ldrexh  %1, [%2]\n"
+                         "       mov     %0, #0\n"
+                         "       teq     %1, %3\n"
+                         "       strexheq %0, %4, [%2]\n"
+                         : "=&r" (res), "=&r" (oldval)
+                         : "r" (ptr), "Ir" (old), "r" (new)
+                         : "memory", "cc");
+        } while (res);
+        break;
+    case 4:
+        do {
+            asm volatile("@ __cmpxchg4\n"
+                         "       ldrex   %1, [%2]\n"
+                         "       mov     %0, #0\n"
+                         "       teq     %1, %3\n"
+                         "       strexeq %0, %4, [%2]\n"
+                         : "=&r" (res), "=&r" (oldval)
+                         : "r" (ptr), "Ir" (old), "r" (new)
+                         : "memory", "cc");
+        } while (res);
+        break;
+#if 0
+    case 8:
+        do {
+            asm volatile("@ __cmpxchg8\n"
+                         "       ldrexd   %1, [%2]\n"
+                         "       mov      %0, #0\n"
+                         "       teq      %1, %3\n"
+                         "       strexdeq %0, %4, [%2]\n"
+                         : "=&r" (res), "=&r" (oldval)
+                         : "r" (ptr), "Ir" (old), "r" (new)
+                         : "memory", "cc");
+        } while (res);
+        break;
+#endif
+    default:
+        __bad_cmpxchg(ptr, size);
+        oldval = 0;
+    }
+
+    return oldval;
+}
+
+#define cmpxchg(ptr,o,n)                                                \
+    ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),            \
+                                   (unsigned long)(n),sizeof(*(ptr))))
+
 #endif
 /*
  * Local variables:
diff --git a/xen/include/asm-arm/arm64/system.h b/xen/include/asm-arm/arm64/system.h
index 33c031d..6fd26f8 100644
--- a/xen/include/asm-arm/arm64/system.h
+++ b/xen/include/asm-arm/arm64/system.h
@@ -17,6 +17,161 @@
 #define smp_rmb()       asm volatile("dmb ishld" : : : "memory")
 #define smp_wmb()       asm volatile("dmb ishst" : : : "memory")
 
+
+extern void __bad_xchg(volatile void *, int);
+
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+{
+        unsigned long ret, tmp;
+
+        switch (size) {
+        case 1:
+                asm volatile("//        __xchg1\n"
+                "1:     ldaxrb  %w0, [%3]\n"
+                "       stlxrb  %w1, %w2, [%3]\n"
+                "       cbnz    %w1, 1b\n"
+                        : "=&r" (ret), "=&r" (tmp)
+                        : "r" (x), "r" (ptr)
+                        : "memory", "cc");
+                break;
+        case 2:
+                asm volatile("//        __xchg2\n"
+                "1:     ldaxrh  %w0, [%3]\n"
+                "       stlxrh  %w1, %w2, [%3]\n"
+                "       cbnz    %w1, 1b\n"
+                        : "=&r" (ret), "=&r" (tmp)
+                        : "r" (x), "r" (ptr)
+                        : "memory", "cc");
+                break;
+        case 4:
+                asm volatile("//        __xchg4\n"
+                "1:     ldaxr   %w0, [%3]\n"
+                "       stlxr   %w1, %w2, [%3]\n"
+                "       cbnz    %w1, 1b\n"
+                        : "=&r" (ret), "=&r" (tmp)
+                        : "r" (x), "r" (ptr)
+                        : "memory", "cc");
+                break;
+        case 8:
+                asm volatile("//        __xchg8\n"
+                "1:     ldaxr   %0, [%3]\n"
+                "       stlxr   %w1, %2, [%3]\n"
+                "       cbnz    %w1, 1b\n"
+                        : "=&r" (ret), "=&r" (tmp)
+                        : "r" (x), "r" (ptr)
+                        : "memory", "cc");
+                break;
+        default:
+                __bad_xchg(ptr, size), ret = 0;
+                break;
+        }
+
+        return ret;
+}
+
+#define xchg(ptr,x) \
+        ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+extern void __bad_cmpxchg(volatile void *ptr, int size);
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+                                      unsigned long new, int size)
+{
+        unsigned long oldval = 0, res;
+
+        switch (size) {
+        case 1:
+                do {
+                        asm volatile("// __cmpxchg1\n"
+                        "       ldxrb   %w1, [%2]\n"
+                        "       mov     %w0, #0\n"
+                        "       cmp     %w1, %w3\n"
+                        "       b.ne    1f\n"
+                        "       stxrb   %w0, %w4, [%2]\n"
+                        "1:\n"
+                                : "=&r" (res), "=&r" (oldval)
+                                : "r" (ptr), "Ir" (old), "r" (new)
+                                : "cc");
+                } while (res);
+                break;
+
+        case 2:
+                do {
+                        asm volatile("// __cmpxchg2\n"
+                        "       ldxrh   %w1, [%2]\n"
+                        "       mov     %w0, #0\n"
+                        "       cmp     %w1, %w3\n"
+                        "       b.ne    1f\n"
+                        "       stxrh   %w0, %w4, [%2]\n"
+                        "1:\n"
+                                : "=&r" (res), "=&r" (oldval)
+                                : "r" (ptr), "Ir" (old), "r" (new)
+                                : "memory", "cc");
+                } while (res);
+                break;
+
+        case 4:
+                do {
+                        asm volatile("// __cmpxchg4\n"
+                        "       ldxr    %w1, [%2]\n"
+                        "       mov     %w0, #0\n"
+                        "       cmp     %w1, %w3\n"
+                        "       b.ne    1f\n"
+                        "       stxr    %w0, %w4, [%2]\n"
+                        "1:\n"
+                                : "=&r" (res), "=&r" (oldval)
+                                : "r" (ptr), "Ir" (old), "r" (new)
+                                : "cc");
+                } while (res);
+                break;
+
+        case 8:
+                do {
+                        asm volatile("// __cmpxchg8\n"
+                        "       ldxr    %1, [%2]\n"
+                        "       mov     %w0, #0\n"
+                        "       cmp     %1, %3\n"
+                        "       b.ne    1f\n"
+                        "       stxr    %w0, %4, [%2]\n"
+                        "1:\n"
+                                : "=&r" (res), "=&r" (oldval)
+                                : "r" (ptr), "Ir" (old), "r" (new)
+                                : "cc");
+                } while (res);
+                break;
+
+        default:
+		__bad_cmpxchg(ptr, size);
+		oldval = 0;
+        }
+
+        return oldval;
+}
+
+static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
+                                         unsigned long new, int size)
+{
+        unsigned long ret;
+
+        smp_mb();
+        ret = __cmpxchg(ptr, old, new, size);
+        smp_mb();
+
+        return ret;
+}
+
+#define cmpxchg(ptr,o,n)                                                \
+        ((__typeof__(*(ptr)))__cmpxchg_mb((ptr),                        \
+                                          (unsigned long)(o),           \
+                                          (unsigned long)(n),           \
+                                          sizeof(*(ptr))))
+
+#define cmpxchg_local(ptr,o,n)                                          \
+        ((__typeof__(*(ptr)))__cmpxchg((ptr),                           \
+                                       (unsigned long)(o),              \
+                                       (unsigned long)(n),              \
+                                       sizeof(*(ptr))))
+
 #endif
 /*
  * Local variables:
diff --git a/xen/include/asm-arm/system.h b/xen/include/asm-arm/system.h
index 8b4c97a..e4cb99c 100644
--- a/xen/include/asm-arm/system.h
+++ b/xen/include/asm-arm/system.h
@@ -29,120 +29,6 @@
 # error "unknown ARM variant"
 #endif
 
-extern void __bad_xchg(volatile void *, int);
-
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
-{
-        unsigned long ret;
-        unsigned int tmp;
-
-        smp_mb();
-
-        switch (size) {
-        case 1:
-                asm volatile("@ __xchg1\n"
-                "1:     ldrexb  %0, [%3]\n"
-                "       strexb  %1, %2, [%3]\n"
-                "       teq     %1, #0\n"
-                "       bne     1b"
-                        : "=&r" (ret), "=&r" (tmp)
-                        : "r" (x), "r" (ptr)
-                        : "memory", "cc");
-                break;
-        case 4:
-                asm volatile("@ __xchg4\n"
-                "1:     ldrex   %0, [%3]\n"
-                "       strex   %1, %2, [%3]\n"
-                "       teq     %1, #0\n"
-                "       bne     1b"
-                        : "=&r" (ret), "=&r" (tmp)
-                        : "r" (x), "r" (ptr)
-                        : "memory", "cc");
-                break;
-        default:
-                __bad_xchg(ptr, size), ret = 0;
-                break;
-        }
-        smp_mb();
-
-        return ret;
-}
-
-/*
- * Atomic compare and exchange.  Compare OLD with MEM, if identical,
- * store NEW in MEM.  Return the initial value in MEM.  Success is
- * indicated by comparing RETURN with OLD.
- */
-
-extern void __bad_cmpxchg(volatile void *ptr, int size);
-
-static always_inline unsigned long __cmpxchg(
-    volatile void *ptr, unsigned long old, unsigned long new, int size)
-{
-    unsigned long /*long*/ oldval, res;
-
-    switch (size) {
-    case 1:
-        do {
-            asm volatile("@ __cmpxchg1\n"
-                         "       ldrexb  %1, [%2]\n"
-                         "       mov     %0, #0\n"
-                         "       teq     %1, %3\n"
-                         "       strexbeq %0, %4, [%2]\n"
-                         : "=&r" (res), "=&r" (oldval)
-                         : "r" (ptr), "Ir" (old), "r" (new)
-                         : "memory", "cc");
-        } while (res);
-        break;
-    case 2:
-        do {
-            asm volatile("@ __cmpxchg2\n"
-                         "       ldrexh  %1, [%2]\n"
-                         "       mov     %0, #0\n"
-                         "       teq     %1, %3\n"
-                         "       strexheq %0, %4, [%2]\n"
-                         : "=&r" (res), "=&r" (oldval)
-                         : "r" (ptr), "Ir" (old), "r" (new)
-                         : "memory", "cc");
-        } while (res);
-        break;
-    case 4:
-        do {
-            asm volatile("@ __cmpxchg4\n"
-                         "       ldrex   %1, [%2]\n"
-                         "       mov     %0, #0\n"
-                         "       teq     %1, %3\n"
-                         "       strexeq %0, %4, [%2]\n"
-                         : "=&r" (res), "=&r" (oldval)
-                         : "r" (ptr), "Ir" (old), "r" (new)
-                         : "memory", "cc");
-        } while (res);
-        break;
-#if 0
-    case 8:
-        do {
-            asm volatile("@ __cmpxchg8\n"
-                         "       ldrexd   %1, [%2]\n"
-                         "       mov      %0, #0\n"
-                         "       teq      %1, %3\n"
-                         "       strexdeq %0, %4, [%2]\n"
-                         : "=&r" (res), "=&r" (oldval)
-                         : "r" (ptr), "Ir" (old), "r" (new)
-                         : "memory", "cc");
-        } while (res);
-        break;
-#endif
-    default:
-        __bad_cmpxchg(ptr, size);
-        oldval = 0;
-    }
-
-    return oldval;
-}
-#define cmpxchg(ptr,o,n)                                                \
-    ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),            \
-                                   (unsigned long)(n),sizeof(*(ptr))))
-
 #define local_irq_disable() asm volatile ( "cpsid i @ local_irq_disable\n" : : : "cc" )
 #define local_irq_enable()  asm volatile ( "cpsie i @ local_irq_enable\n" : : : "cc" )
 
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread
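
A small sketch of the usual compare-and-swap retry loop these definitions
enable. The bounded counter is illustrative; note that on arm64 cmpxchg()
maps to the fully-barriered __cmpxchg_mb() variant while cmpxchg_local()
skips the barriers.

    /* Illustrative only: atomically bump a counter, but never past 'max'. */
    static unsigned long sketch_bump_bounded(unsigned long *ctr,
                                             unsigned long max)
    {
        unsigned long old, new;

        do {
            old = *ctr;
            if ( old >= max )
                return old;                  /* saturated, nothing to do */
            new = old + 1;
        } while ( cmpxchg(ctr, old, new) != old ); /* retry if we raced */

        return new;
    }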

* [PATCH V2 16/46] xen: arm64: interrupt/abort mask/unmask
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (14 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 15/46] xen: arm64: xchg and cmpxchg Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 17/46] xen: arm64: div64 Ian Campbell
                   ` (31 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
 xen/include/asm-arm/arm32/system.h |   44 +++++++++++++++++++++++++++++
 xen/include/asm-arm/arm64/system.h |   54 ++++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/system.h       |   44 -----------------------------
 3 files changed, 98 insertions(+), 44 deletions(-)

diff --git a/xen/include/asm-arm/arm32/system.h b/xen/include/asm-arm/arm32/system.h
index 9dbe8e3..ac8fcb0 100644
--- a/xen/include/asm-arm/arm32/system.h
+++ b/xen/include/asm-arm/arm32/system.h
@@ -133,6 +133,50 @@ static always_inline unsigned long __cmpxchg(
     ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),            \
                                    (unsigned long)(n),sizeof(*(ptr))))
 
+#define local_irq_disable() asm volatile ( "cpsid i @ local_irq_disable\n" : : : "cc" )
+#define local_irq_enable()  asm volatile ( "cpsie i @ local_irq_enable\n" : : : "cc" )
+
+#define local_save_flags(x)                                      \
+({                                                               \
+    BUILD_BUG_ON(sizeof(x) != sizeof(long));                     \
+    asm volatile ( "mrs %0, cpsr     @ local_save_flags\n"       \
+                  : "=r" (x) :: "memory", "cc" );                \
+})
+#define local_irq_save(x)                                        \
+({                                                               \
+    local_save_flags(x);                                         \
+    local_irq_disable();                                         \
+})
+#define local_irq_restore(x)                                     \
+({                                                               \
+    BUILD_BUG_ON(sizeof(x) != sizeof(long));                     \
+    asm volatile (                                               \
+            "msr     cpsr_c, %0      @ local_irq_restore\n"      \
+            :                                                    \
+            : "r" (flags)                                        \
+            : "memory", "cc");                                   \
+})
+
+static inline int local_irq_is_enabled(void)
+{
+    unsigned long flags;
+    local_save_flags(flags);
+    return !(flags & PSR_IRQ_MASK);
+}
+
+#define local_fiq_enable()  __asm__("cpsie f   @ __stf\n" : : : "memory", "cc")
+#define local_fiq_disable() __asm__("cpsid f   @ __clf\n" : : : "memory", "cc")
+
+#define local_abort_enable() __asm__("cpsie a  @ __sta\n" : : : "memory", "cc")
+#define local_abort_disable() __asm__("cpsid a @ __sta\n" : : : "memory", "cc")
+
+static inline int local_fiq_is_enabled(void)
+{
+    unsigned long flags;
+    local_save_flags(flags);
+    return !(flags & PSR_FIQ_MASK);
+}
+
 #endif
 /*
  * Local variables:
diff --git a/xen/include/asm-arm/arm64/system.h b/xen/include/asm-arm/arm64/system.h
index 6fd26f8..cc7b959 100644
--- a/xen/include/asm-arm/arm64/system.h
+++ b/xen/include/asm-arm/arm64/system.h
@@ -172,6 +172,60 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
                                        (unsigned long)(n),              \
                                        sizeof(*(ptr))))
 
+/* Uses uimm4 as a bitmask to select the clearing of one or more of
+ * the DAIF exception mask bits:
+ * bit 3 selects the D mask,
+ * bit 2 the A mask,
+ * bit 1 the I mask and
+ * bit 0 the F mask.
+*/
+
+#define local_fiq_disable()   asm volatile ( "msr daifset, #1\n" ::: "memory" )
+#define local_fiq_enable()    asm volatile ( "msr daifclr, #1\n" ::: "memory" )
+#define local_irq_disable()   asm volatile ( "msr daifset, #2\n" ::: "memory" )
+#define local_irq_enable()    asm volatile ( "msr daifclr, #2\n" ::: "memory" )
+#define local_abort_disable() asm volatile ( "msr daifset, #4\n" ::: "memory" )
+#define local_abort_enable()  asm volatile ( "msr daifclr, #4\n" ::: "memory" )
+
+#define local_save_flags(x)                                      \
+({                                                               \
+    BUILD_BUG_ON(sizeof(x) != sizeof(long));                     \
+    asm volatile(                                                \
+        "mrs    %0, daif    // local_save_flags\n"               \
+                : "=r" (x)                                       \
+                :                                                \
+                : "memory");                                     \
+})
+
+#define local_irq_save(x)                                        \
+({                                                               \
+    local_save_flags(x);                                         \
+    local_irq_disable();                                         \
+})
+#define local_irq_restore(x)                                     \
+({                                                               \
+    BUILD_BUG_ON(sizeof(x) != sizeof(long));                     \
+    asm volatile (                                               \
+        "msr    daif, %0                // local_irq_restore"    \
+        :                                                        \
+        : "r" (flags)                                            \
+        : "memory");                                             \
+})
+
+static inline int local_irq_is_enabled(void)
+{
+    unsigned long flags;
+    local_save_flags(flags);
+    return !(flags & PSR_IRQ_MASK);
+}
+
+static inline int local_fiq_is_enabled(void)
+{
+    unsigned long flags;
+    local_save_flags(flags);
+    return !(flags & PSR_FIQ_MASK);
+}
+
 #endif
 /*
  * Local variables:
diff --git a/xen/include/asm-arm/system.h b/xen/include/asm-arm/system.h
index e4cb99c..a26936b 100644
--- a/xen/include/asm-arm/system.h
+++ b/xen/include/asm-arm/system.h
@@ -29,50 +29,6 @@
 # error "unknown ARM variant"
 #endif
 
-#define local_irq_disable() asm volatile ( "cpsid i @ local_irq_disable\n" : : : "cc" )
-#define local_irq_enable()  asm volatile ( "cpsie i @ local_irq_enable\n" : : : "cc" )
-
-#define local_save_flags(x)                                      \
-({                                                               \
-    BUILD_BUG_ON(sizeof(x) != sizeof(long));                     \
-    asm volatile ( "mrs %0, cpsr     @ local_save_flags\n"       \
-                  : "=r" (x) :: "memory", "cc" );                \
-})
-#define local_irq_save(x)                                        \
-({                                                               \
-    local_save_flags(x);                                         \
-    local_irq_disable();                                         \
-})
-#define local_irq_restore(x)                                     \
-({                                                               \
-    BUILD_BUG_ON(sizeof(x) != sizeof(long));                     \
-    asm volatile (                                               \
-            "msr     cpsr_c, %0      @ local_irq_restore\n"      \
-            :                                                    \
-            : "r" (flags)                                        \
-            : "memory", "cc");                                   \
-})
-
-static inline int local_irq_is_enabled(void)
-{
-    unsigned long flags;
-    local_save_flags(flags);
-    return !(flags & PSR_IRQ_MASK);
-}
-
-#define local_fiq_enable()  __asm__("cpsie f   @ __stf\n" : : : "memory", "cc")
-#define local_fiq_disable() __asm__("cpsid f   @ __clf\n" : : : "memory", "cc")
-
-#define local_abort_enable() __asm__("cpsie a  @ __sta\n" : : : "memory", "cc")
-#define local_abort_disable() __asm__("cpsid a @ __sta\n" : : : "memory", "cc")
-
-static inline int local_fiq_is_enabled(void)
-{
-    unsigned long flags;
-    local_save_flags(flags);
-    return !!(flags & PSR_FIQ_MASK);
-}
-
 extern struct vcpu *__context_switch(struct vcpu *prev, struct vcpu *next);
 
 #endif
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread
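
These wrappers are almost always used in the save/disable/restore pattern
sketched below; the counter and the sketch_bump() function are
illustrative. (Both local_irq_restore() implementations reference a
variable literally named 'flags' rather than the macro argument, so
callers effectively have to use that name, as the sketch does.)

    /* Illustrative only: a short section that must not be interrupted. */
    static unsigned long sketch_counter;

    static void sketch_bump(void)
    {
        unsigned long flags;

        local_irq_save(flags);     /* stash DAIF/CPSR state, mask IRQs   */
        sketch_counter++;          /* read-modify-write, so keep it safe
                                    * from interrupt handlers            */
        local_irq_restore(flags);  /* put the old mask state back        */
    }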

* [PATCH V2 17/46] xen: arm64: div64
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (15 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 16/46] xen: arm64: interrupt/abort mask/unmask Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 18/46] xen: arm64: start of day changes to setup.c Ian Campbell
                   ` (30 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
 xen/include/asm-arm/div64.h |   17 ++++++++++++++++-
 1 files changed, 16 insertions(+), 1 deletions(-)

diff --git a/xen/include/asm-arm/div64.h b/xen/include/asm-arm/div64.h
index 7b00808..d5bdc76 100644
--- a/xen/include/asm-arm/div64.h
+++ b/xen/include/asm-arm/div64.h
@@ -21,6 +21,19 @@
  * calling convention for arguments and results (beware).
  */
 
+
+#if BITS_PER_LONG == 64
+
+# define do_div(n,base) ({                                      \
+        uint32_t __base = (base);                               \
+        uint32_t __rem;                                         \
+        __rem = ((uint64_t)(n)) % __base;                       \
+        (n) = ((uint64_t)(n)) / __base;                         \
+        __rem;                                                  \
+ })
+
+#elif BITS_PER_LONG == 32
+
 #ifdef __ARMEB__
 #define __xh "r0"
 #define __xl "r1"
@@ -222,7 +235,9 @@
 	__nr;								\
 })
 
-#endif
+#endif /* GCC version */
+
+#endif /* BITS_PER_LONG */
 
 #endif
 /*
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread
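
For reference, do_div() keeps its slightly unusual calling convention on
64-bit: the dividend is updated in place with the quotient and the macro's
value is the remainder. A tiny sketch with made-up numbers:

    /* Illustrative only: split a nanosecond count into whole seconds
     * plus a remainder. */
    static void sketch_split_ns(void)
    {
        uint64_t ns = 2500000123ULL;
        uint32_t rem;

        rem = do_div(ns, 1000000000);
        /* now ns == 2 (the quotient) and rem == 500000123 */
    }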

* [PATCH V2 18/46] xen: arm64: start of day changes to setup.c
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (16 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 17/46] xen: arm64: div64 Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 19/46] xen: arm64: changes to setup_pagetables and mm.c Ian Campbell
                   ` (29 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
v2: s/CSSELR_EL1/CCSIDR_EL1
---
 xen/arch/arm/setup.c         |   54 ++++++++++++++++++++++++++++--------------
 xen/include/asm-arm/cpregs.h |   25 +++++++++++++++++++
 2 files changed, 61 insertions(+), 18 deletions(-)

diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index 4e50b2b..c1f06c9 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -56,16 +56,34 @@ static void __init init_idle_domain(void)
 
 static void __init processor_id(void)
 {
-    printk("Processor Features: %08x %08x\n",
-           READ_CP32(ID_PFR0), READ_CP32(ID_PFR0));
-    printk("Debug Features: %08x\n", READ_CP32(ID_DFR0));
-    printk("Auxiliary Features: %08x\n", READ_CP32(ID_AFR0));
-    printk("Memory Model Features: %08x %08x %08x %08x\n",
-           READ_CP32(ID_MMFR0), READ_CP32(ID_MMFR1),
-           READ_CP32(ID_MMFR2), READ_CP32(ID_MMFR3));
-    printk("ISA Features: %08x %08x %08x %08x %08x %08x\n",
-           READ_CP32(ID_ISAR0), READ_CP32(ID_ISAR1), READ_CP32(ID_ISAR2),
-           READ_CP32(ID_ISAR3), READ_CP32(ID_ISAR4), READ_CP32(ID_ISAR5));
+#if defined(CONFIG_ARM_64)
+    printk("64-bit Processor Features: %016"PRIx64" %016"PRIx64"\n",
+           READ_SYSREG64(ID_AA64PFR0_EL1), READ_SYSREG64(ID_AA64PFR1_EL1));
+    printk("64-bit Debug Features: %016"PRIx64" %016"PRIx64"\n",
+           READ_SYSREG64(ID_AA64DFR0_EL1), READ_SYSREG64(ID_AA64DFR1_EL1));
+    printk("64-bit Auxiliary Features: %016"PRIx64" %016"PRIx64"\n",
+           READ_SYSREG64(ID_AA64AFR0_EL1), READ_SYSREG64(ID_AA64AFR1_EL1));
+    printk("64-bit Memory Model Features: %016"PRIx64" %016"PRIx64"\n",
+           READ_SYSREG64(ID_AA64MMFR0_EL1), READ_SYSREG64(ID_AA64MMFR1_EL1));
+    printk("64-bit ISA Features:  %016"PRIx64" %016"PRIx64"\n",
+           READ_SYSREG64(ID_AA64ISAR0_EL1), READ_SYSREG64(ID_AA64ISAR1_EL1));
+#endif
+    /*
+     * On AArch64 these refer to the capabilities when running in
+     * AArch32 mode.
+     */
+    printk("32-bit Processor Features: %08x %08x\n",
+           READ_SYSREG32(ID_PFR0_EL1), READ_SYSREG32(ID_PFR1_EL1));
+    printk("32-bit Debug Features: %08x\n", READ_SYSREG32(ID_DFR0_EL1));
+    printk("32-bit Auxiliary Features: %08x\n", READ_SYSREG32(ID_AFR0_EL1));
+    printk("32-bit Memory Model Features: %08x %08x %08x %08x\n",
+           READ_SYSREG32(ID_MMFR0_EL1), READ_SYSREG32(ID_MMFR1_EL1),
+           READ_SYSREG32(ID_MMFR2_EL1), READ_SYSREG32(ID_MMFR3_EL1));
+    printk("32-bit ISA Features: %08x %08x %08x %08x %08x %08x\n",
+           READ_SYSREG32(ID_ISAR0_EL1), READ_SYSREG32(ID_ISAR1_EL1),
+           READ_SYSREG32(ID_ISAR2_EL1), READ_SYSREG32(ID_ISAR3_EL1),
+           READ_SYSREG32(ID_ISAR4_EL1), READ_SYSREG32(ID_ISAR5_EL1));
+
 }
 
 void __init discard_initial_modules(void)
@@ -250,7 +268,8 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
 
     domheap_pages = heap_pages - xenheap_pages;
 
-    printk("Xen heap: %lu pages  Dom heap: %lu pages\n", xenheap_pages, domheap_pages);
+    printk("Xen heap: %lu pages  Dom heap: %lu pages\n",
+           xenheap_pages, domheap_pages);
 
     setup_xenheap_mappings((e >> PAGE_SHIFT) - xenheap_pages, xenheap_pages);
 
@@ -320,8 +339,8 @@ void __init setup_cache(void)
     uint32_t ccsid;
 
     /* Read the cache size ID register for the level-0 data cache */
-    WRITE_CP32(0, CSSELR);
-    ccsid = READ_CP32(CCSIDR);
+    WRITE_SYSREG32(0, CSSELR_EL1);
+    ccsid = READ_SYSREG32(CCSIDR_EL1);
 
     /* Low 3 bits are log2(cacheline size in words) - 2. */
     cacheline_bytes = 1U << (4 + (ccsid & 0x7));
@@ -368,16 +387,15 @@ void __init start_xen(unsigned long boot_phys_offset,
     setup_mm(fdt_paddr, fdt_size);
 
     /* Setup Hyp vector base */
-    WRITE_CP32((uint32_t) hyp_traps_vector, HVBAR);
-    printk("Set hyp vector base to %"PRIx32" (expected %p)\n",
-           READ_CP32(HVBAR), hyp_traps_vector);
+    WRITE_SYSREG((vaddr_t)hyp_traps_vector, VBAR_EL2);
+    isb();
 
     /* Setup Stage 2 address translation */
     /* SH0=00, ORGN0=IRGN0=01
      * SL0=01 (Level-1)
      * T0SZ=(1)1000 = -8 (40 bit physical addresses)
      */
-    WRITE_CP32(0x80002558, VTCR); isb();
+    WRITE_SYSREG32(0x80002558, VTCR_EL2); isb();
 
     processor_id();
 
@@ -455,7 +473,7 @@ void __init start_xen(unsigned long boot_phys_offset,
 
     /* Switch on to the dynamically allocated stack for the idle vcpu
      * since the static one we're running on is about to be freed. */
-    memcpy(idle_vcpu[0]->arch.cpu_info, get_cpu_info(), 
+    memcpy(idle_vcpu[0]->arch.cpu_info, get_cpu_info(),
            sizeof(struct cpu_info));
     switch_stack_and_jump(idle_vcpu[0]->arch.cpu_info, init_done);
 }
diff --git a/xen/include/asm-arm/cpregs.h b/xen/include/asm-arm/cpregs.h
index 7eaa50f..559be75 100644
--- a/xen/include/asm-arm/cpregs.h
+++ b/xen/include/asm-arm/cpregs.h
@@ -222,6 +222,31 @@
 
 /* CP15 CR15: Implementation Defined Registers */
 
+/* Aliases of AArch64 names for use in common code when building for AArch32 */
+#ifdef CONFIG_ARM_32
+/* Alphabetically... */
+#define CCSIDR_EL1              CCSIDR
+#define CLIDR_EL1               CLIDR
+#define CSSELR_EL1              CSSELR
+#define ID_AFR0_EL1             ID_AFR0
+#define ID_DFR0_EL1             ID_DFR0
+#define ID_ISAR0_EL1            ID_ISAR0
+#define ID_ISAR1_EL1            ID_ISAR1
+#define ID_ISAR2_EL1            ID_ISAR2
+#define ID_ISAR3_EL1            ID_ISAR3
+#define ID_ISAR4_EL1            ID_ISAR4
+#define ID_ISAR5_EL1            ID_ISAR5
+#define ID_MMFR0_EL1            ID_MMFR0
+#define ID_MMFR1_EL1            ID_MMFR1
+#define ID_MMFR2_EL1            ID_MMFR2
+#define ID_MMFR3_EL1            ID_MMFR3
+#define ID_PFR0_EL1             ID_PFR0
+#define ID_PFR1_EL1             ID_PFR1
+#define VBAR_EL2                HVBAR
+#define VTCR_EL2                VTCR
+
+#endif
+
 #endif
 /*
  * Local variables:
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread
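
The point of the alias block added to cpregs.h is that common code can now
be written once against the AArch64 register names. A minimal sketch: the
sketch_set_vectors() function is illustrative, but the names and accessors
are the ones used in the patch.

    /* Illustrative only: the same source builds for both widths -- on
     * arm32 the #defines turn VBAR_EL2 into HVBAR and ID_DFR0_EL1 into
     * ID_DFR0. */
    static void sketch_set_vectors(void *vectors)
    {
        WRITE_SYSREG((vaddr_t)vectors, VBAR_EL2);
        isb();

        printk("32-bit Debug Features: %08x\n", READ_SYSREG32(ID_DFR0_EL1));
    }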

* [PATCH V2 19/46] xen: arm64: changes to setup_pagetables and mm.c
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (17 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 18/46] xen: arm64: start of day changes to setup.c Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-21 15:04   ` Tim Deegan
  2013-02-14 16:47 ` [PATCH V2 20/46] xen: arm64: add to foreign struct checks Ian Campbell
                   ` (28 subsequent siblings)
  47 siblings, 1 reply; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
v2: Make *_table_offset return an unsigned int and adjust callers where
    necessary.
    Print "TTBR" instead of "TTBR0_EL2" when it is obvious from the ctxt.
---
 xen/arch/arm/arm32/head.S    |    2 +-
 xen/arch/arm/mm.c            |   46 +++++++++++++++++++++++------------------
 xen/include/asm-arm/cpregs.h |    2 +
 xen/include/asm-arm/page.h   |   10 +++++---
 4 files changed, 35 insertions(+), 25 deletions(-)

diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
index 5ec46c3..db3baa0 100644
--- a/xen/arch/arm/arm32/head.S
+++ b/xen/arch/arm/arm32/head.S
@@ -292,7 +292,7 @@ paging:
 
         /* Non-boot CPUs need to move on to the relocated pagetables */
         mov   r0, #0
-        ldr   r4, =boot_httbr        /* VA of HTTBR value stashed by CPU 0 */
+        ldr   r4, =boot_ttbr         /* VA of HTTBR value stashed by CPU 0 */
         add   r4, r4, r10            /* PA of it */
         ldrd  r4, r5, [r4]           /* Actual value */
         dsb
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index bcc109d..fa57efe 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -40,13 +40,17 @@
 struct domain *dom_xen, *dom_io, *dom_cow;
 
 /* Static start-of-day pagetables that we use before the allocators are up */
+/* xen_pgtable == root of the trie (zeroeth level on 64-bit, first on 32-bit) */
 lpae_t xen_pgtable[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
+#ifdef CONFIG_ARM_64
+lpae_t xen_first[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
+#endif
 lpae_t xen_second[LPAE_ENTRIES*4] __attribute__((__aligned__(4096*4)));
 lpae_t xen_fixmap[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
 static lpae_t xen_xenmap[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
 
 /* Non-boot CPUs use this to find the correct pagetables. */
-uint64_t boot_httbr;
+uint64_t boot_ttbr;
 
 static paddr_t phys_offset;
 
@@ -70,24 +74,21 @@ void dump_pt_walk(lpae_t *first, paddr_t addr)
     if ( first_table_offset(addr) >= LPAE_ENTRIES )
         return;
 
-    printk("1ST[0x%llx] = 0x%"PRIpaddr"\n",
-           first_table_offset(addr),
+    printk("1ST[0x%x] = 0x%"PRIpaddr"\n", first_table_offset(addr),
            first[first_table_offset(addr)].bits);
     if ( !first[first_table_offset(addr)].walk.valid ||
          !first[first_table_offset(addr)].walk.table )
         goto done;
 
     second = map_domain_page(first[first_table_offset(addr)].walk.base);
-    printk("2ND[0x%llx] = 0x%"PRIpaddr"\n",
-           second_table_offset(addr),
+    printk("2ND[0x%x] = 0x%"PRIpaddr"\n", second_table_offset(addr),
            second[second_table_offset(addr)].bits);
     if ( !second[second_table_offset(addr)].walk.valid ||
          !second[second_table_offset(addr)].walk.table )
         goto done;
 
     third = map_domain_page(second[second_table_offset(addr)].walk.base);
-    printk("3RD[0x%llx] = 0x%"PRIpaddr"\n",
-           third_table_offset(addr),
+    printk("3RD[0x%x] = 0x%"PRIpaddr"\n", third_table_offset(addr),
            third[third_table_offset(addr)].bits);
 
 done:
@@ -96,14 +97,14 @@ done:
 
 }
 
-void dump_hyp_walk(uint32_t addr)
+void dump_hyp_walk(vaddr_t addr)
 {
-    uint64_t httbr = READ_CP64(HTTBR);
+    uint64_t ttbr = READ_SYSREG64(TTBR0_EL2);
 
-    printk("Walking Hypervisor VA 0x%08"PRIx32" via HTTBR 0x%016"PRIx64"\n",
-           addr, httbr);
+    printk("Walking Hypervisor VA 0x%"PRIvaddr" via TTBR 0x%016"PRIx64"\n",
+           addr, ttbr);
 
-    BUG_ON( (lpae_t *)(unsigned long)(httbr - phys_offset) != xen_pgtable );
+    BUG_ON( (lpae_t *)(unsigned long)(ttbr - phys_offset) != xen_pgtable );
     dump_pt_walk(xen_pgtable, addr);
 }
 
@@ -132,7 +133,7 @@ void *map_domain_page(unsigned long mfn)
     unsigned long flags;
     lpae_t *map = xen_second + second_linear_offset(DOMHEAP_VIRT_START);
     unsigned long slot_mfn = mfn & ~LPAE_ENTRY_MASK;
-    uint32_t va;
+    vaddr_t va;
     lpae_t pte;
     int i, slot;
 
@@ -272,26 +273,31 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
 
     /* Update the copy of xen_pgtable to use the new paddrs */
     p = (void *) xen_pgtable + dest_va - (unsigned long) _start;
+#ifdef CONFIG_ARM_64
+    p[0].pt.base += (phys_offset - boot_phys_offset) >> PAGE_SHIFT;
+    p = (void *) xen_first + dest_va - (unsigned long) _start;
+#endif
     for ( i = 0; i < 4; i++)
         p[i].pt.base += (phys_offset - boot_phys_offset) >> PAGE_SHIFT;
+
     p = (void *) xen_second + dest_va - (unsigned long) _start;
     if ( boot_phys_offset != 0 )
     {
         /* Remove the old identity mapping of the boot paddr */
-        unsigned long va = (unsigned long)_start + boot_phys_offset;
+        vaddr_t va = (vaddr_t)_start + boot_phys_offset;
         p[second_linear_offset(va)].bits = 0;
     }
     for ( i = 0; i < 4 * LPAE_ENTRIES; i++)
         if ( p[i].pt.valid )
-                p[i].pt.base += (phys_offset - boot_phys_offset) >> PAGE_SHIFT;
+            p[i].pt.base += (phys_offset - boot_phys_offset) >> PAGE_SHIFT;
 
     /* Change pagetables to the copy in the relocated Xen */
-    boot_httbr = (unsigned long) xen_pgtable + phys_offset;
-    flush_xen_dcache(boot_httbr);
+    boot_ttbr = (uintptr_t) xen_pgtable + phys_offset;
+    flush_xen_dcache(boot_ttbr);
     flush_xen_dcache_va_range((void*)dest_va, _end - _start);
     flush_xen_text_tlb();
 
-    WRITE_CP64(boot_httbr, HTTBR); /* Change translation base */
+    WRITE_SYSREG64(boot_ttbr, TTBR0_EL2);
     dsb();                         /* Ensure visibility of HTTBR update */
     flush_xen_text_tlb();
 
@@ -336,7 +342,7 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
     /* TLBFLUSH and ISB would be needed here, but wait until we set WXN */
 
     /* From now on, no mapping may be both writable and executable. */
-    WRITE_CP32(READ_CP32(HSCTLR) | SCTLR_WXN, HSCTLR);
+    WRITE_SYSREG32(READ_SYSREG32(SCTLR_EL2) | SCTLR_WXN, SCTLR_EL2);
     /* Flush everything after setting WXN bit. */
     flush_xen_text_tlb();
 }
@@ -345,7 +351,7 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
 void __cpuinit mmu_init_secondary_cpu(void)
 {
     /* From now on, no mapping may be both writable and executable. */
-    WRITE_CP32(READ_CP32(HSCTLR) | SCTLR_WXN, HSCTLR);
+    WRITE_SYSREG32(READ_SYSREG32(SCTLR_EL2) | SCTLR_WXN, SCTLR_EL2);
     flush_xen_text_tlb();
 }
 
diff --git a/xen/include/asm-arm/cpregs.h b/xen/include/asm-arm/cpregs.h
index 559be75..36da12e 100644
--- a/xen/include/asm-arm/cpregs.h
+++ b/xen/include/asm-arm/cpregs.h
@@ -242,6 +242,8 @@
 #define ID_MMFR3_EL1            ID_MMFR3
 #define ID_PFR0_EL1             ID_PFR0
 #define ID_PFR1_EL1             ID_PFR1
+#define SCTLR_EL2               HSCTLR
+#define TTBR0_EL2               HTTBR
 #define VBAR_EL2                HVBAR
 #define VTCR_EL2                VTCR
 
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index ad52567..11b5930 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -274,7 +274,7 @@ static inline void flush_xen_dcache_va_range(void *p, unsigned long size)
 void dump_pt_walk(lpae_t *table, paddr_t addr);
 
 /* Print a walk of the hypervisor's page tables for a virtual addr. */
-extern void dump_hyp_walk(uint32_t addr);
+extern void dump_hyp_walk(vaddr_t addr);
 /* Print a walk of the p2m for a domain for a physical address. */
 extern void dump_p2m_lookup(struct domain *d, paddr_t addr);
 
@@ -326,9 +326,11 @@ static inline int gva_to_ipa(vaddr_t va, paddr_t *paddr)
 #define first_linear_offset(va) (va >> FIRST_SHIFT)
 #define second_linear_offset(va) (va >> SECOND_SHIFT)
 #define third_linear_offset(va) (va >> THIRD_SHIFT)
-#define first_table_offset(va) (first_linear_offset(va))
-#define second_table_offset(va) (second_linear_offset(va) & LPAE_ENTRY_MASK)
-#define third_table_offset(va) (third_linear_offset(va) & LPAE_ENTRY_MASK)
+
+#define TABLE_OFFSET(offs) ((unsigned int)(offs) & LPAE_ENTRY_MASK)
+#define first_table_offset(va)  TABLE_OFFSET(first_linear_offset(va))
+#define second_table_offset(va) TABLE_OFFSET(second_linear_offset(va))
+#define third_table_offset(va)  TABLE_OFFSET(third_linear_offset(va))
 
 #define clear_page(page)memset((void *)(page), 0, PAGE_SIZE)
 
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 20/46] xen: arm64: add to foreign struct checks
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (18 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 19/46] xen: arm64: changes to setup_pagetables and mm.c Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-21 16:33   ` Stefano Stabellini
  2013-02-14 16:47 ` [PATCH V2 21/46] xen: arm: extend HSR struct definitions to 64-bit Ian Campbell
                   ` (27 subsequent siblings)
  47 siblings, 1 reply; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
 .gitignore                               |    1 +
 tools/include/xen-foreign/Makefile       |    5 ++++-
 tools/include/xen-foreign/mkheader.py    |   19 +++++++++++++++++++
 tools/include/xen-foreign/reference.size |   20 ++++++++++----------
 tools/include/xen-foreign/structs.py     |    1 +
 5 files changed, 35 insertions(+), 11 deletions(-)

diff --git a/.gitignore b/.gitignore
index 73c5b77..2242344 100644
--- a/.gitignore
+++ b/.gitignore
@@ -364,6 +364,7 @@ tools/include/xen-foreign/structs.pyc
 tools/include/xen-foreign/x86_32.h
 tools/include/xen-foreign/x86_64.h
 tools/include/xen-foreign/arm32.h
+tools/include/xen-foreign/arm64.h
 
 .git
 tools/misc/xen-hptool
diff --git a/tools/include/xen-foreign/Makefile b/tools/include/xen-foreign/Makefile
index 53cc6b4..06b844c 100644
--- a/tools/include/xen-foreign/Makefile
+++ b/tools/include/xen-foreign/Makefile
@@ -3,7 +3,7 @@ include $(XEN_ROOT)/tools/Rules.mk
 
 ROOT = $(XEN_ROOT)/xen/include/public
 
-architectures := arm32 x86_32 x86_64
+architectures := arm32 arm64 x86_32 x86_64
 headers := $(patsubst %, %.h, $(architectures))
 
 .PHONY: all clean check-headers
@@ -25,6 +25,9 @@ check-headers: checker
 arm32.h: mkheader.py structs.py $(ROOT)/arch-arm.h $(ROOT)/xen.h
 	$(PYTHON) $< $* $@ $(filter %.h,$^)
 
+arm64.h: mkheader.py structs.py $(ROOT)/arch-arm.h $(ROOT)/xen.h
+	$(PYTHON) $< $* $@ $(filter %.h,$^)
+
 x86_32.h: mkheader.py structs.py $(ROOT)/arch-x86/xen-x86_32.h $(ROOT)/arch-x86/xen.h $(ROOT)/xen.h
 	$(PYTHON) $< $* $@ $(filter %.h,$^)
 
diff --git a/tools/include/xen-foreign/mkheader.py b/tools/include/xen-foreign/mkheader.py
index b7c34b1..4858687 100644
--- a/tools/include/xen-foreign/mkheader.py
+++ b/tools/include/xen-foreign/mkheader.py
@@ -26,6 +26,22 @@ inttypes["arm32"] = {
 header["arm32"] = """
 #define __arm___ARM32 1
 """;
+footer["arm32"] = """
+#undef __DECL_REG
+"""
+
+inttypes["arm64"] = {
+    "unsigned long" : "__danger_unsigned_long_on_arm64",
+    "long"          : "__danger_long_on_arm64",
+    "xen_pfn_t"     : "uint64_t",
+    "xen_ulong_t"   : "uint64_t",
+};
+header["arm64"] = """
+#define __aarch64___ARM64 1
+""";
+footer["arm64"] = """
+#undef __DECL_REG
+"""
 
 # x86_32
 inttypes["x86_32"] = {
@@ -59,6 +75,9 @@ header["x86_64"] = """
 #endif
 #define __x86_64___X86_64 1
 """;
+footer["x86_64"] = """
+#undef __DECL_REG
+"""
 
 ###########################################################################
 # main
diff --git a/tools/include/xen-foreign/reference.size b/tools/include/xen-foreign/reference.size
index 0e5529d..7659c64 100644
--- a/tools/include/xen-foreign/reference.size
+++ b/tools/include/xen-foreign/reference.size
@@ -1,13 +1,13 @@
 
-structs                   |   arm32  x86_32  x86_64
+structs                   |   arm32   arm64  x86_32  x86_64
 
-start_info                |       -    1112    1168
-trap_info                 |       -       8      16
-cpu_user_regs             |     160      68     200
-vcpu_guest_context        |     180    2800    5168
-arch_vcpu_info            |       0      24      16
-vcpu_time_info            |      32      32      32
-vcpu_info                 |      48      64      64
-arch_shared_info          |       0     268     280
-shared_info               |    1088    2584    3368
+start_info                |       -       -    1112    1168
+trap_info                 |       -       -       8      16
+cpu_user_regs             |     160     160      68     200
+vcpu_guest_context        |     180     180    2800    5168
+arch_vcpu_info            |       0       0      24      16
+vcpu_time_info            |      32      32      32      32
+vcpu_info                 |      48      48      64      64
+arch_shared_info          |       0       0     268     280
+shared_info               |    1088    1088    2584    3368
 
diff --git a/tools/include/xen-foreign/structs.py b/tools/include/xen-foreign/structs.py
index 51a77c0..5aec2c5 100644
--- a/tools/include/xen-foreign/structs.py
+++ b/tools/include/xen-foreign/structs.py
@@ -14,6 +14,7 @@ structs = [ "start_info",
             "shared_info" ];
 
 defines = [ "__arm__",
+            "__aarch64__",
             "__i386__",
             "__x86_64__",
 
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 21/46] xen: arm: extend HSR struct definitions to 64-bit
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (19 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 20/46] xen: arm64: add to foreign struct checks Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 22/46] xen: arm: use vaddr_t more widely Ian Campbell
                   ` (26 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

The main change is that the 4-bit register specifiers are extended
to 5 bits by taking in an adjacent SBZP bit.

The 64-bit syndrome also has two additional fields indicating whether
the target register was 64-bit (x<n>) or 32-bit (w<n>) and whether the
instruction had acquire/release semantics.
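
A rough sketch (not part of the patch) of how a handler might consume the
widened syndrome follows; the function name and message format are invented,
the field names (reg, size, sf, ar, valid) are the ones touched here, and it
assumes the data-abort view of union hsr is named dabt as elsewhere in the
tree:

    static void example_decode_dabt(union hsr hsr)
    {
        struct hsr_dabt dabt = hsr.dabt;

        if ( !dabt.valid )
            return;               /* hardware did not decode a syndrome */

        printk("dabt: Rt=%u size=%u", (unsigned int)dabt.reg,
               (unsigned int)dabt.size);
    #ifndef CONFIG_ARM_32
        /* Only the 64-bit layout carries the extra sf/ar bits. */
        printk(" %s target%s", dabt.sf ? "x<n>" : "w<n>",
               dabt.ar ? ", acquire/release" : "");
    #endif
        printk("\n");
    }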

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
 xen/include/asm-arm/processor.h |   20 ++++++++++++--------
 1 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h
index 0768cd4..8183d36 100644
--- a/xen/include/asm-arm/processor.h
+++ b/xen/include/asm-arm/processor.h
@@ -99,11 +99,11 @@ union hsr {
         unsigned long ec:6;    /* Exception Class */
     };
 
+    /* reg, reg0, reg1 are 4 bits on AArch32, the fifth bit is sbzp. */
     struct hsr_cp32 {
         unsigned long read:1;  /* Direction */
         unsigned long crm:4;   /* CRm */
-        unsigned long reg:4;   /* Rt */
-        unsigned long sbzp:1;
+        unsigned long reg:5;   /* Rt */
         unsigned long crn:4;   /* CRn */
         unsigned long op1:3;   /* Op1 */
         unsigned long op2:3;   /* Op2 */
@@ -116,10 +116,9 @@ union hsr {
     struct hsr_cp64 {
         unsigned long read:1;   /* Direction */
         unsigned long crm:4;    /* CRm */
-        unsigned long reg1:4;   /* Rt1 */
-        unsigned long sbzp1:1;
-        unsigned long reg2:4;   /* Rt2 */
-        unsigned long sbzp2:2;
+        unsigned long reg1:5;   /* Rt1 */
+        unsigned long reg2:5;   /* Rt2 */
+        unsigned long sbzp2:1;
         unsigned long op1:4;   /* Op1 */
         unsigned long cc:4;    /* Condition Code */
         unsigned long ccvalid:1;/* CC Valid */
@@ -133,9 +132,14 @@ union hsr {
         unsigned long s1ptw:1; /* */
         unsigned long cache:1; /* Cache Maintenance */
         unsigned long eat:1;   /* External Abort Type */
+#ifdef CONFIG_ARM_32
         unsigned long sbzp0:6;
-        unsigned long reg:4;   /* Register */
-        unsigned long sbzp1:1;
+#else
+        unsigned long sbzp0:4;
+        unsigned long ar:1;    /* Acquire Release */
+        unsigned long sf:1;    /* Sixty Four bit register */
+#endif
+        unsigned long reg:5;   /* Register */
         unsigned long sign:1;  /* Sign extend */
         unsigned long size:2;  /* Access Size */
         unsigned long valid:1; /* Syndrome Valid */
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 22/46] xen: arm: use vaddr_t more widely.
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (20 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 21/46] xen: arm: extend HSR struct definitions to 64-bit Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 23/46] xen: arm: add register_t type, native register size for the hypervisor Ian Campbell
                   ` (25 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
 xen/arch/arm/guestcopy.c |   16 +++++++++-------
 xen/include/asm-arm/mm.h |    6 +++---
 2 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/xen/arch/arm/guestcopy.c b/xen/arch/arm/guestcopy.c
index 5504e19..de1a216 100644
--- a/xen/arch/arm/guestcopy.c
+++ b/xen/arch/arm/guestcopy.c
@@ -8,7 +8,7 @@
 unsigned long raw_copy_to_guest(void *to, const void *from, unsigned len)
 {
     /* XXX needs to handle faults */
-    unsigned offset = ((unsigned long)to & ~PAGE_MASK);
+    unsigned offset = (vaddr_t)to & ~PAGE_MASK;
 
     while ( len )
     {
@@ -17,7 +17,7 @@ unsigned long raw_copy_to_guest(void *to, const void *from, unsigned len)
         void *p;
         unsigned size = min(len, (unsigned)PAGE_SIZE - offset);
 
-        rc = gvirt_to_maddr((uint32_t) to, &g);
+        rc = gvirt_to_maddr((vaddr_t) to, &g);
         if ( rc )
             return rc;
 
@@ -38,7 +38,7 @@ unsigned long raw_copy_to_guest(void *to, const void *from, unsigned len)
 unsigned long raw_clear_guest(void *to, unsigned len)
 {
     /* XXX needs to handle faults */
-    unsigned offset = ((unsigned long)to & ~PAGE_MASK);
+    unsigned offset = (vaddr_t)to & ~PAGE_MASK;
 
     while ( len )
     {
@@ -47,7 +47,7 @@ unsigned long raw_clear_guest(void *to, unsigned len)
         void *p;
         unsigned size = min(len, (unsigned)PAGE_SIZE - offset);
 
-        rc = gvirt_to_maddr((uint32_t) to, &g);
+        rc = gvirt_to_maddr((vaddr_t) to, &g);
         if ( rc )
             return rc;
 
@@ -66,19 +66,21 @@ unsigned long raw_clear_guest(void *to, unsigned len)
 
 unsigned long raw_copy_from_guest(void *to, const void __user *from, unsigned len)
 {
+    unsigned offset = (vaddr_t)from & ~PAGE_MASK;
+
     while ( len )
     {
         int rc;
         paddr_t g;
         void *p;
-        unsigned size = min(len, (unsigned)(PAGE_SIZE - ((unsigned)from & (~PAGE_MASK))));
+        unsigned size = min(len, (unsigned)(PAGE_SIZE - offset));
 
-        rc = gvirt_to_maddr((uint32_t) from & PAGE_MASK, &g);
+        rc = gvirt_to_maddr((vaddr_t) from & PAGE_MASK, &g);
         if ( rc )
             return rc;
 
         p = map_domain_page(g>>PAGE_SHIFT);
-        p += ((unsigned long)from & (~PAGE_MASK));
+        p += ((vaddr_t)from & (~PAGE_MASK));
 
         memcpy(to, p, size);
 
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index f04829d..ff838b3 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -184,8 +184,8 @@ void* early_ioremap(paddr_t start, size_t len, unsigned attributes);
 
 static inline paddr_t virt_to_maddr(const void *va)
 {
-    uint64_t par = va_to_par((uint32_t)va);
-    return (par & PADDR_MASK & PAGE_MASK) | ((unsigned long) va & ~PAGE_MASK);
+    uint64_t par = va_to_par((vaddr_t)va);
+    return (par & PADDR_MASK & PAGE_MASK) | ((vaddr_t) va & ~PAGE_MASK);
 }
 
 static inline void *maddr_to_virt(paddr_t ma)
@@ -195,7 +195,7 @@ static inline void *maddr_to_virt(paddr_t ma)
     return (void *)(unsigned long) ma + XENHEAP_VIRT_START;
 }
 
-static inline int gvirt_to_maddr(uint32_t va, paddr_t *pa)
+static inline int gvirt_to_maddr(vaddr_t va, paddr_t *pa)
 {
     uint64_t par = gva_to_ma_par(va);
     if ( par & PAR_F )
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 23/46] xen: arm: add register_t type, native register size for the hypervisor
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (21 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 22/46] xen: arm: use vaddr_t more widely Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-21 16:01   ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 24/46] xen: arm: separate guest user regs from internal guest state Ian Campbell
                   ` (24 subsequent siblings)
  47 siblings, 1 reply; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
but:
        This is mostly a matter of coding taste, so I'd like Stefano's
        ack/nack here as well.
---
---
 xen/arch/arm/domain_build.c |    2 +-
 xen/arch/arm/smpboot.c      |    2 +-
 xen/arch/arm/traps.c        |   44 ++++++++++++++++++++++--------------------
 xen/arch/arm/vgic.c         |   18 ++++++++--------
 xen/arch/arm/vpl011.c       |    6 ++--
 xen/arch/arm/vtimer.c       |    6 ++--
 xen/include/asm-arm/regs.h  |    2 +-
 xen/include/asm-arm/types.h |    4 +++
 8 files changed, 45 insertions(+), 39 deletions(-)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 7403f1a..30d014a 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -268,7 +268,7 @@ static int prepare_dtb(struct domain *d, struct kernel_info *kinfo)
 
 static void dtb_load(struct kernel_info *kinfo)
 {
-    void * __user dtb_virt = (void *)(u32)kinfo->dtb_paddr;
+    void * __user dtb_virt = (void *)(register_t)kinfo->dtb_paddr;
 
     raw_copy_to_guest(dtb_virt, kinfo->fdt, fdt_totalsize(kinfo->fdt));
     xfree(kinfo->fdt);
diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c
index 86379b7..d8eb5d3 100644
--- a/xen/arch/arm/smpboot.c
+++ b/xen/arch/arm/smpboot.c
@@ -142,7 +142,7 @@ void __cpuinit start_secondary(unsigned long boot_phys_offset,
     set_processor_id(cpuid);
 
     /* Setup Hyp vector base */
-    WRITE_CP32((uint32_t) hyp_traps_vector, HVBAR);
+    WRITE_CP32((register_t) hyp_traps_vector, HVBAR);
 
     mmu_init_secondary_cpu();
     enable_vfp();
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index eaf1f52..0299b33 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -68,7 +68,7 @@ static void print_xen_info(void)
            debug_build() ? 'y' : 'n', print_tainted(taint_str));
 }
 
-uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg)
+register_t *select_user_reg(struct cpu_user_regs *regs, int reg)
 {
     BUG_ON( !guest_mode(regs) );
 
@@ -81,20 +81,20 @@ uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg)
 
     switch ( reg ) {
     case 0 ... 7: /* Unbanked registers */
-        BUILD_BUG_ON(REGOFFS(r0) + 7*sizeof(uint32_t) != REGOFFS(r7));
+        BUILD_BUG_ON(REGOFFS(r0) + 7*sizeof(register_t) != REGOFFS(r7));
         return &regs->r0 + reg;
     case 8 ... 12: /* Register banked in FIQ mode */
-        BUILD_BUG_ON(REGOFFS(r8_fiq) + 4*sizeof(uint32_t) != REGOFFS(r12_fiq));
+        BUILD_BUG_ON(REGOFFS(r8_fiq) + 4*sizeof(register_t) != REGOFFS(r12_fiq));
         if ( fiq_mode(regs) )
             return &regs->r8_fiq + reg - 8;
         else
             return &regs->r8 + reg - 8;
     case 13 ... 14: /* Banked SP + LR registers */
-        BUILD_BUG_ON(REGOFFS(sp_fiq) + 1*sizeof(uint32_t) != REGOFFS(lr_fiq));
-        BUILD_BUG_ON(REGOFFS(sp_irq) + 1*sizeof(uint32_t) != REGOFFS(lr_irq));
-        BUILD_BUG_ON(REGOFFS(sp_svc) + 1*sizeof(uint32_t) != REGOFFS(lr_svc));
-        BUILD_BUG_ON(REGOFFS(sp_abt) + 1*sizeof(uint32_t) != REGOFFS(lr_abt));
-        BUILD_BUG_ON(REGOFFS(sp_und) + 1*sizeof(uint32_t) != REGOFFS(lr_und));
+        BUILD_BUG_ON(REGOFFS(sp_fiq) + 1*sizeof(register_t) != REGOFFS(lr_fiq));
+        BUILD_BUG_ON(REGOFFS(sp_irq) + 1*sizeof(register_t) != REGOFFS(lr_irq));
+        BUILD_BUG_ON(REGOFFS(sp_svc) + 1*sizeof(register_t) != REGOFFS(lr_svc));
+        BUILD_BUG_ON(REGOFFS(sp_abt) + 1*sizeof(register_t) != REGOFFS(lr_abt));
+        BUILD_BUG_ON(REGOFFS(sp_und) + 1*sizeof(register_t) != REGOFFS(lr_und));
         switch ( regs->cpsr & PSR_MODE_MASK )
         {
         case PSR_MODE_USR:
@@ -315,11 +315,11 @@ static void show_guest_stack(struct cpu_user_regs *regs)
     printk("GUEST STACK GOES HERE\n");
 }
 
-#define STACK_BEFORE_EXCEPTION(regs) ((uint32_t*)(regs)->sp)
+#define STACK_BEFORE_EXCEPTION(regs) ((register_t*)(regs)->sp)
 
 static void show_trace(struct cpu_user_regs *regs)
 {
-    uint32_t *frame, next, addr, low, high;
+    register_t *frame, next, addr, low, high;
 
     printk("Xen call trace:\n   ");
 
@@ -327,7 +327,7 @@ static void show_trace(struct cpu_user_regs *regs)
     print_symbol(" %s\n   ", regs->pc);
 
     /* Bounds for range of valid frame pointer. */
-    low  = (uint32_t)(STACK_BEFORE_EXCEPTION(regs)/* - 2*/);
+    low  = (register_t)(STACK_BEFORE_EXCEPTION(regs)/* - 2*/);
     high = (low & ~(STACK_SIZE - 1)) +
         (STACK_SIZE - sizeof(struct cpu_info));
 
@@ -356,7 +356,7 @@ static void show_trace(struct cpu_user_regs *regs)
             break;
         {
             /* Ordinary stack frame. */
-            frame = (uint32_t *)next;
+            frame = (register_t *)next;
             next  = frame[-1];
             addr  = frame[0];
         }
@@ -364,7 +364,7 @@ static void show_trace(struct cpu_user_regs *regs)
         printk("[<%p>]", _p(addr));
         print_symbol(" %s\n   ", addr);
 
-        low = (uint32_t)&frame[1];
+        low = (register_t)&frame[1];
     }
 
     printk("\n");
@@ -372,7 +372,7 @@ static void show_trace(struct cpu_user_regs *regs)
 
 void show_stack(struct cpu_user_regs *regs)
 {
-    uint32_t *stack = STACK_BEFORE_EXCEPTION(regs), addr;
+    register_t *stack = STACK_BEFORE_EXCEPTION(regs), addr;
     int i;
 
     if ( guest_mode(regs) )
@@ -486,20 +486,22 @@ static arm_hypercall_t arm_hypercall_table[] = {
 
 static void do_debug_trap(struct cpu_user_regs *regs, unsigned int code)
 {
-    uint32_t reg, *r;
+    register_t *r;
+    uint32_t reg;
     uint32_t domid = current->domain->domain_id;
     switch ( code ) {
     case 0xe0 ... 0xef:
         reg = code - 0xe0;
         r = select_user_reg(regs, reg);
-        printk("DOM%d: R%d = %#010"PRIx32" at %#010"PRIx32"\n",
+        printk("DOM%d: R%d = 0x%"PRIregister" at 0x%"PRIvaddr"\n",
                domid, reg, *r, regs->pc);
         break;
     case 0xfd:
-        printk("DOM%d: Reached %#010"PRIx32"\n", domid, regs->pc);
+        printk("DOM%d: Reached %"PRIvaddr"\n", domid, regs->pc);
         break;
     case 0xfe:
-        printk("%c", (char)(regs->r0 & 0xff));
+        r = select_user_reg(regs, 0);
+        printk("%c", (char)(*r & 0xff));
         break;
     case 0xff:
         printk("DOM%d: DEBUG\n", domid);
@@ -561,7 +563,7 @@ static void do_cp15_32(struct cpu_user_regs *regs,
                        union hsr hsr)
 {
     struct hsr_cp32 cp32 = hsr.cp32;
-    uint32_t *r = select_user_reg(regs, cp32.reg);
+    uint32_t *r = (uint32_t*)select_user_reg(regs, cp32.reg);
 
     if ( !cp32.ccvalid ) {
         dprintk(XENLOG_ERR, "cp_15(32): need to handle invalid condition codes\n");
@@ -607,7 +609,7 @@ static void do_cp15_32(struct cpu_user_regs *regs,
         BUG_ON(!vtimer_emulate(regs, hsr));
         break;
     default:
-        printk("%s p15, %d, r%d, cr%d, cr%d, %d @ %#08x\n",
+        printk("%s p15, %d, r%d, cr%d, cr%d, %d @ 0x%"PRIregister"\n",
                cp32.read ? "mrc" : "mcr",
                cp32.op1, cp32.reg, cp32.crn, cp32.crm, cp32.op2, regs->pc);
         panic("unhandled 32-bit CP15 access %#x\n", hsr.bits & HSR_CP32_REGS_MASK);
@@ -637,7 +639,7 @@ static void do_cp15_64(struct cpu_user_regs *regs,
         BUG_ON(!vtimer_emulate(regs, hsr));
         break;
     default:
-        printk("%s p15, %d, r%d, r%d, cr%d @ %#08x\n",
+        printk("%s p15, %d, r%d, r%d, cr%d @ 0x%"PRIregister"\n",
                cp64.read ? "mrrc" : "mcrr",
                cp64.op1, cp64.reg1, cp64.reg2, cp64.crm, regs->pc);
         panic("unhandled 64-bit CP15 access %#x\n", hsr.bits & HSR_CP64_REGS_MASK);
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 39b9775..57147d5 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -160,7 +160,7 @@ static int vgic_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
 {
     struct hsr_dabt dabt = info->dabt;
     struct cpu_user_regs *regs = guest_cpu_user_regs();
-    uint32_t *r = select_user_reg(regs, dabt.reg);
+    register_t *r = select_user_reg(regs, dabt.reg);
     struct vgic_irq_rank *rank;
     int offset = (int)(info->gpa - VGIC_DISTR_BASE_ADDRESS);
     int gicd_reg = REG(offset);
@@ -372,7 +372,7 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
 {
     struct hsr_dabt dabt = info->dabt;
     struct cpu_user_regs *regs = guest_cpu_user_regs();
-    uint32_t *r = select_user_reg(regs, dabt.reg);
+    register_t *r = select_user_reg(regs, dabt.reg);
     struct vgic_irq_rank *rank;
     int offset = (int)(info->gpa - VGIC_DISTR_BASE_ADDRESS);
     int gicd_reg = REG(offset);
@@ -421,13 +421,13 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
 
     case GICD_ISPENDR ... GICD_ISPENDRN:
         if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width;
-        printk("vGICD: unhandled %s write %#"PRIx32" to ISPENDR%d\n",
+        printk("vGICD: unhandled %s write %#"PRIregister" to ISPENDR%d\n",
                dabt.size ? "word" : "byte", *r, gicd_reg - GICD_ISPENDR);
         return 0;
 
     case GICD_ICPENDR ... GICD_ICPENDRN:
         if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width;
-        printk("vGICD: unhandled %s write %#"PRIx32" to ICPENDR%d\n",
+        printk("vGICD: unhandled %s write %#"PRIregister" to ICPENDR%d\n",
                dabt.size ? "word" : "byte", *r, gicd_reg - GICD_ICPENDR);
         return 0;
 
@@ -499,19 +499,19 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
 
     case GICD_SGIR:
         if ( dabt.size != 2 ) goto bad_width;
-        printk("vGICD: unhandled write %#"PRIx32" to ICFGR%d\n",
+        printk("vGICD: unhandled write %#"PRIregister" to ICFGR%d\n",
                *r, gicd_reg - GICD_ICFGR);
         return 0;
 
     case GICD_CPENDSGIR ... GICD_CPENDSGIRN:
         if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width;
-        printk("vGICD: unhandled %s write %#"PRIx32" to ICPENDSGIR%d\n",
+        printk("vGICD: unhandled %s write %#"PRIregister" to ICPENDSGIR%d\n",
                dabt.size ? "word" : "byte", *r, gicd_reg - GICD_CPENDSGIR);
         return 0;
 
     case GICD_SPENDSGIR ... GICD_SPENDSGIRN:
         if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width;
-        printk("vGICD: unhandled %s write %#"PRIx32" to ISPENDSGIR%d\n",
+        printk("vGICD: unhandled %s write %#"PRIregister" to ISPENDSGIR%d\n",
                dabt.size ? "word" : "byte", *r, gicd_reg - GICD_SPENDSGIR);
         return 0;
 
@@ -537,13 +537,13 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
         goto write_ignore;
 
     default:
-        printk("vGICD: unhandled write r%d=%"PRIx32" offset %#08x\n",
+        printk("vGICD: unhandled write r%d=%"PRIregister" offset %#08x\n",
                dabt.reg, *r, offset);
         return 0;
     }
 
 bad_width:
-    printk("vGICD: bad write width %d r%d=%"PRIx32" offset %#08x\n",
+    printk("vGICD: bad write width %d r%d=%"PRIregister" offset %#08x\n",
            dabt.size, dabt.reg, *r, offset);
     domain_crash_synchronous();
     return 0;
diff --git a/xen/arch/arm/vpl011.c b/xen/arch/arm/vpl011.c
index 7dcee90..db5094e 100644
--- a/xen/arch/arm/vpl011.c
+++ b/xen/arch/arm/vpl011.c
@@ -92,7 +92,7 @@ static int uart0_mmio_read(struct vcpu *v, mmio_info_t *info)
 {
     struct hsr_dabt dabt = info->dabt;
     struct cpu_user_regs *regs = guest_cpu_user_regs();
-    uint32_t *r = select_user_reg(regs, dabt.reg);
+    register_t *r = select_user_reg(regs, dabt.reg);
     int offset = (int)(info->gpa - UART0_START);
 
     switch ( offset )
@@ -114,7 +114,7 @@ static int uart0_mmio_write(struct vcpu *v, mmio_info_t *info)
 {
     struct hsr_dabt dabt = info->dabt;
     struct cpu_user_regs *regs = guest_cpu_user_regs();
-    uint32_t *r = select_user_reg(regs, dabt.reg);
+    register_t *r = select_user_reg(regs, dabt.reg);
     int offset = (int)(info->gpa - UART0_START);
 
     switch ( offset )
@@ -127,7 +127,7 @@ static int uart0_mmio_write(struct vcpu *v, mmio_info_t *info)
         /* Silently ignore */
         return 1;
     default:
-        printk("VPL011: unhandled write r%d=%"PRIx32" offset %#08x\n",
+        printk("VPL011: unhandled write r%d=%"PRIregister" offset %#08x\n",
                dabt.reg, *r, offset);
         domain_crash_synchronous();
     }
diff --git a/xen/arch/arm/vtimer.c b/xen/arch/arm/vtimer.c
index 85201b5..291b87e 100644
--- a/xen/arch/arm/vtimer.c
+++ b/xen/arch/arm/vtimer.c
@@ -99,7 +99,7 @@ static int vtimer_emulate_32(struct cpu_user_regs *regs, union hsr hsr)
 {
     struct vcpu *v = current;
     struct hsr_cp32 cp32 = hsr.cp32;
-    uint32_t *r = select_user_reg(regs, cp32.reg);
+    uint32_t *r = (uint32_t *)select_user_reg(regs, cp32.reg);
     s_time_t now;
 
     switch ( hsr.bits & HSR_CP32_REGS_MASK )
@@ -151,8 +151,8 @@ static int vtimer_emulate_64(struct cpu_user_regs *regs, union hsr hsr)
 {
     struct vcpu *v = current;
     struct hsr_cp64 cp64 = hsr.cp64;
-    uint32_t *r1 = select_user_reg(regs, cp64.reg1);
-    uint32_t *r2 = select_user_reg(regs, cp64.reg2);
+    uint32_t *r1 = (uint32_t *)select_user_reg(regs, cp64.reg1);
+    uint32_t *r2 = (uint32_t *)select_user_reg(regs, cp64.reg2);
     uint64_t ticks;
     s_time_t now;
 
diff --git a/xen/include/asm-arm/regs.h b/xen/include/asm-arm/regs.h
index 7486944..a723f92 100644
--- a/xen/include/asm-arm/regs.h
+++ b/xen/include/asm-arm/regs.h
@@ -34,7 +34,7 @@
  * Returns a pointer to the given register value in regs, taking the
  * processor mode (CPSR) into account.
  */
-extern uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg);
+extern register_t *select_user_reg(struct cpu_user_regs *regs, int reg);
 
 #endif /* __ARM_REGS_H__ */
 /*
diff --git a/xen/include/asm-arm/types.h b/xen/include/asm-arm/types.h
index d3e16d8..9ca32f1 100644
--- a/xen/include/asm-arm/types.h
+++ b/xen/include/asm-arm/types.h
@@ -41,6 +41,8 @@ typedef u32 vaddr_t;
 typedef u64 paddr_t;
 #define INVALID_PADDR (~0ULL)
 #define PRIpaddr "016llx"
+typedef u32 register_t;
+#define PRIregister "x"
 #elif defined (CONFIG_ARM_64)
 typedef signed long s64;
 typedef unsigned long u64;
@@ -49,6 +51,8 @@ typedef u64 vaddr_t;
 typedef u64 paddr_t;
 #define INVALID_PADDR (~0UL)
 #define PRIpaddr "016lx"
+typedef u64 register_t;
+#define PRIregister "lx"
 #endif
 
 typedef unsigned long size_t;
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 24/46] xen: arm: separate guest user regs from internal guest state.
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (22 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 23/46] xen: arm: add register_t type, native register size for the hypervisor Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 25/46] xen: arm64: add guest type to domain field Ian Campbell
                   ` (23 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

struct cpu_user_regs is currently used as both internal state
(specifically at the base of the stack) and a guest/toolstack
visible API (via struct vcpu_guest_context used by
XEN_DOMCTL_{g,s}etvcpucontext and VCPUOP_initialise).

This causes problems when we want to make the API 64-bit clean since
we don't really want to change the size of the on-stack struct.

So split out vcpu_guest_core_regs as the API-facing struct, keep
cpu_user_regs purely internal, and translate between the two.

In the user API arrange for both 64- and 32-bit registers to be
included in a layout which does not differ depending on toolstack
architecture. Also switch to using the more formal banked register
names (e.g. with the _usr suffix) for clarity.

This is an ABI change. Note that the kernel doesn't currently use
this data structure, so it affects the tools interface only.
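
For illustration only (not from this series), a GNU C toolstack could fill
the new structure for a 32-bit guest roughly as below; the function, values
and include line are invented, while the register names match the public
header added by this patch (the 32-bit _usr/pc32 names are only visible
where __DECL_REG expands to the anonymous union):

    #include <stdint.h>
    #include <string.h>
    #include <xen/arch-arm.h>        /* illustrative include path */

    static void example_init_core_regs(struct vcpu_guest_core_regs *regs,
                                       uint32_t entry)
    {
        memset(regs, 0, sizeof(*regs));

        regs->pc32   = entry;        /* same storage as the 64-bit pc64 */
        regs->r0_usr = 0;            /* SBZ per the Linux boot protocol */
        regs->r1_usr = 0xffffffff;   /* no machine ID when booting via DTB */
        regs->r2_usr = 0xffffffff;   /* no ATAGS/DTB pointer in r2 */
    }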

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
v2: Allow 32-bit to see the 64-bit register names too; this is needed so
    that 32-bit toolstacks can access/control 64-bit guests.
---
 tools/include/xen-foreign/mkheader.py    |   10 +++
 tools/include/xen-foreign/reference.size |    5 +-
 tools/include/xen-foreign/structs.py     |    1 +
 tools/libxc/xc_dom_arm.c                 |   10 ++--
 xen/arch/arm/arm32/Makefile              |    2 +
 xen/arch/arm/arm32/domain.c              |   51 +++++++++++++
 xen/arch/arm/arm64/Makefile              |    2 +
 xen/arch/arm/arm64/domain.c              |   66 +++++++++++++++++
 xen/arch/arm/domain.c                    |    4 +-
 xen/arch/arm/domctl.c                    |    4 +-
 xen/include/asm-arm/arm32/processor.h    |   52 +++++++++++++
 xen/include/asm-arm/arm64/processor.h    |   81 +++++++++++++++++++++
 xen/include/asm-arm/current.h            |    1 +
 xen/include/asm-arm/processor.h          |    5 ++
 xen/include/public/arch-arm.h            |  115 ++++++++++++++++++------------
 15 files changed, 353 insertions(+), 56 deletions(-)
 create mode 100644 xen/arch/arm/arm32/domain.c
 create mode 100644 xen/arch/arm/arm64/domain.c

diff --git a/tools/include/xen-foreign/mkheader.py b/tools/include/xen-foreign/mkheader.py
index 4858687..c57b55b 100644
--- a/tools/include/xen-foreign/mkheader.py
+++ b/tools/include/xen-foreign/mkheader.py
@@ -25,6 +25,11 @@ inttypes["arm32"] = {
 };
 header["arm32"] = """
 #define __arm___ARM32 1
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+# define __DECL_REG(n64, n32) union { uint64_t n64; uint32_t n32; }
+#else
+# define __DECL_REG(n64, n32) uint64_t n64
+#endif
 """;
 footer["arm32"] = """
 #undef __DECL_REG
@@ -38,6 +43,11 @@ inttypes["arm64"] = {
 };
 header["arm64"] = """
 #define __aarch64___ARM64 1
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+# define __DECL_REG(n64, n32) union { uint64_t n64; uint32_t n32; }
+#else
+# define __DECL_REG(n64, n32) uint64_t n64
+#endif
 """;
 footer["arm64"] = """
 #undef __DECL_REG
diff --git a/tools/include/xen-foreign/reference.size b/tools/include/xen-foreign/reference.size
index 7659c64..b3347b4 100644
--- a/tools/include/xen-foreign/reference.size
+++ b/tools/include/xen-foreign/reference.size
@@ -3,8 +3,9 @@ structs                   |   arm32   arm64  x86_32  x86_64
 
 start_info                |       -       -    1112    1168
 trap_info                 |       -       -       8      16
-cpu_user_regs             |     160     160      68     200
-vcpu_guest_context        |     180     180    2800    5168
+cpu_user_regs             |       -       -      68     200
+vcpu_guest_core_regs      |     304     304       -       -
+vcpu_guest_context        |     336     336    2800    5168
 arch_vcpu_info            |       0       0      24      16
 vcpu_time_info            |      32      32      32      32
 vcpu_info                 |      48      48      64      64
diff --git a/tools/include/xen-foreign/structs.py b/tools/include/xen-foreign/structs.py
index 5aec2c5..0b33a77 100644
--- a/tools/include/xen-foreign/structs.py
+++ b/tools/include/xen-foreign/structs.py
@@ -6,6 +6,7 @@ unions  = [ "vcpu_cr_regs",
 structs = [ "start_info",
             "trap_info",
             "cpu_user_regs",
+            "vcpu_guest_core_regs",
             "vcpu_guest_context",
             "arch_vcpu_info",
             "vcpu_time_info",
diff --git a/tools/libxc/xc_dom_arm.c b/tools/libxc/xc_dom_arm.c
index 0cec774..e46cec9 100644
--- a/tools/libxc/xc_dom_arm.c
+++ b/tools/libxc/xc_dom_arm.c
@@ -107,17 +107,17 @@ static int vcpu_arm(struct xc_dom_image *dom, void *ptr)
     /* clear everything */
     memset(ctxt, 0, sizeof(*ctxt));
 
-    ctxt->user_regs.pc = dom->parms.virt_entry;
+    ctxt->user_regs.pc32 = dom->parms.virt_entry;
 
     /* Linux boot protocol. See linux.Documentation/arm/Booting. */
-    ctxt->user_regs.r0 = 0; /* SBZ */
+    ctxt->user_regs.r0_usr = 0; /* SBZ */
     /* Machine ID: We use DTB therefore no machine id */
-    ctxt->user_regs.r1 = 0xffffffff;
+    ctxt->user_regs.r1_usr = 0xffffffff;
     /* ATAGS/DTB: We currently require that the guest kernel to be
      * using CONFIG_ARM_APPENDED_DTB. Ensure that r2 does not look
      * like a valid pointer to a set of ATAGS or a DTB.
      */
-    ctxt->user_regs.r2 = 0xffffffff;
+    ctxt->user_regs.r2_usr = 0xffffffff;
 
     ctxt->sctlr = /* #define SCTLR_BASE */0x00c50078;
 
@@ -130,7 +130,7 @@ static int vcpu_arm(struct xc_dom_image *dom, void *ptr)
     ctxt->flags = VGCF_online;
 
     DOMPRINTF("Initial state CPSR %#"PRIx32" PC %#"PRIx32,
-           ctxt->user_regs.cpsr, ctxt->user_regs.pc);
+           ctxt->user_regs.cpsr, ctxt->user_regs.pc32);
 
     return 0;
 }
diff --git a/xen/arch/arm/arm32/Makefile b/xen/arch/arm/arm32/Makefile
index 20931fa..29898ae 100644
--- a/xen/arch/arm/arm32/Makefile
+++ b/xen/arch/arm/arm32/Makefile
@@ -3,3 +3,5 @@ subdir-y += lib
 obj-y += entry.o
 obj-y += mode_switch.o
 obj-y += proc-ca15.o
+
+obj-y += domain.o
diff --git a/xen/arch/arm/arm32/domain.c b/xen/arch/arm/arm32/domain.c
new file mode 100644
index 0000000..f75a2c6
--- /dev/null
+++ b/xen/arch/arm/arm32/domain.c
@@ -0,0 +1,51 @@
+#include <xen/config.h>
+#include <xen/sched.h>
+
+#include <asm/domain.h>
+#include <asm/processor.h>
+
+#include <public/xen.h>
+
+/* C(hyp,user), hyp is Xen internal name, user is user API name. */
+
+#define ALLREGS \
+    C(r0,r0_usr);   C(r1,r1_usr);   C(r2,r2_usr);   C(r3,r3_usr);   \
+    C(r4,r4_usr);   C(r5,r5_usr);   C(r6,r6_usr);   C(r7,r7_usr);   \
+    C(r8,r8_usr);   C(r9,r9_usr);   C(r10,r10_usr); C(r11,r11_usr); \
+    C(r12,r12_usr); \
+    C(sp_usr,sp_usr); \
+    C(lr,lr_usr); \
+    C(spsr_irq,spsr_irq); C(lr_irq,lr_irq); C(sp_irq,sp_irq); \
+    C(spsr_svc,spsr_svc); C(lr_svc,lr_svc); C(sp_svc,sp_svc); \
+    C(spsr_abt,spsr_abt); C(lr_abt,lr_abt); C(sp_abt,sp_abt); \
+    C(spsr_und,spsr_und); C(lr_und,lr_und); C(sp_und,sp_und); \
+    C(spsr_fiq,spsr_fiq); C(sp_fiq,sp_fiq); C(lr_fiq,lr_fiq); \
+    C(r8_fiq,r8_fiq); C(r9_fiq,r9_fiq); \
+    C(r10_fiq,r10_fiq); C(r11_fiq,r11_fiq); C(r12_fiq,r12_fiq); \
+    C(pc,pc32); \
+    C(cpsr,cpsr)
+
+void vcpu_regs_hyp_to_user(const struct vcpu *vcpu,
+                           struct vcpu_guest_core_regs *regs)
+{
+#define C(hyp,user) regs->user = vcpu->arch.cpu_info->guest_cpu_user_regs.hyp
+    ALLREGS;
+#undef C
+}
+
+void vcpu_regs_user_to_hyp(struct vcpu *vcpu,
+                           const struct vcpu_guest_core_regs *regs)
+{
+#define C(hyp,user) vcpu->arch.cpu_info->guest_cpu_user_regs.hyp = regs->user
+    ALLREGS;
+#undef C
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/arm64/Makefile b/xen/arch/arm/arm64/Makefile
index c447eaa..815f305 100644
--- a/xen/arch/arm/arm64/Makefile
+++ b/xen/arch/arm/arm64/Makefile
@@ -1,3 +1,5 @@
 subdir-y += lib
 
 obj-y += mode_switch.o
+
+obj-y += domain.o
diff --git a/xen/arch/arm/arm64/domain.c b/xen/arch/arm/arm64/domain.c
new file mode 100644
index 0000000..05df29e
--- /dev/null
+++ b/xen/arch/arm/arm64/domain.c
@@ -0,0 +1,66 @@
+#include <xen/config.h>
+#include <xen/sched.h>
+
+#include <asm/domain.h>
+#include <asm/processor.h>
+
+#include <public/xen.h>
+
+/* C(hyp,user), hyp is Xen internal name, user is user API name. */
+
+#define ALLREGS \
+    C(x0,x0);   C(x1,x1);   C(x2,x2);   C(x3,x3);   \
+    C(x4,x4);   C(x5,x5);   C(x6,x6);   C(x7,x7);   \
+    C(x8,x8);   C(x9,x9);   C(x10,x10); C(x11,x11); \
+    C(x12,x12); C(x13,x13); C(x14,x14); C(x15,x15); \
+    C(x16,x16); C(x17,x17); C(x18,x18); C(x19,x19); \
+    C(x20,x20); C(x21,x21); C(x22,x22); C(x23,x23); \
+    C(x24,x24); C(x25,x25); C(x26,x26); C(x27,x27); \
+    C(x28,x28); C(fp,x29);  C(lr,x30);  C(pc,pc64); \
+    C(cpsr, cpsr); C(spsr_el1, spsr_el1)
+
+#define ALLREGS32 C(spsr_fiq, spsr_fiq); C(spsr_irq,spsr_irq); \
+                  C(spsr_und,spsr_und); C(spsr_abt,spsr_abt)
+
+#define ALLREGS64 C(sp_el0,sp_el0); C(sp_el1,sp_el1); C(elr_el1,elr_el1)
+
+void vcpu_regs_hyp_to_user(const struct vcpu *vcpu,
+                           struct vcpu_guest_core_regs *regs)
+{
+#define C(hyp,user) regs->user = vcpu->arch.cpu_info->guest_cpu_user_regs.hyp
+    ALLREGS;
+    if ( is_pv32_domain(vcpu->domain) )
+    {
+        ALLREGS32;
+    }
+    else
+    {
+        ALLREGS64;
+    }
+#undef C
+}
+
+void vcpu_regs_user_to_hyp(struct vcpu *vcpu,
+                           const struct vcpu_guest_core_regs *regs)
+{
+#define C(hyp,user) vcpu->arch.cpu_info->guest_cpu_user_regs.hyp = regs->user
+    ALLREGS;
+    if ( is_pv32_domain(vcpu->domain) )
+    {
+        ALLREGS32;
+    }
+    else
+    {
+        ALLREGS64;
+    }
+#undef C
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index e7d3ec6..3651fb2 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -486,7 +486,7 @@ int arch_set_info_guest(
     struct vcpu *v, vcpu_guest_context_u c)
 {
     struct vcpu_guest_context *ctxt = c.nat;
-    struct cpu_user_regs *regs = &c.nat->user_regs;
+    struct vcpu_guest_core_regs *regs = &c.nat->user_regs;
 
     if ( !is_guest_psr(regs->cpsr) )
         return -EINVAL;
@@ -502,7 +502,7 @@ int arch_set_info_guest(
     if ( regs->spsr_fiq && !is_guest_psr(regs->spsr_fiq) )
         return -EINVAL;
 
-    v->arch.cpu_info->guest_cpu_user_regs = *regs;
+    vcpu_regs_user_to_hyp(v, regs);
 
     v->arch.sctlr = ctxt->sctlr;
     v->arch.ttbr0 = ctxt->ttbr0;
diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c
index c7ffd8a..15f8537 100644
--- a/xen/arch/arm/domctl.c
+++ b/xen/arch/arm/domctl.c
@@ -20,9 +20,9 @@ long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
 void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
 {
     struct vcpu_guest_context *ctxt = c.nat;
-    struct cpu_user_regs *regs = &c.nat->user_regs;
+    struct vcpu_guest_core_regs *regs = &c.nat->user_regs;
 
-    *regs = v->arch.cpu_info->guest_cpu_user_regs;
+    vcpu_regs_hyp_to_user(v, regs);
 
     ctxt->sctlr = v->arch.sctlr;
     ctxt->ttbr0 = v->arch.ttbr0;
diff --git a/xen/include/asm-arm/arm32/processor.h b/xen/include/asm-arm/arm32/processor.h
index 843fbd2..a782d96 100644
--- a/xen/include/asm-arm/arm32/processor.h
+++ b/xen/include/asm-arm/arm32/processor.h
@@ -1,6 +1,58 @@
 #ifndef __ASM_ARM_ARM32_PROCESSOR_H
 #define __ASM_ARM_ARM32_PROCESSOR_H
 
+#ifndef __ASSEMBLY__
+/* On stack VCPU state */
+struct cpu_user_regs
+{
+    uint32_t r0;
+    uint32_t r1;
+    uint32_t r2;
+    uint32_t r3;
+    uint32_t r4;
+    uint32_t r5;
+    uint32_t r6;
+    uint32_t r7;
+    uint32_t r8;
+    uint32_t r9;
+    uint32_t r10;
+    union {
+        uint32_t r11;
+        uint32_t fp;
+    };
+    uint32_t r12;
+
+    uint32_t sp; /* r13 - SP: Valid for Hyp. frames only, o/w banked (see below) */
+
+    /* r14 - LR: is the same physical register as LR_usr */
+    union {
+        uint32_t lr; /* r14 - LR: Valid for Hyp. Same physical register as lr_usr. */
+
+        uint32_t lr_usr;
+    };
+
+    uint32_t pc; /* Return IP */
+    uint32_t cpsr; /* Return mode */
+    uint32_t pad0; /* Doubleword-align the kernel half of the frame */
+
+    /* Outer guest frame only from here on... */
+
+    uint32_t sp_usr; /* LR_usr is the same register as LR, see above */
+
+    uint32_t sp_irq, lr_irq;
+    uint32_t sp_svc, lr_svc;
+    uint32_t sp_abt, lr_abt;
+    uint32_t sp_und, lr_und;
+
+    uint32_t r8_fiq, r9_fiq, r10_fiq, r11_fiq, r12_fiq;
+    uint32_t sp_fiq, lr_fiq;
+
+    uint32_t spsr_svc, spsr_abt, spsr_und, spsr_irq, spsr_fiq;
+
+    uint32_t pad1; /* Doubleword-align the user half of the frame */
+};
+#endif
+
 /* Layout as used in assembly, with src/dest registers mixed in */
 #define __CP32(r, coproc, opc1, crn, crm, opc2) coproc, opc1, r, crn, crm, opc2
 #define __CP64(r1, r2, coproc, opc, crm) coproc, opc, r1, r2, crm
diff --git a/xen/include/asm-arm/arm64/processor.h b/xen/include/asm-arm/arm64/processor.h
index fdb0dab..b4602fa 100644
--- a/xen/include/asm-arm/arm64/processor.h
+++ b/xen/include/asm-arm/arm64/processor.h
@@ -3,6 +3,87 @@
 
 #ifndef __ASSEMBLY__
 
+/* Anonymous union includes both 32- and 64-bit names (e.g., r0/x0). */
+
+#define __DECL_REG(n64, n32) union {            \
+    uint64_t n64;                               \
+    uint32_t n32;                               \
+}
+
+/* On stack VCPU state */
+struct cpu_user_regs
+{
+    /*         Aarch64       Aarch32 */
+    __DECL_REG(x0,           r0/*_usr*/);
+    __DECL_REG(x1,           r1/*_usr*/);
+    __DECL_REG(x2,           r2/*_usr*/);
+    __DECL_REG(x3,           r3/*_usr*/);
+    __DECL_REG(x4,           r4/*_usr*/);
+    __DECL_REG(x5,           r5/*_usr*/);
+    __DECL_REG(x6,           r6/*_usr*/);
+    __DECL_REG(x7,           r7/*_usr*/);
+    __DECL_REG(x8,           r8/*_usr*/);
+    __DECL_REG(x9,           r9/*_usr*/);
+    __DECL_REG(x10,          r10/*_usr*/);
+    __DECL_REG(x11,          r11/*_usr*/);
+    __DECL_REG(x12,          r12/*_usr*/);
+
+    __DECL_REG(x13,          /* r13_usr */ sp_usr);
+    __DECL_REG(x14,          /* r14_usr */ lr_usr);
+
+    __DECL_REG(x15,          /* r13_hyp */ __unused_sp_hyp);
+
+    __DECL_REG(x16,          /* r14_irq */ lr_irq);
+    __DECL_REG(x17,          /* r13_irq */ sp_irq);
+
+    __DECL_REG(x18,          /* r14_svc */ lr_svc);
+    __DECL_REG(x19,          /* r13_svc */ sp_svc);
+
+    __DECL_REG(x20,          /* r14_abt */ lr_abt);
+    __DECL_REG(x21,          /* r13_abt */ sp_abt);
+
+    __DECL_REG(x22,          /* r14_und */ lr_und);
+    __DECL_REG(x23,          /* r13_und */ sp_und);
+
+    __DECL_REG(x24,          r8_fiq);
+    __DECL_REG(x25,          r9_fiq);
+    __DECL_REG(x26,          r10_fiq);
+    __DECL_REG(x27,          r11_fiq);
+    __DECL_REG(x28,          r12_fiq);
+    __DECL_REG(/* x29 */ fp, /* r13_fiq */ sp_fiq);
+    __DECL_REG(/* x30 */ lr, /* r14_fiq */ lr_fiq);
+
+    register_t sp; /* Valid for hypervisor frames */
+
+    /* Return address and mode */
+    __DECL_REG(pc,           pc32);             /* ELR_EL2 */
+    uint32_t cpsr;                              /* SPSR_EL2 */
+
+    uint64_t pad0;
+
+    /* Outer guest frame only from here on... */
+
+    union {
+        uint32_t spsr_el1;       /* AArch64 */
+        uint32_t spsr_svc;       /* AArch32 */
+    };
+
+    uint32_t pad1; /* Align */
+
+    /* AArch32 guests only */
+    uint32_t spsr_fiq, spsr_irq, spsr_und, spsr_abt;
+
+    /* AArch64 guests only */
+    uint64_t sp_el0;
+    uint64_t sp_el1, elr_el1;
+
+    uint64_t pad2; /* Doubleword-align the user half of the frame */
+};
+
+#undef __DECL_REG
+
+/* Access to system registers */
+
 #define READ_SYSREG32(name) ({                          \
     uint32_t _r;                                        \
     asm volatile("mrs  %0, "#name : "=r" (_r));         \
diff --git a/xen/include/asm-arm/current.h b/xen/include/asm-arm/current.h
index d20d7a8..c9c8ac7 100644
--- a/xen/include/asm-arm/current.h
+++ b/xen/include/asm-arm/current.h
@@ -6,6 +6,7 @@
 #include <public/xen.h>
 
 #include <asm/percpu.h>
+#include <asm/processor.h>
 
 #ifndef __ASSEMBLY__
 
diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h
index 8183d36..230c901 100644
--- a/xen/include/asm-arm/processor.h
+++ b/xen/include/asm-arm/processor.h
@@ -253,6 +253,11 @@ void show_registers(struct cpu_user_regs *regs);
 #define cpu_to_core(_cpu)   (0)
 #define cpu_to_socket(_cpu) (0)
 
+void vcpu_regs_hyp_to_user(const struct vcpu *vcpu,
+                           struct vcpu_guest_core_regs *regs);
+void vcpu_regs_user_to_hyp(struct vcpu *vcpu,
+                           const struct vcpu_guest_core_regs *regs);
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_ARM_PROCESSOR_H */
 /*
diff --git a/xen/include/public/arch-arm.h b/xen/include/public/arch-arm.h
index dc12524..91f80d8 100644
--- a/xen/include/public/arch-arm.h
+++ b/xen/include/public/arch-arm.h
@@ -86,55 +86,80 @@
 #endif
 #define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val)
 
-struct cpu_user_regs
-{
-    uint32_t r0;
-    uint32_t r1;
-    uint32_t r2;
-    uint32_t r3;
-    uint32_t r4;
-    uint32_t r5;
-    uint32_t r6;
-    uint32_t r7;
-    uint32_t r8;
-    uint32_t r9;
-    uint32_t r10;
-    union {
-        uint32_t r11;
-        uint32_t fp;
-    };
-    uint32_t r12;
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+/* Anonymous union includes both 32- and 64-bit names (e.g., r0/x0). */
+# define __DECL_REG(n64, n32) union {          \
+        uint64_t n64;                          \
+        uint32_t n32;                          \
+    }
+#else
+/* Non-gcc sources must always use the proper 64-bit name (e.g., x0). */
+#define __DECL_REG(n64, n32) uint64_t n64
+#endif
 
-    uint32_t sp; /* r13 - SP: Valid for Hyp. frames only, o/w banked (see below) */
+struct vcpu_guest_core_regs
+{
+    /*         Aarch64       Aarch32 */
+    __DECL_REG(x0,           r0_usr);
+    __DECL_REG(x1,           r1_usr);
+    __DECL_REG(x2,           r2_usr);
+    __DECL_REG(x3,           r3_usr);
+    __DECL_REG(x4,           r4_usr);
+    __DECL_REG(x5,           r5_usr);
+    __DECL_REG(x6,           r6_usr);
+    __DECL_REG(x7,           r7_usr);
+    __DECL_REG(x8,           r8_usr);
+    __DECL_REG(x9,           r9_usr);
+    __DECL_REG(x10,          r10_usr);
+    __DECL_REG(x11,          r11_usr);
+    __DECL_REG(x12,          r12_usr);
+
+    __DECL_REG(x13,          sp_usr);
+    __DECL_REG(x14,          lr_usr);
+
+    __DECL_REG(x15,          __unused_sp_hyp);
+
+    __DECL_REG(x16,          lr_irq);
+    __DECL_REG(x17,          sp_irq);
+
+    __DECL_REG(x18,          lr_svc);
+    __DECL_REG(x19,          sp_svc);
+
+    __DECL_REG(x20,          lr_abt);
+    __DECL_REG(x21,          sp_abt);
+
+    __DECL_REG(x22,          lr_und);
+    __DECL_REG(x23,          sp_und);
+
+    __DECL_REG(x24,          r8_fiq);
+    __DECL_REG(x25,          r9_fiq);
+    __DECL_REG(x26,          r10_fiq);
+    __DECL_REG(x27,          r11_fiq);
+    __DECL_REG(x28,          r12_fiq);
+
+    __DECL_REG(x29,          sp_fiq);
+    __DECL_REG(x30,          lr_fiq);
+
+    /* Return address and mode */
+    __DECL_REG(pc64,         pc32);             /* ELR_EL2 */
+    uint32_t cpsr;                              /* SPSR_EL2 */
 
-    /* r14 - LR: is the same physical register as LR_usr */
     union {
-        uint32_t lr; /* r14 - LR: Valid for Hyp. Same physical register as lr_usr. */
-        uint32_t lr_usr;
+        uint32_t spsr_el1;       /* AArch64 */
+        uint32_t spsr_svc;       /* AArch32 */
     };
 
-    uint32_t pc; /* Return IP */
-    uint32_t cpsr; /* Return mode */
-    uint32_t pad0; /* Doubleword-align the kernel half of the frame */
-
-    /* Outer guest frame only from here on... */
+    /* AArch32 guests only */
+    uint32_t spsr_fiq, spsr_irq, spsr_und, spsr_abt;
 
-    uint32_t sp_usr; /* LR_usr is the same register as LR, see above */
-
-    uint32_t sp_irq, lr_irq;
-    uint32_t sp_svc, lr_svc;
-    uint32_t sp_abt, lr_abt;
-    uint32_t sp_und, lr_und;
-
-    uint32_t r8_fiq, r9_fiq, r10_fiq, r11_fiq, r12_fiq;
-    uint32_t sp_fiq, lr_fiq;
-
-    uint32_t spsr_svc, spsr_abt, spsr_und, spsr_irq, spsr_fiq;
-
-    uint32_t pad1; /* Doubleword-align the user half of the frame */
+    /* AArch64 guests only */
+    uint64_t sp_el0;
+    uint64_t sp_el1, elr_el1;
 };
-typedef struct cpu_user_regs cpu_user_regs_t;
-DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
+typedef struct vcpu_guest_core_regs vcpu_guest_core_regs_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_guest_core_regs_t);
+
+#undef __DECL_REG
 
 typedef uint64_t xen_pfn_t;
 #define PRI_xen_pfn PRIx64
@@ -151,10 +176,10 @@ struct vcpu_guest_context {
 #define VGCF_online                    (1<<_VGCF_online)
     uint32_t flags;                         /* VGCF_* */
 
-    struct cpu_user_regs user_regs;         /* User-level CPU registers     */
+    struct vcpu_guest_core_regs user_regs;  /* Core CPU registers */
 
-    uint32_t sctlr;
-    uint32_t ttbr0, ttbr1, ttbcr;
+    uint32_t sctlr, ttbcr;
+    uint64_t ttbr0, ttbr1;
 };
 typedef struct vcpu_guest_context vcpu_guest_context_t;
 DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 25/46] xen: arm64: add guest type to domain field.
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (23 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 24/46] xen: arm: separate guest user regs from internal guest state Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-21 15:05   ` Tim Deegan
  2013-02-14 16:47 ` [PATCH V2 26/46] xen: arm: move arm32 specific trap handlers to xen/arch/arm/arm32 Ian Campbell
                   ` (22 subsequent siblings)
  47 siblings, 1 reply; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Currently 32-bit PV is the only option.
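
As a small illustration (not part of the patch), callers can branch on the
new predicates on either architecture, since the 32-bit build defines them
as constants; the helper below is invented for this example:

    static unsigned int example_guest_reg_width(const struct domain *d)
    {
        return is_pv64_domain(d) ? 64 : 32;
    }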

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
v2: Remove nested CONFIG_ARM_64
---
 xen/arch/arm/kernel.c        |    4 ++++
 xen/arch/arm/kernel.h        |    4 ++++
 xen/include/asm-arm/domain.h |   16 ++++++++++++++++
 3 files changed, 24 insertions(+), 0 deletions(-)

diff --git a/xen/arch/arm/kernel.c b/xen/arch/arm/kernel.c
index c08c230..0c7da54 100644
--- a/xen/arch/arm/kernel.c
+++ b/xen/arch/arm/kernel.c
@@ -228,6 +228,10 @@ int kernel_prepare(struct kernel_info *info)
     if (rc < 0)
         rc = kernel_try_elf_prepare(info, start, size);
 
+#ifdef CONFIG_ARM_64
+    info->type = DOMAIN_PV32; /* No 64-bit guest support yet */
+#endif
+
     return rc;
 }
 
diff --git a/xen/arch/arm/kernel.h b/xen/arch/arm/kernel.h
index 49fe9da..7232d34 100644
--- a/xen/arch/arm/kernel.h
+++ b/xen/arch/arm/kernel.h
@@ -10,6 +10,10 @@
 #include <xen/device_tree.h>
 
 struct kernel_info {
+#ifdef CONFIG_ARM_64
+    enum domain_type type;
+#endif
+
     void *fdt; /* flat device tree */
     paddr_t unassigned_mem; /* RAM not (yet) assigned to a bank */
     struct dt_mem_info mem;
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 29fe808..e9370a5 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -35,8 +35,24 @@ struct hvm_domain
     uint64_t              params[HVM_NR_PARAMS];
 }  __cacheline_aligned;
 
+#ifdef CONFIG_ARM_64
+enum domain_type {
+    DOMAIN_PV32,
+    DOMAIN_PV64,
+};
+#define is_pv32_domain(d) ((d)->arch.type == DOMAIN_PV32)
+#define is_pv64_domain(d) ((d)->arch.type == DOMAIN_PV64)
+#else
+#define is_pv32_domain(d) (1)
+#define is_pv64_domain(d) (0)
+#endif
+
 struct arch_domain
 {
+#ifdef CONFIG_ARM_64
+    enum domain_type type;
+#endif
+
     struct p2m_domain p2m;
     struct hvm_domain hvm_domain;
     xen_pfn_t *grant_table_gpfn;
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 26/46] xen: arm: move arm32 specific trap handlers to xen/arch/arm/arm32
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (24 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 25/46] xen: arm64: add guest type to domain field Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 27/46] xen: arm: arm64 trap handling Ian Campbell
                   ` (21 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
 xen/arch/arm/arm32/Makefile     |    3 +-
 xen/arch/arm/arm32/traps.c      |   53 +++++++++++++++++++++++++++++++++++++++
 xen/arch/arm/traps.c            |   22 +---------------
 xen/include/asm-arm/processor.h |    2 +
 4 files changed, 58 insertions(+), 22 deletions(-)
 create mode 100644 xen/arch/arm/arm32/traps.c

diff --git a/xen/arch/arm/arm32/Makefile b/xen/arch/arm/arm32/Makefile
index 29898ae..1ad3364 100644
--- a/xen/arch/arm/arm32/Makefile
+++ b/xen/arch/arm/arm32/Makefile
@@ -4,4 +4,5 @@ obj-y += entry.o
 obj-y += mode_switch.o
 obj-y += proc-ca15.o
 
-obj-y += domain.o
+obj-y += traps.o
+obj-y += domain.o
\ No newline at end of file
diff --git a/xen/arch/arm/arm32/traps.c b/xen/arch/arm/arm32/traps.c
new file mode 100644
index 0000000..a93c2f7
--- /dev/null
+++ b/xen/arch/arm/arm32/traps.c
@@ -0,0 +1,53 @@
+/*
+ * xen/arch/arm/arm32/traps.c
+ *
+ * ARM AArch32 Specific Trap handlers
+ *
+ * Copyright (c) 2012 Citrix Systems.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <xen/config.h>
+#include <xen/lib.h>
+
+#include <public/xen.h>
+
+#include <asm/processor.h>
+
+asmlinkage void do_trap_undefined_instruction(struct cpu_user_regs *regs)
+{
+    do_unexpected_trap("Undefined Instruction", regs);
+}
+
+asmlinkage void do_trap_supervisor_call(struct cpu_user_regs *regs)
+{
+    do_unexpected_trap("Supervisor Call", regs);
+}
+
+asmlinkage void do_trap_prefetch_abort(struct cpu_user_regs *regs)
+{
+    do_unexpected_trap("Prefetch Abort", regs);
+}
+
+asmlinkage void do_trap_data_abort(struct cpu_user_regs *regs)
+{
+    do_unexpected_trap("Data Abort", regs);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 0299b33..cb8a8d2 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -423,33 +423,13 @@ void vcpu_show_execution_state(struct vcpu *v)
     vcpu_unpause(v);
 }
 
-static void do_unexpected_trap(const char *msg, struct cpu_user_regs *regs)
+void do_unexpected_trap(const char *msg, struct cpu_user_regs *regs)
 {
     printk("Unexpected Trap: %s\n", msg);
     show_execution_state(regs);
     while(1);
 }
 
-asmlinkage void do_trap_undefined_instruction(struct cpu_user_regs *regs)
-{
-    do_unexpected_trap("Undefined Instruction", regs);
-}
-
-asmlinkage void do_trap_supervisor_call(struct cpu_user_regs *regs)
-{
-    do_unexpected_trap("Supervisor Call", regs);
-}
-
-asmlinkage void do_trap_prefetch_abort(struct cpu_user_regs *regs)
-{
-    do_unexpected_trap("Prefetch Abort", regs);
-}
-
-asmlinkage void do_trap_data_abort(struct cpu_user_regs *regs)
-{
-    do_unexpected_trap("Data Abort", regs);
-}
-
 unsigned long do_arch_0(unsigned int cmd, unsigned long long value)
 {
         printk("do_arch_0 cmd=%x arg=%llx\n", cmd, value);
diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h
index 230c901..bd473a8 100644
--- a/xen/include/asm-arm/processor.h
+++ b/xen/include/asm-arm/processor.h
@@ -253,6 +253,8 @@ void show_registers(struct cpu_user_regs *regs);
 #define cpu_to_core(_cpu)   (0)
 #define cpu_to_socket(_cpu) (0)
 
+void do_unexpected_trap(const char *msg, struct cpu_user_regs *regs);
+
 void vcpu_regs_hyp_to_user(const struct vcpu *vcpu,
                            struct vcpu_guest_core_regs *regs);
 void vcpu_regs_user_to_hyp(struct vcpu *vcpu,
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 27/46] xen: arm: arm64 trap handling.
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (25 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 26/46] xen: arm: move arm32 specific trap handlers to xen/arch/arm/arm32 Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-21 15:10   ` Tim Deegan
  2013-02-14 16:47 ` [PATCH V2 28/46] xen: arm: pcpu context switch Ian Campbell
                   ` (20 subsequent siblings)
  47 siblings, 1 reply; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
v2: Call leave_hypervisor_tail on exit back to guest, disable interrupts while
    restoring state.
---
 xen/arch/arm/arm64/Makefile      |    2 +
 xen/arch/arm/arm64/asm-offsets.c |   64 ++++++++++
 xen/arch/arm/arm64/entry.S       |  256 ++++++++++++++++++++++++++++++++++++++
 xen/arch/arm/arm64/traps.c       |   56 ++++++++
 xen/arch/arm/io.h                |    2 +-
 xen/arch/arm/setup.c             |    2 +-
 xen/arch/arm/smpboot.c           |    2 +-
 xen/arch/arm/traps.c             |   17 ++-
 xen/include/asm-arm/cpregs.h     |    1 +
 xen/include/asm-arm/processor.h  |    2 +-
 10 files changed, 396 insertions(+), 8 deletions(-)
 create mode 100644 xen/arch/arm/arm64/asm-offsets.c
 create mode 100644 xen/arch/arm/arm64/entry.S
 create mode 100644 xen/arch/arm/arm64/traps.c

diff --git a/xen/arch/arm/arm64/Makefile b/xen/arch/arm/arm64/Makefile
index 815f305..be41f43 100644
--- a/xen/arch/arm/arm64/Makefile
+++ b/xen/arch/arm/arm64/Makefile
@@ -1,5 +1,7 @@
 subdir-y += lib
 
+obj-y += entry.o
 obj-y += mode_switch.o
 
+obj-y += traps.o
 obj-y += domain.o
diff --git a/xen/arch/arm/arm64/asm-offsets.c b/xen/arch/arm/arm64/asm-offsets.c
new file mode 100644
index 0000000..691d6d5
--- /dev/null
+++ b/xen/arch/arm/arm64/asm-offsets.c
@@ -0,0 +1,64 @@
+/*
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed
+ * to extract and format the required data.
+ */
+#define COMPILE_OFFSETS
+
+#include <xen/config.h>
+#include <xen/types.h>
+#include <xen/sched.h>
+#include <public/xen.h>
+#include <asm/current.h>
+
+#define DEFINE(_sym, _val) \
+    __asm__ __volatile__ ( "\n->" #_sym " %0 " #_val : : "i" (_val) )
+#define BLANK() \
+    __asm__ __volatile__ ( "\n->" : : )
+#define OFFSET(_sym, _str, _mem) \
+    DEFINE(_sym, offsetof(_str, _mem));
+
+/* base-2 logarithm */
+#define __L2(_x)  (((_x) & 0x00000002) ?   1 : 0)
+#define __L4(_x)  (((_x) & 0x0000000c) ? ( 2 + __L2( (_x)>> 2)) : __L2( _x))
+#define __L8(_x)  (((_x) & 0x000000f0) ? ( 4 + __L4( (_x)>> 4)) : __L4( _x))
+#define __L16(_x) (((_x) & 0x0000ff00) ? ( 8 + __L8( (_x)>> 8)) : __L8( _x))
+#define LOG_2(_x) (((_x) & 0xffff0000) ? (16 + __L16((_x)>>16)) : __L16(_x))
+
+void __dummy__(void)
+{
+   OFFSET(UREGS_X0, struct cpu_user_regs, x0);
+   OFFSET(UREGS_LR, struct cpu_user_regs, lr);
+
+   OFFSET(UREGS_SP, struct cpu_user_regs, sp);
+   OFFSET(UREGS_PC, struct cpu_user_regs, pc);
+   OFFSET(UREGS_CPSR, struct cpu_user_regs, cpsr);
+
+   OFFSET(UREGS_SPSR_el1, struct cpu_user_regs, spsr_el1);
+
+   OFFSET(UREGS_SPSR_fiq, struct cpu_user_regs, spsr_fiq);
+   OFFSET(UREGS_SPSR_irq, struct cpu_user_regs, spsr_irq);
+   OFFSET(UREGS_SPSR_und, struct cpu_user_regs, spsr_und);
+   OFFSET(UREGS_SPSR_abt, struct cpu_user_regs, spsr_abt);
+
+   OFFSET(UREGS_SP_el0, struct cpu_user_regs, sp_el0);
+   OFFSET(UREGS_SP_el1, struct cpu_user_regs, sp_el1);
+   OFFSET(UREGS_ELR_el1, struct cpu_user_regs, elr_el1);
+
+   OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, cpsr);
+   DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
+   BLANK();
+
+   DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
+
+   OFFSET(VCPU_arch_saved_context, struct vcpu, arch.saved_context);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S
new file mode 100644
index 0000000..1b2c4ad
--- /dev/null
+++ b/xen/arch/arm/arm64/entry.S
@@ -0,0 +1,256 @@
+#include <xen/config.h>
+#include <asm/asm_defns.h>
+#include <public/xen.h>
+
+/*
+ * Register aliases.
+ */
+lr      .req    x30             // link register
+
+/*
+ * Stack pushing/popping (register pairs only). Equivalent to store decrement
+ * before, load increment after.
+ */
+        .macro  push, xreg1, xreg2
+        stp     \xreg1, \xreg2, [sp, #-16]!
+        .endm
+
+        .macro  pop, xreg1, xreg2
+        ldp     \xreg1, \xreg2, [sp], #16
+        .endm
+
+/*
+ * Save/restore guest mode specific state, outer stack frame
+ */
+        .macro  entry_guest, compat
+
+        add     x21, sp, #UREGS_SPSR_el1
+        mrs     x23, SPSR_EL1
+        str     x23, [x21]
+
+        .if \compat == 0 /* Aarch64 mode */
+
+        add     x21, sp, #UREGS_SP_el0
+        mrs     x22, SP_el0
+        str     x22, [x21]
+
+        add     x21, sp, #UREGS_ELR_el1
+        mrs     x22, SP_el1
+        mrs     x23, ELR_el1
+        stp     x22, x23, [x21]
+
+        .else             /* Aarch32 mode */
+
+        add     x21, sp, #UREGS_SPSR_fiq
+        mrs     x22, spsr_fiq
+        mrs     x23, spsr_irq
+        stp     w22, w23, [x21]
+
+        add     x21, sp, #UREGS_SPSR_und
+        mrs     x22, spsr_und
+        mrs     x23, spsr_abt
+        stp     w22, w23, [x21]
+
+        .endif
+
+        .endm
+
+/*
+ * Save state on entry to hypervisor
+ */
+        .macro  entry, hyp, compat
+        sub     sp, sp, #(UREGS_SPSR_el1 - UREGS_SP)
+        push    x28, x29
+        push    x26, x27
+        push    x24, x25
+        push    x22, x23
+        push    x20, x21
+        push    x18, x19
+        push    x16, x17
+        push    x14, x15
+        push    x12, x13
+        push    x10, x11
+        push    x8, x9
+        push    x6, x7
+        push    x4, x5
+        push    x2, x3
+        push    x0, x1
+
+        .if \hyp == 1        /* Hypervisor mode */
+
+        add     x21, sp, #(UREGS_X0 - UREGS_SP)
+
+        .else                /* Guest mode */
+
+        entry_guest \compat
+        mov     x21, ~0 /* sp only valid for hyp frame XXX */
+
+        .endif
+
+        stp     lr, x21, [sp, #UREGS_LR]
+
+        mrs     x22, elr_el2
+        mrs     x23, spsr_el2
+        stp     x22, x23, [sp, #UREGS_PC]
+
+        .endm
+
+/*
+ * Bad Abort numbers
+ *-----------------
+ */
+#define BAD_SYNC        0
+#define BAD_IRQ         1
+#define BAD_FIQ         2
+#define BAD_ERROR       3
+
+        .macro  invalid, reason
+        mov     x0, sp
+        mov     x1, #\reason
+        b       do_bad_mode
+        .endm
+
+hyp_sync_invalid:
+        entry   hyp=1
+        invalid BAD_SYNC
+
+hyp_irq_invalid:
+        entry   hyp=1
+        invalid BAD_IRQ
+
+hyp_fiq_invalid:
+        entry   hyp=1
+        invalid BAD_FIQ
+
+hyp_error_invalid:
+        entry   hyp=1
+        invalid BAD_ERROR
+
+/* Traps taken in Current EL with SP_ELx */
+hyp_sync:
+        entry   hyp=1
+        msr     daifclr, #2
+        adr     lr, return_to_hypervisor
+        mov     x0, sp
+        b       do_trap_hypervisor
+
+hyp_irq:
+        entry   hyp=1
+        adr     lr, return_to_hypervisor
+        mov     x0, sp
+        b       do_trap_irq
+
+guest_sync:
+        entry   hyp=0, compat=0
+        invalid BAD_SYNC /* No AArch64 guest support yet */
+
+guest_irq:
+        entry   hyp=0, compat=0
+        invalid BAD_IRQ /* No AArch64 guest support yet */
+
+guest_fiq_invalid:
+        entry   hyp=0, compat=0
+        invalid BAD_FIQ
+
+guest_error_invalid:
+        entry   hyp=0, compat=0
+        invalid BAD_ERROR
+
+guest_sync_compat:
+        entry   hyp=0, compat=1
+        msr     daifclr, #2
+        adr     lr, return_to_guest
+        mov     x0, sp
+        b       do_trap_hypervisor
+
+guest_irq_compat:
+        entry   hyp=0, compat=1
+        adr     lr, return_to_guest
+        mov     x0, sp
+        b       do_trap_irq
+
+guest_fiq_invalid_compat:
+        entry   hyp=0, compat=1
+        invalid BAD_FIQ
+
+guest_error_invalid_compat:
+        entry   hyp=0, compat=1
+        invalid BAD_ERROR
+
+ENTRY(return_to_new_vcpu)
+        ldr     x21, [sp, #UREGS_CPSR]
+        and     x21, x21, #PSR_MODE_MASK
+        /* Returning to EL2? */
+        cmp     x21, #PSR_MODE_EL2t
+        ccmp    x21, #PSR_MODE_EL2h, #0x4, ne
+        b.eq    return_to_hypervisor /* Yes */
+        /* Fall thru */
+ENTRY(return_to_guest)
+        bl      leave_hypervisor_tail /* Disables interrupts on return */
+        /* Fall thru */
+ENTRY(return_to_hypervisor)
+        msr     daifset, #2 /* Mask interrupts */
+
+        ldp     x21, x22, [sp, #UREGS_PC]       // load ELR, SPSR
+
+        pop     x0, x1
+        pop     x2, x3
+        pop     x4, x5
+        pop     x6, x7
+        pop     x8, x9
+
+        /* XXX handle return to guest tasks, soft irqs etc */
+        
+        msr     elr_el2, x21                    // set up the return data
+        msr     spsr_el2, x22
+
+        pop     x10, x11
+        pop     x12, x13
+        pop     x14, x15
+        pop     x16, x17
+        pop     x18, x19
+        pop     x20, x21
+        pop     x22, x23
+        pop     x24, x25
+        pop     x26, x27
+        pop     x28, x29
+
+        ldr     lr, [sp], #(UREGS_SPSR_el1 - UREGS_SP)
+        eret
+
+/*
+ * Exception vectors.
+ */
+        .macro  ventry  label
+        .align  7
+        b       \label
+        .endm
+
+        .align  11
+ENTRY(hyp_traps_vector)
+        ventry  hyp_sync_invalid                // Synchronous EL2t
+        ventry  hyp_irq_invalid                 // IRQ EL2t
+        ventry  hyp_fiq_invalid                 // FIQ EL2t
+        ventry  hyp_error_invalid               // Error EL2t
+
+        ventry  hyp_sync                        // Synchronous EL2h
+        ventry  hyp_irq                         // IRQ EL2h
+        ventry  hyp_fiq_invalid                 // FIQ EL2h
+        ventry  hyp_error_invalid               // Error EL2h
+
+        ventry  guest_sync                      // Synchronous 64-bit EL0/EL1
+        ventry  guest_irq                       // IRQ 64-bit EL0/EL1
+        ventry  guest_fiq_invalid               // FIQ 64-bit EL0/EL1
+        ventry  guest_error_invalid             // Error 64-bit EL0/EL1
+
+        ventry  guest_sync_compat               // Synchronous 32-bit EL0/EL1
+        ventry  guest_irq_compat                // IRQ 32-bit EL0/EL1
+        ventry  guest_fiq_invalid_compat        // FIQ 32-bit EL0/EL1
+        ventry  guest_error_invalid_compat      // Error 32-bit EL0/EL1
+
+/*
+ * Local variables:
+ * mode: ASM
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/arm64/traps.c b/xen/arch/arm/arm64/traps.c
new file mode 100644
index 0000000..02ef992
--- /dev/null
+++ b/xen/arch/arm/arm64/traps.c
@@ -0,0 +1,56 @@
+/*
+ * xen/arch/arm/arm64/traps.c
+ *
+ * ARM AArch64 Specific Trap handlers
+ *
+ * Copyright (c) 2012 Citrix Systems.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <xen/config.h>
+#include <xen/lib.h>
+
+#include <asm/system.h>
+#include <asm/processor.h>
+
+#include <public/xen.h>
+
+asmlinkage void do_trap_serror(struct cpu_user_regs *regs)
+{
+    panic("Unhandled serror trap\n");
+}
+
+static const char *handler[]= {
+        "Synchronous Abort",
+        "IRQ",
+        "FIQ",
+        "Error"
+};
+
+asmlinkage void do_bad_mode(struct cpu_user_regs *regs, int reason)
+{
+    uint64_t esr = READ_SYSREG64(ESR_EL2);
+    printk("Bad mode in %s handler detected, code 0x%08"PRIx64"\n",
+           handler[reason], esr);
+
+    local_irq_disable();
+    panic("bad mode");
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/io.h b/xen/arch/arm/io.h
index 0933aa8..883afd8 100644
--- a/xen/arch/arm/io.h
+++ b/xen/arch/arm/io.h
@@ -26,7 +26,7 @@
 typedef struct
 {
     struct hsr_dabt dabt;
-    uint32_t gva;
+    vaddr_t gva;
     paddr_t gpa;
 } mmio_info_t;
 
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index c1f06c9..299848e 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -387,7 +387,7 @@ void __init start_xen(unsigned long boot_phys_offset,
     setup_mm(fdt_paddr, fdt_size);
 
     /* Setup Hyp vector base */
-    WRITE_SYSREG((vaddr_t)hyp_traps_vector, VBAR_EL2);
+    WRITE_SYSREG((vaddr_t)&hyp_traps_vector, VBAR_EL2);
     isb();
 
     /* Setup Stage 2 address translation */
diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c
index d8eb5d3..b18f137 100644
--- a/xen/arch/arm/smpboot.c
+++ b/xen/arch/arm/smpboot.c
@@ -142,7 +142,7 @@ void __cpuinit start_secondary(unsigned long boot_phys_offset,
     set_processor_id(cpuid);
 
     /* Setup Hyp vector base */
-    WRITE_CP32((register_t) hyp_traps_vector, HVBAR);
+    WRITE_SYSREG((vaddr_t)&hyp_traps_vector, VBAR_EL2);
 
     mmu_init_secondary_cpu();
     enable_vfp();
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index cb8a8d2..d6bdaa7 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -628,7 +628,7 @@ static void do_cp15_64(struct cpu_user_regs *regs,
 
 }
 
-void dump_guest_s1_walk(struct domain *d, uint32_t addr)
+void dump_guest_s1_walk(struct domain *d, vaddr_t addr)
 {
     uint32_t ttbcr = READ_CP32(TTBCR);
     uint32_t ttbr0 = READ_CP32(TTBR0);
@@ -636,7 +636,7 @@ void dump_guest_s1_walk(struct domain *d, uint32_t addr)
     uint32_t offset;
     uint32_t *first = NULL, *second = NULL;
 
-    printk("dom%d VA 0x%08"PRIx32"\n", d->domain_id, addr);
+    printk("dom%d VA 0x%08"PRIvaddr"\n", d->domain_id, addr);
     printk("    TTBCR: 0x%08"PRIx32"\n", ttbcr);
     printk("    TTBR0: 0x%08"PRIx32" = 0x%"PRIpaddr"\n",
            ttbr0, p2m_lookup(d, ttbr0 & PAGE_MASK));
@@ -692,7 +692,11 @@ static void do_trap_data_abort_guest(struct cpu_user_regs *regs,
     mmio_info_t info;
 
     info.dabt = dabt;
+#ifdef CONFIG_ARM_32
     info.gva = READ_CP32(HDFAR);
+#else
+    info.gva = READ_SYSREG64(FAR_EL2);
+#endif
 
     if (dabt.s1ptw)
         goto bad_data_abort;
@@ -713,7 +717,7 @@ bad_data_abort:
 
     /* XXX inject a suitable fault into the guest */
     printk("Guest data abort: %s%s%s\n"
-           "    gva=%"PRIx32"\n",
+           "    gva=%"PRIvaddr"\n",
            msg, dabt.s1ptw ? " S2 during S1" : "",
            fsc_level_str(level),
            info.gva);
@@ -736,13 +740,17 @@ bad_data_abort:
 
 asmlinkage void do_trap_hypervisor(struct cpu_user_regs *regs)
 {
-    union hsr hsr = { .bits = READ_CP32(HSR) };
+    union hsr hsr = { .bits = READ_SYSREG32(ESR_EL2) };
 
     switch (hsr.ec) {
     case HSR_EC_CP15_32:
+        if ( ! is_pv32_domain(current->domain) )
+            goto bad_trap;
         do_cp15_32(regs, hsr);
         break;
     case HSR_EC_CP15_64:
+        if ( ! is_pv32_domain(current->domain) )
+            goto bad_trap;
         do_cp15_64(regs, hsr);
         break;
     case HSR_EC_HVC:
@@ -754,6 +762,7 @@ asmlinkage void do_trap_hypervisor(struct cpu_user_regs *regs)
         do_trap_data_abort_guest(regs, hsr.dabt);
         break;
     default:
+ bad_trap:
         printk("Hypervisor Trap. HSR=0x%x EC=0x%x IL=%x Syndrome=%"PRIx32"\n",
                hsr.bits, hsr.ec, hsr.len, hsr.iss);
         do_unexpected_trap("Hypervisor", regs);
diff --git a/xen/include/asm-arm/cpregs.h b/xen/include/asm-arm/cpregs.h
index 36da12e..75b6287 100644
--- a/xen/include/asm-arm/cpregs.h
+++ b/xen/include/asm-arm/cpregs.h
@@ -228,6 +228,7 @@
 #define CCSIDR_EL1              CCSIDR
 #define CLIDR_EL1               CLIDR
 #define CSSELR_EL1              CSSELR
+#define ESR_EL2                 HSR
 #define ID_AFR0_EL1             ID_AFR0
 #define ID_DFR0_EL1             ID_DFR0
 #define ID_ISAR0_EL1            ID_ISAR0
diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h
index bd473a8..396ec41 100644
--- a/xen/include/asm-arm/processor.h
+++ b/xen/include/asm-arm/processor.h
@@ -238,7 +238,7 @@ union hsr {
 #endif
 
 #ifndef __ASSEMBLY__
-extern uint32_t hyp_traps_vector[8];
+extern uint32_t hyp_traps_vector;
 
 void panic_PAR(uint64_t par);
 
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 28/46] xen: arm: pcpu context switch
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (26 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 27/46] xen: arm: arm64 trap handling Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 29/46] xen: arm64: percpu variable support Ian Campbell
                   ` (19 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
 xen/arch/arm/arm64/entry.S   |   30 ++++++++++++++++++++++++++++++
 xen/arch/arm/domain.c        |    4 ++--
 xen/include/asm-arm/domain.h |   33 +++++++++++++++++++++++----------
 3 files changed, 55 insertions(+), 12 deletions(-)

diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S
index 1b2c4ad..a09dfcb 100644
--- a/xen/arch/arm/arm64/entry.S
+++ b/xen/arch/arm/arm64/entry.S
@@ -249,6 +249,36 @@ ENTRY(hyp_traps_vector)
         ventry  guest_error_invalid_compat      // Error 32-bit EL0/EL1
 
 /*
+ * struct vcpu *__context_switch(struct vcpu *prev, struct vcpu *next)
+ *
+ * x0 - prev
+ * x1 - next
+ *
+ * Returns prev in x0
+ */
+ENTRY(__context_switch)
+        add     x8, x0, #VCPU_arch_saved_context
+        mov     x9, sp
+        stp     x19, x20, [x8], #16             // store callee-saved registers
+        stp     x21, x22, [x8], #16
+        stp     x23, x24, [x8], #16
+        stp     x25, x26, [x8], #16
+        stp     x27, x28, [x8], #16
+        stp     x29, x9, [x8], #16
+        str     lr, [x8]
+
+        add     x8, x1, #VCPU_arch_saved_context
+        ldp     x19, x20, [x8], #16             // restore callee-saved registers
+        ldp     x21, x22, [x8], #16
+        ldp     x23, x24, [x8], #16
+        ldp     x25, x26, [x8], #16
+        ldp     x27, x28, [x8], #16
+        ldp     x29, x9, [x8], #16
+        ldr     lr, [x8]
+        mov     sp, x9
+        ret
+
+/*
  * Local variables:
  * mode: ASM
  * indent-tabs-mode: nil
diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 3651fb2..f74caf4 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -387,8 +387,8 @@ int vcpu_initialise(struct vcpu *v)
                                            - sizeof(struct cpu_info));
 
     memset(&v->arch.saved_context, 0, sizeof(v->arch.saved_context));
-    v->arch.saved_context.sp = (uint32_t)v->arch.cpu_info;
-    v->arch.saved_context.pc = (uint32_t)continue_new_vcpu;
+    v->arch.saved_context.sp = (register_t)v->arch.cpu_info;
+    v->arch.saved_context.pc = (register_t)continue_new_vcpu;
 
     /* Idle VCPUs don't need the rest of this setup */
     if ( is_idle_vcpu(v) )
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index e9370a5..ff6214b 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -99,16 +99,29 @@ struct vtimer {
 struct arch_vcpu
 {
     struct {
-        uint32_t    r4;
-        uint32_t    r5;
-        uint32_t    r6;
-        uint32_t    r7;
-        uint32_t    r8;
-        uint32_t    r9;
-        uint32_t    sl;
-        uint32_t    fp;
-        uint32_t    sp;
-        uint32_t    pc;
+#ifdef CONFIG_ARM_32
+        register_t r4;
+        register_t r5;
+        register_t r6;
+        register_t r7;
+        register_t r8;
+        register_t r9;
+        register_t sl;
+#else
+        register_t x19;
+        register_t x20;
+        register_t x21;
+        register_t x22;
+        register_t x23;
+        register_t x24;
+        register_t x25;
+        register_t x26;
+        register_t x27;
+        register_t x28;
+#endif
+        register_t fp;
+        register_t sp;
+        register_t pc;
     } saved_context;
 
     void *stack;
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 29/46] xen: arm64: percpu variable support.
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (27 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 28/46] xen: arm: pcpu context switch Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 30/46] xen: arm: guest context switching Ian Campbell
                   ` (18 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
 xen/include/asm-arm/cpregs.h |    1 +
 xen/include/asm-arm/percpu.h |    5 ++---
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/xen/include/asm-arm/cpregs.h b/xen/include/asm-arm/cpregs.h
index 75b6287..dc69a06 100644
--- a/xen/include/asm-arm/cpregs.h
+++ b/xen/include/asm-arm/cpregs.h
@@ -244,6 +244,7 @@
 #define ID_PFR0_EL1             ID_PFR0
 #define ID_PFR1_EL1             ID_PFR1
 #define SCTLR_EL2               HSCTLR
+#define TPIDR_EL2               HTPIDR
 #define TTBR0_EL2               HTTBR
 #define VBAR_EL2                HVBAR
 #define VTCR_EL2                VTCR
diff --git a/xen/include/asm-arm/percpu.h b/xen/include/asm-arm/percpu.h
index ab27292..e955136 100644
--- a/xen/include/asm-arm/percpu.h
+++ b/xen/include/asm-arm/percpu.h
@@ -11,18 +11,17 @@ void percpu_init_areas(void);
     __section(".bss.percpu" #suffix)                            \
     __typeof__(type) per_cpu_##name
 
-
 #define per_cpu(var, cpu)  \
     (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
 #define __get_cpu_var(var) \
-    (*RELOC_HIDE(&per_cpu__##var, READ_CP32(HTPIDR)))
+    (*RELOC_HIDE(&per_cpu__##var, READ_SYSREG(TPIDR_EL2)))
 
 #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
 
 DECLARE_PER_CPU(unsigned int, cpu_id);
 #define get_processor_id()    (this_cpu(cpu_id))
 #define set_processor_id(id)  do {                      \
-    WRITE_CP32(__per_cpu_offset[id], HTPIDR);           \
+    WRITE_SYSREG(__per_cpu_offset[id], TPIDR_EL2);      \
     this_cpu(cpu_id) = (id);                            \
 } while(0)
 #endif
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 30/46] xen: arm: guest context switching.
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (28 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 29/46] xen: arm64: percpu variable support Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 31/46] xen: arm: show_registers() support for 64-bit Ian Campbell
                   ` (17 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

One side effect of this is that we now save the full 64-bit
TTBR[0,1] even on a 32-bit hypervisor. This is needed anyway to
support LPAE guests (although this patch doesn't implement anything
other than the context switch).
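
(For background, and purely as an illustration rather than anything in this
patch: on a 32-bit LPAE-capable CPU the TTBRs are single 64-bit architectural
registers, accessed with the MRRC/MCRR forms of the CP15 instructions, which
is what the READ_CP64/WRITE_CP64 accessors below rely on. A minimal sketch,
with a hypothetical helper name rather than the real macros:

    static inline uint64_t read_ttbr0_64(void)
    {
        uint32_t lo, hi;
        /* 64-bit read of TTBR0 is "mrrc p15, 0, <low>, <high>, c2" */
        asm volatile("mrrc p15, 0, %0, %1, c2" : "=r" (lo), "=r" (hi));
        return ((uint64_t)hi << 32) | lo;
    }
)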

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
v2: Nuke XXX and rationalise naming:
 s/tpidrurw/tpidr_el0/
 s/tpidrprw/tpidr_el1/
 s/tpidruro/tpidrro_el0/
---
 xen/arch/arm/domain.c        |  113 +++++++++++++++++++++++++-----------------
 xen/arch/arm/traps.c         |   14 +++---
 xen/include/asm-arm/cpregs.h |   21 +++++++-
 xen/include/asm-arm/domain.h |   29 ++++++++---
 4 files changed, 115 insertions(+), 62 deletions(-)

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index f74caf4..e0707ff 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -43,55 +43,67 @@ void idle_loop(void)
 static void ctxt_switch_from(struct vcpu *p)
 {
     /* CP 15 */
-    p->arch.csselr = READ_CP32(CSSELR);
+    p->arch.csselr = READ_SYSREG(CSSELR_EL1);
 
     /* Control Registers */
-    p->arch.actlr = READ_CP32(ACTLR);
-    p->arch.sctlr = READ_CP32(SCTLR);
-    p->arch.cpacr = READ_CP32(CPACR);
+    p->arch.actlr = READ_SYSREG(ACTLR_EL1);
+    p->arch.sctlr = READ_SYSREG(SCTLR_EL1);
+    p->arch.cpacr = READ_SYSREG(CPACR_EL1);
 
-    p->arch.contextidr = READ_CP32(CONTEXTIDR);
-    p->arch.tpidrurw = READ_CP32(TPIDRURW);
-    p->arch.tpidruro = READ_CP32(TPIDRURO);
-    p->arch.tpidrprw = READ_CP32(TPIDRPRW);
+    p->arch.contextidr = READ_SYSREG(CONTEXTIDR_EL1);
+    p->arch.tpidr_el0 = READ_SYSREG(TPIDR_EL0);
+    p->arch.tpidrro_el0 = READ_SYSREG(TPIDRRO_EL0);
+    p->arch.tpidr_el1 = READ_SYSREG(TPIDR_EL1);
 
     /* Arch timer */
     virt_timer_save(p);
 
+#if defined(CONFIG_ARM_32)
     /* XXX only save these if ThumbEE e.g. ID_PFR0.THUMB_EE_SUPPORT */
     p->arch.teecr = READ_CP32(TEECR);
     p->arch.teehbr = READ_CP32(TEEHBR);
 
     p->arch.joscr = READ_CP32(JOSCR);
     p->arch.jmcr = READ_CP32(JMCR);
+#endif
 
     isb();
 
     /* MMU */
-    p->arch.vbar = READ_CP32(VBAR);
-    p->arch.ttbcr = READ_CP32(TTBCR);
-    /* XXX save 64 bit TTBR if guest is LPAE */
-    p->arch.ttbr0 = READ_CP32(TTBR0);
-    p->arch.ttbr1 = READ_CP32(TTBR1);
-
-    p->arch.dacr = READ_CP32(DACR);
-    p->arch.par = READ_CP64(PAR);
+    p->arch.vbar = READ_SYSREG(VBAR_EL1);
+    p->arch.ttbcr = READ_SYSREG(TCR_EL1);
+    p->arch.ttbr0 = READ_SYSREG64(TTBR0_EL1);
+    p->arch.ttbr1 = READ_SYSREG64(TTBR1_EL1);
+    if ( is_pv32_domain(p->domain) )
+        p->arch.dacr = READ_SYSREG(DACR32_EL2);
+    p->arch.par = READ_SYSREG64(PAR_EL1);
+#if defined(CONFIG_ARM_32)
     p->arch.mair0 = READ_CP32(MAIR0);
     p->arch.mair1 = READ_CP32(MAIR1);
+#else
+    p->arch.mair = READ_SYSREG64(MAIR_EL1);
+#endif
 
     /* Fault Status */
+#if defined(CONFIG_ARM_32)
     p->arch.dfar = READ_CP32(DFAR);
     p->arch.ifar = READ_CP32(IFAR);
     p->arch.dfsr = READ_CP32(DFSR);
-    p->arch.ifsr = READ_CP32(IFSR);
-    p->arch.adfsr = READ_CP32(ADFSR);
-    p->arch.aifsr = READ_CP32(AIFSR);
+#elif defined(CONFIG_ARM_64)
+    p->arch.far = READ_SYSREG64(FAR_EL1);
+    p->arch.esr = READ_SYSREG64(ESR_EL1);
+#endif
+
+    if ( is_pv32_domain(p->domain) )
+        p->arch.ifsr  = READ_SYSREG(IFSR32_EL2);
+    p->arch.afsr0 = READ_SYSREG(AFSR0_EL1);
+    p->arch.afsr1 = READ_SYSREG(AFSR1_EL1);
 
     /* XXX MPU */
 
     /* XXX VFP */
 
-    /* XXX VGIC */
+    /* VGIC */
     gic_save_state(p);
 
     isb();
@@ -100,16 +112,16 @@ static void ctxt_switch_from(struct vcpu *p)
 
 static void ctxt_switch_to(struct vcpu *n)
 {
-    uint32_t hcr;
+    register_t hcr;
 
-    hcr = READ_CP32(HCR);
-    WRITE_CP32(hcr & ~HCR_VM, HCR);
+    hcr = READ_SYSREG(HCR_EL2);
+    WRITE_SYSREG(hcr & ~HCR_VM, HCR_EL2);
     isb();
 
     p2m_load_VTTBR(n->domain);
     isb();
 
-    /* XXX VGIC */
+    /* VGIC */
     gic_restore_state(n);
 
     /* XXX VFP */
@@ -117,51 +129,62 @@ static void ctxt_switch_to(struct vcpu *n)
     /* XXX MPU */
 
     /* Fault Status */
+#if defined(CONFIG_ARM_32)
     WRITE_CP32(n->arch.dfar, DFAR);
     WRITE_CP32(n->arch.ifar, IFAR);
     WRITE_CP32(n->arch.dfsr, DFSR);
-    WRITE_CP32(n->arch.ifsr, IFSR);
-    WRITE_CP32(n->arch.adfsr, ADFSR);
-    WRITE_CP32(n->arch.aifsr, AIFSR);
+#elif defined(CONFIG_ARM_64)
+    WRITE_SYSREG64(n->arch.far, FAR_EL1);
+    WRITE_SYSREG64(n->arch.esr, ESR_EL1);
+#endif
+
+    if ( is_pv32_domain(n->domain) )
+        WRITE_SYSREG(n->arch.ifsr, IFSR32_EL2);
+    WRITE_SYSREG(n->arch.afsr0, AFSR0_EL1);
+    WRITE_SYSREG(n->arch.afsr1, AFSR1_EL1);
 
     /* MMU */
-    WRITE_CP32(n->arch.vbar, VBAR);
-    WRITE_CP32(n->arch.ttbcr, TTBCR);
-    /* XXX restore 64 bit TTBR if guest is LPAE */
-    WRITE_CP32(n->arch.ttbr0, TTBR0);
-    WRITE_CP32(n->arch.ttbr1, TTBR1);
-
-    WRITE_CP32(n->arch.dacr, DACR);
-    WRITE_CP64(n->arch.par, PAR);
+    WRITE_SYSREG(n->arch.vbar, VBAR_EL1);
+    WRITE_SYSREG(n->arch.ttbcr, TCR_EL1);
+    WRITE_SYSREG64(n->arch.ttbr0, TTBR0_EL1);
+    WRITE_SYSREG64(n->arch.ttbr1, TTBR1_EL1);
+    if ( is_pv32_domain(n->domain) )
+        WRITE_SYSREG(n->arch.dacr, DACR32_EL2);
+    WRITE_SYSREG64(n->arch.par, PAR_EL1);
+#if defined(CONFIG_ARM_32)
     WRITE_CP32(n->arch.mair0, MAIR0);
     WRITE_CP32(n->arch.mair1, MAIR1);
+#elif defined(CONFIG_ARM_64)
+    WRITE_SYSREG64(n->arch.mair, MAIR_EL1);
+#endif
     isb();
 
     /* Control Registers */
-    WRITE_CP32(n->arch.actlr, ACTLR);
-    WRITE_CP32(n->arch.sctlr, SCTLR);
-    WRITE_CP32(n->arch.cpacr, CPACR);
+    WRITE_SYSREG(n->arch.actlr, ACTLR_EL1);
+    WRITE_SYSREG(n->arch.sctlr, SCTLR_EL1);
+    WRITE_SYSREG(n->arch.cpacr, CPACR_EL1);
 
-    WRITE_CP32(n->arch.contextidr, CONTEXTIDR);
-    WRITE_CP32(n->arch.tpidrurw, TPIDRURW);
-    WRITE_CP32(n->arch.tpidruro, TPIDRURO);
-    WRITE_CP32(n->arch.tpidrprw, TPIDRPRW);
+    WRITE_SYSREG(n->arch.contextidr, CONTEXTIDR_EL1);
+    WRITE_SYSREG(n->arch.tpidr_el0, TPIDR_EL0);
+    WRITE_SYSREG(n->arch.tpidrro_el0, TPIDRRO_EL0);
+    WRITE_SYSREG(n->arch.tpidr_el1, TPIDR_EL1);
 
+#if defined(CONFIG_ARM_32)
     /* XXX only restore these if ThumbEE e.g. ID_PFR0.THUMB_EE_SUPPORT */
     WRITE_CP32(n->arch.teecr, TEECR);
     WRITE_CP32(n->arch.teehbr, TEEHBR);
 
     WRITE_CP32(n->arch.joscr, JOSCR);
     WRITE_CP32(n->arch.jmcr, JMCR);
-
+#endif
     isb();
 
     /* CP 15 */
-    WRITE_CP32(n->arch.csselr, CSSELR);
+    WRITE_SYSREG(n->arch.csselr, CSSELR_EL1);
 
     isb();
 
-    WRITE_CP32(hcr, HCR);
+    WRITE_SYSREG(hcr, HCR_EL2);
     isb();
 
     /* This is could trigger an hardware interrupt from the virtual
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index d6bdaa7..97a29fb 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -214,8 +214,8 @@ void panic_PAR(uint64_t par)
 }
 
 struct reg_ctxt {
-    uint32_t sctlr;
-    uint32_t ttbr0, ttbr1, ttbcr;
+    uint32_t sctlr, ttbcr;
+    uint64_t ttbr0, ttbr1;
 };
 static void _show_registers(struct cpu_user_regs *regs,
                             struct reg_ctxt *ctxt,
@@ -265,7 +265,7 @@ static void _show_registers(struct cpu_user_regs *regs,
         printk("FIQ: R8: %08"PRIx32" R9: %08"PRIx32" R10:%08"PRIx32" R11:%08"PRIx32" R12:%08"PRIx32"\n",
                regs->r8_fiq, regs->r9_fiq, regs->r10_fiq, regs->r11_fiq, regs->r11_fiq);
         printk("\n");
-        printk("TTBR0 %08"PRIx32" TTBR1 %08"PRIx32" TTBCR %08"PRIx32"\n",
+        printk("TTBR0 %010"PRIx64" TTBR1 %010"PRIx64" TTBCR %08"PRIx32"\n",
                ctxt->ttbr0, ctxt->ttbr1, ctxt->ttbcr);
         printk("SCTLR %08"PRIx32"\n", ctxt->sctlr);
         printk("VTTBR %010"PRIx64"\n", READ_CP64(VTTBR));
@@ -295,8 +295,8 @@ void show_registers(struct cpu_user_regs *regs)
     struct reg_ctxt ctxt;
     ctxt.sctlr = READ_CP32(SCTLR);
     ctxt.ttbcr = READ_CP32(TTBCR);
-    ctxt.ttbr0 = READ_CP32(TTBR0);
-    ctxt.ttbr1 = READ_CP32(TTBR1);
+    ctxt.ttbr0 = READ_CP64(TTBR0);
+    ctxt.ttbr1 = READ_CP64(TTBR1);
     _show_registers(regs, &ctxt, guest_mode(regs));
 }
 
@@ -631,14 +631,14 @@ static void do_cp15_64(struct cpu_user_regs *regs,
 void dump_guest_s1_walk(struct domain *d, vaddr_t addr)
 {
     uint32_t ttbcr = READ_CP32(TTBCR);
-    uint32_t ttbr0 = READ_CP32(TTBR0);
+    uint64_t ttbr0 = READ_CP64(TTBR0);
     paddr_t paddr;
     uint32_t offset;
     uint32_t *first = NULL, *second = NULL;
 
     printk("dom%d VA 0x%08"PRIvaddr"\n", d->domain_id, addr);
     printk("    TTBCR: 0x%08"PRIx32"\n", ttbcr);
-    printk("    TTBR0: 0x%08"PRIx32" = 0x%"PRIpaddr"\n",
+    printk("    TTBR0: 0x%010"PRIx64" = 0x%"PRIpaddr"\n",
            ttbr0, p2m_lookup(d, ttbr0 & PAGE_MASK));
 
     if ( ttbcr & TTBCR_EAE )
diff --git a/xen/include/asm-arm/cpregs.h b/xen/include/asm-arm/cpregs.h
index dc69a06..732f967 100644
--- a/xen/include/asm-arm/cpregs.h
+++ b/xen/include/asm-arm/cpregs.h
@@ -106,9 +106,9 @@
 #define HCR             p15,4,c1,c1,0   /* Hyp. Configuration Register */
 
 /* CP15 CR2: Translation Table Base and Control Registers */
-#define TTBR0           p15,0,c2,c0,0   /* Translation Table Base Reg. 0 */
-#define TTBR1           p15,0,c2,c0,1   /* Translation Table Base Reg. 1 */
 #define TTBCR           p15,0,c2,c0,2   /* Translatation Table Base Control Register */
+#define TTBR0           p15,0,c2        /* Translation Table Base Reg. 0 */
+#define TTBR1           p15,1,c2        /* Translation Table Base Reg. 1 */
 #define HTTBR           p15,4,c2        /* Hyp. Translation Table Base Register */
 #define HTCR            p15,4,c2,c0,2   /* Hyp. Translation Control Register */
 #define VTCR            p15,4,c2,c1,2   /* Virtualization Translation Control Register */
@@ -225,10 +225,17 @@
 /* Aliases of AArch64 names for use in common code when building for AArch32 */
 #ifdef CONFIG_ARM_32
 /* Alphabetically... */
+#define ACTLR_EL1               ACTLR
+#define AFSR0_EL1               ADFSR
+#define AFSR1_EL1               AIFSR
 #define CCSIDR_EL1              CCSIDR
 #define CLIDR_EL1               CLIDR
+#define CONTEXTIDR_EL1          CONTEXTIDR
+#define CPACR_EL1               CPACR
 #define CSSELR_EL1              CSSELR
+#define DACR32_EL2              DACR
 #define ESR_EL2                 HSR
+#define HCR_EL2                 HCR
 #define ID_AFR0_EL1             ID_AFR0
 #define ID_DFR0_EL1             ID_DFR0
 #define ID_ISAR0_EL1            ID_ISAR0
@@ -243,9 +250,19 @@
 #define ID_MMFR3_EL1            ID_MMFR3
 #define ID_PFR0_EL1             ID_PFR0
 #define ID_PFR1_EL1             ID_PFR1
+#define IFSR32_EL2              IFSR
+#define PAR_EL1                 PAR
+#define SCTLR_EL1               SCTLR
 #define SCTLR_EL2               HSCTLR
+#define TCR_EL1                 TTBCR
+#define TPIDRRO_EL0             TPIDRURO
+#define TPIDR_EL0               TPIDRURW
+#define TPIDR_EL1               TPIDRPRW
 #define TPIDR_EL2               HTPIDR
+#define TTBR0_EL1               TTBR0
 #define TTBR0_EL2               HTTBR
+#define TTBR1_EL1               TTBR1
+#define VBAR_EL1                VBAR
 #define VBAR_EL2                HVBAR
 #define VTCR_EL2                VTCR
 
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index ff6214b..4a4bf2f 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -133,30 +133,43 @@ struct arch_vcpu
     struct cpu_info *cpu_info;
 
     /* Fault Status */
+#ifdef CONFIG_ARM_32
+    uint32_t dfsr;
     uint32_t dfar, ifar;
-    uint32_t dfsr, ifsr;
-    uint32_t adfsr, aifsr;
+#else
+    uint64_t far;
+    uint32_t esr;
+#endif
+
+    uint32_t ifsr; /* 32-bit guests only */
+    uint32_t afsr0, afsr1;
 
     /* MMU */
-    uint32_t vbar;
+    register_t vbar;
     uint32_t ttbcr;
-    uint32_t ttbr0, ttbr1;
+    uint64_t ttbr0, ttbr1;
 
-    uint32_t dacr;
+    uint32_t dacr; /* 32-bit guests only */
     uint64_t par;
+#ifdef CONFIG_ARM_32
     uint32_t mair0, mair1;
+#else
+    uint64_t mair;
+#endif
 
     /* Control Registers */
     uint32_t actlr, sctlr;
     uint32_t cpacr;
 
     uint32_t contextidr;
-    uint32_t tpidrurw;
-    uint32_t tpidruro;
-    uint32_t tpidrprw;
+    register_t tpidr_el0;
+    register_t tpidr_el1;
+    register_t tpidrro_el0;
 
+#ifdef CONFIG_ARM_32
     uint32_t teecr, teehbr;
     uint32_t joscr, jmcr;
+#endif
 
     /* CP 15 */
     uint32_t csselr;
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH 00/46] initial arm v8 (64-bit) support
@ 2013-02-14 16:47 Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 01/46] xen: arm32: Don't bother with the bootloader provided ARM-Linux machine type Ian Campbell
                   ` (47 more replies)
  0 siblings, 48 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: Tim Deegan, Stefano Stabellini

[-- Attachment #1: Type: text/plain, Size: 2794 bytes --]

This is v2 of the arm64 (64-bit) series. It is based on current staging
plus the "xen: public interface (and foreign check) changes for arm"
series, the second posting of which is
<1360857557.20449.436.camel@zakaz.uk.xensource.com>

I have implemented Tim's review comments, with the exception of the
comments on the use of WFE etc. in the spinlock implementation (#8 last
time) and the comments on trap handling (#28 last time), which I intend
to return to. Many thanks to Tim for his copious comments and acks!

I have pushed the series, plus the prerequisite mentioned above and a
small number of Stefano's toolstack patches to:
        git://xenbits.xen.org/people/ianc/xen.git arm64-v2

As well as the above I have pushed the kernel tree I am using, which is
based on v3.8-rc3 to:
        git://xenbits.xen.org/people/ianc/linux.git arm64-v2
The kernel config is attached. Note that this is a 32-bit ARM kernel;
64-bit support for dom0 and domU is a work in progress and is not
included here.

I am building the 64-bit hypervisor with the Linaro gcc,
gcc-linaro-aarch64-linux-gnu-4.7-2012.12-20121214_linux, from
http://www.linaro.org/engineering/armv8#tab3 
http://releases.linaro.org/13.01/components/toolchain/binaries/gcc-linaro-aarch64-linux-gnu-4.7-2013.01-20130125_linux.tar.bz2

For the tools I am using the native armhf tools on a Debian Wheezy armhf
system (running on a cluster of IMX.5x loco boards). I have not tried
cross compiling the tools. FWIW I also build the 32-bit hypervisor
natively in this environment.

For the kernel I am using the kernel.org cross compiler,
gcc-4.6.3-nolibc / arm-unknown-linux-gnueabi, from
http://www.kernel.org/pub/tools/crosstool/files/bin/x86_64/4.6.3

With all this I can boot a 32-bit dom0 and a 32-bit guest domain (using
the same kernel) on either a 64-bit hypervisor (on the RTSM_VE_AEMv8A
model, 0.8.4510) or a 32-bit hypervisor (RTSM_VE_Cortex-A15x1, 8.0.44).
You can also run 32-bit on the V8 model (using -C
cluster.cpu0.CONFIG64=0) if you comment out the ThumbEE register
save/restore in ctxt_switch_from and ctxt_switch_to (making this
dynamic is on my TODO list).
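
For illustration only (not part of this series), that dynamic check could
key off ID_PFR0.State3, which the "XXX only save these if ThumbEE" comments
in ctxt_switch_from/to already allude to; cpu_has_thumbee below is a
hypothetical helper, using the existing READ_CP32 accessor:

    /* ID_PFR0.State3, bits [15:12], is non-zero when ThumbEE is implemented */
    static inline int cpu_has_thumbee(void)
    {
        return ((READ_CP32(ID_PFR0) >> 12) & 0xf) != 0;
    }

    /* ... then in ctxt_switch_from (and similarly in ctxt_switch_to): */
    if ( cpu_has_thumbee() )
    {
        p->arch.teecr  = READ_CP32(TEECR);
        p->arch.teehbr = READ_CP32(TEEHBR);
    }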

My dom0 root filesystem is a Debian Wheezy armhf image, attached to the
emulated MMC (-C motherboard.mmc.p_mmc_file=rootfs.img)

To save running all of the dom0 initscripts (which is a bit boring on
the model) I boot using init=/root/init.sh (init.sh is attached), which
mounts the necessary filesystems, starts u-boot and the relevant xen
stuff.

Once booted into dom0 I run the attached guest.sh, which uses the
attached cfg and a guest.img (I use the one from the ARM 3rd party IP)
to start a guest and connect to its console. The guest boots to a prompt.

I will at some point be updating
http://wiki.xen.org/wiki/Xen_ARMv7_with_Virtualization_Extensions with
v8 specific info (and renaming the page s/v7//).

Ian.

[-- Attachment #2: config --]
[-- Type: text/plain, Size: 45996 bytes --]

#
# Automatically generated file; DO NOT EDIT.
# Linux/arm 3.8.0-rc3 Kernel Configuration
#
CONFIG_ARM=y
CONFIG_SYS_SUPPORTS_APM_EMULATION=y
CONFIG_GENERIC_GPIO=y
CONFIG_HAVE_PROC_CPU=y
CONFIG_NO_IOPORT=y
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_HAVE_LATENCYTOP_SUPPORT=y
CONFIG_LOCKDEP_SUPPORT=y
CONFIG_TRACE_IRQFLAGS_SUPPORT=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_VECTORS_BASE=0xffff0000
CONFIG_ARM_PATCH_PHYS_VIRT=y
CONFIG_GENERIC_BUG=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_HAVE_IRQ_WORK=y
CONFIG_BUILDTIME_EXTABLE_SORT=y

#
# General setup
#
CONFIG_EXPERIMENTAL=y
CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_CROSS_COMPILE=""
CONFIG_LOCALVERSION=""
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_LZMA=y
CONFIG_HAVE_KERNEL_XZ=y
CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_LZMA is not set
# CONFIG_KERNEL_XZ is not set
# CONFIG_KERNEL_LZO is not set
CONFIG_DEFAULT_HOSTNAME="(none)"
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
# CONFIG_POSIX_MQUEUE is not set
# CONFIG_FHANDLE is not set
# CONFIG_AUDIT is not set
CONFIG_HAVE_GENERIC_HARDIRQS=y

#
# IRQ subsystem
#
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_GENERIC_IRQ_SHOW=y
CONFIG_HARDIRQS_SW_RESEND=y
CONFIG_IRQ_DOMAIN=y
# CONFIG_IRQ_DOMAIN_DEBUG is not set
CONFIG_SPARSE_IRQ=y
CONFIG_KTIME_SCALAR=y
CONFIG_GENERIC_CLOCKEVENTS=y
CONFIG_GENERIC_CLOCKEVENTS_BUILD=y

#
# Timers subsystem
#
CONFIG_TICK_ONESHOT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y

#
# CPU/Task time and stats accounting
#
CONFIG_TICK_CPU_ACCOUNTING=y
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set

#
# RCU Subsystem
#
CONFIG_TINY_RCU=y
# CONFIG_PREEMPT_RCU is not set
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CGROUPS is not set
# CONFIG_CHECKPOINT_RESTORE is not set
CONFIG_NAMESPACES=y
CONFIG_UTS_NS=y
CONFIG_IPC_NS=y
CONFIG_PID_NS=y
CONFIG_NET_NS=y
# CONFIG_SCHED_AUTOGROUP is not set
# CONFIG_SYSFS_DEPRECATED is not set
# CONFIG_RELAY is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
CONFIG_EXPERT=y
CONFIG_HAVE_UID16=y
CONFIG_UID16=y
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_ELF_CORE=y
CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
# CONFIG_EMBEDDED is not set
CONFIG_HAVE_PERF_EVENTS=y
CONFIG_PERF_USE_VMALLOC=y

#
# Kernel Performance Events And Counters
#
# CONFIG_PERF_EVENTS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_JUMP_LABEL is not set
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_DMA_CONTIGUOUS=y
CONFIG_GENERIC_SMP_IDLE_THREAD=y
CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
CONFIG_HAVE_ARCH_JUMP_LABEL=y
CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y
CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
CONFIG_HAVE_MOD_ARCH_SPECIFIC=y
CONFIG_MODULES_USE_ELF_REL=y
CONFIG_CLONE_BACKWARDS=y

#
# GCOV-based kernel profiling
#
# CONFIG_GCOV_KERNEL is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
CONFIG_BASE_SMALL=0
# CONFIG_MODULES is not set
CONFIG_BLOCK=y
CONFIG_LBDAF=y
CONFIG_BLK_DEV_BSG=y
# CONFIG_BLK_DEV_BSGLIB is not set
# CONFIG_BLK_DEV_INTEGRITY is not set

#
# Partition Types
#
CONFIG_PARTITION_ADVANCED=y
# CONFIG_ACORN_PARTITION is not set
# CONFIG_OSF_PARTITION is not set
# CONFIG_AMIGA_PARTITION is not set
# CONFIG_ATARI_PARTITION is not set
# CONFIG_MAC_PARTITION is not set
CONFIG_MSDOS_PARTITION=y
# CONFIG_BSD_DISKLABEL is not set
# CONFIG_MINIX_SUBPARTITION is not set
# CONFIG_SOLARIS_X86_PARTITION is not set
# CONFIG_UNIXWARE_DISKLABEL is not set
# CONFIG_LDM_PARTITION is not set
# CONFIG_SGI_PARTITION is not set
# CONFIG_ULTRIX_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
# CONFIG_KARMA_PARTITION is not set
# CONFIG_EFI_PARTITION is not set
# CONFIG_SYSV68_PARTITION is not set

#
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
# CONFIG_DEFAULT_DEADLINE is not set
CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
CONFIG_INLINE_READ_UNLOCK=y
CONFIG_INLINE_READ_UNLOCK_IRQ=y
CONFIG_INLINE_WRITE_UNLOCK=y
CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
CONFIG_FREEZER=y

#
# System Type
#
CONFIG_MMU=y
CONFIG_ARCH_MULTIPLATFORM=y
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_REALVIEW is not set
# CONFIG_ARCH_VERSATILE is not set
# CONFIG_ARCH_AT91 is not set
# CONFIG_ARCH_BCM2835 is not set
# CONFIG_ARCH_CNS3XXX is not set
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_GEMINI is not set
# CONFIG_ARCH_SIRF is not set
# CONFIG_ARCH_EBSA110 is not set
# CONFIG_ARCH_EP93XX is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_MXS is not set
# CONFIG_ARCH_NETX is not set
# CONFIG_ARCH_H720X is not set
# CONFIG_ARCH_IOP13XX is not set
# CONFIG_ARCH_IOP32X is not set
# CONFIG_ARCH_IOP33X is not set
# CONFIG_ARCH_IXP4XX is not set
# CONFIG_ARCH_DOVE is not set
# CONFIG_ARCH_KIRKWOOD is not set
# CONFIG_ARCH_MV78XX0 is not set
# CONFIG_ARCH_ORION5X is not set
# CONFIG_ARCH_MMP is not set
# CONFIG_ARCH_KS8695 is not set
# CONFIG_ARCH_W90X900 is not set
# CONFIG_ARCH_LPC32XX is not set
# CONFIG_ARCH_TEGRA is not set
# CONFIG_ARCH_PXA is not set
# CONFIG_ARCH_MSM is not set
# CONFIG_ARCH_SHMOBILE is not set
# CONFIG_ARCH_RPC is not set
# CONFIG_ARCH_SA1100 is not set
# CONFIG_ARCH_S3C24XX is not set
# CONFIG_ARCH_S3C64XX is not set
# CONFIG_ARCH_S5P64X0 is not set
# CONFIG_ARCH_S5PC100 is not set
# CONFIG_ARCH_S5PV210 is not set
# CONFIG_ARCH_EXYNOS is not set
# CONFIG_ARCH_SHARK is not set
# CONFIG_ARCH_U300 is not set
# CONFIG_ARCH_U8500 is not set
# CONFIG_ARCH_NOMADIK is not set
# CONFIG_PLAT_SPEAR is not set
# CONFIG_ARCH_DAVINCI is not set
# CONFIG_ARCH_OMAP is not set
# CONFIG_ARCH_VT8500_SINGLE is not set

#
# Multiple platform selection
#

#
# CPU Core family selection
#
# CONFIG_ARCH_MULTI_V6 is not set
CONFIG_ARCH_MULTI_V7=y
CONFIG_ARCH_MULTI_V6_V7=y
# CONFIG_ARCH_MULTI_CPU_AUTO is not set
# CONFIG_ARCH_MVEBU is not set
# CONFIG_ARCH_BCM is not set
# CONFIG_GPIO_PCA953X is not set
# CONFIG_KEYBOARD_GPIO_POLLED is not set
# CONFIG_ARCH_HIGHBANK is not set
# CONFIG_ARCH_MXC is not set
# CONFIG_ARCH_SOCFPGA is not set
# CONFIG_ARCH_SUNXI is not set
CONFIG_ARCH_VEXPRESS=y

#
# Versatile Express platform type
#
CONFIG_ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA=y
# CONFIG_ARCH_VEXPRESS_CA9X4 is not set
CONFIG_PLAT_VERSATILE_CLCD=y
CONFIG_PLAT_VERSATILE_SCHED_CLOCK=y
# CONFIG_ARCH_VT8500 is not set
# CONFIG_ARCH_ZYNQ is not set
CONFIG_PLAT_VERSATILE=y
CONFIG_ARM_TIMER_SP804=y

#
# Processor Type
#
CONFIG_CPU_V7=y
CONFIG_CPU_32v6K=y
CONFIG_CPU_32v7=y
CONFIG_CPU_ABRT_EV7=y
CONFIG_CPU_PABRT_V7=y
CONFIG_CPU_CACHE_V7=y
CONFIG_CPU_CACHE_VIPT=y
CONFIG_CPU_COPY_V6=y
CONFIG_CPU_TLB_V7=y
CONFIG_CPU_HAS_ASID=y
CONFIG_CPU_CP15=y
CONFIG_CPU_CP15_MMU=y

#
# Processor Features
#
# CONFIG_ARM_LPAE is not set
# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
CONFIG_ARM_THUMB=y
# CONFIG_ARM_THUMBEE is not set
# CONFIG_ARM_VIRT_EXT is not set
# CONFIG_SWP_EMULATE is not set
# CONFIG_CPU_ICACHE_DISABLE is not set
# CONFIG_CPU_DCACHE_DISABLE is not set
# CONFIG_CPU_BPREDICT_DISABLE is not set
CONFIG_MIGHT_HAVE_CACHE_L2X0=y
# CONFIG_CACHE_L2X0 is not set
CONFIG_ARM_L1_CACHE_SHIFT_6=y
CONFIG_ARM_L1_CACHE_SHIFT=6
CONFIG_ARM_DMA_MEM_BUFFERABLE=y
CONFIG_ARM_NR_BANKS=8
CONFIG_MULTI_IRQ_HANDLER=y
# CONFIG_ARM_ERRATA_430973 is not set
CONFIG_ARM_ERRATA_720789=y
# CONFIG_ARM_ERRATA_754322 is not set
# CONFIG_ARM_ERRATA_775420 is not set
CONFIG_ARM_GIC=y
CONFIG_ICST=y

#
# Bus support
#
CONFIG_ARM_AMBA=y
# CONFIG_PCI_SYSCALL is not set
# CONFIG_PCCARD is not set

#
# Kernel Features
#
CONFIG_HAVE_SMP=y
# CONFIG_SMP is not set
CONFIG_ARM_ARCH_TIMER=y
CONFIG_VMSPLIT_3G=y
# CONFIG_VMSPLIT_2G is not set
# CONFIG_VMSPLIT_1G is not set
CONFIG_PAGE_OFFSET=0xC0000000
CONFIG_ARCH_NR_GPIO=0
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
CONFIG_HZ=100
# CONFIG_THUMB2_KERNEL is not set
CONFIG_AEABI=y
CONFIG_OABI_COMPAT=y
# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
CONFIG_HAVE_ARCH_PFN_VALID=y
CONFIG_HIGHMEM=y
CONFIG_HIGHPTE=y
CONFIG_SELECT_MEMORY_MODEL=y
CONFIG_FLATMEM_MANUAL=y
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_HAVE_MEMBLOCK=y
CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_COMPACTION is not set
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
CONFIG_BOUNCE=y
CONFIG_VIRT_TO_BUS=y
CONFIG_MMU_NOTIFIER=y
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_CROSS_MEMORY_ATTACH=y
CONFIG_NEED_PER_CPU_KM=y
# CONFIG_CLEANCACHE is not set
# CONFIG_FRONTSWAP is not set
CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_ALIGNMENT_TRAP=y
# CONFIG_UACCESS_WITH_MEMCPY is not set
# CONFIG_SECCOMP is not set
# CONFIG_CC_STACKPROTECTOR is not set
CONFIG_XEN_DOM0=y
CONFIG_XEN=y

#
# Boot options
#
CONFIG_USE_OF=y
CONFIG_ATAGS=y
# CONFIG_DEPRECATED_PARAM_STRUCT is not set
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_ARM_APPENDED_DTB=y
# CONFIG_ARM_ATAG_DTB_COMPAT is not set
CONFIG_CMDLINE="earlyprintk=xenboot console=ttyAMA1 root=/dev/mmcblk0 debug rw init=/bin/bash"
CONFIG_CMDLINE_FROM_BOOTLOADER=y
# CONFIG_CMDLINE_EXTEND is not set
# CONFIG_CMDLINE_FORCE is not set
# CONFIG_KEXEC is not set
# CONFIG_CRASH_DUMP is not set
CONFIG_AUTO_ZRELADDR=y

#
# CPU Power Management
#
# CONFIG_CPU_IDLE is not set
# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set

#
# Floating point emulation
#

#
# At least one emulation must be selected
#
# CONFIG_FPE_NWFPE is not set
# CONFIG_FPE_FASTFPE is not set
CONFIG_VFP=y
CONFIG_VFPv3=y
CONFIG_NEON=y

#
# Userspace binary formats
#
CONFIG_BINFMT_ELF=y
CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y
CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
CONFIG_HAVE_AOUT=y
CONFIG_BINFMT_AOUT=y
CONFIG_BINFMT_MISC=y
CONFIG_COREDUMP=y

#
# Power management options
#
CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
CONFIG_PM_SLEEP=y
# CONFIG_PM_AUTOSLEEP is not set
# CONFIG_PM_WAKELOCKS is not set
# CONFIG_PM_RUNTIME is not set
CONFIG_PM=y
# CONFIG_PM_DEBUG is not set
# CONFIG_APM_EMULATION is not set
CONFIG_PM_CLK=y
CONFIG_CPU_PM=y
CONFIG_ARCH_SUSPEND_POSSIBLE=y
CONFIG_ARM_CPU_SUSPEND=y
CONFIG_NET=y

#
# Networking options
#
CONFIG_PACKET=y
# CONFIG_PACKET_DIAG is not set
CONFIG_UNIX=y
# CONFIG_UNIX_DIAG is not set
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
# CONFIG_XFRM_SUB_POLICY is not set
# CONFIG_XFRM_MIGRATE is not set
# CONFIG_XFRM_STATISTICS is not set
# CONFIG_NET_KEY is not set
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
# CONFIG_IP_ADVANCED_ROUTER is not set
CONFIG_IP_PNP=y
# CONFIG_IP_PNP_DHCP is not set
CONFIG_IP_PNP_BOOTP=y
# CONFIG_IP_PNP_RARP is not set
# CONFIG_NET_IPIP is not set
# CONFIG_NET_IPGRE_DEMUX is not set
# CONFIG_IP_MROUTE is not set
# CONFIG_ARPD is not set
# CONFIG_SYN_COOKIES is not set
# CONFIG_NET_IPVTI is not set
# CONFIG_INET_AH is not set
# CONFIG_INET_ESP is not set
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
CONFIG_INET_XFRM_MODE_TRANSPORT=y
CONFIG_INET_XFRM_MODE_TUNNEL=y
CONFIG_INET_XFRM_MODE_BEET=y
CONFIG_INET_LRO=y
# CONFIG_INET_DIAG is not set
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_TCP_MD5SIG is not set
# CONFIG_IPV6 is not set
# CONFIG_NETWORK_SECMARK is not set
# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_L2TP is not set
CONFIG_STP=y
CONFIG_BRIDGE=y
CONFIG_BRIDGE_IGMP_SNOOPING=y
CONFIG_HAVE_NET_DSA=y
# CONFIG_VLAN_8021Q is not set
# CONFIG_DECNET is not set
CONFIG_LLC=y
# CONFIG_LLC2 is not set
# CONFIG_IPX is not set
# CONFIG_ATALK is not set
# CONFIG_X25 is not set
# CONFIG_LAPB is not set
# CONFIG_WAN_ROUTER is not set
# CONFIG_PHONET is not set
# CONFIG_IEEE802154 is not set
# CONFIG_NET_SCHED is not set
# CONFIG_DCB is not set
# CONFIG_BATMAN_ADV is not set
# CONFIG_OPENVSWITCH is not set
CONFIG_BQL=y

#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
# CONFIG_LIB80211 is not set

#
# CFG80211 needs to be enabled for MAC80211
#
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
# CONFIG_CAIF is not set
# CONFIG_CEPH_LIB is not set
# CONFIG_NFC is not set
CONFIG_HAVE_BPF_JIT=y

#
# Device Drivers
#

#
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH=""
# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
CONFIG_FIRMWARE_IN_KERNEL=y
CONFIG_EXTRA_FIRMWARE=""
# CONFIG_DEBUG_DRIVER is not set
# CONFIG_DEBUG_DEVRES is not set
CONFIG_SYS_HYPERVISOR=y
# CONFIG_GENERIC_CPU_DEVICES is not set
# CONFIG_DMA_SHARED_BUFFER is not set
# CONFIG_CMA is not set

#
# Bus devices
#
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_REDBOOT_PARTS is not set
CONFIG_MTD_CMDLINE_PARTS=y
# CONFIG_MTD_AFS_PARTS is not set
# CONFIG_MTD_OF_PARTS is not set
# CONFIG_MTD_AR7_PARTS is not set

#
# User Modules And Translation Layers
#
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLKDEVS=y
CONFIG_MTD_BLOCK=y
# CONFIG_FTL is not set
# CONFIG_NFTL is not set
# CONFIG_INFTL is not set
# CONFIG_RFD_FTL is not set
# CONFIG_SSFDC is not set
# CONFIG_SM_FTL is not set
# CONFIG_MTD_OOPS is not set
# CONFIG_MTD_SWAP is not set

#
# RAM/ROM/Flash chip drivers
#
CONFIG_MTD_CFI=y
# CONFIG_MTD_JEDECPROBE is not set
CONFIG_MTD_GEN_PROBE=y
CONFIG_MTD_CFI_ADV_OPTIONS=y
CONFIG_MTD_CFI_NOSWAP=y
# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set
# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set
# CONFIG_MTD_CFI_GEOMETRY is not set
CONFIG_MTD_MAP_BANK_WIDTH_1=y
CONFIG_MTD_MAP_BANK_WIDTH_2=y
CONFIG_MTD_MAP_BANK_WIDTH_4=y
# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
CONFIG_MTD_CFI_I1=y
CONFIG_MTD_CFI_I2=y
# CONFIG_MTD_CFI_I4 is not set
# CONFIG_MTD_CFI_I8 is not set
# CONFIG_MTD_OTP is not set
CONFIG_MTD_CFI_INTELEXT=y
# CONFIG_MTD_CFI_AMDSTD is not set
# CONFIG_MTD_CFI_STAA is not set
CONFIG_MTD_CFI_UTIL=y
# CONFIG_MTD_RAM is not set
# CONFIG_MTD_ROM is not set
# CONFIG_MTD_ABSENT is not set

#
# Mapping drivers for chip access
#
# CONFIG_MTD_COMPLEX_MAPPINGS is not set
# CONFIG_MTD_PHYSMAP is not set
# CONFIG_MTD_PHYSMAP_OF is not set
# CONFIG_MTD_PLATRAM is not set

#
# Self-contained MTD device drivers
#
# CONFIG_MTD_SLRAM is not set
# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
# CONFIG_MTD_BLOCK2MTD is not set

#
# Disk-On-Chip Device Drivers
#
# CONFIG_MTD_DOCG3 is not set
# CONFIG_MTD_NAND is not set
# CONFIG_MTD_ONENAND is not set

#
# LPDDR flash memory drivers
#
# CONFIG_MTD_LPDDR is not set
# CONFIG_MTD_UBI is not set
CONFIG_DTC=y
CONFIG_OF=y

#
# Device Tree and Open Firmware support
#
CONFIG_PROC_DEVICETREE=y
# CONFIG_OF_SELFTEST is not set
CONFIG_OF_FLATTREE=y
CONFIG_OF_EARLY_FLATTREE=y
CONFIG_OF_ADDRESS=y
CONFIG_OF_IRQ=y
CONFIG_OF_DEVICE=y
CONFIG_OF_I2C=y
CONFIG_OF_NET=y
CONFIG_OF_MDIO=y
CONFIG_OF_MTD=y
# CONFIG_PARPORT is not set
CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
# CONFIG_BLK_DEV_CRYPTOLOOP is not set
# CONFIG_BLK_DEV_DRBD is not set
# CONFIG_BLK_DEV_NBD is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=4096
# CONFIG_BLK_DEV_XIP is not set
# CONFIG_CDROM_PKTCDVD is not set
# CONFIG_ATA_OVER_ETH is not set
# CONFIG_MG_DISK is not set
CONFIG_XEN_BLKDEV_FRONTEND=y
CONFIG_XEN_BLKDEV_BACKEND=y
# CONFIG_BLK_DEV_RBD is not set

#
# Misc devices
#
# CONFIG_SENSORS_LIS3LV02D is not set
# CONFIG_AD525X_DPOT is not set
# CONFIG_ATMEL_PWM is not set
# CONFIG_ICS932S401 is not set
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_APDS9802ALS is not set
# CONFIG_ISL29003 is not set
# CONFIG_ISL29020 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_SENSORS_BH1780 is not set
# CONFIG_SENSORS_BH1770 is not set
# CONFIG_SENSORS_APDS990X is not set
# CONFIG_HMC6352 is not set
# CONFIG_DS1682 is not set
# CONFIG_ARM_CHARLCD is not set
# CONFIG_BMP085_I2C is not set
# CONFIG_USB_SWITCH_FSA9480 is not set
# CONFIG_C2PORT is not set

#
# EEPROM support
#
# CONFIG_EEPROM_AT24 is not set
# CONFIG_EEPROM_LEGACY is not set
# CONFIG_EEPROM_MAX6875 is not set
# CONFIG_EEPROM_93CX6 is not set

#
# Texas Instruments shared transport line discipline
#
# CONFIG_TI_ST is not set
# CONFIG_SENSORS_LIS3_I2C is not set

#
# Altera FPGA firmware download module
#
# CONFIG_ALTERA_STAPL is not set

#
# SCSI device support
#
CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
# CONFIG_SCSI_TGT is not set
# CONFIG_SCSI_NETLINK is not set
CONFIG_SCSI_PROC_FS=y

#
# SCSI support type (disk, tape, CD-ROM)
#
CONFIG_BLK_DEV_SD=y
# CONFIG_CHR_DEV_ST is not set
# CONFIG_CHR_DEV_OSST is not set
# CONFIG_BLK_DEV_SR is not set
# CONFIG_CHR_DEV_SG is not set
# CONFIG_CHR_DEV_SCH is not set
# CONFIG_SCSI_MULTI_LUN is not set
# CONFIG_SCSI_CONSTANTS is not set
# CONFIG_SCSI_LOGGING is not set
# CONFIG_SCSI_SCAN_ASYNC is not set

#
# SCSI Transports
#
# CONFIG_SCSI_SPI_ATTRS is not set
# CONFIG_SCSI_FC_ATTRS is not set
# CONFIG_SCSI_ISCSI_ATTRS is not set
# CONFIG_SCSI_SAS_ATTRS is not set
# CONFIG_SCSI_SAS_LIBSAS is not set
# CONFIG_SCSI_SRP_ATTRS is not set
CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ISCSI_TCP is not set
# CONFIG_ISCSI_BOOT_SYSFS is not set
# CONFIG_LIBFC is not set
# CONFIG_LIBFCOE is not set
# CONFIG_SCSI_DEBUG is not set
# CONFIG_SCSI_DH is not set
# CONFIG_SCSI_OSD_INITIATOR is not set
CONFIG_HAVE_PATA_PLATFORM=y
CONFIG_ATA=y
# CONFIG_ATA_NONSTANDARD is not set
CONFIG_ATA_VERBOSE_ERROR=y
CONFIG_SATA_PMP=y

#
# Controllers with non-SFF native interface
#
# CONFIG_SATA_AHCI_PLATFORM is not set
CONFIG_ATA_SFF=y

#
# SFF controllers with custom DMA interface
#
# CONFIG_ATA_BMDMA is not set

#
# PIO-only SFF controllers
#
# CONFIG_PATA_PLATFORM is not set

#
# Generic fallback / legacy drivers
#
# CONFIG_MD is not set
# CONFIG_TARGET_CORE is not set
CONFIG_NETDEVICES=y
CONFIG_NET_CORE=y
# CONFIG_BONDING is not set
# CONFIG_DUMMY is not set
# CONFIG_EQUALIZER is not set
CONFIG_MII=y
# CONFIG_NET_TEAM is not set
# CONFIG_MACVLAN is not set
# CONFIG_VXLAN is not set
# CONFIG_NETCONSOLE is not set
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
CONFIG_TUN=y
# CONFIG_VETH is not set

#
# CAIF transport drivers
#

#
# Distributed Switch Architecture drivers
#
# CONFIG_NET_DSA_MV88E6XXX is not set
# CONFIG_NET_DSA_MV88E6060 is not set
# CONFIG_NET_DSA_MV88E6XXX_NEED_PPU is not set
# CONFIG_NET_DSA_MV88E6131 is not set
# CONFIG_NET_DSA_MV88E6123_61_65 is not set
CONFIG_ETHERNET=y
CONFIG_NET_CADENCE=y
# CONFIG_ARM_AT91_ETHER is not set
# CONFIG_MACB is not set
CONFIG_NET_VENDOR_BROADCOM=y
# CONFIG_B44 is not set
# CONFIG_NET_CALXEDA_XGMAC is not set
CONFIG_NET_VENDOR_CIRRUS=y
# CONFIG_CS89x0 is not set
# CONFIG_DM9000 is not set
# CONFIG_DNET is not set
CONFIG_NET_VENDOR_FARADAY=y
# CONFIG_FTMAC100 is not set
# CONFIG_FTGMAC100 is not set
CONFIG_NET_VENDOR_INTEL=y
CONFIG_NET_VENDOR_I825XX=y
CONFIG_NET_VENDOR_MARVELL=y
# CONFIG_MVMDIO is not set
CONFIG_NET_VENDOR_MICREL=y
# CONFIG_KS8851_MLL is not set
CONFIG_NET_VENDOR_NATSEMI=y
CONFIG_NET_VENDOR_8390=y
# CONFIG_AX88796 is not set
# CONFIG_ETHOC is not set
CONFIG_NET_VENDOR_SEEQ=y
# CONFIG_SEEQ8005 is not set
CONFIG_NET_VENDOR_SMSC=y
CONFIG_SMC91X=y
CONFIG_SMC911X=y
CONFIG_SMSC911X=y
# CONFIG_SMSC911X_ARCH_HOOKS is not set
CONFIG_NET_VENDOR_STMICRO=y
# CONFIG_STMMAC_ETH is not set
CONFIG_NET_VENDOR_WIZNET=y
# CONFIG_WIZNET_W5100 is not set
# CONFIG_WIZNET_W5300 is not set
CONFIG_PHYLIB=y

#
# MII PHY device drivers
#
# CONFIG_AT803X_PHY is not set
# CONFIG_AMD_PHY is not set
# CONFIG_MARVELL_PHY is not set
# CONFIG_DAVICOM_PHY is not set
# CONFIG_QSEMI_PHY is not set
# CONFIG_LXT_PHY is not set
# CONFIG_CICADA_PHY is not set
# CONFIG_VITESSE_PHY is not set
CONFIG_SMSC_PHY=y
# CONFIG_BROADCOM_PHY is not set
# CONFIG_BCM87XX_PHY is not set
# CONFIG_ICPLUS_PHY is not set
# CONFIG_REALTEK_PHY is not set
# CONFIG_NATIONAL_PHY is not set
# CONFIG_STE10XP is not set
# CONFIG_LSI_ET1011C_PHY is not set
# CONFIG_MICREL_PHY is not set
# CONFIG_FIXED_PHY is not set
# CONFIG_MDIO_BITBANG is not set
# CONFIG_MDIO_BUS_MUX_GPIO is not set
# CONFIG_MDIO_BUS_MUX_MMIOREG is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
CONFIG_WLAN=y
# CONFIG_HOSTAP is not set
# CONFIG_WL_TI is not set

#
# Enable WiMAX (Networking options) to see the WiMAX drivers
#
# CONFIG_WAN is not set
CONFIG_XEN_NETDEV_FRONTEND=y
CONFIG_XEN_NETDEV_BACKEND=y
# CONFIG_ISDN is not set

#
# Input device support
#
CONFIG_INPUT=y
# CONFIG_INPUT_FF_MEMLESS is not set
# CONFIG_INPUT_POLLDEV is not set
# CONFIG_INPUT_SPARSEKMAP is not set
# CONFIG_INPUT_MATRIXKMAP is not set

#
# Userland interfaces
#
CONFIG_INPUT_MOUSEDEV=y
CONFIG_INPUT_MOUSEDEV_PSAUX=y
CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
# CONFIG_INPUT_JOYDEV is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_EVBUG is not set

#
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
# CONFIG_KEYBOARD_ADP5588 is not set
# CONFIG_KEYBOARD_ADP5589 is not set
CONFIG_KEYBOARD_ATKBD=y
# CONFIG_KEYBOARD_QT1070 is not set
# CONFIG_KEYBOARD_QT2160 is not set
# CONFIG_KEYBOARD_LKKBD is not set
# CONFIG_KEYBOARD_GPIO is not set
# CONFIG_KEYBOARD_TCA6416 is not set
# CONFIG_KEYBOARD_TCA8418 is not set
# CONFIG_KEYBOARD_MATRIX is not set
# CONFIG_KEYBOARD_LM8333 is not set
# CONFIG_KEYBOARD_MAX7359 is not set
# CONFIG_KEYBOARD_MCS is not set
# CONFIG_KEYBOARD_MPR121 is not set
# CONFIG_KEYBOARD_NEWTON is not set
# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_SAMSUNG is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
# CONFIG_KEYBOARD_SUNKBD is not set
# CONFIG_KEYBOARD_XTKBD is not set
CONFIG_INPUT_MOUSE=y
CONFIG_MOUSE_PS2=y
CONFIG_MOUSE_PS2_ALPS=y
CONFIG_MOUSE_PS2_LOGIPS2PP=y
CONFIG_MOUSE_PS2_SYNAPTICS=y
CONFIG_MOUSE_PS2_TRACKPOINT=y
# CONFIG_MOUSE_PS2_ELANTECH is not set
# CONFIG_MOUSE_PS2_SENTELIC is not set
# CONFIG_MOUSE_PS2_TOUCHKIT is not set
# CONFIG_MOUSE_SERIAL is not set
# CONFIG_MOUSE_APPLETOUCH is not set
# CONFIG_MOUSE_BCM5974 is not set
# CONFIG_MOUSE_VSXXXAA is not set
# CONFIG_MOUSE_GPIO is not set
# CONFIG_MOUSE_SYNAPTICS_I2C is not set
# CONFIG_MOUSE_SYNAPTICS_USB is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
# CONFIG_INPUT_MISC is not set

#
# Hardware I/O ports
#
CONFIG_SERIO=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_SERIO_AMBAKMI=y
CONFIG_SERIO_LIBPS2=y
# CONFIG_SERIO_RAW is not set
# CONFIG_SERIO_ALTERA_PS2 is not set
# CONFIG_SERIO_PS2MULT is not set
# CONFIG_SERIO_ARC_PS2 is not set
# CONFIG_GAMEPORT is not set

#
# Character devices
#
CONFIG_VT=y
CONFIG_CONSOLE_TRANSLATIONS=y
CONFIG_VT_CONSOLE=y
CONFIG_VT_CONSOLE_SLEEP=y
CONFIG_HW_CONSOLE=y
# CONFIG_VT_HW_CONSOLE_BINDING is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=16
# CONFIG_SERIAL_NONSTANDARD is not set
# CONFIG_N_GSM is not set
# CONFIG_TRACE_SINK is not set
CONFIG_DEVKMEM=y

#
# Serial drivers
#
CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_CONSOLE is not set
CONFIG_SERIAL_8250_NR_UARTS=4
CONFIG_SERIAL_8250_RUNTIME_UARTS=4
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
# CONFIG_SERIAL_8250_DETECT_IRQ is not set
CONFIG_SERIAL_8250_RSA=y
# CONFIG_SERIAL_8250_DW is not set
# CONFIG_SERIAL_8250_EM is not set

#
# Non-8250 serial port support
#
# CONFIG_SERIAL_AMBA_PL010 is not set
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_OF_PLATFORM is not set
# CONFIG_SERIAL_SCCNXP is not set
# CONFIG_SERIAL_TIMBERDALE is not set
# CONFIG_SERIAL_ALTERA_JTAGUART is not set
# CONFIG_SERIAL_ALTERA_UART is not set
# CONFIG_SERIAL_XILINX_PS_UART is not set
# CONFIG_SERIAL_ARC is not set
# CONFIG_TTY_PRINTK is not set
CONFIG_HVC_DRIVER=y
CONFIG_HVC_IRQ=y
CONFIG_HVC_XEN=y
CONFIG_HVC_XEN_FRONTEND=y
# CONFIG_HVC_DCC is not set
# CONFIG_IPMI_HANDLER is not set
CONFIG_HW_RANDOM=y
# CONFIG_HW_RANDOM_TIMERIOMEM is not set
# CONFIG_HW_RANDOM_ATMEL is not set
# CONFIG_HW_RANDOM_EXYNOS is not set
# CONFIG_R3964 is not set
# CONFIG_RAW_DRIVER is not set
# CONFIG_TCG_TPM is not set
CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=y
# CONFIG_I2C_MUX is not set
CONFIG_I2C_HELPER_AUTO=y

#
# I2C Hardware Bus support
#

#
# I2C system bus drivers (mostly embedded / system-on-chip)
#
# CONFIG_I2C_CBUS_GPIO is not set
# CONFIG_I2C_DESIGNWARE_PLATFORM is not set
# CONFIG_I2C_GPIO is not set
# CONFIG_I2C_NOMADIK is not set
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_PXA_PCI is not set
# CONFIG_I2C_SIMTEC is not set
# CONFIG_I2C_VERSATILE is not set
# CONFIG_I2C_XILINX is not set

#
# External I2C/SMBus adapter drivers
#
# CONFIG_I2C_PARPORT_LIGHT is not set
# CONFIG_I2C_TAOS_EVM is not set

#
# Other I2C/SMBus bus drivers
#
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_SPI is not set
# CONFIG_HSI is not set

#
# PPS support
#
# CONFIG_PPS is not set

#
# PPS generators support
#

#
# PTP clock support
#
# CONFIG_PTP_1588_CLOCK is not set

#
# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks.
#
# CONFIG_PTP_1588_CLOCK_PCH is not set
CONFIG_ARCH_HAVE_CUSTOM_GPIO_H=y
CONFIG_ARCH_REQUIRE_GPIOLIB=y
CONFIG_GPIOLIB=y
CONFIG_OF_GPIO=y
# CONFIG_DEBUG_GPIO is not set
# CONFIG_GPIO_SYSFS is not set

#
# Memory mapped GPIO drivers:
#
# CONFIG_GPIO_GENERIC_PLATFORM is not set
# CONFIG_GPIO_EM is not set
# CONFIG_GPIO_PL061 is not set
# CONFIG_GPIO_TS5500 is not set

#
# I2C GPIO expanders:
#
# CONFIG_GPIO_MAX7300 is not set
# CONFIG_GPIO_MAX732X is not set
# CONFIG_GPIO_PCF857X is not set
# CONFIG_GPIO_SX150X is not set
# CONFIG_GPIO_ADP5588 is not set
# CONFIG_GPIO_ADNP is not set

#
# PCI GPIO expanders:
#

#
# SPI GPIO expanders:
#
# CONFIG_GPIO_MCP23S08 is not set

#
# AC97 GPIO expanders:
#

#
# MODULbus GPIO expanders:
#

#
# USB GPIO expanders:
#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_POWER_AVS is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y

#
# Sonics Silicon Backplane
#
# CONFIG_SSB is not set
CONFIG_BCMA_POSSIBLE=y

#
# Broadcom specific AMBA
#
# CONFIG_BCMA is not set

#
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
# CONFIG_MFD_88PM860X is not set
# CONFIG_MFD_88PM800 is not set
# CONFIG_MFD_88PM805 is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_ASIC3 is not set
# CONFIG_MFD_TI_AM335X_TSCADC is not set
# CONFIG_HTC_EGPIO is not set
# CONFIG_HTC_PASIC3 is not set
# CONFIG_HTC_I2CPLD is not set
# CONFIG_UCB1400_CORE is not set
# CONFIG_MFD_LM3533 is not set
# CONFIG_TPS6105X is not set
# CONFIG_TPS65010 is not set
# CONFIG_TPS6507X is not set
# CONFIG_MFD_TPS65217 is not set
# CONFIG_MFD_TPS6586X is not set
# CONFIG_MFD_TPS65910 is not set
# CONFIG_MFD_TPS65912_I2C is not set
# CONFIG_MFD_TPS80031 is not set
# CONFIG_TWL4030_CORE is not set
# CONFIG_TWL6040_CORE is not set
# CONFIG_MFD_STMPE is not set
# CONFIG_MFD_TC3589X is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_MFD_T7L66XB is not set
# CONFIG_MFD_SMSC is not set
# CONFIG_MFD_TC6387XB is not set
# CONFIG_MFD_TC6393XB is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_MFD_DA9052_I2C is not set
# CONFIG_MFD_DA9055 is not set
# CONFIG_PMIC_ADP5520 is not set
# CONFIG_MFD_LP8788 is not set
# CONFIG_MFD_MAX77686 is not set
# CONFIG_MFD_MAX77693 is not set
# CONFIG_MFD_MAX8907 is not set
# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_MAX8997 is not set
# CONFIG_MFD_MAX8998 is not set
# CONFIG_MFD_SEC_CORE is not set
# CONFIG_MFD_ARIZONA_I2C is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X_I2C is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_MFD_MC13XXX_I2C is not set
# CONFIG_ABX500_CORE is not set
# CONFIG_MFD_WL1273_CORE is not set
# CONFIG_MFD_TPS65090 is not set
# CONFIG_MFD_AAT2870_CORE is not set
# CONFIG_MFD_RC5T583 is not set
# CONFIG_MFD_SYSCON is not set
# CONFIG_MFD_PALMAS is not set
# CONFIG_MFD_RETU is not set
# CONFIG_MFD_AS3711 is not set
CONFIG_VEXPRESS_CONFIG=y
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set

#
# Graphics support
#
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
CONFIG_FB=y
# CONFIG_FIRMWARE_EDID is not set
# CONFIG_FB_DDC is not set
# CONFIG_FB_BOOT_VESA_SUPPORT is not set
CONFIG_FB_CFB_FILLRECT=y
CONFIG_FB_CFB_COPYAREA=y
CONFIG_FB_CFB_IMAGEBLIT=y
# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
# CONFIG_FB_SYS_FILLRECT is not set
# CONFIG_FB_SYS_COPYAREA is not set
# CONFIG_FB_SYS_IMAGEBLIT is not set
# CONFIG_FB_FOREIGN_ENDIAN is not set
# CONFIG_FB_SYS_FOPS is not set
# CONFIG_FB_WMT_GE_ROPS is not set
# CONFIG_FB_SVGALIB is not set
# CONFIG_FB_MACMODES is not set
# CONFIG_FB_BACKLIGHT is not set
# CONFIG_FB_MODE_HELPERS is not set
# CONFIG_FB_TILEBLITTING is not set

#
# Frame buffer hardware drivers
#
CONFIG_FB_ARMCLCD=y
# CONFIG_FB_S1D13XXX is not set
# CONFIG_FB_VIRTUAL is not set
# CONFIG_XEN_FBDEV_FRONTEND is not set
# CONFIG_FB_METRONOME is not set
# CONFIG_FB_BROADSHEET is not set
# CONFIG_FB_AUO_K190X is not set
# CONFIG_EXYNOS_VIDEO is not set
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set

#
# Console display driver support
#
CONFIG_DUMMY_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE=y
# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
CONFIG_FONTS=y
# CONFIG_FONT_8x8 is not set
# CONFIG_FONT_8x16 is not set
# CONFIG_FONT_6x11 is not set
# CONFIG_FONT_7x14 is not set
# CONFIG_FONT_PEARL_8x8 is not set
CONFIG_FONT_ACORN_8x8=y
# CONFIG_FONT_MINI_4x6 is not set
# CONFIG_FONT_SUN8x16 is not set
# CONFIG_FONT_SUN12x22 is not set
# CONFIG_FONT_10x18 is not set
# CONFIG_LOGO is not set
# CONFIG_FB_SSD1307 is not set
CONFIG_SOUND=y
CONFIG_SOUND_OSS_CORE=y
CONFIG_SOUND_OSS_CORE_PRECLAIM=y
CONFIG_SND=y
CONFIG_SND_TIMER=y
CONFIG_SND_PCM=y
# CONFIG_SND_SEQUENCER is not set
CONFIG_SND_OSSEMUL=y
CONFIG_SND_MIXER_OSS=y
CONFIG_SND_PCM_OSS=y
CONFIG_SND_PCM_OSS_PLUGINS=y
# CONFIG_SND_HRTIMER is not set
# CONFIG_SND_DYNAMIC_MINORS is not set
CONFIG_SND_SUPPORT_OLD_API=y
CONFIG_SND_VERBOSE_PROCFS=y
# CONFIG_SND_VERBOSE_PRINTK is not set
# CONFIG_SND_DEBUG is not set
CONFIG_SND_VMASTER=y
# CONFIG_SND_RAWMIDI_SEQ is not set
# CONFIG_SND_OPL3_LIB_SEQ is not set
# CONFIG_SND_OPL4_LIB_SEQ is not set
# CONFIG_SND_SBAWE_SEQ is not set
# CONFIG_SND_EMU10K1_SEQ is not set
CONFIG_SND_AC97_CODEC=y
CONFIG_SND_DRIVERS=y
# CONFIG_SND_DUMMY is not set
# CONFIG_SND_ALOOP is not set
# CONFIG_SND_MTPAV is not set
# CONFIG_SND_SERIAL_U16550 is not set
# CONFIG_SND_MPU401 is not set
# CONFIG_SND_AC97_POWER_SAVE is not set
CONFIG_SND_ARM=y
CONFIG_SND_ARMAACI=y
# CONFIG_SND_SOC is not set
# CONFIG_SOUND_PRIME is not set
CONFIG_AC97_BUS=y

#
# HID support
#
CONFIG_HID=y
# CONFIG_HIDRAW is not set
# CONFIG_UHID is not set
CONFIG_HID_GENERIC=y

#
# Special HID drivers
#

#
# I2C HID support
#
# CONFIG_I2C_HID is not set
# CONFIG_USB_ARCH_HAS_OHCI is not set
# CONFIG_USB_ARCH_HAS_EHCI is not set
# CONFIG_USB_ARCH_HAS_XHCI is not set
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
# CONFIG_USB is not set
# CONFIG_USB_OTG_WHITELIST is not set
# CONFIG_USB_OTG_BLACKLIST_HUB is not set

#
# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
# CONFIG_USB_GADGET is not set

#
# OTG and related infrastructure
#
CONFIG_MMC=y
# CONFIG_MMC_DEBUG is not set
# CONFIG_MMC_UNSAFE_RESUME is not set
# CONFIG_MMC_CLKGATE is not set

#
# MMC/SD/SDIO Card Drivers
#
CONFIG_MMC_BLOCK=y
CONFIG_MMC_BLOCK_MINORS=8
CONFIG_MMC_BLOCK_BOUNCE=y
# CONFIG_SDIO_UART is not set
# CONFIG_MMC_TEST is not set

#
# MMC/SD/SDIO Host Controller Drivers
#
CONFIG_MMC_ARMMMCI=y
# CONFIG_MMC_SDHCI is not set
# CONFIG_MMC_SDHCI_PXAV3 is not set
# CONFIG_MMC_SDHCI_PXAV2 is not set
# CONFIG_MMC_DW is not set
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
# CONFIG_ACCESSIBILITY is not set
# CONFIG_EDAC is not set
CONFIG_RTC_LIB=y
# CONFIG_RTC_CLASS is not set
# CONFIG_DMADEVICES is not set
# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set

#
# Virtio drivers
#
# CONFIG_VIRTIO_MMIO is not set

#
# Microsoft Hyper-V guest support
#

#
# Xen driver support
#
CONFIG_XEN_DEV_EVTCHN=y
CONFIG_XEN_BACKEND=y
CONFIG_XENFS=y
CONFIG_XEN_COMPAT_XENFS=y
CONFIG_XEN_SYS_HYPERVISOR=y
CONFIG_XEN_XENBUS_FRONTEND=y
CONFIG_XEN_GNTDEV=y
# CONFIG_XEN_GRANT_DEV_ALLOC is not set
CONFIG_XEN_PRIVCMD=y
# CONFIG_STAGING is not set
CONFIG_CLKDEV_LOOKUP=y
CONFIG_HAVE_CLK_PREPARE=y
CONFIG_COMMON_CLK=y

#
# Common Clock Framework
#
# CONFIG_COMMON_CLK_DEBUG is not set
CONFIG_COMMON_CLK_VERSATILE=y

#
# Hardware Spinlock drivers
#
CONFIG_CLKSRC_MMIO=y
CONFIG_IOMMU_SUPPORT=y
CONFIG_OF_IOMMU=y

#
# Remoteproc drivers (EXPERIMENTAL)
#
# CONFIG_STE_MODEM_RPROC is not set

#
# Rpmsg drivers (EXPERIMENTAL)
#
# CONFIG_VIRT_DRIVERS is not set
# CONFIG_PM_DEVFREQ is not set
# CONFIG_EXTCON is not set
# CONFIG_MEMORY is not set
# CONFIG_IIO is not set
# CONFIG_PWM is not set
# CONFIG_IPACK_BUS is not set

#
# File systems
#
CONFIG_DCACHE_WORD_ACCESS=y
CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XATTR is not set
# CONFIG_EXT2_FS_XIP is not set
CONFIG_EXT3_FS=y
CONFIG_EXT3_DEFAULTS_TO_ORDERED=y
CONFIG_EXT3_FS_XATTR=y
# CONFIG_EXT3_FS_POSIX_ACL is not set
# CONFIG_EXT3_FS_SECURITY is not set
CONFIG_EXT4_FS=y
# CONFIG_EXT4_FS_POSIX_ACL is not set
# CONFIG_EXT4_FS_SECURITY is not set
# CONFIG_EXT4_DEBUG is not set
CONFIG_JBD=y
# CONFIG_JBD_DEBUG is not set
CONFIG_JBD2=y
# CONFIG_JBD2_DEBUG is not set
CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_XFS_FS is not set
# CONFIG_GFS2_FS is not set
# CONFIG_BTRFS_FS is not set
# CONFIG_NILFS2_FS is not set
CONFIG_FS_POSIX_ACL=y
CONFIG_EXPORTFS=y
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
CONFIG_INOTIFY_USER=y
# CONFIG_FANOTIFY is not set
# CONFIG_QUOTA is not set
# CONFIG_QUOTACTL is not set
# CONFIG_AUTOFS4_FS is not set
# CONFIG_FUSE_FS is not set
CONFIG_GENERIC_ACL=y

#
# Caches
#
# CONFIG_FSCACHE is not set

#
# CD-ROM/DVD Filesystems
#
# CONFIG_ISO9660_FS is not set
# CONFIG_UDF_FS is not set

#
# DOS/FAT/NT Filesystems
#
CONFIG_FAT_FS=y
# CONFIG_MSDOS_FS is not set
CONFIG_VFAT_FS=y
CONFIG_FAT_DEFAULT_CODEPAGE=437
CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
# CONFIG_NTFS_FS is not set

#
# Pseudo filesystems
#
CONFIG_PROC_FS=y
CONFIG_PROC_SYSCTL=y
CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_TMPFS_XATTR=y
# CONFIG_HUGETLB_PAGE is not set
# CONFIG_CONFIGFS_FS is not set
CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
# CONFIG_HFSPLUS_FS is not set
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
CONFIG_JFFS2_FS=y
CONFIG_JFFS2_FS_DEBUG=0
CONFIG_JFFS2_FS_WRITEBUFFER=y
# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
# CONFIG_JFFS2_SUMMARY is not set
# CONFIG_JFFS2_FS_XATTR is not set
# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
CONFIG_JFFS2_ZLIB=y
# CONFIG_JFFS2_LZO is not set
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
# CONFIG_LOGFS is not set
CONFIG_CRAMFS=y
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
CONFIG_MINIX_FS=y
# CONFIG_OMFS_FS is not set
# CONFIG_HPFS_FS is not set
# CONFIG_QNX4FS_FS is not set
# CONFIG_QNX6FS_FS is not set
CONFIG_ROMFS_FS=y
CONFIG_ROMFS_BACKED_BY_BLOCK=y
# CONFIG_ROMFS_BACKED_BY_MTD is not set
# CONFIG_ROMFS_BACKED_BY_BOTH is not set
CONFIG_ROMFS_ON_BLOCK=y
# CONFIG_PSTORE is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
# CONFIG_F2FS_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V2=y
CONFIG_NFS_V3=y
# CONFIG_NFS_V3_ACL is not set
# CONFIG_NFS_V4 is not set
# CONFIG_NFS_SWAP is not set
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
# CONFIG_NFSD_V3_ACL is not set
# CONFIG_NFSD_V4 is not set
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=y
# CONFIG_SUNRPC_DEBUG is not set
# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
# CONFIG_AFS_FS is not set
CONFIG_NLS=y
CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_CODEPAGE_437 is not set
# CONFIG_NLS_CODEPAGE_737 is not set
# CONFIG_NLS_CODEPAGE_775 is not set
CONFIG_NLS_CODEPAGE_850=y
# CONFIG_NLS_CODEPAGE_852 is not set
# CONFIG_NLS_CODEPAGE_855 is not set
# CONFIG_NLS_CODEPAGE_857 is not set
# CONFIG_NLS_CODEPAGE_860 is not set
# CONFIG_NLS_CODEPAGE_861 is not set
# CONFIG_NLS_CODEPAGE_862 is not set
# CONFIG_NLS_CODEPAGE_863 is not set
# CONFIG_NLS_CODEPAGE_864 is not set
# CONFIG_NLS_CODEPAGE_865 is not set
# CONFIG_NLS_CODEPAGE_866 is not set
# CONFIG_NLS_CODEPAGE_869 is not set
# CONFIG_NLS_CODEPAGE_936 is not set
# CONFIG_NLS_CODEPAGE_950 is not set
# CONFIG_NLS_CODEPAGE_932 is not set
# CONFIG_NLS_CODEPAGE_949 is not set
# CONFIG_NLS_CODEPAGE_874 is not set
# CONFIG_NLS_ISO8859_8 is not set
# CONFIG_NLS_CODEPAGE_1250 is not set
# CONFIG_NLS_CODEPAGE_1251 is not set
# CONFIG_NLS_ASCII is not set
CONFIG_NLS_ISO8859_1=y
# CONFIG_NLS_ISO8859_2 is not set
# CONFIG_NLS_ISO8859_3 is not set
# CONFIG_NLS_ISO8859_4 is not set
# CONFIG_NLS_ISO8859_5 is not set
# CONFIG_NLS_ISO8859_6 is not set
# CONFIG_NLS_ISO8859_7 is not set
# CONFIG_NLS_ISO8859_9 is not set
# CONFIG_NLS_ISO8859_13 is not set
# CONFIG_NLS_ISO8859_14 is not set
# CONFIG_NLS_ISO8859_15 is not set
# CONFIG_NLS_KOI8_R is not set
# CONFIG_NLS_KOI8_U is not set
# CONFIG_NLS_MAC_ROMAN is not set
# CONFIG_NLS_MAC_CELTIC is not set
# CONFIG_NLS_MAC_CENTEURO is not set
# CONFIG_NLS_MAC_CROATIAN is not set
# CONFIG_NLS_MAC_CYRILLIC is not set
# CONFIG_NLS_MAC_GAELIC is not set
# CONFIG_NLS_MAC_GREEK is not set
# CONFIG_NLS_MAC_ICELAND is not set
# CONFIG_NLS_MAC_INUIT is not set
# CONFIG_NLS_MAC_ROMANIAN is not set
# CONFIG_NLS_MAC_TURKISH is not set
# CONFIG_NLS_UTF8 is not set

#
# Kernel hacking
#
# CONFIG_PRINTK_TIME is not set
CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_READABLE_ASM is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_SECTION_MISMATCH is not set
CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_SHIRQ is not set
# CONFIG_LOCKUP_DETECTOR is not set
# CONFIG_PANIC_ON_OOPS is not set
CONFIG_PANIC_ON_OOPS_VALUE=0
# CONFIG_DETECT_HUNG_TASK is not set
CONFIG_SCHED_DEBUG=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_TIMER_STATS is not set
# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_DEBUG_SLAB is not set
CONFIG_HAVE_DEBUG_KMEMLEAK=y
# CONFIG_DEBUG_KMEMLEAK is not set
# CONFIG_DEBUG_RT_MUTEXES is not set
# CONFIG_RT_MUTEX_TESTER is not set
# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_MUTEXES is not set
# CONFIG_DEBUG_LOCK_ALLOC is not set
# CONFIG_PROVE_LOCKING is not set
# CONFIG_SPARSE_RCU_POINTER is not set
# CONFIG_LOCK_STAT is not set
# CONFIG_DEBUG_ATOMIC_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_DEBUG_KOBJECT is not set
# CONFIG_DEBUG_HIGHMEM is not set
CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_INFO_REDUCED is not set
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_DEBUG_LIST is not set
# CONFIG_TEST_LIST_SORT is not set
# CONFIG_DEBUG_SG is not set
# CONFIG_DEBUG_NOTIFIERS is not set
# CONFIG_DEBUG_CREDENTIALS is not set
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_TRACE is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_LKDTM is not set
# CONFIG_NOTIFIER_ERROR_INJECTION is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
# CONFIG_DEBUG_PAGEALLOC is not set
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_HAVE_C_RECORDMCOUNT=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_ENABLE_DEFAULT_TRACERS is not set
# CONFIG_FTRACE_SYSCALLS is not set
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
# CONFIG_PROFILE_ALL_BRANCHES is not set
# CONFIG_STACK_TRACER is not set
# CONFIG_BLK_DEV_IO_TRACE is not set
# CONFIG_PROBE_EVENTS is not set
# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_DMA_API_DEBUG is not set
# CONFIG_ATOMIC64_SELFTEST is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
# CONFIG_TEST_KSTRTOX is not set
# CONFIG_STRICT_DEVMEM is not set
CONFIG_ARM_UNWIND=y
CONFIG_DEBUG_USER=y
CONFIG_DEBUG_LL=y
# CONFIG_DEBUG_VEXPRESS_UART0_DETECT is not set
# CONFIG_DEBUG_VEXPRESS_UART0_CA9 is not set
CONFIG_DEBUG_VEXPRESS_UART0_RS1=y
# CONFIG_DEBUG_ICEDCC is not set
# CONFIG_DEBUG_SEMIHOSTING is not set
CONFIG_DEBUG_LL_INCLUDE="debug/vexpress.S"
CONFIG_EARLY_PRINTK=y
# CONFIG_OC_ETM is not set
# CONFIG_PID_IN_CONTEXTIDR is not set

#
# Security options
#
# CONFIG_KEYS is not set
# CONFIG_SECURITY_DMESG_RESTRICT is not set
# CONFIG_SECURITY is not set
# CONFIG_SECURITYFS is not set
CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_DEFAULT_SECURITY=""
CONFIG_CRYPTO=y

#
# Crypto core or helper
#
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD2=y
CONFIG_CRYPTO_BLKCIPHER=y
CONFIG_CRYPTO_BLKCIPHER2=y
CONFIG_CRYPTO_HASH=y
CONFIG_CRYPTO_HASH2=y
CONFIG_CRYPTO_RNG=y
CONFIG_CRYPTO_RNG2=y
CONFIG_CRYPTO_PCOMP2=y
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_MANAGER2=y
# CONFIG_CRYPTO_USER is not set
CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
# CONFIG_CRYPTO_GF128MUL is not set
# CONFIG_CRYPTO_NULL is not set
CONFIG_CRYPTO_WORKQUEUE=y
# CONFIG_CRYPTO_CRYPTD is not set
# CONFIG_CRYPTO_AUTHENC is not set

#
# Authenticated Encryption with Associated Data
#
# CONFIG_CRYPTO_CCM is not set
# CONFIG_CRYPTO_GCM is not set
# CONFIG_CRYPTO_SEQIV is not set

#
# Block modes
#
CONFIG_CRYPTO_CBC=y
# CONFIG_CRYPTO_CTR is not set
# CONFIG_CRYPTO_CTS is not set
# CONFIG_CRYPTO_ECB is not set
# CONFIG_CRYPTO_LRW is not set
# CONFIG_CRYPTO_PCBC is not set
# CONFIG_CRYPTO_XTS is not set

#
# Hash modes
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
# CONFIG_CRYPTO_VMAC is not set

#
# Digest
#
CONFIG_CRYPTO_CRC32C=y
# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_MICHAEL_MIC is not set
# CONFIG_CRYPTO_RMD128 is not set
# CONFIG_CRYPTO_RMD160 is not set
# CONFIG_CRYPTO_RMD256 is not set
# CONFIG_CRYPTO_RMD320 is not set
# CONFIG_CRYPTO_SHA1 is not set
# CONFIG_CRYPTO_SHA1_ARM is not set
# CONFIG_CRYPTO_SHA256 is not set
# CONFIG_CRYPTO_SHA512 is not set
# CONFIG_CRYPTO_TGR192 is not set
# CONFIG_CRYPTO_WP512 is not set

#
# Ciphers
#
CONFIG_CRYPTO_AES=y
# CONFIG_CRYPTO_AES_ARM is not set
# CONFIG_CRYPTO_ANUBIS is not set
# CONFIG_CRYPTO_ARC4 is not set
# CONFIG_CRYPTO_BLOWFISH is not set
# CONFIG_CRYPTO_CAMELLIA is not set
# CONFIG_CRYPTO_CAST5 is not set
# CONFIG_CRYPTO_CAST6 is not set
CONFIG_CRYPTO_DES=y
# CONFIG_CRYPTO_FCRYPT is not set
# CONFIG_CRYPTO_KHAZAD is not set
# CONFIG_CRYPTO_SALSA20 is not set
# CONFIG_CRYPTO_SEED is not set
# CONFIG_CRYPTO_SERPENT is not set
# CONFIG_CRYPTO_TEA is not set
# CONFIG_CRYPTO_TWOFISH is not set

#
# Compression
#
# CONFIG_CRYPTO_DEFLATE is not set
# CONFIG_CRYPTO_ZLIB is not set
# CONFIG_CRYPTO_LZO is not set

#
# Random Number Generation
#
CONFIG_CRYPTO_ANSI_CPRNG=y
# CONFIG_CRYPTO_USER_API_HASH is not set
# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
CONFIG_CRYPTO_HW=y
# CONFIG_BINARY_PRINTF is not set

#
# Library routines
#
CONFIG_BITREVERSE=y
CONFIG_GENERIC_STRNCPY_FROM_USER=y
CONFIG_GENERIC_STRNLEN_USER=y
CONFIG_GENERIC_PCI_IOMAP=y
CONFIG_GENERIC_IO=y
CONFIG_PERCPU_RWSEM=y
# CONFIG_CRC_CCITT is not set
CONFIG_CRC16=y
# CONFIG_CRC_T10DIF is not set
# CONFIG_CRC_ITU_T is not set
CONFIG_CRC32=y
# CONFIG_CRC32_SELFTEST is not set
CONFIG_CRC32_SLICEBY8=y
# CONFIG_CRC32_SLICEBY4 is not set
# CONFIG_CRC32_SARWATE is not set
# CONFIG_CRC32_BIT is not set
# CONFIG_CRC7 is not set
# CONFIG_LIBCRC32C is not set
# CONFIG_CRC8 is not set
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
CONFIG_XZ_DEC=y
CONFIG_XZ_DEC_X86=y
CONFIG_XZ_DEC_POWERPC=y
CONFIG_XZ_DEC_IA64=y
CONFIG_XZ_DEC_ARM=y
CONFIG_XZ_DEC_ARMTHUMB=y
CONFIG_XZ_DEC_SPARC=y
CONFIG_XZ_DEC_BCJ=y
# CONFIG_XZ_DEC_TEST is not set
CONFIG_DECOMPRESS_GZIP=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_DMA=y
CONFIG_DQL=y
CONFIG_NLATTR=y
CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y
# CONFIG_AVERAGE is not set
# CONFIG_CORDIC is not set
# CONFIG_DDR is not set

[-- Attachment #3: guest.sh --]
[-- Type: application/x-shellscript, Size: 101 bytes --]

[-- Attachment #4: cfg --]
[-- Type: text/plain, Size: 98 bytes --]

kernel = "/root/guest.img"
name = "g"
memory = 128
vcpus = 1

disk = [ 'phy:/dev/loop0,xvda,w' ]
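
For reference, a cfg like this is normally launched with the xl toolstack; a
minimal sketch follows, assuming the loop device still needs to be bound and
using a placeholder image path (the attached guest.sh/init.sh presumably handle
this, but their contents are not shown inline):

    # sketch only: the image path is a placeholder, the cfg path is assumed to be ./cfg
    losetup /dev/loop0 /path/to/guest-disk.img    # back the phy:/dev/loop0 xvda disk
    xl create ./cfg                               # create the guest "g" defined above
    xl console g                                  # attach to the guest's console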


[-- Attachment #5: init.sh --]
[-- Type: application/x-shellscript, Size: 341 bytes --]

[-- Attachment #6: Type: text/plain, Size: 126 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 81+ messages in thread

* [PATCH V2 31/46] xen: arm: show_registers() support for 64-bit.
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (29 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 30/46] xen: arm: guest context switching Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-21 15:11   ` Tim Deegan
  2013-02-14 16:47 ` [PATCH V2 32/46] xen: arm: make dom0 builder work on 64-bit hypervisor Ian Campbell
                   ` (16 subsequent siblings)
  47 siblings, 1 reply; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
 xen/arch/arm/traps.c |  176 +++++++++++++++++++++++++++++++++++++++++++-------
 1 files changed, 151 insertions(+), 25 deletions(-)

diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 97a29fb..642b0ea 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -214,12 +214,19 @@ void panic_PAR(uint64_t par)
 }
 
 struct reg_ctxt {
-    uint32_t sctlr, ttbcr;
+    uint32_t sctlr, tcr;
     uint64_t ttbr0, ttbr1;
+#ifdef CONFIG_ARM_32
+    uint32_t dfar, ifar;
+#else
+    uint64_t far;
+#endif
 };
-static void _show_registers(struct cpu_user_regs *regs,
-                            struct reg_ctxt *ctxt,
-                            int guest_mode)
+
+static void show_registers_32(struct cpu_user_regs *regs,
+                              struct reg_ctxt *ctxt,
+                              int guest_mode,
+                              const struct vcpu *v)
 {
     static const char *mode_strings[] = {
        [PSR_MODE_USR] = "USR",
@@ -233,25 +240,34 @@ static void _show_registers(struct cpu_user_regs *regs,
        [PSR_MODE_SYS] = "SYS"
     };
 
-    print_xen_info();
-    printk("CPU:    %d\n", smp_processor_id());
+#ifdef CONFIG_ARM_64
+    printk("PC:     %08"PRIx32"\n", regs->pc32);
+#else
     printk("PC:     %08"PRIx32, regs->pc);
     if ( !guest_mode )
-            print_symbol(" %s", regs->pc);
+        print_symbol(" %s", regs->pc);
     printk("\n");
-    printk("CPSR:   %08"PRIx32" MODE:%s\n", regs->cpsr,
-           mode_strings[regs->cpsr & PSR_MODE_MASK]);
+#endif
+    printk("CPSR:   %08"PRIx32" MODE:%s%s\n", regs->cpsr,
+           guest_mode ? "32-bit Guest " : "Hypervisor",
+           guest_mode ? mode_strings[regs->cpsr & PSR_MODE_MASK] : "");
     printk("     R0: %08"PRIx32" R1: %08"PRIx32" R2: %08"PRIx32" R3: %08"PRIx32"\n",
            regs->r0, regs->r1, regs->r2, regs->r3);
     printk("     R4: %08"PRIx32" R5: %08"PRIx32" R6: %08"PRIx32" R7: %08"PRIx32"\n",
            regs->r4, regs->r5, regs->r6, regs->r7);
     printk("     R8: %08"PRIx32" R9: %08"PRIx32" R10:%08"PRIx32" R11:%08"PRIx32" R12:%08"PRIx32"\n",
-           regs->r8, regs->r9, regs->r10, regs->r11, regs->r12);
+           regs->r8, regs->r9, regs->r10,
+#ifdef CONFIG_ARM_64
+           regs->r11,
+#else
+           regs->fp,
+#endif
+           regs->r12);
 
     if ( guest_mode )
     {
-        printk("USR: SP: %08"PRIx32" LR: %08"PRIx32" CPSR:%08"PRIx32"\n",
-               regs->sp_usr, regs->lr_usr, regs->cpsr);
+        printk("USR: SP: %08"PRIx32" LR: %08"PRIregister"\n",
+               regs->sp_usr, regs->lr);
         printk("SVC: SP: %08"PRIx32" LR: %08"PRIx32" SPSR:%08"PRIx32"\n",
                regs->sp_svc, regs->lr_svc, regs->spsr_svc);
         printk("ABT: SP: %08"PRIx32" LR: %08"PRIx32" SPSR:%08"PRIx32"\n",
@@ -264,50 +280,160 @@ static void _show_registers(struct cpu_user_regs *regs,
                regs->sp_fiq, regs->lr_fiq, regs->spsr_fiq);
         printk("FIQ: R8: %08"PRIx32" R9: %08"PRIx32" R10:%08"PRIx32" R11:%08"PRIx32" R12:%08"PRIx32"\n",
                regs->r8_fiq, regs->r9_fiq, regs->r10_fiq, regs->r11_fiq, regs->r11_fiq);
-        printk("\n");
-        printk("TTBR0 %010"PRIx64" TTBR1 %010"PRIx64" TTBCR %08"PRIx32"\n",
-               ctxt->ttbr0, ctxt->ttbr1, ctxt->ttbcr);
+    }
+#ifndef CONFIG_ARM_64
+    else
+    {
+        printk("HYP: SP: %08"PRIx32" LR: %08"PRIregister"\n", regs->sp, regs->lr);
+    }
+#endif
+    printk("\n");
+
+    if ( guest_mode )
+    {
+        printk("TTBR0 %010"PRIx64" TTBR1 %010"PRIx64" TCR %08"PRIx32"\n",
+               ctxt->ttbr0, ctxt->ttbr1, ctxt->tcr);
         printk("SCTLR %08"PRIx32"\n", ctxt->sctlr);
-        printk("VTTBR %010"PRIx64"\n", READ_CP64(VTTBR));
+        printk("IFAR %08"PRIx32" DFAR %08"PRIx32"\n",
+#ifdef CONFIG_ARM_64
+               (uint32_t)(ctxt->far >> 32),
+               (uint32_t)(ctxt->far & 0xffffffff)
+#else
+               ctxt->ifar, ctxt->dfar
+#endif
+            );
         printk("\n");
     }
-    else
+}
+
+#ifdef CONFIG_ARM_64
+static void show_registers_64(struct cpu_user_regs *regs,
+                              struct reg_ctxt *ctxt,
+                              int guest_mode,
+                              const struct vcpu *v)
+{
+    printk("PC:     %016"PRIx64, regs->pc);
+    if ( !guest_mode )
+        print_symbol(" %s", regs->pc);
+    printk("\n");
+    printk("SP:     %08"PRIx64"\n", regs->sp);
+    printk("CPSR:   %08"PRIx32" MODE:%s\n", regs->cpsr,
+           guest_mode ? "64-bit Guest" : "Hypervisor");
+    printk("     X0: %016"PRIx64"  X1: %016"PRIx64"  X2: %016"PRIx64"\n",
+           regs->x0, regs->x1, regs->x2);
+    printk("     X3: %016"PRIx64"  X4: %016"PRIx64"  X5: %016"PRIx64"\n",
+           regs->x3, regs->x4, regs->x5);
+    printk("     X6: %016"PRIx64"  X7: %016"PRIx64"  X8: %016"PRIx64"\n",
+           regs->x6, regs->x7, regs->x8);
+    printk("     X9: %016"PRIx64" X10: %016"PRIx64" X11: %016"PRIx64"\n",
+           regs->x9, regs->x10, regs->x11);
+    printk("    X12: %016"PRIx64" X13: %016"PRIx64" X14: %016"PRIx64"\n",
+           regs->x12, regs->x13, regs->x14);
+    printk("    X15: %016"PRIx64" X16: %016"PRIx64" X17: %016"PRIx64"\n",
+           regs->x15, regs->x16, regs->x17);
+    printk("    X18: %016"PRIx64" X19: %016"PRIx64" X20: %016"PRIx64"\n",
+           regs->x18, regs->x19, regs->x20);
+    printk("    X21: %016"PRIx64" X22: %016"PRIx64" X23: %016"PRIx64"\n",
+           regs->x21, regs->x22, regs->x23);
+    printk("    X24: %016"PRIx64" X25: %016"PRIx64" X26: %016"PRIx64"\n",
+           regs->x24, regs->x25, regs->x26);
+    printk("    X27: %016"PRIx64" X28: %016"PRIx64" X29: %016"PRIx64"\n",
+           regs->x27, regs->x28, regs->lr);
+    printk("\n");
+
+    if ( guest_mode )
     {
-        printk("     SP: %08"PRIx32" LR: %08"PRIx32"\n", regs->sp, regs->lr);
+        printk("SCTLR_EL1: %08"PRIx32"\n", ctxt->sctlr);
+        printk("  TCR_EL1: %08"PRIx32"\n", ctxt->tcr);
+        printk("TTBR0_EL1: %010"PRIx64"\n", ctxt->ttbr0);
+        printk("TTBR1_EL1: %010"PRIx64"\n", ctxt->ttbr1);
+        printk("  FAR_EL1: %010"PRIx64"\n", ctxt->far);
         printk("\n");
     }
+}
+#endif
+
+static void _show_registers(struct cpu_user_regs *regs,
+                            struct reg_ctxt *ctxt,
+                            int guest_mode,
+                            const struct vcpu *v)
+{
+    print_xen_info();
+
+    printk("CPU:    %d\n", smp_processor_id());
+
+    if ( guest_mode )
+    {
+        if ( is_pv32_domain(v->domain) )
+            show_registers_32(regs, ctxt, guest_mode, v);
+#ifdef CONFIG_ARM_64
+        else if ( is_pv64_domain(v->domain) )
+            show_registers_64(regs, ctxt, guest_mode, v);
+#endif
+    }
+    else
+    {
+#ifdef CONFIG_ARM_64
+        show_registers_64(regs, ctxt, guest_mode, v);
+#else
+        show_registers_32(regs, ctxt, guest_mode, v);
+#endif
+    }
 
+#ifdef CONFIG_ARM_32
     printk("HTTBR %"PRIx64"\n", READ_CP64(HTTBR));
     printk("HDFAR %"PRIx32"\n", READ_CP32(HDFAR));
     printk("HIFAR %"PRIx32"\n", READ_CP32(HIFAR));
     printk("HPFAR %"PRIx32"\n", READ_CP32(HPFAR));
     printk("HCR %08"PRIx32"\n", READ_CP32(HCR));
     printk("HSR   %"PRIx32"\n", READ_CP32(HSR));
+    printk("VTTBR %010"PRIx64"\n", READ_CP64(VTTBR));
     printk("\n");
 
     printk("DFSR %"PRIx32" DFAR %"PRIx32"\n", READ_CP32(DFSR), READ_CP32(DFAR));
     printk("IFSR %"PRIx32" IFAR %"PRIx32"\n", READ_CP32(IFSR), READ_CP32(IFAR));
     printk("\n");
+#else
+    printk("TTBR0_EL2: %"PRIx64"\n", READ_SYSREG64(TTBR0_EL2));
+    printk("  FAR_EL2: %"PRIx64"\n", READ_SYSREG64(FAR_EL2));
+    printk("HPFAR_EL2: %"PRIx64"\n", READ_SYSREG64(HPFAR_EL2));
+    printk("  HCR_EL2: %"PRIx64"\n", READ_SYSREG64(HCR_EL2));
+    printk("  ESR_EL2: %"PRIx64"\n", READ_SYSREG64(ESR_EL2));
+    printk("VTTBR_EL2: %"PRIx64"\n", READ_SYSREG64(VTTBR_EL2));
+    printk("\n");
+#endif
 }
 
 void show_registers(struct cpu_user_regs *regs)
 {
     struct reg_ctxt ctxt;
-    ctxt.sctlr = READ_CP32(SCTLR);
-    ctxt.ttbcr = READ_CP32(TTBCR);
-    ctxt.ttbr0 = READ_CP64(TTBR0);
-    ctxt.ttbr1 = READ_CP64(TTBR1);
-    _show_registers(regs, &ctxt, guest_mode(regs));
+    ctxt.sctlr = READ_SYSREG(SCTLR_EL1);
+    ctxt.tcr = READ_SYSREG(TCR_EL1);
+    ctxt.ttbr0 = READ_SYSREG64(TTBR0_EL1);
+    ctxt.ttbr1 = READ_SYSREG64(TTBR1_EL1);
+#ifdef CONFIG_ARM_32
+    ctxt.dfar = READ_CP32(DFAR);
+    ctxt.ifar = READ_CP32(IFAR);
+#else
+    ctxt.far = READ_SYSREG(FAR_EL1);
+#endif
+    _show_registers(regs, &ctxt, guest_mode(regs), current);
 }
 
 void vcpu_show_registers(const struct vcpu *v)
 {
     struct reg_ctxt ctxt;
     ctxt.sctlr = v->arch.sctlr;
-    ctxt.ttbcr = v->arch.ttbcr;
+    ctxt.tcr = v->arch.ttbcr;
     ctxt.ttbr0 = v->arch.ttbr0;
     ctxt.ttbr1 = v->arch.ttbr1;
-    _show_registers(&v->arch.cpu_info->guest_cpu_user_regs, &ctxt, 1);
+#ifdef CONFIG_ARM_32
+    ctxt.dfar = v->arch.dfar;
+    ctxt.ifar = v->arch.ifar;
+#else
+    ctxt.far = v->arch.far;
+#endif
+    _show_registers(&v->arch.cpu_info->guest_cpu_user_regs, &ctxt, 1, v);
 }
 
 static void show_guest_stack(struct cpu_user_regs *regs)
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 32/46] xen: arm: make dom0 builder work on 64-bit hypervisor
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (30 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 31/46] xen: arm: show_registers() support for 64-bit Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 33/46] xen: arm: gic: use 64-bit compatible registers Ian Campbell
                   ` (15 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

This still only builds a 32-bit dom0, although it lays a bit of
simple groundwork for a 64-bit dom0.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
 xen/arch/arm/domain_build.c |   53 ++++++++++++++++++++++++++++--------------
 1 files changed, 35 insertions(+), 18 deletions(-)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 30d014a..29cef73 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -68,7 +68,7 @@ static int set_memory_reg(struct domain *d, struct kernel_info *kinfo,
             size = kinfo->unassigned_mem;
         device_tree_set_reg(&new_cell, address_cells, size_cells, start, size);
 
-        printk("Populate P2M %#llx->%#llx\n", start, start + size);
+        printk("Populate P2M %#"PRIx64"->%#"PRIx64"\n", start, start + size);
         p2m_populate_ram(d, start, start + size);
         kinfo->mem.bank[kinfo->mem.nr_banks].start = start;
         kinfo->mem.bank[kinfo->mem.nr_banks].size = size;
@@ -268,7 +268,7 @@ static int prepare_dtb(struct domain *d, struct kernel_info *kinfo)
 
 static void dtb_load(struct kernel_info *kinfo)
 {
-    void * __user dtb_virt = (void *)(register_t)kinfo->dtb_paddr;
+    void * __user dtb_virt = (void * __user)(register_t)kinfo->dtb_paddr;
 
     raw_copy_to_guest(dtb_virt, kinfo->fdt, fdt_totalsize(kinfo->fdt));
     xfree(kinfo->fdt);
@@ -319,7 +319,8 @@ int construct_dom0(struct domain *d)
     gic_route_irq_to_guest(d, 47, "eth");
 
     /* Enable second stage translation */
-    WRITE_CP32(READ_CP32(HCR) | HCR_VM, HCR); isb();
+    WRITE_SYSREG(READ_SYSREG(HCR_EL2) | HCR_VM, HCR_EL2);
+    isb();
 
     /* The following loads use the domain's p2m */
     p2m_load_VTTBR(d);
@@ -337,24 +338,40 @@ int construct_dom0(struct domain *d)
 
     regs->cpsr = PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_SVC;
 
-/* FROM LINUX head.S
-
- * Kernel startup entry point.
- * ---------------------------
- *
- * This is normally called from the decompressor code.  The requirements
- * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
- * r1 = machine nr, r2 = atags or dtb pointer.
- *...
- */
+#ifdef CONFIG_ARM_64
+    d->arch.type = kinfo.type;
+#endif
 
-    regs->r0 = 0; /* SBZ */
-    regs->r1 = 0xffffffff; /* We use DTB therefore no machine id */
-    regs->r2 = kinfo.dtb_paddr;
+    if ( is_pv32_domain(d) )
+    {
+        /* FROM LINUX head.S
+         *
+         * Kernel startup entry point.
+         * ---------------------------
+         *
+         * This is normally called from the decompressor code.  The requirements
+         * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
+         * r1 = machine nr, r2 = atags or dtb pointer.
+         *...
+         */
+        regs->r0 = 0; /* SBZ */
+        regs->r1 = 0xffffffff; /* We use DTB therefore no machine id */
+        regs->r2 = kinfo.dtb_paddr;
+    }
+#ifdef CONFIG_ARM_64
+    else
+    {
+        /* From linux/Documentation/arm64/booting.txt */
+        regs->x0 = kinfo.dtb_paddr;
+        regs->x1 = 0; /* Reserved for future use */
+        regs->x2 = 0; /* Reserved for future use */
+        regs->x3 = 0; /* Reserved for future use */
+    }
+#endif
 
-    WRITE_CP32(SCTLR_BASE, SCTLR);
+    v->arch.sctlr = SCTLR_BASE;
 
-    WRITE_CP32(HCR_PTW|HCR_BSU_OUTER|HCR_AMO|HCR_IMO|HCR_VM, HCR);
+    WRITE_SYSREG(HCR_PTW|HCR_BSU_OUTER|HCR_AMO|HCR_IMO|HCR_VM, HCR_EL2);
     isb();
 
     local_abort_enable();
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 33/46] xen: arm: gic: use 64-bit compatible registers
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (31 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 32/46] xen: arm: make dom0 builder work on 64-bit hypervisor Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 34/46] xen: arm: time: " Ian Campbell
                   ` (14 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
 xen/arch/arm/gic.c |   12 +++++-------
 1 files changed, 5 insertions(+), 7 deletions(-)

diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index 7627ad8..e1af33a 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -267,7 +267,7 @@ static void __init gic_dist_init(void)
 
     /* Disable all global interrupts */
     for ( i = 32; i < gic.lines; i += 32 )
-        GICD[GICD_ICENABLER + i / 32] = ~0ul;
+        GICD[GICD_ICENABLER + i / 32] = (uint32_t)~0ul;
 
     /* Turn on the distributor */
     GICD[GICD_CTLR] = GICD_CTL_ENABLE;
@@ -531,18 +531,16 @@ static void gic_restore_pending_irqs(struct vcpu *v)
 
 static void gic_inject_irq_start(void)
 {
-    uint32_t hcr;
-    hcr = READ_CP32(HCR);
-    WRITE_CP32(hcr | HCR_VI, HCR);
+    register_t hcr = READ_SYSREG(HCR_EL2);
+    WRITE_SYSREG(hcr | HCR_VI, HCR_EL2);
     isb();
 }
 
 static void gic_inject_irq_stop(void)
 {
-    uint32_t hcr;
-    hcr = READ_CP32(HCR);
+    register_t hcr = READ_SYSREG(HCR_EL2);
     if (hcr & HCR_VI) {
-        WRITE_CP32(hcr & ~HCR_VI, HCR);
+        WRITE_SYSREG(hcr & ~HCR_VI, HCR_EL2);
         isb();
     }
 }
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 34/46] xen: arm: time: use 64-bit compatible registers
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (32 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 33/46] xen: arm: gic: use 64-bit compatible registers Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 35/46] xen: arm: p2m: " Ian Campbell
                   ` (13 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
 xen/arch/arm/time.c          |   48 +++++++++++++++++++++--------------------
 xen/include/asm-arm/cpregs.h |   12 ++++++++++
 2 files changed, 37 insertions(+), 23 deletions(-)

diff --git a/xen/arch/arm/time.c b/xen/arch/arm/time.c
index 3dad9b3..ee92d8c 100644
--- a/xen/arch/arm/time.c
+++ b/xen/arch/arm/time.c
@@ -76,9 +76,9 @@ static uint32_t calibrate_timer(void)
     sec = rtc[0] + 1;
     do {} while ( rtc[0] != sec );
     // Now time a few seconds
-    start = READ_CP64(CNTPCT);
+    start = READ_SYSREG64(CNTPCT_EL0);
     do {} while ( rtc[0] < sec + 32 );
-    end = READ_CP64(CNTPCT);
+    end = READ_SYSREG64(CNTPCT_EL0);
     printk("done.\n");
 
     clear_fixmap(FIXMAP_MISC);
@@ -90,11 +90,13 @@ static uint32_t calibrate_timer(void)
 int __init init_xen_time(void)
 {
     /* Check that this CPU supports the Generic Timer interface */
+#if defined(CONFIG_ARM_32)
     if ( (READ_CP32(ID_PFR1) & ID_PFR1_GT_MASK) != ID_PFR1_GT_v1 )
         panic("CPU does not support the Generic Timer v1 interface.\n");
+#endif
 
-    cpu_khz = READ_CP32(CNTFRQ) / 1000;
-    boot_count = READ_CP64(CNTPCT);
+    cpu_khz = READ_SYSREG32(CNTFRQ_EL0) / 1000;
+    boot_count = READ_SYSREG64(CNTPCT_EL0);
     printk("Using generic timer at %lu KHz\n", cpu_khz);
 
     return 0;
@@ -103,7 +105,7 @@ int __init init_xen_time(void)
 /* Return number of nanoseconds since boot */
 s_time_t get_s_time(void)
 {
-    uint64_t ticks = READ_CP64(CNTPCT) - boot_count;
+    uint64_t ticks = READ_SYSREG64(CNTPCT_EL0) - boot_count;
     return ticks_to_ns(ticks);
 }
 
@@ -117,20 +119,20 @@ int reprogram_timer(s_time_t timeout)
     if ( timeout == 0 )
     {
 #if USE_HYP_TIMER
-        WRITE_CP32(0, CNTHP_CTL);
+        WRITE_SYSREG32(0, CNTHP_CTL_EL2);
 #else
-        WRITE_CP32(0, CNTP_CTL);
+        WRITE_SYSREG32(0, CNTP_CTL_EL0);
 #endif
         return 1;
     }
 
     deadline = ns_to_ticks(timeout) + boot_count;
 #if USE_HYP_TIMER
-    WRITE_CP64(deadline, CNTHP_CVAL);
-    WRITE_CP32(CNTx_CTL_ENABLE, CNTHP_CTL);
+    WRITE_SYSREG64(deadline, CNTHP_CVAL_EL2);
+    WRITE_SYSREG32(CNTx_CTL_ENABLE, CNTHP_CTL_EL2);
 #else
-    WRITE_CP64(deadline, CNTP_CVAL);
-    WRITE_CP32(CNTx_CTL_ENABLE, CNTP_CTL);
+    WRITE_SYSREG64(deadline, CNTP_CVAL_EL0);
+    WRITE_SYSREG32(CNTx_CTL_ENABLE, CNTP_CTL_EL0);
 #endif
     isb();
 
@@ -142,27 +144,27 @@ int reprogram_timer(s_time_t timeout)
 /* Handle the firing timer */
 static void timer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
 {
-    if ( irq == 26 && READ_CP32(CNTHP_CTL) & CNTx_CTL_PENDING )
+    if ( irq == 26 && READ_SYSREG32(CNTHP_CTL_EL2) & CNTx_CTL_PENDING )
     {
         /* Signal the generic timer code to do its work */
         raise_softirq(TIMER_SOFTIRQ);
         /* Disable the timer to avoid more interrupts */
-        WRITE_CP32(0, CNTHP_CTL);
+        WRITE_SYSREG32(0, CNTHP_CTL_EL2);
     }
 
-    if (irq == 30 && READ_CP32(CNTP_CTL) & CNTx_CTL_PENDING )
+    if (irq == 30 && READ_SYSREG32(CNTP_CTL_EL0) & CNTx_CTL_PENDING )
     {
         /* Signal the generic timer code to do its work */
         raise_softirq(TIMER_SOFTIRQ);
         /* Disable the timer to avoid more interrupts */
-        WRITE_CP32(0, CNTP_CTL);
+        WRITE_SYSREG32(0, CNTP_CTL_EL0);
     }
 }
 
 static void vtimer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
 {
-    current->arch.virt_timer.ctl = READ_CP32(CNTV_CTL);
-    WRITE_CP32(current->arch.virt_timer.ctl | CNTx_CTL_MASK, CNTV_CTL);
+    current->arch.virt_timer.ctl = READ_SYSREG32(CNTV_CTL_EL0);
+    WRITE_SYSREG32(current->arch.virt_timer.ctl | CNTx_CTL_MASK, CNTV_CTL_EL0);
     vgic_vcpu_inject_irq(current, irq, 1);
 }
 
@@ -170,17 +172,17 @@ static void vtimer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
 void __cpuinit init_timer_interrupt(void)
 {
     /* Sensible defaults */
-    WRITE_CP64(0, CNTVOFF);     /* No VM-specific offset */
-    WRITE_CP32(0, CNTKCTL);     /* No user-mode access */
+    WRITE_SYSREG64(0, CNTVOFF_EL2);     /* No VM-specific offset */
+    WRITE_SYSREG32(0, CNTKCTL_EL1);     /* No user-mode access */
 #if USE_HYP_TIMER
     /* Do not let the VMs program the physical timer, only read the physical counter */
-    WRITE_CP32(CNTHCTL_PA, CNTHCTL);
+    WRITE_SYSREG32(CNTHCTL_PA, CNTHCTL_EL2);
 #else
     /* Cannot let VMs access physical counter if we are using it */
-    WRITE_CP32(0, CNTHCTL);
+    WRITE_SYSREG32(0, CNTHCTL_EL2);
 #endif
-    WRITE_CP32(0, CNTP_CTL);    /* Physical timer disabled */
-    WRITE_CP32(0, CNTHP_CTL);   /* Hypervisor's timer disabled */
+    WRITE_SYSREG32(0, CNTP_CTL_EL0);    /* Physical timer disabled */
+    WRITE_SYSREG32(0, CNTHP_CTL_EL2);   /* Hypervisor's timer disabled */
     isb();
 
     /* XXX Need to find this IRQ number from devicetree? */
diff --git a/xen/include/asm-arm/cpregs.h b/xen/include/asm-arm/cpregs.h
index 732f967..a374f5c 100644
--- a/xen/include/asm-arm/cpregs.h
+++ b/xen/include/asm-arm/cpregs.h
@@ -230,6 +230,18 @@
 #define AFSR1_EL1               AIFSR
 #define CCSIDR_EL1              CCSIDR
 #define CLIDR_EL1               CLIDR
+#define CNTFRQ_EL0              CNTFRQ
+#define CNTHCTL_EL2             CNTHCTL
+#define CNTHP_CTL_EL2           CNTHP_CTL
+#define CNTHP_CVAL_EL2          CNTHP_CVAL
+#define CNTKCTL_EL1             CNTKCTL
+#define CNTPCT_EL0              CNTPCT
+#define CNTP_CTL_EL0            CNTP_CTL
+#define CNTP_CVAL_EL0           CNTP_CVAL
+#define CNTVCT_EL0              CNTVCT
+#define CNTVOFF_EL2             CNTVOFF
+#define CNTV_CTL_EL0            CNTV_CTL
+#define CNTV_CVAL_EL0           CNTV_CVAL
 #define CONTEXTIDR_EL1          CONTEXTIDR
 #define CPACR_EL1               CPACR
 #define CSSELR_EL1              CSSELR
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 35/46] xen: arm: p2m: use 64-bit compatible registers.
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (33 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 34/46] xen: arm: time: " Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 36/46] xen: arm: Use 64-bit compatible registers in vtimer Ian Campbell
                   ` (12 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
 xen/arch/arm/p2m.c           |    2 +-
 xen/include/asm-arm/cpregs.h |    1 +
 2 files changed, 2 insertions(+), 1 deletions(-)

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 852f0d8..aaa43ef 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -29,7 +29,7 @@ void p2m_load_VTTBR(struct domain *d)
 
     vttbr |= ((uint64_t)p2m->vmid&0xff)<<48;
 
-    WRITE_CP64(vttbr, VTTBR);
+    WRITE_SYSREG64(vttbr, VTTBR_EL2);
     isb(); /* Ensure update is visible */
 }
 
diff --git a/xen/include/asm-arm/cpregs.h b/xen/include/asm-arm/cpregs.h
index a374f5c..676c8cf 100644
--- a/xen/include/asm-arm/cpregs.h
+++ b/xen/include/asm-arm/cpregs.h
@@ -277,6 +277,7 @@
 #define VBAR_EL1                VBAR
 #define VBAR_EL2                HVBAR
 #define VTCR_EL2                VTCR
+#define VTTBR_EL2               VTTBR
 
 #endif
 
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 36/46] xen: arm: Use 64-bit compatible registers in vtimer.
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (34 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 35/46] xen: arm: p2m: " Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 37/46] xen: arm: select_user_reg support for 64-bit hypervisor Ian Campbell
                   ` (11 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Also, don't crash the host if we fail to emulate a vtimer access;
just kill the guest instead.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
 xen/arch/arm/traps.c  |   14 ++++++++++++--
 xen/arch/arm/vtimer.c |   23 +++++++++++++----------
 2 files changed, 25 insertions(+), 12 deletions(-)

diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 642b0ea..20d2db9 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -712,7 +712,12 @@ static void do_cp15_32(struct cpu_user_regs *regs,
         break;
     case HSR_CPREG32(CNTP_CTL):
     case HSR_CPREG32(CNTP_TVAL):
-        BUG_ON(!vtimer_emulate(regs, hsr));
+        if ( !vtimer_emulate(regs, hsr) )
+        {
+            dprintk(XENLOG_ERR,
+                    "failed emulation of 32-bit vtimer CP register access\n");
+            domain_crash_synchronous();
+        }
         break;
     default:
         printk("%s p15, %d, r%d, cr%d, cr%d, %d @ 0x%"PRIregister"\n",
@@ -742,7 +747,12 @@ static void do_cp15_64(struct cpu_user_regs *regs,
     switch ( hsr.bits & HSR_CP64_REGS_MASK )
     {
     case HSR_CPREG64(CNTPCT):
-        BUG_ON(!vtimer_emulate(regs, hsr));
+        if ( !vtimer_emulate(regs, hsr) )
+        {
+            dprintk(XENLOG_ERR,
+                    "failed emulation of 64-bit vtimer CP register access\n");
+            domain_crash_synchronous();
+        }
         break;
     default:
         printk("%s p15, %d, r%d, r%d, cr%d @ 0x%"PRIregister"\n",
diff --git a/xen/arch/arm/vtimer.c b/xen/arch/arm/vtimer.c
index 291b87e..0051ff7 100644
--- a/xen/arch/arm/vtimer.c
+++ b/xen/arch/arm/vtimer.c
@@ -42,7 +42,7 @@ static void virt_timer_expired(void *data)
     struct vtimer *t = data;
     vcpu_wake(t->v);
 }
- 
+
 int vcpu_vtimer_init(struct vcpu *v)
 {
     struct vtimer *t = &v->arch.phys_timer;
@@ -57,7 +57,7 @@ int vcpu_vtimer_init(struct vcpu *v)
     t = &v->arch.virt_timer;
     init_timer(&t->timer, virt_timer_expired, t, smp_processor_id());
     t->ctl = 0;
-    t->offset = READ_CP64(CNTVCT) + READ_CP64(CNTVOFF);
+    t->offset = READ_SYSREG64(CNTVCT_EL0) + READ_SYSREG64(CNTVOFF_EL2);
     t->cval = 0;
     t->irq = 27;
     t->v = v;
@@ -73,9 +73,9 @@ void vcpu_timer_destroy(struct vcpu *v)
 
 int virt_timer_save(struct vcpu *v)
 {
-    v->arch.virt_timer.ctl = READ_CP32(CNTV_CTL);
-    WRITE_CP32(v->arch.virt_timer.ctl & ~CNTx_CTL_ENABLE, CNTV_CTL);
-    v->arch.virt_timer.cval = READ_CP64(CNTV_CVAL);
+    v->arch.virt_timer.ctl = READ_SYSREG32(CNTV_CTL_EL0);
+    WRITE_SYSREG32(v->arch.virt_timer.ctl & ~CNTx_CTL_ENABLE, CNTV_CTL_EL0);
+    v->arch.virt_timer.cval = READ_SYSREG64(CNTV_CVAL_EL0);
     if ( v->arch.virt_timer.ctl & CNTx_CTL_ENABLE )
     {
         set_timer(&v->arch.virt_timer.timer, ticks_to_ns(v->arch.virt_timer.cval +
@@ -88,13 +88,13 @@ int virt_timer_restore(struct vcpu *v)
 {
     stop_timer(&v->arch.virt_timer.timer);
 
-    WRITE_CP32(v->arch.virt_timer.ctl & ~CNTx_CTL_ENABLE, CNTV_CTL);
-    WRITE_CP64(v->arch.virt_timer.offset, CNTVOFF);
-    WRITE_CP64(v->arch.virt_timer.cval, CNTV_CVAL);
-    WRITE_CP32(v->arch.virt_timer.ctl, CNTV_CTL);
+    WRITE_SYSREG32(v->arch.virt_timer.ctl & ~CNTx_CTL_ENABLE, CNTV_CTL_EL0);
+    WRITE_SYSREG64(v->arch.virt_timer.offset, CNTVOFF_EL2);
+    WRITE_SYSREG64(v->arch.virt_timer.cval, CNTV_CVAL_EL0);
+    WRITE_SYSREG32(v->arch.virt_timer.ctl, CNTV_CTL_EL0);
     return 0;
 }
- 
+
 static int vtimer_emulate_32(struct cpu_user_regs *regs, union hsr hsr)
 {
     struct vcpu *v = current;
@@ -180,6 +180,9 @@ static int vtimer_emulate_64(struct cpu_user_regs *regs, union hsr hsr)
 
 int vtimer_emulate(struct cpu_user_regs *regs, union hsr hsr)
 {
+    if ( !is_pv32_domain(current->domain) )
+        return -EINVAL;
+
     switch (hsr.ec) {
     case HSR_EC_CP15_32:
         return vtimer_emulate_32(regs, hsr);
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 37/46] xen: arm: select_user_reg support for 64-bit hypervisor
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (35 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 36/46] xen: arm: Use 64-bit compatible registers in vtimer Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 38/46] xen: arm: handle 32-bit guest CP register traps on " Ian Campbell
                   ` (10 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
 xen/arch/arm/traps.c |   10 ++++++++++
 1 files changed, 10 insertions(+), 0 deletions(-)

diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 20d2db9..b2b9327 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -72,6 +72,7 @@ register_t *select_user_reg(struct cpu_user_regs *regs, int reg)
 {
     BUG_ON( !guest_mode(regs) );
 
+#ifdef CONFIG_ARM_32
     /*
      * We rely heavily on the layout of cpu_user_regs to avoid having
      * to handle all of the registers individually. Use BUILD_BUG_ON to
@@ -124,6 +125,15 @@ register_t *select_user_reg(struct cpu_user_regs *regs, int reg)
         BUG();
     }
 #undef REGOFFS
+#else
+    /* In 64 bit the syndrome register contains the AArch64 register
+     * number even if the trap was from AArch32 mode. Except that
+     * AArch32 R15 (PC) is encoded as 0b11111.
+     */
+    if ( reg == 0x1f /* && is aarch32 guest */)
+        return &regs->pc;
+    return &regs->x0 + reg;
+#endif
 }
 
 static const char *decode_fsc(uint32_t fsc, int *level)
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 38/46] xen: arm: handle 32-bit guest CP register traps on 64-bit hypervisor
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (36 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 37/46] xen: arm: select_user_reg support for 64-bit hypervisor Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 39/46] xen: arm: guest stage 1 walks " Ian Campbell
                   ` (9 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
 xen/arch/arm/traps.c |   10 +++++++---
 1 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index b2b9327..1e64be1 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -700,16 +700,16 @@ static void do_cp15_32(struct cpu_user_regs *regs,
                     "attempt to write to read-only register CLIDR\n");
             domain_crash_synchronous();
         }
-        *r = READ_CP32(CLIDR);
+        *r = READ_SYSREG32(CLIDR_EL1);
         break;
     case HSR_CPREG32(CCSIDR):
         if ( !cp32.read )
         {
             dprintk(XENLOG_ERR,
-                    "attempt to write to read-only register CSSIDR\n");
+                    "attempt to write to read-only register CCSIDR\n");
             domain_crash_synchronous();
         }
-        *r = READ_CP32(CCSIDR);
+        *r = READ_SYSREG32(CCSIDR_EL1);
         break;
     case HSR_CPREG32(DCCISW):
         if ( cp32.read )
@@ -718,7 +718,11 @@ static void do_cp15_32(struct cpu_user_regs *regs,
                     "attempt to read from write-only register DCCISW\n");
             domain_crash_synchronous();
         }
+#ifdef CONFIG_ARM_32
         WRITE_CP32(*r, DCCISW);
+#else
+        asm volatile("dc cisw, %0;" : : "r" (*r) : "memory");
+#endif
         break;
     case HSR_CPREG32(CNTP_CTL):
     case HSR_CPREG32(CNTP_TVAL):
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 39/46] xen: arm: guest stage 1 walks on 64-bit hypervisor
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (37 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 38/46] xen: arm: handle 32-bit guest CP register traps on " Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 40/46] xen: arm: implement do_multicall_call for both 32 and 64-bit Ian Campbell
                   ` (8 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Still only supports non-LPAE 32-bit guests.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
 xen/arch/arm/traps.c |    4 ++--
 1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 1e64be1..e00fef0 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -780,8 +780,8 @@ static void do_cp15_64(struct cpu_user_regs *regs,
 
 void dump_guest_s1_walk(struct domain *d, vaddr_t addr)
 {
-    uint32_t ttbcr = READ_CP32(TTBCR);
-    uint64_t ttbr0 = READ_CP64(TTBR0);
+    uint32_t ttbcr = READ_SYSREG32(TCR_EL1);
+    uint64_t ttbr0 = READ_SYSREG64(TTBR0_EL1);
     paddr_t paddr;
     uint32_t offset;
     uint32_t *first = NULL, *second = NULL;
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 40/46] xen: arm: implement do_multicall_call for both 32 and 64-bit
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (38 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 39/46] xen: arm: guest stage 1 walks " Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 41/46] xen: arm: Enable VFP is a nop on 64-bit Ian Campbell
                   ` (7 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Obviously nothing is actually making multicalls, even on 32-bit, so
this isn't tested.
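
For context: a multicall lets a guest batch several hypercalls into a
single trap, and the do_multicall_call() added below is what services
each batched entry. A rough guest-side sketch of the sort of request
that would eventually exercise it -- the struct layout mirrors the
public headers, but the typedef, the choice of op and the commented-out
wrapper call are illustrative assumptions, not part of this series:

    /* Hypothetical guest-side sketch, not part of this patch. */
    #include <stdint.h>

    typedef uint64_t xen_ulong_t;            /* assuming a 64-bit guest */

    struct multicall_entry {                 /* mirrors public/xen.h */
        xen_ulong_t op, result, args[6];
    };

    #define __HYPERVISOR_xen_version 17      /* a harmless op to batch */

    int main(void)
    {
        struct multicall_entry batch[2] = {
            { .op = __HYPERVISOR_xen_version, .args = { 0 /* XENVER_version */ } },
            { .op = __HYPERVISOR_xen_version, .args = { 0 } },
        };
        /* A real guest would now issue HYPERVISOR_multicall(batch, 2);
         * Xen's common do_multicall() invokes do_multicall_call() on
         * each entry and fills in .result. */
        (void)batch;
        return 0;
    }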

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
 xen/arch/arm/traps.c            |   22 ++++++++++++++++++++++
 xen/include/asm-arm/multicall.h |   11 +----------
 2 files changed, 23 insertions(+), 10 deletions(-)

diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index e00fef0..5f9c785 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -675,6 +675,28 @@ static void do_trap_hypercall(struct cpu_user_regs *regs, unsigned long iss)
 #endif
 }
 
+void do_multicall_call(struct multicall_entry *multi)
+{
+    arm_hypercall_fn_t call = NULL;
+
+    if ( multi->op >= ARRAY_SIZE(arm_hypercall_table) )
+    {
+        multi->result = -ENOSYS;
+        return;
+    }
+
+    call = arm_hypercall_table[multi->op].fn;
+    if ( call == NULL )
+    {
+        multi->result = -ENOSYS;
+        return;
+    }
+
+    multi->result = call(multi->args[0], multi->args[1],
+                        multi->args[2], multi->args[3],
+                        multi->args[4]);
+}
+
 static void do_cp15_32(struct cpu_user_regs *regs,
                        union hsr hsr)
 {
diff --git a/xen/include/asm-arm/multicall.h b/xen/include/asm-arm/multicall.h
index c800940..f717b51 100644
--- a/xen/include/asm-arm/multicall.h
+++ b/xen/include/asm-arm/multicall.h
@@ -1,16 +1,7 @@
 #ifndef __ASM_ARM_MULTICALL_H__
 #define __ASM_ARM_MULTICALL_H__
 
-#define do_multicall_call(_call)                             \
-    do {                                                     \
-        __asm__ __volatile__ (                               \
-            ".word 0xe7f000f0@; do_multicall_call\n"         \
-            "    mov r0,#0; @ do_multicall_call\n"           \
-            "    str r0, [r0];\n"                            \
-            :                                                \
-            :                                                \
-            : );                                             \
-    } while ( 0 )
+extern void do_multicall_call(struct multicall_entry *call);
 
 #endif /* __ASM_ARM_MULTICALL_H__ */
 /*
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 41/46] xen: arm: Enable VFP is a nop on 64-bit.
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (39 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 40/46] xen: arm: implement do_multicall_call for both 32 and 64-bit Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 42/46] xen: arm: Use generic mem{cpy, move, set, zero} " Ian Campbell
                   ` (6 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
 xen/include/asm-arm/vfp.h |   10 +++++++++-
 1 files changed, 9 insertions(+), 1 deletions(-)

diff --git a/xen/include/asm-arm/vfp.h b/xen/include/asm-arm/vfp.h
index 0bab2a8..5c61376 100644
--- a/xen/include/asm-arm/vfp.h
+++ b/xen/include/asm-arm/vfp.h
@@ -3,6 +3,9 @@
 
 #include <xen/types.h>
 
+
+#ifdef CONFIG_ARM_32
+
 #define FPEXC_EN (1u << 30)
 
 /* Save and restore FP state.
@@ -17,12 +20,17 @@
     asm volatile ("fmxr fp" #reg ", %0" : : "r" (val)); \
 } while (0)
 
-
 /* Start-of-day: Turn on VFP */
 static inline void enable_vfp(void)
 {
     WRITE_FP(exc, READ_FP(exc) | FPEXC_EN);
 }
+#else
+static inline void enable_vfp(void)
+{
+    /* Always enable on 64-bit */
+}
+#endif
 
 #endif
 /*
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 42/46] xen: arm: Use generic mem{cpy, move, set, zero} on 64-bit
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (40 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 41/46] xen: arm: Enable VFP is a nop on 64-bit Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 43/46] xen: arm: Explicitly setup VPIDR & VMPIDR at start of day Ian Campbell
                   ` (5 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

No optimised versions are available in Linux yet (meaning I couldn't
copy them).

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
 xen/include/asm-arm/string.h |    3 +++
 1 files changed, 3 insertions(+), 0 deletions(-)

diff --git a/xen/include/asm-arm/string.h b/xen/include/asm-arm/string.h
index f2d643d..e5d1e7e 100644
--- a/xen/include/asm-arm/string.h
+++ b/xen/include/asm-arm/string.h
@@ -3,6 +3,7 @@
 
 #include <xen/config.h>
 
+#if defined(CONFIG_ARM_32)
 #define __HAVE_ARCH_MEMCPY
 extern void * memcpy(void *, const void *, __kernel_size_t);
 
@@ -27,6 +28,8 @@ extern void __memzero(void *ptr, __kernel_size_t n);
                 (__p);                                                  \
         })
 
+#endif
+
 #endif /* __ARM_STRING_H__ */
 /*
  * Local variables:
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 43/46] xen: arm: Explicitly setup VPIDR & VMPIDR at start of day
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (41 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 42/46] xen: arm: Use generic mem{cpy, move, set, zero} " Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-14 16:47 ` [PATCH V2 44/46] xen: arm: print arm64 not arm32 in xen info when appropriate Ian Campbell
                   ` (4 subsequent siblings)
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

These are supposed to reset to the value of the underlying hardware,
but appear not to on at least some v8 models. There's no harm in
setting them explicitly.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
 xen/arch/arm/setup.c         |    5 +++++
 xen/include/asm-arm/cpregs.h |    6 ++++++
 2 files changed, 11 insertions(+), 0 deletions(-)

diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index 299848e..94e9754 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -56,6 +56,11 @@ static void __init init_idle_domain(void)
 
 static void __init processor_id(void)
 {
+
+    /* Setup the virtual ID to match the physical */
+    WRITE_SYSREG32(READ_SYSREG32(MIDR_EL1), VPIDR_EL2);
+    WRITE_SYSREG(READ_SYSREG(MPIDR_EL1), VMPIDR_EL2);
+
 #if defined(CONFIG_ARM_64)
     printk("64-bit Processor Features: %016"PRIx64" %016"PRIx64"\n",
            READ_SYSREG64(ID_AA64PFR0_EL1), READ_SYSREG64(ID_AA64PFR1_EL1));
diff --git a/xen/include/asm-arm/cpregs.h b/xen/include/asm-arm/cpregs.h
index 676c8cf..908aad9 100644
--- a/xen/include/asm-arm/cpregs.h
+++ b/xen/include/asm-arm/cpregs.h
@@ -95,6 +95,8 @@
 #define CCSIDR          p15,1,c0,c0,0   /* Cache Size ID Registers */
 #define CLIDR           p15,1,c0,c0,1   /* Cache Level ID Register */
 #define CSSELR          p15,2,c0,c0,0   /* Cache Size Selection Register */
+#define VPIDR           p15,4,c0,c0,0   /* Virtualization Processor ID Register */
+#define VMPIDR          p15,4,c0,c0,5   /* Virtualization Multiprocessor ID Register */
 
 /* CP15 CR1: System Control Registers */
 #define SCTLR           p15,0,c1,c0,0   /* System Control Register */
@@ -278,6 +280,10 @@
 #define VBAR_EL2                HVBAR
 #define VTCR_EL2                VTCR
 #define VTTBR_EL2               VTTBR
+#define MIDR_EL1                MIDR
+#define VPIDR_EL2               VPIDR
+#define MPIDR_EL1               MPIDR
+#define VMPIDR_EL2              VMPIDR
 
 #endif
 
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 44/46] xen: arm: print arm64 not arm32 in xen info when appropriate.
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (42 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 43/46] xen: arm: Explicitly setup VPIDR & VMPIDR at start of day Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-21 15:12   ` Tim Deegan
  2013-02-14 16:47 ` [PATCH V2 45/46] xen: arm: Fix guest mode for 64-bit Ian Campbell
                   ` (3 subsequent siblings)
  47 siblings, 1 reply; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
 xen/arch/arm/traps.c |    7 ++++++-
 1 files changed, 6 insertions(+), 1 deletions(-)

diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 5f9c785..52af819 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -63,8 +63,13 @@ static void print_xen_info(void)
 {
     char taint_str[TAINT_STRING_MAX_LEN];
 
-    printk("----[ Xen-%d.%d%s  arm32  debug=%c  %s ]----\n",
+    printk("----[ Xen-%d.%d%s  %s  debug=%c  %s ]----\n",
            xen_major_version(), xen_minor_version(), xen_extra_version(),
+#ifdef CONFIG_ARM_32
+           "arm32",
+#else
+           "arm64",
+#endif
            debug_build() ? 'y' : 'n', print_tainted(taint_str));
 }
 
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 45/46] xen: arm: Fix guest mode for 64-bit
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (43 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 44/46] xen: arm: print arm64 not arm32 in xen info when appropriate Ian Campbell
@ 2013-02-14 16:47 ` Ian Campbell
  2013-02-21 15:18   ` Tim Deegan
  2013-02-14 16:48 ` [PATCH V2 46/46] xen: arm: skanky "appended kernel" option Ian Campbell
                   ` (2 subsequent siblings)
  47 siblings, 1 reply; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:47 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

Need to check for the 64-bit EL2 modes, not 32-bit HYP mode.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
 xen/include/asm-arm/regs.h |    8 +++++++-
 1 files changed, 7 insertions(+), 1 deletions(-)

diff --git a/xen/include/asm-arm/regs.h b/xen/include/asm-arm/regs.h
index a723f92..6bfab38 100644
--- a/xen/include/asm-arm/regs.h
+++ b/xen/include/asm-arm/regs.h
@@ -13,10 +13,16 @@
 #define svc_mode(r)     psr_mode((r)->cpsr,PSR_MODE_SVC)
 #define mon_mode(r)     psr_mode((r)->cpsr,PSR_MODE_MON)
 #define abt_mode(r)     psr_mode((r)->cpsr,PSR_MODE_ABT)
-#define hyp_mode(r)     psr_mode((r)->cpsr,PSR_MODE_HYP)
 #define und_mode(r)     psr_mode((r)->cpsr,PSR_MODE_UND)
 #define sys_mode(r)     psr_mode((r)->cpsr,PSR_MODE_SYS)
 
+#ifdef CONFIG_ARM_32
+#define hyp_mode(r)     psr_mode((r)->cpsr,PSR_MODE_HYP)
+#else
+#define hyp_mode(r)     (psr_mode((r)->cpsr,PSR_MODE_EL2h) || \
+                         psr_mode((r)->cpsr,PSR_MODE_EL2t))
+#endif
+
 #define guest_mode(r)                                                         \
 ({                                                                            \
     unsigned long diff = (char *)guest_cpu_user_regs() - (char *)(r);         \
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH V2 46/46] xen: arm: skanky "appended kernel" option
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (44 preceding siblings ...)
  2013-02-14 16:47 ` [PATCH V2 45/46] xen: arm: Fix guest mode for 64-bit Ian Campbell
@ 2013-02-14 16:48 ` Ian Campbell
  2013-02-14 16:59 ` [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
  2013-02-15 12:06 ` [PATCH] xen: arm: implement cpuinfo Ian Campbell
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:48 UTC (permalink / raw)
  To: xen-devel; +Cc: stefano.stabellini, tim, Ian Campbell

I'm using this with the ARMv8 Foundation model:

./Foundation_v8pkg/Foundation_v8 \
	--image xen-unstable/xen/xen-arm64 \
	--data flash.img@0x80400000

where flash.img is a zImage (what you would put in Flash in the VE
models)

(disabled by default; edit config.h to enable)

Mostly throwing this out there in case others find it useful.
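
The setup.c hunk below simply rounds the address just past Xen's _end
(plus the physical offset) up to a 2MB boundary and assumes a 4MB
kernel image sits there. As a quick illustration of that align-up
arithmetic -- the helper name and the example address are mine, not
taken from the patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Round addr up to the next multiple of align (a power of two),
     * which is what the (x + (2<<20)-1) & ~((2<<20)-1) steps below do. */
    static uint64_t align_up(uint64_t addr, uint64_t align)
    {
        return (addr + align - 1) & ~(align - 1);
    }

    int main(void)
    {
        /* e.g. an end-of-Xen address of 0x80123456 rounds up to 0x80200000 */
        printf("%#llx\n",
               (unsigned long long)align_up(0x80123456ULL, 2ULL << 20));
        return 0;
    }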

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
 xen/arch/arm/setup.c         |   14 ++++++++++++++
 xen/include/asm-arm/config.h |    2 ++
 2 files changed, 16 insertions(+), 0 deletions(-)

diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index 94e9754..967a8d4 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -368,6 +368,20 @@ void __init start_xen(unsigned long boot_phys_offset,
         + (fdt_paddr & ((1 << SECOND_SHIFT) - 1));
     fdt_size = device_tree_early_init(fdt);
 
+#ifdef CONFIG_KERNEL_APPEND
+    early_info.modules.module[1].start  = boot_phys_offset + (uintptr_t)_end;
+    early_info.modules.module[1].start += (2<<20)-1;
+    early_info.modules.module[1].start &= ~((2<<20)-1);
+
+    early_info.modules.module[1].size = 4<<20;
+    early_info.modules.nr_mods = 1;
+    early_printk("assuming kernel is appended at "
+                 "%"PRIpaddr"-%"PRIpaddr"\n",
+                 early_info.modules.module[1].start,
+                 early_info.modules.module[1].start
+                 + early_info.modules.module[1].size);
+#endif
+
     cpus = smp_get_max_cpus();
     cmdline_parse(device_tree_bootargs(fdt));
 
diff --git a/xen/include/asm-arm/config.h b/xen/include/asm-arm/config.h
index add70bd..d02ef6c 100644
--- a/xen/include/asm-arm/config.h
+++ b/xen/include/asm-arm/config.h
@@ -34,6 +34,8 @@
 
 #define CONFIG_DOMAIN_PAGE 1
 
+//#define CONFIG_KERNEL_APPEND 1
+
 #define OPT_CONSOLE_STR "com1"
 
 #ifdef MAX_PHYS_CPUS
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* Re: [PATCH 00/46] initial arm v8 (64-bit) support
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (45 preceding siblings ...)
  2013-02-14 16:48 ` [PATCH V2 46/46] xen: arm: skanky "appended kernel" option Ian Campbell
@ 2013-02-14 16:59 ` Ian Campbell
  2013-02-15 12:06 ` [PATCH] xen: arm: implement cpuinfo Ian Campbell
  47 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-14 16:59 UTC (permalink / raw)
  To: xen-devel; +Cc: Tim (Xen.org), Stefano Stabellini


> I am building the 64-bit hypervisor with the Linaro gcc,
> gcc-linaro-aarch64-linux-gnu-4.7-2012.12-20121214_linux, from
> http://www.linaro.org/engineering/armv8#tab3 
> http://releases.linaro.org/13.01/components/toolchain/binaries/gcc-linaro-aarch64-linux-gnu-4.7-2013.01-20130125_linux.tar.bz2

I forgot to mention that for the DTB I am using
arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dtb built by the kernel tree,
passing it to the Xen build on the make command line:
        make -C xen XEN_TARGET_ARCH=arm64 \
            CONFIG_DTB_FILE=/home/ianc/vexpress-v2p-ca15-tc1-linux.dtb \
            debug=y CROSS_COMPILE=aarch64-linux-gnu- -j12 -s install

Xen is passed to the model directly as the image to boot (no
bootwrapper)

The dom0 kernel is passed to the hypervisor in the flash "-C
motherboard.flashloader0.fname=zImage". The arm64 boot-wrapper is not
yet advanced enough to be ported to the scheme used in "xen: arm: parse
modules from DT during early boot." per
http://lists.xen.org/archives/html/xen-devel/2013-01/msg02469.html

For the guest I use arch/arm/boot/xenvm-4.2.dtb and append it to the
zImage (the kernel config has CONFIG_ARM_APPENDED_DTB).
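
In case it helps anyone reproducing this: appending the DTB is plain
concatenation, i.e. something along the lines of

        cat arch/arm/boot/zImage arch/arm/boot/xenvm-4.2.dtb > xenvm-zImage

with the combined file then used as the guest kernel image (the output
name is just an example).
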
Ian.

^ permalink raw reply	[flat|nested] 81+ messages in thread

* [PATCH] xen: arm: implement cpuinfo
  2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
                   ` (46 preceding siblings ...)
  2013-02-14 16:59 ` [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
@ 2013-02-15 12:06 ` Ian Campbell
  2013-02-21 17:19   ` Tim Deegan
  47 siblings, 1 reply; 81+ messages in thread
From: Ian Campbell @ 2013-02-15 12:06 UTC (permalink / raw)
  To: xen-devel; +Cc: Tim (Xen.org), Stefano Stabellini

On Thu, 2013-02-14 at 16:47 +0000, Ian Campbell wrote:
> You can also run 32-bit on the V8 model (using -C
> cluster.cpu0.CONFIG64=0) if you comment out the ThumbEE in
> ctxt_switch_from and ctxt_switch_to (making this dynamic is on my TODO
> list). 

8<-----------------------------------------------

From e45c4e4f45e72e404052629c619af8810dadd76f Mon Sep 17 00:00:00 2001
From: Ian Campbell <ian.campbell@citrix.com>
Date: Fri, 15 Feb 2013 10:30:48 +0000
Subject: [PATCH] xen: arm: implement cpuinfo

Use to:

 - Only context switch ThumbEE state if the processor implements it. In
   particular the ARMv8 FastModels do not.
 - Detect the generic timer, and therefore call identify_cpu before
   init_xen_time.

Also improve the boot time messages a bit.

I haven't added decoding for all of the CPUID words; it seems like overkill
for the moment.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Cc: tim@xen.org
Cc: stefano.stabellini@citrix.com
---
 xen/arch/arm/Makefile            |    1 +
 xen/arch/arm/cpu.c               |   69 +++++++++++++++++++++
 xen/arch/arm/domain.c            |   39 ++++++++++---
 xen/arch/arm/setup.c             |  109 +++++++++++++++++++++++++---------
 xen/arch/arm/smpboot.c           |    7 ++
 xen/arch/arm/time.c              |    5 +-
 xen/include/asm-arm/cpregs.h     |   11 ++--
 xen/include/asm-arm/cpufeature.h |   40 +++++++++++++
 xen/include/asm-arm/domain.h     |   10 +++-
 xen/include/asm-arm/processor.h  |  121 ++++++++++++++++++++++++++++++++++++-
 10 files changed, 364 insertions(+), 48 deletions(-)
 create mode 100644 xen/arch/arm/cpu.c
 create mode 100644 xen/include/asm-arm/cpufeature.h

diff --git a/xen/arch/arm/Makefile b/xen/arch/arm/Makefile
index 7ff67c7..a43e7c9 100644
--- a/xen/arch/arm/Makefile
+++ b/xen/arch/arm/Makefile
@@ -2,6 +2,7 @@ subdir-$(arm32) += arm32
 subdir-$(arm64) += arm64
 
 obj-y += early_printk.o
+obj-y += cpu.o
 obj-y += domain.o
 obj-y += domctl.o
 obj-y += sysctl.o
diff --git a/xen/arch/arm/cpu.c b/xen/arch/arm/cpu.c
new file mode 100644
index 0000000..7a8ad33
--- /dev/null
+++ b/xen/arch/arm/cpu.c
@@ -0,0 +1,69 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <xen/config.h>
+#include <xen/init.h>
+#include <xen/lib.h>
+#include <xen/sched.h>
+
+#include <asm/processor.h>
+
+void __cpuinit identify_cpu(struct cpuinfo_arm *c)
+{
+        c->midr.bits = READ_SYSREG32(MIDR_EL1);
+        c->mpidr.bits = READ_SYSREG(MPIDR_EL1);
+
+#ifdef CONFIG_ARM_64
+        c->pfr64.bits[0] = READ_SYSREG64(ID_AA64PFR0_EL1);
+        c->pfr64.bits[1] = READ_SYSREG64(ID_AA64PFR1_EL1);
+
+        c->dbg64.bits[0] = READ_SYSREG64(ID_AA64DFR0_EL1);
+        c->dbg64.bits[1] = READ_SYSREG64(ID_AA64DFR1_EL1);
+
+        c->aux64.bits[0] = READ_SYSREG64(ID_AA64AFR0_EL1);
+        c->aux64.bits[1] = READ_SYSREG64(ID_AA64AFR1_EL1);
+
+        c->mm64.bits[0]  = READ_SYSREG64(ID_AA64MMFR0_EL1);
+        c->mm64.bits[1]  = READ_SYSREG64(ID_AA64MMFR1_EL1);
+
+        c->isa64.bits[0] = READ_SYSREG64(ID_AA64ISAR0_EL1);
+        c->isa64.bits[1] = READ_SYSREG64(ID_AA64ISAR1_EL1);
+#endif
+
+        c->pfr32.bits[0] = READ_SYSREG32(ID_PFR0_EL1);
+        c->pfr32.bits[1] = READ_SYSREG32(ID_PFR1_EL1);
+
+        c->dbg32.bits[0] = READ_SYSREG32(ID_DFR0_EL1);
+
+        c->aux32.bits[0] = READ_SYSREG32(ID_AFR0_EL1);
+
+        c->mm32.bits[0]  = READ_SYSREG32(ID_MMFR0_EL1);
+        c->mm32.bits[1]  = READ_SYSREG32(ID_MMFR1_EL1);
+        c->mm32.bits[2]  = READ_SYSREG32(ID_MMFR2_EL1);
+        c->mm32.bits[3]  = READ_SYSREG32(ID_MMFR3_EL1);
+
+        c->isa32.bits[0] = READ_SYSREG32(ID_ISAR0_EL1);
+        c->isa32.bits[1] = READ_SYSREG32(ID_ISAR1_EL1);
+        c->isa32.bits[2] = READ_SYSREG32(ID_ISAR2_EL1);
+        c->isa32.bits[3] = READ_SYSREG32(ID_ISAR3_EL1);
+        c->isa32.bits[4] = READ_SYSREG32(ID_ISAR4_EL1);
+        c->isa32.bits[5] = READ_SYSREG32(ID_ISAR5_EL1);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 494bed6..de1d837 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -1,3 +1,14 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
 #include <xen/config.h>
 #include <xen/init.h>
 #include <xen/lib.h>
@@ -13,6 +24,7 @@
 #include <asm/regs.h>
 #include <asm/p2m.h>
 #include <asm/irq.h>
+#include <asm/cpufeature.h>
 
 #include <asm/gic.h>
 #include "vtimer.h"
@@ -58,11 +70,13 @@ static void ctxt_switch_from(struct vcpu *p)
     /* Arch timer */
     virt_timer_save(p);
 
-#if defined(CONFIG_ARM_32x)
-    /* XXX only save these if ThumbEE e.g. ID_PFR0.THUMB_EE_SUPPORT */
-    p->arch.teecr = READ_CP32(TEECR);
-    p->arch.teehbr = READ_CP32(TEEHBR);
+    if ( is_pv32_domain(p->domain) && cpu_has_thumbee )
+    {
+        p->arch.teecr = READ_SYSREG32(TEECR32_EL1);
+        p->arch.teehbr = READ_SYSREG32(TEEHBR32_EL1);
+    }
 
+#ifdef CONFIG_ARM_32
     p->arch.joscr = READ_CP32(JOSCR);
     p->arch.jmcr = READ_CP32(JMCR);
 #endif
@@ -121,6 +135,9 @@ static void ctxt_switch_to(struct vcpu *n)
     p2m_load_VTTBR(n->domain);
     isb();
 
+    WRITE_SYSREG32(n->domain->arch.vpidr, VPIDR_EL2);
+    WRITE_SYSREG(n->domain->arch.vmpidr, VMPIDR_EL2);
+
     /* VGIC */
     gic_restore_state(n);
 
@@ -169,11 +186,13 @@ static void ctxt_switch_to(struct vcpu *n)
     WRITE_SYSREG(n->arch.tpidrro_el0, TPIDRRO_EL0);
     WRITE_SYSREG(n->arch.tpidr_el1, TPIDR_EL1);
 
-#if defined(CONFIG_ARM_32x)
-    /* XXX only restore these if ThumbEE e.g. ID_PFR0.THUMB_EE_SUPPORT */
-    WRITE_CP32(n->arch.teecr, TEECR);
-    WRITE_CP32(n->arch.teehbr, TEEHBR);
+    if ( is_pv32_domain(n->domain) && cpu_has_thumbee )
+    {
+        WRITE_SYSREG32(n->arch.teecr, TEECR32_EL1);
+        WRITE_SYSREG32(n->arch.teehbr, TEEHBR32_EL1);
+    }
 
+#ifdef CONFIG_ARM_32
     WRITE_CP32(n->arch.joscr, JOSCR);
     WRITE_CP32(n->arch.jmcr, JMCR);
 #endif
@@ -447,6 +466,10 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
     if ( (d->shared_info = alloc_xenheap_pages(0, 0)) == NULL )
         goto fail;
 
+    /* Default the virtual ID to match the physical */
+    d->arch.vpidr = boot_cpu_data.midr.bits;
+    d->arch.vmpidr = boot_cpu_data.mpidr.bits;
+
     clear_page(d->shared_info);
     share_xen_page_with_guest(
         virt_to_page(d->shared_info), d, XENSHARE_writable);
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index 967a8d4..d13e45d 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -40,6 +40,9 @@
 #include <asm/vfp.h>
 #include <asm/early_printk.h>
 #include <asm/gic.h>
+#include <asm/cpufeature.h>
+
+struct cpuinfo_arm __read_mostly boot_cpu_data;
 
 static __used void init_done(void)
 {
@@ -54,41 +57,93 @@ static void __init init_idle_domain(void)
     /* TODO: setup_idle_pagetable(); */
 }
 
+static const char * __initdata processor_implementers[] = {
+    ['A'] = "ARM Limited",
+    ['D'] = "Digital Equipment Corp",
+    ['M'] = "Motorola, Freescale Semiconductor Inc.",
+    ['Q'] = "Qualcomm Inc.",
+    ['V'] = "Marvell Semiconductor Inc.",
+    ['i'] = "Intel Corporation",
+};
+
 static void __init processor_id(void)
 {
+    const char *implementer = "Unknown";
+    struct cpuinfo_arm *c = &boot_cpu_data;
+
+    identify_cpu(c);
+    current_cpu_data = *c;
+
+    if ( c->midr.implementer < ARRAY_SIZE(processor_implementers) &&
+         processor_implementers[c->midr.implementer] )
+        implementer = processor_implementers[c->midr.implementer];
 
-    /* Setup the virtual ID to match the physical */
-    WRITE_SYSREG32(READ_SYSREG32(MIDR_EL1), VPIDR_EL2);
-    WRITE_SYSREG(READ_SYSREG(MPIDR_EL1), VMPIDR_EL2);
+    if ( c->midr.architecture != 0xf )
+        printk("Huh, cpu architecture %x, expected 0xf (defined by cpuid)\n",
+               c->midr.architecture);
+
+    printk("Processor: \"%s\", variant: 0x%x, part 0x%03x, rev 0x%x\n",
+           implementer, c->midr.variant, c->midr.part_number, c->midr.revision);
 
 #if defined(CONFIG_ARM_64)
-    printk("64-bit Processor Features: %016"PRIx64" %016"PRIx64"\n",
-           READ_SYSREG64(ID_AA64PFR0_EL1), READ_SYSREG64(ID_AA64PFR1_EL1));
-    printk("64-bit Debug Features: %016"PRIx64" %016"PRIx64"\n",
-           READ_SYSREG64(ID_AA64DFR0_EL1), READ_SYSREG64(ID_AA64DFR1_EL1));
-    printk("64-bit Auxiliary Features: %016"PRIx64" %016"PRIx64"\n",
-           READ_SYSREG64(ID_AA64AFR0_EL1), READ_SYSREG64(ID_AA64AFR1_EL1));
-    printk("64-bit Memory Model Features: %016"PRIx64" %016"PRIx64"\n",
-           READ_SYSREG64(ID_AA64MMFR0_EL1), READ_SYSREG64(ID_AA64MMFR1_EL1));
-    printk("64-bit ISA Features:  %016"PRIx64" %016"PRIx64"\n",
-           READ_SYSREG64(ID_AA64ISAR0_EL1), READ_SYSREG64(ID_AA64ISAR1_EL1));
+    printk("64-bit Execution:\n");
+    printk("  Processor Features: %016"PRIx64" %016"PRIx64"\n",
+           boot_cpu_data.pfr64.bits[0], boot_cpu_data.pfr64.bits[1]);
+    printk("    Exception Levels: EL3:%s EL2:%s EL1:%s EL0:%s\n",
+           cpu_has_el3_32 ? "64+32" : cpu_has_el3_64 ? "64" : "No",
+           cpu_has_el2_32 ? "64+32" : cpu_has_el2_64 ? "64" : "No",
+           cpu_has_el1_32 ? "64+32" : cpu_has_el1_64 ? "64" : "No",
+           cpu_has_el0_32 ? "64+32" : cpu_has_el0_64 ? "64" : "No");
+    printk("    Extensions:%s%s\n",
+           cpu_has_fp ? " FloatingPoint" : "",
+           cpu_has_simd ? " AdvancedSIMD" : "");
+
+    printk("  Debug Features: %016"PRIx64" %016"PRIx64"\n",
+           boot_cpu_data.dbg64.bits[0], boot_cpu_data.dbg64.bits[1]);
+    printk("  Auxiliary Features: %016"PRIx64" %016"PRIx64"\n",
+           boot_cpu_data.aux64.bits[0], boot_cpu_data.aux64.bits[1]);
+    printk("  Memory Model Features: %016"PRIx64" %016"PRIx64"\n",
+           boot_cpu_data.mm64.bits[0], boot_cpu_data.mm64.bits[1]);
+    printk("  ISA Features:  %016"PRIx64" %016"PRIx64"\n",
+           boot_cpu_data.isa64.bits[0], boot_cpu_data.isa64.bits[1]);
 #endif
+
     /*
      * On AArch64 these refer to the capabilities when running in
      * AArch32 mode.
      */
-    printk("32-bit Processor Features: %08x %08x\n",
-           READ_SYSREG32(ID_PFR0_EL1), READ_SYSREG32(ID_PFR1_EL1));
-    printk("32-bit Debug Features: %08x\n", READ_SYSREG32(ID_DFR0_EL1));
-    printk("32-bit Auxiliary Features: %08x\n", READ_SYSREG32(ID_AFR0_EL1));
-    printk("32-bit Memory Model Features: %08x %08x %08x %08x\n",
-           READ_SYSREG32(ID_MMFR0_EL1), READ_SYSREG32(ID_MMFR1_EL1),
-           READ_SYSREG32(ID_MMFR2_EL1), READ_SYSREG32(ID_MMFR3_EL1));
-    printk("32-bit ISA Features: %08x %08x %08x %08x %08x %08x\n",
-           READ_SYSREG32(ID_ISAR0_EL1), READ_SYSREG32(ID_ISAR1_EL1),
-           READ_SYSREG32(ID_ISAR2_EL1), READ_SYSREG32(ID_ISAR3_EL1),
-           READ_SYSREG32(ID_ISAR4_EL1), READ_SYSREG32(ID_ISAR5_EL1));
-
+    if ( cpu_has_aarch32 )
+    {
+        printk("32-bit Execution:\n");
+        printk("  Processor Features: %08"PRIx32":%08"PRIx32"\n",
+               boot_cpu_data.pfr32.bits[0], boot_cpu_data.pfr32.bits[1]);
+        printk("    Instruction Sets:%s%s%s%s%s\n",
+               cpu_has_aarch32 ? " AArch32" : "",
+               cpu_has_thumb ? " Thumb" : "",
+               cpu_has_thumb2 ? " Thumb-2" : "",
+               cpu_has_thumbee ? " ThumbEE" : "",
+               cpu_has_jazelle ? " Jazelle" : "");
+        printk("    Extensions:%s%s\n",
+               cpu_has_gentimer ? " GenericTimer" : "",
+               cpu_has_security ? " Security" : "");
+
+        printk("  Debug Features: %08"PRIx32"\n",
+               boot_cpu_data.dbg32.bits[0]);
+        printk("  Auxiliary Features: %08"PRIx32"\n",
+               boot_cpu_data.aux32.bits[0]);
+        printk("  Memory Model Features: "
+               "%08"PRIx32" %08"PRIx32" %08"PRIx32" %08"PRIx32"\n",
+               boot_cpu_data.mm32.bits[0], boot_cpu_data.mm32.bits[1],
+               boot_cpu_data.mm32.bits[2], boot_cpu_data.mm32.bits[3]);
+        printk(" ISA Features: %08x %08x %08x %08x %08x %08x\n",
+               boot_cpu_data.isa32.bits[0], boot_cpu_data.isa32.bits[1],
+               boot_cpu_data.isa32.bits[2], boot_cpu_data.isa32.bits[3],
+               boot_cpu_data.isa32.bits[4], boot_cpu_data.isa32.bits[5]);
+    }
+    else
+    {
+        printk("32-bit Execution: Unsupported\n");
+    }
 }
 
 void __init discard_initial_modules(void)
@@ -393,6 +448,8 @@ void __init start_xen(unsigned long boot_phys_offset,
     console_init_preirq();
 #endif
 
+    processor_id();
+
     init_xen_time();
 
     gic_init();
@@ -416,8 +473,6 @@ void __init start_xen(unsigned long boot_phys_offset,
      */
     WRITE_SYSREG32(0x80002558, VTCR_EL2); isb();
 
-    processor_id();
-
     enable_vfp();
 
     softirq_init();
diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c
index b18f137..cadf79f 100644
--- a/xen/arch/arm/smpboot.c
+++ b/xen/arch/arm/smpboot.c
@@ -38,6 +38,8 @@ EXPORT_SYMBOL(cpu_online_map);
 cpumask_t cpu_possible_map;
 EXPORT_SYMBOL(cpu_possible_map);
 
+struct cpuinfo_arm cpu_data[NR_CPUS];
+
 /* Fake one node for now. See also include/asm-arm/numa.h */
 nodemask_t __read_mostly node_online_map = { { [0] = 1UL } };
 
@@ -136,11 +138,16 @@ void __cpuinit start_secondary(unsigned long boot_phys_offset,
                                unsigned long fdt_paddr,
                                unsigned long cpuid)
 {
+    struct cpuinfo_arm *c = cpu_data + cpuid;
+
     memset(get_cpu_info(), 0, sizeof (struct cpu_info));
 
     /* TODO: handle boards where CPUIDs are not contiguous */
     set_processor_id(cpuid);
 
+    *c = boot_cpu_data;
+    identify_cpu(c);
+
     /* Setup Hyp vector base */
     WRITE_SYSREG((vaddr_t)&hyp_traps_vector, VBAR_EL2);
 
diff --git a/xen/arch/arm/time.c b/xen/arch/arm/time.c
index ee92d8c..81d490d 100644
--- a/xen/arch/arm/time.c
+++ b/xen/arch/arm/time.c
@@ -31,6 +31,7 @@
 #include <asm/system.h>
 #include <asm/time.h>
 #include <asm/gic.h>
+#include <asm/cpufeature.h>
 
 /*
  * Unfortunately the hypervisor timer interrupt appears to be buggy in
@@ -90,10 +91,8 @@ static uint32_t calibrate_timer(void)
 int __init init_xen_time(void)
 {
     /* Check that this CPU supports the Generic Timer interface */
-#if defined(CONFIG_ARM_32)
-    if ( (READ_CP32(ID_PFR1) & ID_PFR1_GT_MASK) != ID_PFR1_GT_v1 )
+    if ( !cpu_has_gentimer )
         panic("CPU does not support the Generic Timer v1 interface.\n");
-#endif
 
     cpu_khz = READ_SYSREG32(CNTFRQ_EL0) / 1000;
     boot_count = READ_SYSREG64(CNTPCT_EL0);
diff --git a/xen/include/asm-arm/cpregs.h b/xen/include/asm-arm/cpregs.h
index 908aad9..a72ca62 100644
--- a/xen/include/asm-arm/cpregs.h
+++ b/xen/include/asm-arm/cpregs.h
@@ -265,10 +265,14 @@
 #define ID_PFR0_EL1             ID_PFR0
 #define ID_PFR1_EL1             ID_PFR1
 #define IFSR32_EL2              IFSR
+#define MIDR_EL1                MIDR
+#define MPIDR_EL1               MPIDR
 #define PAR_EL1                 PAR
 #define SCTLR_EL1               SCTLR
 #define SCTLR_EL2               HSCTLR
 #define TCR_EL1                 TTBCR
+#define TEECR32_EL1             TEECR
+#define TEEHBR32_EL1            TEEHBR
 #define TPIDRRO_EL0             TPIDRURO
 #define TPIDR_EL0               TPIDRURW
 #define TPIDR_EL1               TPIDRPRW
@@ -278,13 +282,10 @@
 #define TTBR1_EL1               TTBR1
 #define VBAR_EL1                VBAR
 #define VBAR_EL2                HVBAR
+#define VMPIDR_EL2              VMPIDR
+#define VPIDR_EL2               VPIDR
 #define VTCR_EL2                VTCR
 #define VTTBR_EL2               VTTBR
-#define MIDR_EL1                MIDR
-#define VPIDR_EL2               VPIDR
-#define MPIDR_EL1               MPIDR
-#define VMPIDR_EL2              VMPIDR
-
 #endif
 
 #endif
diff --git a/xen/include/asm-arm/cpufeature.h b/xen/include/asm-arm/cpufeature.h
new file mode 100644
index 0000000..e633239
--- /dev/null
+++ b/xen/include/asm-arm/cpufeature.h
@@ -0,0 +1,40 @@
+#ifndef __ASM_ARM_CPUFEATURE_H
+#define __ASM_ARM_CPUFEATURE_H
+
+#ifdef CONFIG_ARM_64
+#define cpu_feature64(c, feat)         ((c)->pfr64.feat)
+#define boot_cpu_feature64(feat)       (boot_cpu_data.pfr64.feat)
+
+#define cpu_has_el0_32    (boot_cpu_feature64(el0) == 2)
+#define cpu_has_el0_64    (boot_cpu_feature64(el0) >= 1)
+#define cpu_has_el1_32    (boot_cpu_feature64(el1) == 2)
+#define cpu_has_el1_64    (boot_cpu_feature64(el1) >= 1)
+#define cpu_has_el2_32    (boot_cpu_feature64(el2) == 2)
+#define cpu_has_el2_64    (boot_cpu_feature64(el2) >= 1)
+#define cpu_has_el3_32    (boot_cpu_feature64(el3) == 2)
+#define cpu_has_el3_64    (boot_cpu_feature64(el3) >= 1)
+#define cpu_has_fp        (boot_cpu_feature64(fp) == 0)
+#define cpu_has_simd      (boot_cpu_feature64(simd) == 0)
+#endif
+
+#define cpu_feature32(c, feat)         ((c)->pfr32.feat)
+#define boot_cpu_feature32(feat)       (boot_cpu_data.pfr32.feat)
+
+#define cpu_has_aarch32   (boot_cpu_feature32(arm) == 1)
+#define cpu_has_thumb     (boot_cpu_feature32(thumb) >= 1)
+#define cpu_has_thumb2    (boot_cpu_feature32(thumb) >= 3)
+#define cpu_has_jazelle   (boot_cpu_feature32(jazelle) >= 0)
+#define cpu_has_thumbee   (boot_cpu_feature32(thumbee) == 1)
+
+#define cpu_has_gentimer  (boot_cpu_feature32(gentimer) == 1)
+#define cpu_has_security  (boot_cpu_feature32(security) > 0)
+
+#endif
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 4a4bf2f..601e972 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -57,6 +57,10 @@ struct arch_domain
     struct hvm_domain hvm_domain;
     xen_pfn_t *grant_table_gpfn;
 
+    /* Virtual CPUID */
+    uint32_t vpidr;
+    register_t vmpidr;
+
     struct {
         /*
          * Covers access to other members of this struct _except_ for
@@ -166,8 +170,12 @@ struct arch_vcpu
     register_t tpidr_el1;
     register_t tpidrro_el0;
 
+    uint32_t teecr, teehbr; /* ThumbEE, 32-bit guests only */
 #ifdef CONFIG_ARM_32
-    uint32_t teecr, teehbr;
+    /*
+     * ARMv8 only supports a trivial implementation on Jazelle when in AArch32
+     * mode and therefore has no extended control registers.
+     */
     uint32_t joscr, jmcr;
 #endif
 
diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h
index 1072aa2..0515986 100644
--- a/xen/include/asm-arm/processor.h
+++ b/xen/include/asm-arm/processor.h
@@ -91,6 +91,123 @@
 #define HSR_EC_DATA_ABORT_HYP       0x25
 
 #ifndef __ASSEMBLY__
+
+#include <xen/types.h>
+
+struct cpuinfo_arm {
+    union {
+        uint32_t bits;
+        struct {
+            unsigned long revision:4;
+            unsigned long part_number:12;
+            unsigned long architecture:4;
+            unsigned long variant:4;
+            unsigned long implementer:8;
+        };
+    } midr;
+    union {
+        register_t bits;
+        struct {
+            unsigned long aff0:8;
+            unsigned long aff1:8;
+            unsigned long aff2:8;
+            unsigned long mt:1; /* Multi-thread, iff MP == 1 */
+            unsigned long __res0:5;
+            unsigned long up:1; /* UP system, iff MP == 1 */
+            unsigned long mp:1; /* MP extensions */
+
+#ifdef CONFIG_ARM_64
+            unsigned long aff3:8;
+            unsigned long __res1:24;
+#endif
+        };
+    } mpidr;
+
+#ifdef CONFIG_ARM_64
+    /* 64-bit CPUID registers. */
+    union {
+        uint64_t bits[2];
+        struct {
+            unsigned long el0:4;
+            unsigned long el1:4;
+            unsigned long el2:4;
+            unsigned long el3:4;
+            unsigned long fp:4;   /* Floating Point */
+            unsigned long simd:4; /* Advanced SIMD */
+            unsigned long __res0:8;
+
+            unsigned long __res1;
+        };
+    } pfr64;
+
+    struct {
+        uint64_t bits[2];
+    } dbg64;
+
+    struct {
+        uint64_t bits[2];
+    } aux64;
+
+    struct {
+        uint64_t bits[2];
+    } mm64;
+
+    struct {
+        uint64_t bits[2];
+    } isa64;
+
+#endif
+
+    /*
+     * 32-bit CPUID registers. On ARMv8 these describe the properties
+     * when running in 32-bit mode.
+     */
+    union {
+        uint32_t bits[2];
+        struct {
+            unsigned long arm:4;
+            unsigned long thumb:4;
+            unsigned long jazelle:4;
+            unsigned long thumbee:4;
+            unsigned long __res0:16;
+
+            unsigned long progmodel:4;
+            unsigned long security:4;
+            unsigned long mprofile:4;
+            unsigned long virt:4;
+            unsigned long gentimer:4;
+            unsigned long __res1:12;
+        };
+    } pfr32;
+
+    struct {
+        uint32_t bits[1];
+    } dbg32;
+
+    struct {
+        uint32_t bits[1];
+    } aux32;
+
+    struct {
+        uint32_t bits[4];
+    } mm32;
+
+    struct {
+        uint32_t bits[6];
+    } isa32;
+};
+
+/*
+ * capabilities of CPUs
+ */
+
+extern struct cpuinfo_arm boot_cpu_data;
+
+extern void identify_cpu(struct cpuinfo_arm *);
+
+extern struct cpuinfo_arm cpu_data[];
+#define current_cpu_data cpu_data[smp_processor_id()]
+
 union hsr {
     uint32_t bits;
     struct {
@@ -225,10 +342,6 @@ union hsr {
 #define CNTx_CTL_MASK     (1u<<1)  /* Mask IRQ */
 #define CNTx_CTL_PENDING  (1u<<2)  /* IRQ pending */
 
-/* CPUID bits */
-#define ID_PFR1_GT_MASK  0x000F0000  /* Generic Timer interface support */
-#define ID_PFR1_GT_v1    0x00010000
-
 #if defined(CONFIG_ARM_32)
 # include <asm/arm32/processor.h>
 #elif defined(CONFIG_ARM_64)
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 01/46] xen: arm32: Don't bother with the bootloader provided ARM-Linux machine type
  2013-02-14 16:47 ` [PATCH V2 01/46] xen: arm32: Don't bother with the bootloader provided ARM-Linux machine type Ian Campbell
@ 2013-02-15 13:36   ` Ian Campbell
  0 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-15 13:36 UTC (permalink / raw)
  To: xen-devel; +Cc: Tim (Xen.org), Stefano Stabellini

On Thu, 2013-02-14 at 16:47 +0000, Ian Campbell wrote:
> Everything is DTB based and on 64-bit there is no such concept even in
> Linux.
> 
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>

Stefano Acked in
<alpine.DEB.2.02.1301241739070.29727@kaball.uk.xensource.com> which I
missed picking up here. Applied, thanks.

> ---
> v2: Update start_secondary too.
> ---
>  xen/arch/arm/arm32/head.S |    7 +++----
>  xen/arch/arm/setup.c      |    1 -
>  xen/arch/arm/smpboot.c    |    1 -
>  3 files changed, 3 insertions(+), 6 deletions(-)
> 
> diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
> index 20e9da6..92fc36c 100644
> --- a/xen/arch/arm/arm32/head.S
> +++ b/xen/arch/arm/arm32/head.S
> @@ -72,7 +72,7 @@ past_zImage:
>          cpsid aif                    /* Disable all interrupts */
>  
>          /* Save the bootloader arguments in less-clobberable registers */
> -        mov   r7, r1                 /* r7 := ARM-linux machine type */
> +        /* No need to save r1 == Unused ARM-linux machine type */
>          mov   r8, r2                 /* r8 := ATAG base address */
>  
>          /* Find out where we are */
> @@ -334,9 +334,8 @@ launch:
>          add   sp, #STACK_SIZE        /* (which grows down from the top). */
>          sub   sp, #CPUINFO_sizeof    /* Make room for CPU save record */
>          mov   r0, r10                /* Marshal args: - phys_offset */
> -        mov   r1, r7                 /*               - machine type */
> -        mov   r2, r8                 /*               - ATAG address */
> -        movs  r3, r12                /*               - CPU ID */
> +        mov   r1, r8                 /*               - ATAG address */
> +        movs  r2, r12                /*               - CPU ID */
>          beq   start_xen              /* and disappear into the land of C */
>          b     start_secondary        /* (to the appropriate entry point) */
>  
> diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
> index acb7abb..782d252 100644
> --- a/xen/arch/arm/setup.c
> +++ b/xen/arch/arm/setup.c
> @@ -329,7 +329,6 @@ void __init setup_cache(void)
>  
>  /* C entry point for boot CPU */
>  void __init start_xen(unsigned long boot_phys_offset,
> -                      unsigned long arm_type,
>                        unsigned long atag_paddr,
>                        unsigned long cpuid)
>  {
> diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c
> index c7a586b..da4880c 100644
> --- a/xen/arch/arm/smpboot.c
> +++ b/xen/arch/arm/smpboot.c
> @@ -132,7 +132,6 @@ make_cpus_ready(unsigned int max_cpus, unsigned long boot_phys_offset)
>  
>  /* Boot the current CPU */
>  void __cpuinit start_secondary(unsigned long boot_phys_offset,
> -                               unsigned long arm_type,
>                                 unsigned long atag_paddr,
>                                 unsigned long cpuid)
>  {

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 02/46] xen: arm: rename atag_paddr argument fdt_paddr
  2013-02-14 16:47 ` [PATCH V2 02/46] xen: arm: rename atag_paddr argument fdt_paddr Ian Campbell
@ 2013-02-15 13:36   ` Ian Campbell
  0 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-15 13:36 UTC (permalink / raw)
  To: xen-devel; +Cc: Tim (Xen.org), Stefano Stabellini

On Thu, 2013-02-14 at 16:47 +0000, Ian Campbell wrote:
> We don't support ATAGs and this is always actually an FDT address.
> 
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> Acked-by: Tim Deegan <tim@xen.org>

Applied, thanks.

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 03/46] xen: arm: do not pass a machine ID to dom0
  2013-02-14 16:47 ` [PATCH V2 03/46] xen: arm: do not pass a machine ID to dom0 Ian Campbell
@ 2013-02-15 13:37   ` Ian Campbell
  0 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-15 13:37 UTC (permalink / raw)
  To: xen-devel; +Cc: Tim (Xen.org), Stefano Stabellini

On Thu, 2013-02-14 at 16:47 +0000, Ian Campbell wrote:
> Xen relies on DTB and we pass in a suitable device-tree so we don't
> need to (and shouldn't) pretend to be a Versatile Express here.
> 
> We already don't pass a machine ID to domU in the same way.
> 
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> Acked-by: Tim Deegan <tim@xen.org>

Applied, thanks.

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 04/46] arm: avoid inline asm for dsb, isb, wfi and sev.
  2013-02-14 16:47 ` [PATCH V2 04/46] arm: avoid inline asm for dsb, isb, wfi and sev Ian Campbell
@ 2013-02-21 14:51   ` Tim Deegan
  0 siblings, 0 replies; 81+ messages in thread
From: Tim Deegan @ 2013-02-21 14:51 UTC (permalink / raw)
  To: Ian Campbell; +Cc: stefano.stabellini, xen-devel

At 16:47 +0000 on 14 Feb (1360860438), Ian Campbell wrote:
> "dsb" must be written "dsb sy" on arm64. "dsb sy" is also valid (and
> synonymous) on arm32 but we have a macro so lets use it.
> 
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>

Stefano's acked this one too.  But for good measure,
Acked-by: Tim Deegan <tim@xen.org>

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 05/46] xen: arm64: initial build + config changes, start of day code
  2013-02-14 16:47 ` [PATCH V2 05/46] xen: arm64: initial build + config changes, start of day code Ian Campbell
@ 2013-02-21 14:56   ` Tim Deegan
  2013-02-21 15:26     ` Ian Campbell
  0 siblings, 1 reply; 81+ messages in thread
From: Tim Deegan @ 2013-02-21 14:56 UTC (permalink / raw)
  To: Ian Campbell; +Cc: stefano.stabellini, xen-devel

At 16:47 +0000 on 14 Feb (1360860439), Ian Campbell wrote:
> +2:      PRINT("- Started in Hyp mode -\r\n")
> +
> +hyp:

I thought we were going to use "EL3" instead of "Hyp".

> +        /* Non-boot CPUs need to move on to the relocated pagetables */
> +        //mov   x0, #0

This line should go. 

> +/*
> + * xen/arch/arm/arm64/mode_switch.S
> + *
> + * Start-of day code to take a CPU from EL3 to EL2. Largely taken from
> +        bootwrapper.

Still missing a *.

Tim.

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 08/46] xen: arm64: atomics
  2013-02-14 16:47 ` [PATCH V2 08/46] xen: arm64: atomics Ian Campbell
@ 2013-02-21 14:57   ` Tim Deegan
  0 siblings, 0 replies; 81+ messages in thread
From: Tim Deegan @ 2013-02-21 14:57 UTC (permalink / raw)
  To: Ian Campbell; +Cc: stefano.stabellini, xen-devel

At 16:47 +0000 on 14 Feb (1360860442), Ian Campbell wrote:
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>

Acked-by: Tim Deegan <tim@xen.org>

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 10/46] xen: arm64: TLB flushes
  2013-02-14 16:47 ` [PATCH V2 10/46] xen: arm64: TLB flushes Ian Campbell
@ 2013-02-21 15:00   ` Tim Deegan
  0 siblings, 0 replies; 81+ messages in thread
From: Tim Deegan @ 2013-02-21 15:00 UTC (permalink / raw)
  To: Ian Campbell; +Cc: stefano.stabellini, xen-devel

At 16:47 +0000 on 14 Feb (1360860444), Ian Campbell wrote:
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>

Acked-by: Tim Deegan <tim@xen.org>

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 14/46] xen: arm64: barriers and wait for interrupts/events
  2013-02-14 16:47 ` [PATCH V2 14/46] xen: arm64: barriers and wait for interrupts/events Ian Campbell
@ 2013-02-21 15:01   ` Tim Deegan
  2013-02-21 15:27     ` Ian Campbell
  0 siblings, 1 reply; 81+ messages in thread
From: Tim Deegan @ 2013-02-21 15:01 UTC (permalink / raw)
  To: Ian Campbell; +Cc: stefano.stabellini, xen-devel

At 16:47 +0000 on 14 Feb (1360860448), Ian Campbell wrote:
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>

Acked-by: Tim Deegan <tim@xen.org>

Were we also talking about having smp_ barriers equivalent to the normal
ones, like on x86?

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 19/46] xen: arm64: changes to setup_pagetables and mm.c
  2013-02-14 16:47 ` [PATCH V2 19/46] xen: arm64: changes to setup_pagetables and mm.c Ian Campbell
@ 2013-02-21 15:04   ` Tim Deegan
  0 siblings, 0 replies; 81+ messages in thread
From: Tim Deegan @ 2013-02-21 15:04 UTC (permalink / raw)
  To: Ian Campbell; +Cc: stefano.stabellini, xen-devel

At 16:47 +0000 on 14 Feb (1360860453), Ian Campbell wrote:
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>

Acked-by: Tim Deegan <tim@xen.org>

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 25/46] xen: arm64: add guest type to domain field.
  2013-02-14 16:47 ` [PATCH V2 25/46] xen: arm64: add guest type to domain field Ian Campbell
@ 2013-02-21 15:05   ` Tim Deegan
  0 siblings, 0 replies; 81+ messages in thread
From: Tim Deegan @ 2013-02-21 15:05 UTC (permalink / raw)
  To: Ian Campbell; +Cc: stefano.stabellini, xen-devel

At 16:47 +0000 on 14 Feb (1360860459), Ian Campbell wrote:
> Currently 32 bit PV is the only option.
> 
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>

Acked-by: Tim Deegan <tim@xen.org>

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 27/46] xen: arm: arm64 trap handling.
  2013-02-14 16:47 ` [PATCH V2 27/46] xen: arm: arm64 trap handling Ian Campbell
@ 2013-02-21 15:10   ` Tim Deegan
  2013-02-21 15:25     ` Ian Campbell
  0 siblings, 1 reply; 81+ messages in thread
From: Tim Deegan @ 2013-02-21 15:10 UTC (permalink / raw)
  To: Ian Campbell; +Cc: stefano.stabellini, xen-devel

At 16:47 +0000 on 14 Feb (1360860461), Ian Campbell wrote:
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> ---
> v2: Call leave_hypervisor_tail on exit back to guest, disable interrupts while
>     restoring state.

You don't seem to have addressed my other comments on v1:

> --- a/xen/arch/arm/arm64/Makefile
> +++ b/xen/arch/arm/arm64/Makefile
> @@ -1,5 +1,7 @@
>  subdir-y += lib
>  
> +obj-y += entry.o
>  obj-y += mode_switch.o
>  
> +obj-y += traps.o
>  obj-y += domain.o

Alphabetical order, please. 

> +#define __L2(_x)  (((_x) & 0x00000002) ?   1 : 0)
> +#define __L4(_x)  (((_x) & 0x0000000c) ? ( 2 + __L2( (_x)>> 2)) : __L2( _x))
> +#define __L8(_x)  (((_x) & 0x000000f0) ? ( 4 + __L4( (_x)>> 4)) : __L4( _x))
> +#define __L16(_x) (((_x) & 0x0000ff00) ? ( 8 + __L8( (_x)>> 8)) : __L8( _x))
> +#define LOG_2(_x) (((_x) & 0xffff0000) ? (16 + __L16((_x)>>16)) : __L16(_x))

This is now replicated in three places.  Maybe it should live in, say,
xen/bitops.h?
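
(As an aside: this macro chain evaluates to floor(log2(x)) for a constant x.
A minimal standalone sketch of its behaviour -- not taken from any of the
patches, just pasting the definitions above into a test program:

    #include <assert.h>

    #define __L2(_x)  (((_x) & 0x00000002) ?   1 : 0)
    #define __L4(_x)  (((_x) & 0x0000000c) ? ( 2 + __L2( (_x)>> 2)) : __L2( _x))
    #define __L8(_x)  (((_x) & 0x000000f0) ? ( 4 + __L4( (_x)>> 4)) : __L4( _x))
    #define __L16(_x) (((_x) & 0x0000ff00) ? ( 8 + __L8( (_x)>> 8)) : __L8( _x))
    #define LOG_2(_x) (((_x) & 0xffff0000) ? (16 + __L16((_x)>>16)) : __L16(_x))

    int main(void)
    {
        assert(LOG_2(1) == 0);
        assert(LOG_2(7) == 2);       /* rounds down */
        assert(LOG_2(0x1000) == 12); /* 4K page -> 12 */
        return 0;
    }

Being built entirely from conditional expressions, it remains an integer
constant expression for constant arguments, so it can be used e.g. in array
sizes or BUILD_BUG_ON().)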

> --- a/xen/include/asm-arm/processor.h
> +++ b/xen/include/asm-arm/processor.h
> @@ -238,7 +238,7 @@ union hsr {
>  #endif
>  
>  #ifndef __ASSEMBLY__
> -extern uint32_t hyp_traps_vector[8];
> +extern uint32_t hyp_traps_vector;

Keep the array type?  uint8_t[] would do, or define up something the
right size. 

Cheers,

Tim.

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 31/46] xen: arm: show_registers() support for 64-bit.
  2013-02-14 16:47 ` [PATCH V2 31/46] xen: arm: show_registers() support for 64-bit Ian Campbell
@ 2013-02-21 15:11   ` Tim Deegan
  0 siblings, 0 replies; 81+ messages in thread
From: Tim Deegan @ 2013-02-21 15:11 UTC (permalink / raw)
  To: Ian Campbell; +Cc: stefano.stabellini, xen-devel

At 16:47 +0000 on 14 Feb (1360860465), Ian Campbell wrote:
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>

Acked-by: Tim Deegan <tim@xen.org>

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 44/46] xen: arm: print arm64 not arm32 in xen info when appropriate.
  2013-02-14 16:47 ` [PATCH V2 44/46] xen: arm: print arm64 not arm32 in xen info when appropriate Ian Campbell
@ 2013-02-21 15:12   ` Tim Deegan
  0 siblings, 0 replies; 81+ messages in thread
From: Tim Deegan @ 2013-02-21 15:12 UTC (permalink / raw)
  To: Ian Campbell; +Cc: stefano.stabellini, xen-devel

At 16:47 +0000 on 14 Feb (1360860478), Ian Campbell wrote:
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>

Acked-by: Tim Deegan <tim@xen.org>

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 45/46] xen: arm: Fix guest mode for 64-bit
  2013-02-14 16:47 ` [PATCH V2 45/46] xen: arm: Fix guest mode for 64-bit Ian Campbell
@ 2013-02-21 15:18   ` Tim Deegan
  0 siblings, 0 replies; 81+ messages in thread
From: Tim Deegan @ 2013-02-21 15:18 UTC (permalink / raw)
  To: Ian Campbell; +Cc: stefano.stabellini, xen-devel

At 16:47 +0000 on 14 Feb (1360860479), Ian Campbell wrote:
> Need to check for the 64-bit EL2 modes, not 32-bit HYP mode.
> 
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>

Acked-by: Tim Deegan <tim@xen.org>

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 27/46] xen: arm: arm64 trap handling.
  2013-02-21 15:10   ` Tim Deegan
@ 2013-02-21 15:25     ` Ian Campbell
  2013-02-21 15:36       ` Tim Deegan
  2013-02-21 15:49       ` Ian Campbell
  0 siblings, 2 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-21 15:25 UTC (permalink / raw)
  To: Tim Deegan; +Cc: Stefano Stabellini, xen-devel

On Thu, 2013-02-21 at 15:10 +0000, Tim Deegan wrote:
> At 16:47 +0000 on 14 Feb (1360860461), Ian Campbell wrote:
> > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> > ---
> > v2: Call leave_hypervisor_tail on exit back to guest, disable interrupts while
> >     restoring state.
> 
> You don't seem to have addressed my other comments on v1:

I've got them in v3; I noted that I hadn't addressed your comments on this
patch in the #0/46.

> > --- a/xen/arch/arm/arm64/Makefile
> > +++ b/xen/arch/arm/arm64/Makefile
> > @@ -1,5 +1,7 @@
> >  subdir-y += lib
> >  
> > +obj-y += entry.o
> >  obj-y += mode_switch.o
> >  
> > +obj-y += traps.o
> >  obj-y += domain.o
> 
> Alphabetical order, please.

I kept this in the same order as arm32/Makefile on purpose.

> > +#define __L2(_x)  (((_x) & 0x00000002) ?   1 : 0)
> > +#define __L4(_x)  (((_x) & 0x0000000c) ? ( 2 + __L2( (_x)>> 2)) : __L2( _x))
> > +#define __L8(_x)  (((_x) & 0x000000f0) ? ( 4 + __L4( (_x)>> 4)) : __L4( _x))
> > +#define __L16(_x) (((_x) & 0x0000ff00) ? ( 8 + __L8( (_x)>> 8)) : __L8( _x))
> > +#define LOG_2(_x) (((_x) & 0xffff0000) ? (16 + __L16((_x)>>16)) : __L16(_x))
> 
> This is now replicated in three places.  Maybe it should live in, say,
> xen/bitops.h?
[...]
> Keep the array type?  uint8_t[] would do, or define up something the
> right size. 

I've got both of these in my tree already for v3.

Ian.

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 05/46] xen: arm64: initial build + config changes, start of day code
  2013-02-21 14:56   ` Tim Deegan
@ 2013-02-21 15:26     ` Ian Campbell
  2013-02-21 16:03       ` Ian Campbell
  0 siblings, 1 reply; 81+ messages in thread
From: Ian Campbell @ 2013-02-21 15:26 UTC (permalink / raw)
  To: Tim Deegan; +Cc: Stefano Stabellini, xen-devel

On Thu, 2013-02-21 at 14:56 +0000, Tim Deegan wrote:
> At 16:47 +0000 on 14 Feb (1360860439), Ian Campbell wrote:
> > +2:      PRINT("- Started in Hyp mode -\r\n")
> > +
> > +hyp:
> 
> I thought we were going to use "EL3" instead of "Hyp".

Sorry, looks like I missed a few comments when I went through this one.

> 
> > +        /* Non-boot CPUs need to move on to the relocated pagetables */
> > +        //mov   x0, #0
> 
> This line should go. 
> 
> > +/*
> > + * xen/arch/arm/arm64/mode_switch.S
> > + *
> > + * Start-of day code to take a CPU from EL3 to EL2. Largely taken from
> > +        bootwrapper.
> 
> Still missing a *.
> 
> Tim.

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 14/46] xen: arm64: barriers and wait for interrupts/events
  2013-02-21 15:01   ` Tim Deegan
@ 2013-02-21 15:27     ` Ian Campbell
  2013-02-21 15:58       ` Ian Campbell
  0 siblings, 1 reply; 81+ messages in thread
From: Ian Campbell @ 2013-02-21 15:27 UTC (permalink / raw)
  To: Tim Deegan; +Cc: Stefano Stabellini, xen-devel

On Thu, 2013-02-21 at 15:01 +0000, Tim Deegan wrote:
> At 16:47 +0000 on 14 Feb (1360860448), Ian Campbell wrote:
> > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> 
> Acked-by: Tim Deegan <tim@xen.org>
> 
> Were we also talking about having smp_ barriers equivalent to the normal
> ones, like on x86?

Yes, I think it was in an F2F conversation, which is why I forgot.

Ian.

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 27/46] xen: arm: arm64 trap handling.
  2013-02-21 15:25     ` Ian Campbell
@ 2013-02-21 15:36       ` Tim Deegan
  2013-02-21 16:02         ` Ian Campbell
  2013-02-21 15:49       ` Ian Campbell
  1 sibling, 1 reply; 81+ messages in thread
From: Tim Deegan @ 2013-02-21 15:36 UTC (permalink / raw)
  To: Ian Campbell; +Cc: xen-devel, Stefano Stabellini

At 15:25 +0000 on 21 Feb (1361460324), Ian Campbell wrote:
> On Thu, 2013-02-21 at 15:10 +0000, Tim Deegan wrote:
> > At 16:47 +0000 on 14 Feb (1360860461), Ian Campbell wrote:
> > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> > > ---
> > > v2: Call leave_hypervisor_tail on exit back to guest, disable interrupts while
> > >     restoring state.
> > 
> > You don't seem to have addressed my other comments on v1:
> 
> I've got them in v3; I noted that I hadn't addressed your comments on this
> patch in the #0/46.

So you did; I did read the 0/46, but for some reason all that stuck in
my head was the WFE stuff. 

AFAICS you just need to re-roll this and #25, and get a tools-person to
ack #20.  So for v3, can you just send those, and avoid another 46-patch
mailbomb? :)

Cheers,

Tim.

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 27/46] xen: arm: arm64 trap handling.
  2013-02-21 15:25     ` Ian Campbell
  2013-02-21 15:36       ` Tim Deegan
@ 2013-02-21 15:49       ` Ian Campbell
  2013-02-21 15:53         ` Tim Deegan
  1 sibling, 1 reply; 81+ messages in thread
From: Ian Campbell @ 2013-02-21 15:49 UTC (permalink / raw)
  To: Tim (Xen.org); +Cc: xen-devel, Stefano Stabellini

On Thu, 2013-02-21 at 15:25 +0000, Ian Campbell wrote:
> On Thu, 2013-02-21 at 15:10 +0000, Tim Deegan wrote:
> > At 16:47 +0000 on 14 Feb (1360860461), Ian Campbell wrote:
> > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> > > ---
> > > v2: Call leave_hypervisor_tail on exit back to guest, disable interrupts while
> > >     restoring state.
> > 
> > You don't seem to have addressed my other comments on v1:
> 
> I've got them in v3; I noted that I hadn't addressed your comments on this
> patch in the #0/46.

Here is v3. Needs "xen: consolidate implementations of LOG() macro"
which I've just posted.

Ian.

8<--------------------------------------------------

From 6978a03e10316ff997c91ccd6f88be110dfcffec Mon Sep 17 00:00:00 2001
From: Ian Campbell <ian.campbell@citrix.com>
Date: Mon, 21 Jan 2013 17:33:31 +0000
Subject: [PATCH] xen: arm: arm64 trap handling.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
v3: use bitops.h provided LOG() macro
    use simple bl instead of preloading lr and b
    remove an incorrectly placed and inaccurate comment
    declare hyp_traps_vector as an array, avoiding & on uses
v2: Call leave_hypervisor_tail on exit back to guest, disable interrupts while
    restoring state.
---
 xen/arch/arm/arm64/Makefile      |    2 +
 xen/arch/arm/arm64/asm-offsets.c |   58 +++++++++
 xen/arch/arm/arm64/entry.S       |  254 ++++++++++++++++++++++++++++++++++++++
 xen/arch/arm/arm64/traps.c       |   56 +++++++++
 xen/arch/arm/smpboot.c           |    2 +-
 xen/arch/arm/traps.c             |   17 ++-
 xen/include/asm-arm/cpregs.h     |    1 +
 xen/include/asm-arm/processor.h  |    2 +-
 8 files changed, 386 insertions(+), 6 deletions(-)
 create mode 100644 xen/arch/arm/arm64/asm-offsets.c
 create mode 100644 xen/arch/arm/arm64/entry.S
 create mode 100644 xen/arch/arm/arm64/traps.c

diff --git a/xen/arch/arm/arm64/Makefile b/xen/arch/arm/arm64/Makefile
index 815f305..be41f43 100644
--- a/xen/arch/arm/arm64/Makefile
+++ b/xen/arch/arm/arm64/Makefile
@@ -1,5 +1,7 @@
 subdir-y += lib
 
+obj-y += entry.o
 obj-y += mode_switch.o
 
+obj-y += traps.o
 obj-y += domain.o
diff --git a/xen/arch/arm/arm64/asm-offsets.c b/xen/arch/arm/arm64/asm-offsets.c
new file mode 100644
index 0000000..7949e3e
--- /dev/null
+++ b/xen/arch/arm/arm64/asm-offsets.c
@@ -0,0 +1,58 @@
+/*
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed
+ * to extract and format the required data.
+ */
+#define COMPILE_OFFSETS
+
+#include <xen/config.h>
+#include <xen/types.h>
+#include <xen/sched.h>
+#include <xen/bitops.h>
+#include <public/xen.h>
+#include <asm/current.h>
+
+#define DEFINE(_sym, _val) \
+    __asm__ __volatile__ ( "\n->" #_sym " %0 " #_val : : "i" (_val) )
+#define BLANK() \
+    __asm__ __volatile__ ( "\n->" : : )
+#define OFFSET(_sym, _str, _mem) \
+    DEFINE(_sym, offsetof(_str, _mem));
+
+void __dummy__(void)
+{
+   OFFSET(UREGS_X0, struct cpu_user_regs, x0);
+   OFFSET(UREGS_LR, struct cpu_user_regs, lr);
+
+   OFFSET(UREGS_SP, struct cpu_user_regs, sp);
+   OFFSET(UREGS_PC, struct cpu_user_regs, pc);
+   OFFSET(UREGS_CPSR, struct cpu_user_regs, cpsr);
+
+   OFFSET(UREGS_SPSR_el1, struct cpu_user_regs, spsr_el1);
+
+   OFFSET(UREGS_SPSR_fiq, struct cpu_user_regs, spsr_fiq);
+   OFFSET(UREGS_SPSR_irq, struct cpu_user_regs, spsr_irq);
+   OFFSET(UREGS_SPSR_und, struct cpu_user_regs, spsr_und);
+   OFFSET(UREGS_SPSR_abt, struct cpu_user_regs, spsr_abt);
+
+   OFFSET(UREGS_SP_el0, struct cpu_user_regs, sp_el0);
+   OFFSET(UREGS_SP_el1, struct cpu_user_regs, sp_el1);
+   OFFSET(UREGS_ELR_el1, struct cpu_user_regs, elr_el1);
+
+   OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, cpsr);
+   DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
+   BLANK();
+
+   DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
+
+   OFFSET(VCPU_arch_saved_context, struct vcpu, arch.saved_context);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S
new file mode 100644
index 0000000..e35b6ea
--- /dev/null
+++ b/xen/arch/arm/arm64/entry.S
@@ -0,0 +1,254 @@
+#include <xen/config.h>
+#include <asm/asm_defns.h>
+#include <public/xen.h>
+
+/*
+ * Register aliases.
+ */
+lr      .req    x30             // link register
+
+/*
+ * Stack pushing/popping (register pairs only). Equivalent to store decrement
+ * before, load increment after.
+ */
+        .macro  push, xreg1, xreg2
+        stp     \xreg1, \xreg2, [sp, #-16]!
+        .endm
+
+        .macro  pop, xreg1, xreg2
+        ldp     \xreg1, \xreg2, [sp], #16
+        .endm
+
+/*
+ * Save/restore guest mode specific state, outer stack frame
+ */
+        .macro  entry_guest, compat
+
+        add     x21, sp, #UREGS_SPSR_el1
+        mrs     x23, SPSR_EL1
+        str     x23, [x21]
+
+        .if \compat == 0 /* Aarch64 mode */
+
+        add     x21, sp, #UREGS_SP_el0
+        mrs     x22, SP_el0
+        str     x22, [x21]
+
+        add     x21, sp, #UREGS_ELR_el1
+        mrs     x22, SP_el1
+        mrs     x23, ELR_el1
+        stp     x22, x23, [x21]
+
+        .else             /* Aarch32 mode */
+
+        add     x21, sp, #UREGS_SPSR_fiq
+        mrs     x22, spsr_fiq
+        mrs     x23, spsr_irq
+        stp     w22, w23, [x21]
+
+        add     x21, sp, #UREGS_SPSR_und
+        mrs     x22, spsr_und
+        mrs     x23, spsr_abt
+        stp     w22, w23, [x21]
+
+        .endif
+
+        .endm
+
+/*
+ * Save state on entry to hypervisor
+ */
+        .macro  entry, hyp, compat
+        sub     sp, sp, #(UREGS_SPSR_el1 - UREGS_SP)
+        push    x28, x29
+        push    x26, x27
+        push    x24, x25
+        push    x22, x23
+        push    x20, x21
+        push    x18, x19
+        push    x16, x17
+        push    x14, x15
+        push    x12, x13
+        push    x10, x11
+        push    x8, x9
+        push    x6, x7
+        push    x4, x5
+        push    x2, x3
+        push    x0, x1
+
+        .if \hyp == 1        /* Hypervisor mode */
+
+        add     x21, sp, #(UREGS_X0 - UREGS_SP)
+
+        .else                /* Guest mode */
+
+        entry_guest \compat
+        mov     x21, ~0 /* sp only valid for hyp frame XXX */
+
+        .endif
+
+        stp     lr, x21, [sp, #UREGS_LR]
+
+        mrs     x22, elr_el2
+        mrs     x23, spsr_el2
+        stp     x22, x23, [sp, #UREGS_PC]
+
+        .endm
+
+/*
+ * Bad Abort numbers
+ *-----------------
+ */
+#define BAD_SYNC        0
+#define BAD_IRQ         1
+#define BAD_FIQ         2
+#define BAD_ERROR       3
+
+        .macro  invalid, reason
+        mov     x0, sp
+        mov     x1, #\reason
+        b       do_bad_mode
+        .endm
+
+hyp_sync_invalid:
+        entry   hyp=1
+        invalid BAD_SYNC
+
+hyp_irq_invalid:
+        entry   hyp=1
+        invalid BAD_IRQ
+
+hyp_fiq_invalid:
+        entry   hyp=1
+        invalid BAD_FIQ
+
+hyp_error_invalid:
+        entry   hyp=1
+        invalid BAD_ERROR
+
+/* Traps taken in Current EL with SP_ELx */
+hyp_sync:
+        entry   hyp=1
+        msr     daifclr, #2
+        mov     x0, sp
+        bl      do_trap_hypervisor
+        b       return_to_hypervisor
+
+hyp_irq:
+        entry   hyp=1
+        mov     x0, sp
+        bl      do_trap_irq
+        b       return_to_hypervisor
+
+guest_sync:
+        entry   hyp=0, compat=0
+        invalid BAD_SYNC /* No AArch64 guest support yet */
+
+guest_irq:
+        entry   hyp=0, compat=0
+        invalid BAD_IRQ /* No AArch64 guest support yet */
+
+guest_fiq_invalid:
+        entry   hyp=0, compat=0
+        invalid BAD_FIQ
+
+guest_error_invalid:
+        entry   hyp=0, compat=0
+        invalid BAD_ERROR
+
+guest_sync_compat:
+        entry   hyp=0, compat=1
+        msr     daifclr, #2
+        mov     x0, sp
+        bl      do_trap_hypervisor
+        b       return_to_guest
+
+guest_irq_compat:
+        entry   hyp=0, compat=1
+        mov     x0, sp
+        bl      do_trap_irq
+        b       return_to_guest
+
+guest_fiq_invalid_compat:
+        entry   hyp=0, compat=1
+        invalid BAD_FIQ
+
+guest_error_invalid_compat:
+        entry   hyp=0, compat=1
+        invalid BAD_ERROR
+
+ENTRY(return_to_new_vcpu)
+        ldr     x21, [sp, #UREGS_CPSR]
+        and     x21, x21, #PSR_MODE_MASK
+        /* Returning to EL2? */
+        cmp     x21, #PSR_MODE_EL2t
+        ccmp    x21, #PSR_MODE_EL2h, #0x4, ne
+        b.eq    return_to_hypervisor /* Yes */
+        /* Fall thru */
+ENTRY(return_to_guest)
+        bl      leave_hypervisor_tail /* Disables interrupts on return */
+        /* Fall thru */
+ENTRY(return_to_hypervisor)
+        msr     daifset, #2 /* Mask interrupts */
+
+        ldp     x21, x22, [sp, #UREGS_PC]       // load ELR, SPSR
+
+        pop     x0, x1
+        pop     x2, x3
+        pop     x4, x5
+        pop     x6, x7
+        pop     x8, x9
+
+        msr     elr_el2, x21                    // set up the return data
+        msr     spsr_el2, x22
+
+        pop     x10, x11
+        pop     x12, x13
+        pop     x14, x15
+        pop     x16, x17
+        pop     x18, x19
+        pop     x20, x21
+        pop     x22, x23
+        pop     x24, x25
+        pop     x26, x27
+        pop     x28, x29
+
+        ldr     lr, [sp], #(UREGS_SPSR_el1 - UREGS_SP)
+        eret
+
+/*
+ * Exception vectors.
+ */
+        .macro  ventry  label
+        .align  7
+        b       \label
+        .endm
+
+        .align  11
+ENTRY(hyp_traps_vector)
+        ventry  hyp_sync_invalid                // Synchronous EL2t
+        ventry  hyp_irq_invalid                 // IRQ EL2t
+        ventry  hyp_fiq_invalid                 // FIQ EL2t
+        ventry  hyp_error_invalid               // Error EL2t
+
+        ventry  hyp_sync                        // Synchronous EL2h
+        ventry  hyp_irq                         // IRQ EL2h
+        ventry  hyp_fiq_invalid                 // FIQ EL2h
+        ventry  hyp_error_invalid               // Error EL2h
+
+        ventry  guest_sync                      // Synchronous 64-bit EL0/EL1
+        ventry  guest_irq                       // IRQ 64-bit EL0/EL1
+        ventry  guest_fiq_invalid               // FIQ 64-bit EL0/EL1
+        ventry  guest_error_invalid             // Error 64-bit EL0/EL1
+
+        ventry  guest_sync_compat               // Synchronous 32-bit EL0/EL1
+        ventry  guest_irq_compat                // IRQ 32-bit EL0/EL1
+        ventry  guest_fiq_invalid_compat        // FIQ 32-bit EL0/EL1
+        ventry  guest_error_invalid_compat      // Error 32-bit EL0/EL1
+
+/*
+ * Local variables:
+ * mode: ASM
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/arm64/traps.c b/xen/arch/arm/arm64/traps.c
new file mode 100644
index 0000000..02ef992
--- /dev/null
+++ b/xen/arch/arm/arm64/traps.c
@@ -0,0 +1,56 @@
+/*
+ * xen/arch/arm/arm64/traps.c
+ *
+ * ARM AArch64 Specific Trap handlers
+ *
+ * Copyright (c) 2012 Citrix Systems.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <xen/config.h>
+#include <xen/lib.h>
+
+#include <asm/system.h>
+#include <asm/processor.h>
+
+#include <public/xen.h>
+
+asmlinkage void do_trap_serror(struct cpu_user_regs *regs)
+{
+    panic("Unhandled serror trap\n");
+}
+
+static const char *handler[]= {
+        "Synchronous Abort",
+        "IRQ",
+        "FIQ",
+        "Error"
+};
+
+asmlinkage void do_bad_mode(struct cpu_user_regs *regs, int reason)
+{
+    uint64_t esr = READ_SYSREG64(ESR_EL2);
+    printk("Bad mode in %s handler detected, code 0x%08"PRIx64"\n",
+           handler[reason], esr);
+
+    local_irq_disable();
+    panic("bad mode");
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c
index d8eb5d3..866ed62 100644
--- a/xen/arch/arm/smpboot.c
+++ b/xen/arch/arm/smpboot.c
@@ -142,7 +142,7 @@ void __cpuinit start_secondary(unsigned long boot_phys_offset,
     set_processor_id(cpuid);
 
     /* Setup Hyp vector base */
-    WRITE_CP32((register_t) hyp_traps_vector, HVBAR);
+    WRITE_SYSREG((vaddr_t)hyp_traps_vector, VBAR_EL2);
 
     mmu_init_secondary_cpu();
     enable_vfp();
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index cb8a8d2..d6bdaa7 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -628,7 +628,7 @@ static void do_cp15_64(struct cpu_user_regs *regs,
 
 }
 
-void dump_guest_s1_walk(struct domain *d, uint32_t addr)
+void dump_guest_s1_walk(struct domain *d, vaddr_t addr)
 {
     uint32_t ttbcr = READ_CP32(TTBCR);
     uint32_t ttbr0 = READ_CP32(TTBR0);
@@ -636,7 +636,7 @@ void dump_guest_s1_walk(struct domain *d, uint32_t addr)
     uint32_t offset;
     uint32_t *first = NULL, *second = NULL;
 
-    printk("dom%d VA 0x%08"PRIx32"\n", d->domain_id, addr);
+    printk("dom%d VA 0x%08"PRIvaddr"\n", d->domain_id, addr);
     printk("    TTBCR: 0x%08"PRIx32"\n", ttbcr);
     printk("    TTBR0: 0x%08"PRIx32" = 0x%"PRIpaddr"\n",
            ttbr0, p2m_lookup(d, ttbr0 & PAGE_MASK));
@@ -692,7 +692,11 @@ static void do_trap_data_abort_guest(struct cpu_user_regs *regs,
     mmio_info_t info;
 
     info.dabt = dabt;
+#ifdef CONFIG_ARM_32
     info.gva = READ_CP32(HDFAR);
+#else
+    info.gva = READ_SYSREG64(FAR_EL2);
+#endif
 
     if (dabt.s1ptw)
         goto bad_data_abort;
@@ -713,7 +717,7 @@ bad_data_abort:
 
     /* XXX inject a suitable fault into the guest */
     printk("Guest data abort: %s%s%s\n"
-           "    gva=%"PRIx32"\n",
+           "    gva=%"PRIvaddr"\n",
            msg, dabt.s1ptw ? " S2 during S1" : "",
            fsc_level_str(level),
            info.gva);
@@ -736,13 +740,17 @@ bad_data_abort:
 
 asmlinkage void do_trap_hypervisor(struct cpu_user_regs *regs)
 {
-    union hsr hsr = { .bits = READ_CP32(HSR) };
+    union hsr hsr = { .bits = READ_SYSREG32(ESR_EL2) };
 
     switch (hsr.ec) {
     case HSR_EC_CP15_32:
+        if ( ! is_pv32_domain(current->domain) )
+            goto bad_trap;
         do_cp15_32(regs, hsr);
         break;
     case HSR_EC_CP15_64:
+        if ( ! is_pv32_domain(current->domain) )
+            goto bad_trap;
         do_cp15_64(regs, hsr);
         break;
     case HSR_EC_HVC:
@@ -754,6 +762,7 @@ asmlinkage void do_trap_hypervisor(struct cpu_user_regs *regs)
         do_trap_data_abort_guest(regs, hsr.dabt);
         break;
     default:
+ bad_trap:
         printk("Hypervisor Trap. HSR=0x%x EC=0x%x IL=%x Syndrome=%"PRIx32"\n",
                hsr.bits, hsr.ec, hsr.len, hsr.iss);
         do_unexpected_trap("Hypervisor", regs);
diff --git a/xen/include/asm-arm/cpregs.h b/xen/include/asm-arm/cpregs.h
index 36da12e..75b6287 100644
--- a/xen/include/asm-arm/cpregs.h
+++ b/xen/include/asm-arm/cpregs.h
@@ -228,6 +228,7 @@
 #define CCSIDR_EL1              CCSIDR
 #define CLIDR_EL1               CLIDR
 #define CSSELR_EL1              CSSELR
+#define ESR_EL2                 HSR
 #define ID_AFR0_EL1             ID_AFR0
 #define ID_DFR0_EL1             ID_DFR0
 #define ID_ISAR0_EL1            ID_ISAR0
diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h
index bd473a8..6ab466a 100644
--- a/xen/include/asm-arm/processor.h
+++ b/xen/include/asm-arm/processor.h
@@ -238,7 +238,7 @@ union hsr {
 #endif
 
 #ifndef __ASSEMBLY__
-extern uint32_t hyp_traps_vector[8];
+extern uint32_t hyp_traps_vector[];
 
 void panic_PAR(uint64_t par);
 
-- 
1.7.2.5
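
(A side note on the cmp/ccmp pair in return_to_new_vcpu above: it takes the
hypervisor return path iff the saved PSR mode is one of the two EL2 modes.
A rough C equivalent, illustrative only -- the helper name is made up:

    /* True if the saved mode in cpsr is EL2t or EL2h. */
    static inline int psr_mode_is_el2(register_t cpsr)
    {
        register_t mode = cpsr & PSR_MODE_MASK;

        return mode == PSR_MODE_EL2t || mode == PSR_MODE_EL2h;
    }

The ccmp only performs the second comparison when the first one did not match
("ne"); otherwise it forces the flags to the supplied #0x4, i.e. Z set, so the
single b.eq covers both cases.)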

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 27/46] xen: arm: arm64 trap handling.
  2013-02-21 15:49       ` Ian Campbell
@ 2013-02-21 15:53         ` Tim Deegan
  0 siblings, 0 replies; 81+ messages in thread
From: Tim Deegan @ 2013-02-21 15:53 UTC (permalink / raw)
  To: Ian Campbell; +Cc: Stefano Stabellini, xen-devel

At 15:49 +0000 on 21 Feb (1361461781), Ian Campbell wrote:
> On Thu, 2013-02-21 at 15:25 +0000, Ian Campbell wrote:
> > On Thu, 2013-02-21 at 15:10 +0000, Tim Deegan wrote:
> > > At 16:47 +0000 on 14 Feb (1360860461), Ian Campbell wrote:
> > > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> > > > ---
> > > > v2: Call leave_hypervisor_tail on exit back to guest, disable interrupts while
> > > >     restoring state.
> > > 
> > > You don't seem to have addressed my other comments on v1:
> > 
> > I've got them in v3; I noted that I hadn't addressed your comments on this
> > patch in the #0/46.
> 
> Here is v3. Needs "xen: consolidate implementations of LOG() macro"
> which I've just posted.
> 
> Ian.
> 
> 8<--------------------------------------------------
> 
> From 6978a03e10316ff997c91ccd6f88be110dfcffec Mon Sep 17 00:00:00 2001
> From: Ian Campbell <ian.campbell@citrix.com>
> Date: Mon, 21 Jan 2013 17:33:31 +0000
> Subject: [PATCH] xen: arm: arm64 trap handling.
> 
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>

Acked-by: Tim Deegan <tim@xen.org>

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 14/46] xen: arm64: barriers and wait for interrupts/events
  2013-02-21 15:27     ` Ian Campbell
@ 2013-02-21 15:58       ` Ian Campbell
  0 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-21 15:58 UTC (permalink / raw)
  To: Tim (Xen.org); +Cc: xen-devel, Stefano Stabellini

On Thu, 2013-02-21 at 15:27 +0000, Ian Campbell wrote:
> On Thu, 2013-02-21 at 15:01 +0000, Tim Deegan wrote:
> > At 16:47 +0000 on 14 Feb (1360860448), Ian Campbell wrote:
> > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> > 
> > Acked-by: Tim Deegan <tim@xen.org>
> > 
> > Were we also talking about having smp_ barriers equivalent to the normal
> > ones, like on x86?
> 
> Yes, I think it was in an F2F conversation, which is why I forgot.

FYI it ended up like this. I retained your Ack, hope that's ok.

8<--------------------------------

From 117f08d439bca2798db71b9971429e32424ad092 Mon Sep 17 00:00:00 2001
From: Ian Campbell <ian.campbell@citrix.com>
Date: Thu, 13 Dec 2012 13:18:07 +0000
Subject: [PATCH] xen: arm64: barriers and wait for interrupts/events

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
v3: - smp barriers are the same as up (which are conservative)
    - add dmb

---
 xen/include/asm-arm/arm32/system.h |   29 +++++++++++++++++++++++++++++
 xen/include/asm-arm/arm64/system.h |   29 +++++++++++++++++++++++++++++
 xen/include/asm-arm/system.h       |   20 ++++++++------------
 3 files changed, 66 insertions(+), 12 deletions(-)
 create mode 100644 xen/include/asm-arm/arm32/system.h
 create mode 100644 xen/include/asm-arm/arm64/system.h

diff --git a/xen/include/asm-arm/arm32/system.h b/xen/include/asm-arm/arm32/system.h
new file mode 100644
index 0000000..91098a0
--- /dev/null
+++ b/xen/include/asm-arm/arm32/system.h
@@ -0,0 +1,29 @@
+/* Portions taken from Linux arch arm */
+#ifndef __ASM_ARM32_SYSTEM_H
+#define __ASM_ARM32_SYSTEM_H
+
+#define sev() __asm__ __volatile__ ("sev" : : : "memory")
+#define wfe() __asm__ __volatile__ ("wfe" : : : "memory")
+#define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
+
+#define isb() __asm__ __volatile__ ("isb" : : : "memory")
+#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
+#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
+
+#define mb()            dsb()
+#define rmb()           dsb()
+#define wmb()           mb()
+
+#define smp_mb()        dmb()
+#define smp_rmb()       dmb()
+#define smp_wmb()       dmb()
+
+#endif
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/arm64/system.h b/xen/include/asm-arm/arm64/system.h
new file mode 100644
index 0000000..b3ea4a3
--- /dev/null
+++ b/xen/include/asm-arm/arm64/system.h
@@ -0,0 +1,29 @@
+/* Portions taken from Linux arch arm64 */
+#ifndef __ASM_ARM64_SYSTEM_H
+#define __ASM_ARM64_SYSTEM_H
+
+#define sev()           asm volatile("sev" : : : "memory")
+#define wfe()           asm volatile("wfe" : : : "memory")
+#define wfi()           asm volatile("wfi" : : : "memory")
+
+#define isb()           asm volatile("isb" : : : "memory")
+#define dsb()           asm volatile("dsb sy" : : : "memory")
+#define dmb()           asm volatile("dmb sy" : : : "memory")
+
+#define mb()            dsb()
+#define rmb()           dsb()
+#define wmb()           mb()
+
+#define smp_mb()        dmb()
+#define smp_rmb()       dmb()
+#define smp_wmb()       dmb()
+
+#endif
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/system.h b/xen/include/asm-arm/system.h
index 216ef1f..8b4c97a 100644
--- a/xen/include/asm-arm/system.h
+++ b/xen/include/asm-arm/system.h
@@ -11,18 +11,6 @@
 #define xchg(ptr,x) \
         ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
 
-#define isb() __asm__ __volatile__ ("isb" : : : "memory")
-#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
-#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
-
-#define mb()            dsb()
-#define rmb()           dsb()
-#define wmb()           mb()
-
-#define smp_mb()        dmb()
-#define smp_rmb()       dmb()
-#define smp_wmb()       dmb()
-
 /*
  * This is used to ensure the compiler did actually allocate the register we
  * asked it for some inline assembly sequences.  Apparently we can't trust
@@ -33,6 +21,14 @@
  */
 #define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
 
+#if defined(CONFIG_ARM_32)
+# include <asm/arm32/system.h>
+#elif defined(CONFIG_ARM_64)
+# include <asm/arm64/system.h>
+#else
+# error "unknown ARM variant"
+#endif
+
 extern void __bad_xchg(volatile void *, int);
 
 static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
-- 
1.7.2.5
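
(For illustration only -- not part of the patch, and the variable names are
made up -- the smp_ variants pair in the usual producer/consumer way:

    static int shared_data;
    static volatile int shared_flag;

    static void producer(void)
    {
        shared_data = 42;
        smp_wmb();            /* publish the data before the flag */
        shared_flag = 1;
    }

    static int consumer(void)
    {
        while ( !shared_flag )
            ;                 /* wait for the producer */
        smp_rmb();            /* read the flag before reading the data */
        return shared_data;
    }

With both smp_wmb() and smp_rmb() mapping to dmb() this is conservative but
correct on either subarch.)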

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 23/46] xen: arm: add register_t type, native register size for the hypervisor
  2013-02-14 16:47 ` [PATCH V2 23/46] xen: arm: add register_t type, native register size for the hypervisor Ian Campbell
@ 2013-02-21 16:01   ` Ian Campbell
  2013-02-21 16:23     ` Stefano Stabellini
  0 siblings, 1 reply; 81+ messages in thread
From: Ian Campbell @ 2013-02-21 16:01 UTC (permalink / raw)
  To: xen-devel; +Cc: Tim (Xen.org), Stefano Stabellini

On Thu, 2013-02-14 at 16:47 +0000, Ian Campbell wrote:
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> Acked-by: Tim Deegan <tim@xen.org>
> but:
>         This is mostly a matter of coding taste, so I'd like Stefano's
>         ack/nack here as well.


Stefano, any strong opinion?

> ---
> ---
>  xen/arch/arm/domain_build.c |    2 +-
>  xen/arch/arm/smpboot.c      |    2 +-
>  xen/arch/arm/traps.c        |   44 ++++++++++++++++++++++--------------------
>  xen/arch/arm/vgic.c         |   18 ++++++++--------
>  xen/arch/arm/vpl011.c       |    6 ++--
>  xen/arch/arm/vtimer.c       |    6 ++--
>  xen/include/asm-arm/regs.h  |    2 +-
>  xen/include/asm-arm/types.h |    4 +++
>  8 files changed, 45 insertions(+), 39 deletions(-)
> 
> diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
> index 7403f1a..30d014a 100644
> --- a/xen/arch/arm/domain_build.c
> +++ b/xen/arch/arm/domain_build.c
> @@ -268,7 +268,7 @@ static int prepare_dtb(struct domain *d, struct kernel_info *kinfo)
> 
>  static void dtb_load(struct kernel_info *kinfo)
>  {
> -    void * __user dtb_virt = (void *)(u32)kinfo->dtb_paddr;
> +    void * __user dtb_virt = (void *)(register_t)kinfo->dtb_paddr;
> 
>      raw_copy_to_guest(dtb_virt, kinfo->fdt, fdt_totalsize(kinfo->fdt));
>      xfree(kinfo->fdt);
> diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c
> index 86379b7..d8eb5d3 100644
> --- a/xen/arch/arm/smpboot.c
> +++ b/xen/arch/arm/smpboot.c
> @@ -142,7 +142,7 @@ void __cpuinit start_secondary(unsigned long boot_phys_offset,
>      set_processor_id(cpuid);
> 
>      /* Setup Hyp vector base */
> -    WRITE_CP32((uint32_t) hyp_traps_vector, HVBAR);
> +    WRITE_CP32((register_t) hyp_traps_vector, HVBAR);
> 
>      mmu_init_secondary_cpu();
>      enable_vfp();
> diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
> index eaf1f52..0299b33 100644
> --- a/xen/arch/arm/traps.c
> +++ b/xen/arch/arm/traps.c
> @@ -68,7 +68,7 @@ static void print_xen_info(void)
>             debug_build() ? 'y' : 'n', print_tainted(taint_str));
>  }
> 
> -uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg)
> +register_t *select_user_reg(struct cpu_user_regs *regs, int reg)
>  {
>      BUG_ON( !guest_mode(regs) );
> 
> @@ -81,20 +81,20 @@ uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg)
> 
>      switch ( reg ) {
>      case 0 ... 7: /* Unbanked registers */
> -        BUILD_BUG_ON(REGOFFS(r0) + 7*sizeof(uint32_t) != REGOFFS(r7));
> +        BUILD_BUG_ON(REGOFFS(r0) + 7*sizeof(register_t) != REGOFFS(r7));
>          return &regs->r0 + reg;
>      case 8 ... 12: /* Register banked in FIQ mode */
> -        BUILD_BUG_ON(REGOFFS(r8_fiq) + 4*sizeof(uint32_t) != REGOFFS(r12_fiq));
> +        BUILD_BUG_ON(REGOFFS(r8_fiq) + 4*sizeof(register_t) != REGOFFS(r12_fiq));
>          if ( fiq_mode(regs) )
>              return &regs->r8_fiq + reg - 8;
>          else
>              return &regs->r8 + reg - 8;
>      case 13 ... 14: /* Banked SP + LR registers */
> -        BUILD_BUG_ON(REGOFFS(sp_fiq) + 1*sizeof(uint32_t) != REGOFFS(lr_fiq));
> -        BUILD_BUG_ON(REGOFFS(sp_irq) + 1*sizeof(uint32_t) != REGOFFS(lr_irq));
> -        BUILD_BUG_ON(REGOFFS(sp_svc) + 1*sizeof(uint32_t) != REGOFFS(lr_svc));
> -        BUILD_BUG_ON(REGOFFS(sp_abt) + 1*sizeof(uint32_t) != REGOFFS(lr_abt));
> -        BUILD_BUG_ON(REGOFFS(sp_und) + 1*sizeof(uint32_t) != REGOFFS(lr_und));
> +        BUILD_BUG_ON(REGOFFS(sp_fiq) + 1*sizeof(register_t) != REGOFFS(lr_fiq));
> +        BUILD_BUG_ON(REGOFFS(sp_irq) + 1*sizeof(register_t) != REGOFFS(lr_irq));
> +        BUILD_BUG_ON(REGOFFS(sp_svc) + 1*sizeof(register_t) != REGOFFS(lr_svc));
> +        BUILD_BUG_ON(REGOFFS(sp_abt) + 1*sizeof(register_t) != REGOFFS(lr_abt));
> +        BUILD_BUG_ON(REGOFFS(sp_und) + 1*sizeof(register_t) != REGOFFS(lr_und));
>          switch ( regs->cpsr & PSR_MODE_MASK )
>          {
>          case PSR_MODE_USR:
> @@ -315,11 +315,11 @@ static void show_guest_stack(struct cpu_user_regs *regs)
>      printk("GUEST STACK GOES HERE\n");
>  }
> 
> -#define STACK_BEFORE_EXCEPTION(regs) ((uint32_t*)(regs)->sp)
> +#define STACK_BEFORE_EXCEPTION(regs) ((register_t*)(regs)->sp)
> 
>  static void show_trace(struct cpu_user_regs *regs)
>  {
> -    uint32_t *frame, next, addr, low, high;
> +    register_t *frame, next, addr, low, high;
> 
>      printk("Xen call trace:\n   ");
> 
> @@ -327,7 +327,7 @@ static void show_trace(struct cpu_user_regs *regs)
>      print_symbol(" %s\n   ", regs->pc);
> 
>      /* Bounds for range of valid frame pointer. */
> -    low  = (uint32_t)(STACK_BEFORE_EXCEPTION(regs)/* - 2*/);
> +    low  = (register_t)(STACK_BEFORE_EXCEPTION(regs)/* - 2*/);
>      high = (low & ~(STACK_SIZE - 1)) +
>          (STACK_SIZE - sizeof(struct cpu_info));
> 
> @@ -356,7 +356,7 @@ static void show_trace(struct cpu_user_regs *regs)
>              break;
>          {
>              /* Ordinary stack frame. */
> -            frame = (uint32_t *)next;
> +            frame = (register_t *)next;
>              next  = frame[-1];
>              addr  = frame[0];
>          }
> @@ -364,7 +364,7 @@ static void show_trace(struct cpu_user_regs *regs)
>          printk("[<%p>]", _p(addr));
>          print_symbol(" %s\n   ", addr);
> 
> -        low = (uint32_t)&frame[1];
> +        low = (register_t)&frame[1];
>      }
> 
>      printk("\n");
> @@ -372,7 +372,7 @@ static void show_trace(struct cpu_user_regs *regs)
> 
>  void show_stack(struct cpu_user_regs *regs)
>  {
> -    uint32_t *stack = STACK_BEFORE_EXCEPTION(regs), addr;
> +    register_t *stack = STACK_BEFORE_EXCEPTION(regs), addr;
>      int i;
> 
>      if ( guest_mode(regs) )
> @@ -486,20 +486,22 @@ static arm_hypercall_t arm_hypercall_table[] = {
> 
>  static void do_debug_trap(struct cpu_user_regs *regs, unsigned int code)
>  {
> -    uint32_t reg, *r;
> +    register_t *r;
> +    uint32_t reg;
>      uint32_t domid = current->domain->domain_id;
>      switch ( code ) {
>      case 0xe0 ... 0xef:
>          reg = code - 0xe0;
>          r = select_user_reg(regs, reg);
> -        printk("DOM%d: R%d = %#010"PRIx32" at %#010"PRIx32"\n",
> +        printk("DOM%d: R%d = 0x%"PRIregister" at 0x%"PRIvaddr"\n",
>                 domid, reg, *r, regs->pc);
>          break;
>      case 0xfd:
> -        printk("DOM%d: Reached %#010"PRIx32"\n", domid, regs->pc);
> +        printk("DOM%d: Reached %"PRIvaddr"\n", domid, regs->pc);
>          break;
>      case 0xfe:
> -        printk("%c", (char)(regs->r0 & 0xff));
> +        r = select_user_reg(regs, 0);
> +        printk("%c", (char)(*r & 0xff));
>          break;
>      case 0xff:
>          printk("DOM%d: DEBUG\n", domid);
> @@ -561,7 +563,7 @@ static void do_cp15_32(struct cpu_user_regs *regs,
>                         union hsr hsr)
>  {
>      struct hsr_cp32 cp32 = hsr.cp32;
> -    uint32_t *r = select_user_reg(regs, cp32.reg);
> +    uint32_t *r = (uint32_t*)select_user_reg(regs, cp32.reg);
> 
>      if ( !cp32.ccvalid ) {
>          dprintk(XENLOG_ERR, "cp_15(32): need to handle invalid condition codes\n");
> @@ -607,7 +609,7 @@ static void do_cp15_32(struct cpu_user_regs *regs,
>          BUG_ON(!vtimer_emulate(regs, hsr));
>          break;
>      default:
> -        printk("%s p15, %d, r%d, cr%d, cr%d, %d @ %#08x\n",
> +        printk("%s p15, %d, r%d, cr%d, cr%d, %d @ 0x%"PRIregister"\n",
>                 cp32.read ? "mrc" : "mcr",
>                 cp32.op1, cp32.reg, cp32.crn, cp32.crm, cp32.op2, regs->pc);
>          panic("unhandled 32-bit CP15 access %#x\n", hsr.bits & HSR_CP32_REGS_MASK);
> @@ -637,7 +639,7 @@ static void do_cp15_64(struct cpu_user_regs *regs,
>          BUG_ON(!vtimer_emulate(regs, hsr));
>          break;
>      default:
> -        printk("%s p15, %d, r%d, r%d, cr%d @ %#08x\n",
> +        printk("%s p15, %d, r%d, r%d, cr%d @ 0x%"PRIregister"\n",
>                 cp64.read ? "mrrc" : "mcrr",
>                 cp64.op1, cp64.reg1, cp64.reg2, cp64.crm, regs->pc);
>          panic("unhandled 64-bit CP15 access %#x\n", hsr.bits & HSR_CP64_REGS_MASK);
> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> index 39b9775..57147d5 100644
> --- a/xen/arch/arm/vgic.c
> +++ b/xen/arch/arm/vgic.c
> @@ -160,7 +160,7 @@ static int vgic_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
>  {
>      struct hsr_dabt dabt = info->dabt;
>      struct cpu_user_regs *regs = guest_cpu_user_regs();
> -    uint32_t *r = select_user_reg(regs, dabt.reg);
> +    register_t *r = select_user_reg(regs, dabt.reg);
>      struct vgic_irq_rank *rank;
>      int offset = (int)(info->gpa - VGIC_DISTR_BASE_ADDRESS);
>      int gicd_reg = REG(offset);
> @@ -372,7 +372,7 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
>  {
>      struct hsr_dabt dabt = info->dabt;
>      struct cpu_user_regs *regs = guest_cpu_user_regs();
> -    uint32_t *r = select_user_reg(regs, dabt.reg);
> +    register_t *r = select_user_reg(regs, dabt.reg);
>      struct vgic_irq_rank *rank;
>      int offset = (int)(info->gpa - VGIC_DISTR_BASE_ADDRESS);
>      int gicd_reg = REG(offset);
> @@ -421,13 +421,13 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
> 
>      case GICD_ISPENDR ... GICD_ISPENDRN:
>          if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width;
> -        printk("vGICD: unhandled %s write %#"PRIx32" to ISPENDR%d\n",
> +        printk("vGICD: unhandled %s write %#"PRIregister" to ISPENDR%d\n",
>                 dabt.size ? "word" : "byte", *r, gicd_reg - GICD_ISPENDR);
>          return 0;
> 
>      case GICD_ICPENDR ... GICD_ICPENDRN:
>          if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width;
> -        printk("vGICD: unhandled %s write %#"PRIx32" to ICPENDR%d\n",
> +        printk("vGICD: unhandled %s write %#"PRIregister" to ICPENDR%d\n",
>                 dabt.size ? "word" : "byte", *r, gicd_reg - GICD_ICPENDR);
>          return 0;
> 
> @@ -499,19 +499,19 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
> 
>      case GICD_SGIR:
>          if ( dabt.size != 2 ) goto bad_width;
> -        printk("vGICD: unhandled write %#"PRIx32" to ICFGR%d\n",
> +        printk("vGICD: unhandled write %#"PRIregister" to ICFGR%d\n",
>                 *r, gicd_reg - GICD_ICFGR);
>          return 0;
> 
>      case GICD_CPENDSGIR ... GICD_CPENDSGIRN:
>          if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width;
> -        printk("vGICD: unhandled %s write %#"PRIx32" to ICPENDSGIR%d\n",
> +        printk("vGICD: unhandled %s write %#"PRIregister" to ICPENDSGIR%d\n",
>                 dabt.size ? "word" : "byte", *r, gicd_reg - GICD_CPENDSGIR);
>          return 0;
> 
>      case GICD_SPENDSGIR ... GICD_SPENDSGIRN:
>          if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width;
> -        printk("vGICD: unhandled %s write %#"PRIx32" to ISPENDSGIR%d\n",
> +        printk("vGICD: unhandled %s write %#"PRIregister" to ISPENDSGIR%d\n",
>                 dabt.size ? "word" : "byte", *r, gicd_reg - GICD_SPENDSGIR);
>          return 0;
> 
> @@ -537,13 +537,13 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
>          goto write_ignore;
> 
>      default:
> -        printk("vGICD: unhandled write r%d=%"PRIx32" offset %#08x\n",
> +        printk("vGICD: unhandled write r%d=%"PRIregister" offset %#08x\n",
>                 dabt.reg, *r, offset);
>          return 0;
>      }
> 
>  bad_width:
> -    printk("vGICD: bad write width %d r%d=%"PRIx32" offset %#08x\n",
> +    printk("vGICD: bad write width %d r%d=%"PRIregister" offset %#08x\n",
>             dabt.size, dabt.reg, *r, offset);
>      domain_crash_synchronous();
>      return 0;
> diff --git a/xen/arch/arm/vpl011.c b/xen/arch/arm/vpl011.c
> index 7dcee90..db5094e 100644
> --- a/xen/arch/arm/vpl011.c
> +++ b/xen/arch/arm/vpl011.c
> @@ -92,7 +92,7 @@ static int uart0_mmio_read(struct vcpu *v, mmio_info_t *info)
>  {
>      struct hsr_dabt dabt = info->dabt;
>      struct cpu_user_regs *regs = guest_cpu_user_regs();
> -    uint32_t *r = select_user_reg(regs, dabt.reg);
> +    register_t *r = select_user_reg(regs, dabt.reg);
>      int offset = (int)(info->gpa - UART0_START);
> 
>      switch ( offset )
> @@ -114,7 +114,7 @@ static int uart0_mmio_write(struct vcpu *v, mmio_info_t *info)
>  {
>      struct hsr_dabt dabt = info->dabt;
>      struct cpu_user_regs *regs = guest_cpu_user_regs();
> -    uint32_t *r = select_user_reg(regs, dabt.reg);
> +    register_t *r = select_user_reg(regs, dabt.reg);
>      int offset = (int)(info->gpa - UART0_START);
> 
>      switch ( offset )
> @@ -127,7 +127,7 @@ static int uart0_mmio_write(struct vcpu *v, mmio_info_t *info)
>          /* Silently ignore */
>          return 1;
>      default:
> -        printk("VPL011: unhandled write r%d=%"PRIx32" offset %#08x\n",
> +        printk("VPL011: unhandled write r%d=%"PRIregister" offset %#08x\n",
>                 dabt.reg, *r, offset);
>          domain_crash_synchronous();
>      }
> diff --git a/xen/arch/arm/vtimer.c b/xen/arch/arm/vtimer.c
> index 85201b5..291b87e 100644
> --- a/xen/arch/arm/vtimer.c
> +++ b/xen/arch/arm/vtimer.c
> @@ -99,7 +99,7 @@ static int vtimer_emulate_32(struct cpu_user_regs *regs, union hsr hsr)
>  {
>      struct vcpu *v = current;
>      struct hsr_cp32 cp32 = hsr.cp32;
> -    uint32_t *r = select_user_reg(regs, cp32.reg);
> +    uint32_t *r = (uint32_t *)select_user_reg(regs, cp32.reg);
>      s_time_t now;
> 
>      switch ( hsr.bits & HSR_CP32_REGS_MASK )
> @@ -151,8 +151,8 @@ static int vtimer_emulate_64(struct cpu_user_regs *regs, union hsr hsr)
>  {
>      struct vcpu *v = current;
>      struct hsr_cp64 cp64 = hsr.cp64;
> -    uint32_t *r1 = select_user_reg(regs, cp64.reg1);
> -    uint32_t *r2 = select_user_reg(regs, cp64.reg2);
> +    uint32_t *r1 = (uint32_t *)select_user_reg(regs, cp64.reg1);
> +    uint32_t *r2 = (uint32_t *)select_user_reg(regs, cp64.reg2);
>      uint64_t ticks;
>      s_time_t now;
> 
> diff --git a/xen/include/asm-arm/regs.h b/xen/include/asm-arm/regs.h
> index 7486944..a723f92 100644
> --- a/xen/include/asm-arm/regs.h
> +++ b/xen/include/asm-arm/regs.h
> @@ -34,7 +34,7 @@
>   * Returns a pointer to the given register value in regs, taking the
>   * processor mode (CPSR) into account.
>   */
> -extern uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg);
> +extern register_t *select_user_reg(struct cpu_user_regs *regs, int reg);
> 
>  #endif /* __ARM_REGS_H__ */
>  /*
> diff --git a/xen/include/asm-arm/types.h b/xen/include/asm-arm/types.h
> index d3e16d8..9ca32f1 100644
> --- a/xen/include/asm-arm/types.h
> +++ b/xen/include/asm-arm/types.h
> @@ -41,6 +41,8 @@ typedef u32 vaddr_t;
>  typedef u64 paddr_t;
>  #define INVALID_PADDR (~0ULL)
>  #define PRIpaddr "016llx"
> +typedef u32 register_t;
> +#define PRIregister "x"
>  #elif defined (CONFIG_ARM_64)
>  typedef signed long s64;
>  typedef unsigned long u64;
> @@ -49,6 +51,8 @@ typedef u64 vaddr_t;
>  typedef u64 paddr_t;
>  #define INVALID_PADDR (~0UL)
>  #define PRIpaddr "016lx"
> +typedef u64 register_t;
> +#define PRIregister "lx"
>  #endif
> 
>  typedef unsigned long size_t;
> --
> 1.7.2.5
> 
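
(For illustration, the calling pattern this gives in an emulation handler --
the handler itself is hypothetical, the rest comes from the patch:

    static void example_handler(struct cpu_user_regs *regs, int reg)
    {
        register_t *r = select_user_reg(regs, reg);

        printk("r%d = 0x%"PRIregister" at 0x%"PRIvaddr"\n", reg, *r, regs->pc);
        *r = 0;   /* write a result back to the guest register */
    }

i.e. handlers deal in native-width registers and only narrow to uint32_t where
the access is architecturally 32-bit, as do_cp15_32() and the vtimer code do
above.)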

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 27/46] xen: arm: arm64 trap handling.
  2013-02-21 15:36       ` Tim Deegan
@ 2013-02-21 16:02         ` Ian Campbell
  2013-02-21 16:07           ` Tim Deegan
  0 siblings, 1 reply; 81+ messages in thread
From: Ian Campbell @ 2013-02-21 16:02 UTC (permalink / raw)
  To: Tim Deegan; +Cc: xen-devel, Stefano Stabellini

On Thu, 2013-02-21 at 15:36 +0000, Tim Deegan wrote:
> At 15:25 +0000 on 21 Feb (1361460324), Ian Campbell wrote:
> > On Thu, 2013-02-21 at 15:10 +0000, Tim Deegan wrote:
> > > At 16:47 +0000 on 14 Feb (1360860461), Ian Campbell wrote:
> > > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> > > > ---
> > > > v2: Call leave_hypervisor_tail on exit back to guest, disable interrupts while
> > > >     restoring state.
> > > 
> > > You don't seem to have addressed my other comments on v1:
> > 
> > I've got them in v3; I noted that I hadn't addressed your comments on this
> > patch in the #0/46.
> 
> So you did; I did read the 0/46, but for some reason all that stuck in
> my head was the WFE stuff. 
> 
> AFAICS you just need to re-roll this and #25,

I don't think you mean #25? That is "xen: arm64: add guest type to
domain field." which you've acked.

I had an outstanding comment for #14 "xen: arm64: barriers and wait for
interrupts/events" which I've just addressed (and reposted)

> and get a tools-person to ack #20.

Stefano perhaps? ;-)

> So for v3, can you just send those, and avoid another 46-patch
> mailbomb? :)

When I'm applying my own patches I prefer to do it from the list rather
than short-cutting them from my own tree; it keeps me honest and stops me
making mistakes. How about I include an index of acked/unacked patches in
the zeroth mail? You ought to be able to just mark it all as read.

Ian.

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 05/46] xen: arm64: initial build + config changes, start of day code
  2013-02-21 15:26     ` Ian Campbell
@ 2013-02-21 16:03       ` Ian Campbell
  2013-02-21 16:08         ` Tim Deegan
  0 siblings, 1 reply; 81+ messages in thread
From: Ian Campbell @ 2013-02-21 16:03 UTC (permalink / raw)
  To: Tim (Xen.org); +Cc: xen-devel, Stefano Stabellini

On Thu, 2013-02-21 at 15:26 +0000, Ian Campbell wrote:
> On Thu, 2013-02-21 at 14:56 +0000, Tim Deegan wrote:
> > At 16:47 +0000 on 14 Feb (1360860439), Ian Campbell wrote:
> > > +2:      PRINT("- Started in Hyp mode -\r\n")
> > > +
> > > +hyp:
> > 
> > I thought we were going to use "EL3" instead of "Hyp".
> 
> Sorry, looks like I missed a few comments when I went through this one.

s/EL3/EL2/g

This is what I have now:

8<-----------------------------------------

From e4587a06df0d04ccbfd04ec7cc371900fe7dabf4 Mon Sep 17 00:00:00 2001
From: Ian Campbell <ian.campbell@citrix.com>
Date: Mon, 10 Dec 2012 14:19:00 +0000
Subject: [PATCH] xen: arm64: initial build + config changes, start of day code

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
v3: - s/hyp/el2/
    - remove dead code
    - fix comment formatting
v2: - Add PSR_MODE definitions for 64-bit to arch-arm.h and use instead of
      defining in head.S
    - Nuke hard tabs in head.S and mode_switch.S with expand(1)
---
 Config.mk                        |    2 +-
 config/arm64.mk                  |   12 ++
 xen/arch/arm/Makefile            |    1 +
 xen/arch/arm/Rules.mk            |    6 +
 xen/arch/arm/arm64/Makefile      |    1 +
 xen/arch/arm/arm64/head.S        |  393 ++++++++++++++++++++++++++++++++++++++
 xen/arch/arm/arm64/mode_switch.S |   83 ++++++++
 xen/arch/arm/xen.lds.S           |    8 +-
 xen/include/asm-arm/page.h       |    1 +
 xen/include/public/arch-arm.h    |   14 ++
 xen/include/public/hvm/save.h    |    2 +-
 xen/include/public/xen.h         |    2 +-
 xen/include/xen/libelf.h         |    2 +-
 13 files changed, 522 insertions(+), 5 deletions(-)
 create mode 100644 config/arm64.mk
 create mode 100644 xen/arch/arm/arm64/Makefile
 create mode 100644 xen/arch/arm/arm64/head.S
 create mode 100644 xen/arch/arm/arm64/mode_switch.S

diff --git a/Config.mk b/Config.mk
index 64541c8..ea64925 100644
--- a/Config.mk
+++ b/Config.mk
@@ -15,7 +15,7 @@ debug_symbols ?= $(debug)
 
 XEN_COMPILE_ARCH    ?= $(shell uname -m | sed -e s/i.86/x86_32/ \
                          -e s/i86pc/x86_32/ -e s/amd64/x86_64/ \
-                         -e s/armv7.*/arm32/)
+                         -e s/armv7.*/arm32/ -e s/armv8.*/arm64/)
 
 XEN_TARGET_ARCH     ?= $(XEN_COMPILE_ARCH)
 XEN_OS              ?= $(shell uname -s)
diff --git a/config/arm64.mk b/config/arm64.mk
new file mode 100644
index 0000000..b2457eb
--- /dev/null
+++ b/config/arm64.mk
@@ -0,0 +1,12 @@
+CONFIG_ARM := y
+CONFIG_ARM_64 := y
+CONFIG_ARM_$(XEN_OS) := y
+
+CFLAGS += #-marm -march= -mcpu= etc
+
+HAS_PL011 := y
+
+# Use only if calling $(LD) directly.
+LDFLAGS_DIRECT += -maarch64elf
+
+CONFIG_LOAD_ADDRESS ?= 0x80000000
diff --git a/xen/arch/arm/Makefile b/xen/arch/arm/Makefile
index f2822f2..7ff67c7 100644
--- a/xen/arch/arm/Makefile
+++ b/xen/arch/arm/Makefile
@@ -1,4 +1,5 @@
 subdir-$(arm32) += arm32
+subdir-$(arm64) += arm64
 
 obj-y += early_printk.o
 obj-y += domain.o
diff --git a/xen/arch/arm/Rules.mk b/xen/arch/arm/Rules.mk
index 5b5768a..29b605d 100644
--- a/xen/arch/arm/Rules.mk
+++ b/xen/arch/arm/Rules.mk
@@ -26,6 +26,12 @@ arm32 := y
 arm64 := n
 endif
 
+ifeq ($(TARGET_SUBARCH),arm64)
+CFLAGS += -mcpu=generic
+arm32 := n
+arm64 := y
+endif
+
 ifneq ($(call cc-option,$(CC),-fvisibility=hidden,n),n)
 CFLAGS += -DGCC_HAS_VISIBILITY_ATTRIBUTE
 endif
diff --git a/xen/arch/arm/arm64/Makefile b/xen/arch/arm/arm64/Makefile
new file mode 100644
index 0000000..dffbeb1
--- /dev/null
+++ b/xen/arch/arm/arm64/Makefile
@@ -0,0 +1 @@
+obj-y += mode_switch.o
diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
new file mode 100644
index 0000000..b7ab251
--- /dev/null
+++ b/xen/arch/arm/arm64/head.S
@@ -0,0 +1,393 @@
+/*
+ * xen/arch/arm/head.S
+ *
+ * Start-of-day code for an ARMv8.
+ *
+ * Ian Campbell <ian.campbell@citrix.com>
+ * Copyright (c) 2012 Citrix Systems.
+ *
+ * Based on ARMv7-A head.S by
+ * Tim Deegan <tim@xen.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/config.h>
+#include <asm/page.h>
+#include <asm/asm_defns.h>
+
+#define PT_PT     0xe7f /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=111 T=1 P=1 */
+#define PT_MEM    0xe7d /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=111 T=0 P=1 */
+#define PT_DEV    0xe71 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=0 P=1 */
+#define PT_DEV_L3 0xe73 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=1 P=1 */
+
+/* Macro to print a string to the UART, if there is one.
+ * Clobbers r0-r3. */
+#ifdef EARLY_UART_ADDRESS
+#define PRINT(_s)       \
+        adr   x0, 98f ; \
+        bl    puts    ; \
+        b     99f     ; \
+98:     .asciz _s     ; \
+        .align 2      ; \
+99:
+#else
+#define PRINT(s)
+#endif
+
+        /*.aarch64*/
+
+        /*
+         * Kernel startup entry point.
+         * ---------------------------
+         *
+         * The requirements are:
+         *   MMU = off, D-cache = off, I-cache = on or off,
+         *   x0 = physical address to the FDT blob.
+         *
+         * This must be the very first address in the loaded image.
+         * It should be linked at XEN_VIRT_START, and loaded at any
+         * 2MB-aligned address.  All of text+data+bss must fit in 2MB,
+         * or the initial pagetable code below will need adjustment.
+         */
+
+        .global start
+start:
+        /*
+         * DO NOT MODIFY. Image header expected by Linux boot-loaders.
+         */
+        b       real_start           /* branch to kernel start, magic */
+        .long   0                    /* reserved */
+        .quad   0                    /* Image load offset from start of RAM */
+        .quad   0                    /* reserved */
+        .quad   0                    /* reserved */
+
+real_start:
+        msr   DAIFSet, 0xf           /* Disable all interrupts */
+
+        /* Save the bootloader arguments in less-clobberable registers */
+        mov   x21, x0                /* x21 := DTB, physical address  */
+
+        /* Find out where we are */
+        ldr   x0, =start
+        adr   x19, start             /* x19 := paddr (start) */
+        sub   x20, x19, x0           /* x20 := phys-offset */
+
+        /* Using the DTB in the .dtb section? */
+#ifdef CONFIG_DTB_FILE
+        ldr   x21, =_sdtb
+        add   x21, x21, x20          /* x21 := paddr(DTB) */
+#endif
+
+        /* Are we the boot CPU? */
+        mov   x22, #0                /* x22 := CPU ID */
+        mrs   x0, mpidr_el1
+        tbz   x0, 31, boot_cpu       /* Multiprocessor extension supported? */
+        tbnz  x0, 30, boot_cpu       /* Uniprocessor system? */
+
+        mov   x13, #(0xff << 24)
+        bics  x22, x0, x13           /* Mask out flags to get CPU ID */
+        b.eq  boot_cpu               /* If we're CPU 0, boot now */
+
+        /* Non-boot CPUs wait here to be woken up one at a time. */
+1:      dsb   sy
+        ldr   x0, =smp_up_cpu        /* VA of gate */
+        add   x0, x0, x20            /* PA of gate */
+        ldr   x1, [x0]               /* Which CPU is being booted? */
+        cmp   x1, x22                /* Is it us? */
+        b.eq  2f
+        wfe
+        b     1b
+2:
+
+boot_cpu:
+#ifdef EARLY_UART_ADDRESS
+        ldr   x23, =EARLY_UART_ADDRESS  /* x23 := UART base address */
+        cbnz  x22, 1f
+        bl    init_uart                 /* CPU 0 sets up the UART too */
+1:      PRINT("- CPU ")
+        mov   x0, x22
+        bl    putn
+        PRINT(" booting -\r\n")
+#endif
+
+        PRINT("- Current EL ")
+        mrs   x0, CurrentEL
+        bl    putn
+        PRINT(" -\r\n")
+
+        /* Are we in EL3 */
+        mrs   x0, CurrentEL
+        cmp   x0, #PSR_MODE_EL3t
+        ccmp  x0, #PSR_MODE_EL3h, #0x4, ne
+        b.eq  1f /* Yes */
+
+        /* Are we in EL2 */
+        cmp   x0, #PSR_MODE_EL2t
+        ccmp  x0, #PSR_MODE_EL2h, #0x4, ne
+        b.eq  2f /* Yes */
+
+        /* Otherwise, it must have been EL0 or EL1 */
+        PRINT("- CPU is not in EL3 or EL2 -\r\n")
+        b     fail
+
+1:      PRINT("- Started in EL3 -\r\n- Entering EL2 -\r\n")
+        ldr   x1, =enter_el2_mode    /* VA of function */
+        add   x1, x1, x20            /* PA of function */
+        adr   x30, el2               /* Set return address for call */
+        br    x1                     /* Call function */
+
+2:      PRINT("- Started in EL2 mode -\r\n")
+
+el2:
+        /* Zero BSS on the boot CPU to avoid nasty surprises */
+        cbnz  x22, skip_bss
+
+        PRINT("- Zero BSS -\r\n")
+        ldr   x0, =__bss_start       /* Load start & end of bss */
+        ldr   x1, =__bss_end
+        add   x0, x0, x20            /* Apply physical offset */
+        add   x1, x1, x20
+
+1:      str   xzr, [x0], #8
+        cmp   x0, x1
+        b.lo  1b
+
+skip_bss:
+
+        PRINT("- Setting up control registers -\r\n")
+
+        /* Set up memory attribute type tables */
+        ldr   x0, =MAIRVAL
+        msr   mair_el2, x0
+
+        /* Set up the HTCR:
+         * PASize -- 4G
+         * Top byte is used
+         * PT walks use Outer-Shareable accesses,
+         * PT walks are write-back, no-write-allocate in both cache levels,
+         * Full 64-bit address space goes through this table. */
+        ldr   x0, =0x80802500
+        msr   tcr_el2, x0
+
+        /* Set up the HSCTLR:
+         * Exceptions in LE ARM,
+         * Low-latency IRQs disabled,
+         * Write-implies-XN disabled (for now),
+         * D-cache disabled (for now),
+         * I-cache enabled,
+         * Alignment checking enabled,
+         * MMU translation disabled (for now). */
+        ldr   x0, =(HSCTLR_BASE|SCTLR_A)
+        msr   SCTLR_EL2, x0
+
+        /* Write Xen's PT's paddr into the HTTBR */
+        ldr   x4, =xen_pgtable
+        add   x4, x4, x20            /* x4 := paddr (xen_pagetable) */
+        msr   TTBR0_EL2, x4
+
+        /* Non-boot CPUs don't need to rebuild the pagetable */
+        cbnz  x22, pt_ready
+
+        ldr   x1, =xen_first
+        add   x1, x1, x20            /* x1 := paddr (xen_first) */
+        mov   x3, #PT_PT             /* x2 := table map of xen_first */
+        orr   x2, x1, x3             /* (+ rights for linear PT) */
+        str   x2, [x4, #0]           /* Map it in slot 0 */
+
+        mov   x4, x1                 /* Next level into xen_first */
+
+       /* console fixmap */
+#ifdef EARLY_UART_ADDRESS
+        ldr   x1, =xen_fixmap
+        add   x1, x1, x20            /* x1 := paddr (xen_fixmap) */
+        lsr   x2, x23, #12
+        lsl   x2, x2, #12            /* 4K aligned paddr of UART */
+        mov   x3, #PT_DEV_L3
+        orr   x2, x2, x3             /* x2 := 4K dev map including UART */
+        str   x2, [x1, #(FIXMAP_CONSOLE*8)] /* Map it in the first fixmap's slot */
+#endif
+
+        /* Build the baseline idle pagetable's first-level entries */
+        ldr   x1, =xen_second
+        add   x1, x1, x20            /* x1 := paddr (xen_second) */
+        mov   x3, #PT_PT             /* x2 := table map of xen_second */
+        orr   x2, x1, x3             /* (+ rights for linear PT) */
+        str   x2, [x4, #0]           /* Map it in slot 0 */
+        add   x2, x2, #0x1000
+        str   x2, [x4, #8]           /* Map 2nd page in slot 1 */
+        add   x2, x2, #0x1000
+        str   x2, [x4, #16]          /* Map 3rd page in slot 2 */
+        add   x2, x2, #0x1000
+        str   x2, [x4, #24]          /* Map 4th page in slot 3 */
+
+        /* Now set up the second-level entries */
+        mov   x3, #PT_MEM
+        orr   x2, x19, x3            /* x2 := 2MB normal map of Xen */
+        orr   x4, xzr, x19, lsr #18
+        str   x2, [x1, x4]           /* Map Xen there */
+        ldr   x4, =start
+        lsr   x4, x4, #18            /* Slot for vaddr(start) */
+        str   x2, [x1, x4]           /* Map Xen there too */
+
+        /* xen_fixmap pagetable */
+        ldr   x2, =xen_fixmap
+        add   x2, x2, x20            /* x2 := paddr (xen_fixmap) */
+        mov   x3, #PT_PT
+        orr   x2, x2, x3             /* x2 := table map of xen_fixmap */
+        add   x4, x4, #8
+        str   x2, [x1, x4]           /* Map it in the fixmap's slot */
+
+        lsr   x2, x21, #21
+        lsl   x2, x2, #21            /* 2MB-aligned paddr of DTB */
+        mov   x3, #PT_MEM            /* x2 := 2MB RAM incl. DTB */
+        orr   x2, x2, x3
+        add   x4, x4, #8
+        str   x2, [x1, x4]           /* Map it in the early boot slot */
+
+pt_ready:
+        PRINT("- Turning on paging -\r\n")
+
+        ldr   x1, =paging            /* Explicit vaddr, not PC-relative */
+        mrs   x0, SCTLR_EL2
+        orr   x0, x0, #SCTLR_M       /* Enable MMU */
+        orr   x0, x0, #SCTLR_C       /* Enable D-cache */
+        dsb   sy                     /* Flush PTE writes and finish reads */
+        msr   SCTLR_EL2, x0          /* now paging is enabled */
+        isb                          /* Now, flush the icache */
+        br    x1                     /* Get a proper vaddr into PC */
+paging:
+
+#ifdef EARLY_UART_ADDRESS
+        /* Use a virtual address to access the UART. */
+        ldr   x23, =FIXMAP_ADDR(FIXMAP_CONSOLE)
+#endif
+
+        PRINT("- Ready -\r\n")
+
+        /* The boot CPU should go straight into C now */
+        cbz   x22, launch
+
+        /* Non-boot CPUs need to move on to the relocated pagetables */
+        ldr   x4, =boot_ttbr         /* VA of TTBR0_EL2 stashed by CPU 0 */
+        add   x4, x4, x20            /* PA of it */
+        ldr   x4, [x4]               /* Actual value */
+        dsb   sy
+        msr   TTBR0_EL2, x4
+        dsb   sy
+        isb
+        tlbi  alle2
+        dsb   sy                     /* Ensure completion of TLB flush */
+        isb
+
+        /* Non-boot CPUs report that they've got this far */
+        ldr   x0, =ready_cpus
+1:      ldaxr x1, [x0]               /*            { read # of ready CPUs } */
+        add   x1, x1, #1             /* Atomically { ++                   } */
+        stlxr w2, x1, [x0]           /*            { writeback            } */
+        cbnz  w2, 1b
+        dsb   sy
+        dc    cvac, x0               /* Flush D-Cache */
+        dsb   sy
+
+        /* Here, the non-boot CPUs must wait again -- they're now running on
+         * the boot CPU's pagetables so it's safe for the boot CPU to
+         * overwrite the non-relocated copy of Xen.  Once it's done that,
+         * and brought up the memory allocator, non-boot CPUs can get their
+         * own stacks and enter C. */
+1:      wfe
+        dsb   sy
+        ldr   x0, =smp_up_cpu
+        ldr   x1, [x0]               /* Which CPU is being booted? */
+        cmp   x1, x22                /* Is it us? */
+        b.ne  1b
+
+launch:
+        ldr   x0, =init_stack        /* Find the boot-time stack */
+        ldr   x0, [x0]
+        add   x0, x0, #STACK_SIZE    /* (which grows down from the top). */
+        sub   x0, x0, #CPUINFO_sizeof /* Make room for CPU save record */
+        mov   sp, x0
+
+        mov   x0, x20                /* Marshal args: - phys_offset */
+        mov   x1, x21                /*               - FDT */
+        mov   x2, x22                /*               - CPU ID */
+        cbz   x22, start_xen         /* and disappear into the land of C */
+        b     start_secondary        /* (to the appropriate entry point) */
+
+/* Fail-stop
+ * x0: string explaining why */
+fail:   PRINT("- Boot failed -\r\n")
+1:      wfe
+        b     1b
+
+#ifdef EARLY_UART_ADDRESS
+
+/* Bring up the UART. Specific to the PL011 UART.
+ * Clobbers x0-x2 */
+init_uart:
+        mov   x1, #0x0
+        strh  w1, [x23, #0x24]       /* -> UARTIBRD (Baud divisor fraction) */
+        mov   x1, #0x4               /* 7.3728MHz / 0x4 == 16 * 115200 */
+        strh  w1, [x23, #0x24]       /* -> UARTIBRD (Baud divisor integer) */
+        mov   x1, #0x60              /* 8n1 */
+        strh  w1, [x23, #0x24]       /* -> UARTLCR_H (Line control) */
+        ldr   x1, =0x00000301        /* RXE | TXE | UARTEN */
+        strh  w1, [x23, #0x30]       /* -> UARTCR (Control Register) */
+        adr   x0, 1f
+        b     puts
+1:      .asciz "- UART enabled -\r\n"
+        .align 4
+
+/* Print early debug messages.  Specific to the PL011 UART.
+ * x0: NUL-terminated string to print.
+ * Clobbers x0-x2 */
+puts:
+        ldrh  w2, [x23, #0x18]       /* <- UARTFR (Flag register) */
+        tst   w2, #0x8               /* Check BUSY bit */
+        b.ne  puts                   /* Wait for the UART to be ready */
+        ldrb  w2, [x0], #1           /* Load next char */
+        cbz   w2, 1f                 /* Exit on nul */
+        str   w2, [x23]              /* -> UARTDR (Data Register) */
+        b     puts
+1:
+        ret
+
+/* Print a 32-bit number in hex.  Specific to the PL011 UART.
+ * x0: Number to print.
+ * Clobbers x0-x3 */
+putn:
+        adr   x1, hex
+        mov   x3, #8
+1:      ldrh  w2, [x23, #0x18]       /* <- UARTFR (Flag register) */
+        tst   w2, #0x8               /* Check BUSY bit */
+        b.ne  1b                     /* Wait for the UART to be ready */
+        and   x2, x0, #0xf0000000    /* Mask off the top nybble */
+        lsr   x2, x2, #28
+        ldrb  w2, [x1, x2]           /* Convert to a char */
+        strb  w2, [x23]              /* -> UARTDR (Data Register) */
+        lsl   x0, x0, #4             /* Roll it through one nybble at a time */
+        subs  x3, x3, #1
+        b.ne  1b
+        ret
+
+hex:    .ascii "0123456789abcdef"
+        .align 2
+
+#else  /* EARLY_UART_ADDRESS */
+
+init_uart:
+.global early_puts
+early_puts:
+puts:
+putn:   ret
+
+#endif /* EARLY_UART_ADDRESS */
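
The PT_* constants at the top of head.S pack whole LPAE descriptor attribute
fields into single immediates; the comments beside them list the field values.
As a cross-check only (a hedged C sketch, not code from this series; bit
positions follow the long-descriptor layout named in those comments), the same
numbers can be rebuilt from their fields:

    #include <stdint.h>
    #include <stdio.h>

    /* bit0 P (valid), bit1 T (table/L3 page), bits 4:2 AttrIndx, bit5 NS,
     * bits 7:6 AP, bits 9:8 SH, bit10 AF, bit11 nG -- as in the comments. */
    static uint64_t pte_attrs(unsigned attr, unsigned table)
    {
        return (1u << 11) | (1u << 10) | (2u << 8) | (1u << 6) | (1u << 5)
             | (attr << 2) | (table << 1) | 1u;
    }

    int main(void)
    {
        printf("PT_PT     %#x\n", (unsigned)pte_attrs(7, 1)); /* 0xe7f */
        printf("PT_MEM    %#x\n", (unsigned)pte_attrs(7, 0)); /* 0xe7d */
        printf("PT_DEV    %#x\n", (unsigned)pte_attrs(4, 0)); /* 0xe71 */
        printf("PT_DEV_L3 %#x\n", (unsigned)pte_attrs(4, 1)); /* 0xe73 */
        return 0;
    }
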
diff --git a/xen/arch/arm/arm64/mode_switch.S b/xen/arch/arm/arm64/mode_switch.S
new file mode 100644
index 0000000..4c38181
--- /dev/null
+++ b/xen/arch/arm/arm64/mode_switch.S
@@ -0,0 +1,83 @@
+/*
+ * xen/arch/arm/arm64/mode_switch.S
+ *
+ * Start-of-day code to take a CPU from EL3 to EL2. Largely taken from
+ * bootwrapper.
+ *
+ * Ian Campbell <ian.campbell@citrix.com>
+ * Copyright (c) 2012 Citrix Systems.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/config.h>
+#include <asm/page.h>
+#include <asm/asm_defns.h>
+
+/* Get up a CPU into EL2.  Clobbers x0-x3.
+ *
+ * Expects x22 == CPU number
+ * Expects x30  == EL2 entry point
+ *
+ * This code is specific to the VE model, and not intended to be used
+ * on production systems.  As such it's a bit hackier than the main
+ * boot code in head.S.  In future it will be replaced by better
+ * integration with the bootloader/firmware so that Xen always starts
+ * at EL2.
+ */
+
+.globl enter_el2_mode
+enter_el2_mode:
+        mov     x0, #0x30                       // RES1
+        orr     x0, x0, #(1 << 0)               // Non-secure EL1
+        orr     x0, x0, #(1 << 8)               // HVC enable
+        orr     x0, x0, #(1 << 10)              // 64-bit EL2
+        msr     scr_el3, x0
+
+        msr     cptr_el3, xzr                   // Disable copro. traps to EL3
+
+        ldr     x0, =0x01800000                 // 24Mhz
+        msr     cntfrq_el0, x0
+
+        /*
+         * Check for the primary CPU to avoid a race on the distributor
+         * registers.
+         */
+        cbnz    x22, 1f
+
+        ldr     x1, =(GIC_BASE_ADDRESS+GIC_DR_OFFSET) // GICD_CTLR
+        mov     w0, #3                          // EnableGrp0 | EnableGrp1
+        str     w0, [x1]
+
+1:      ldr     x1, =(GIC_BASE_ADDRESS+GIC_DR_OFFSET+0x80) // GICD_IGROUPR
+        mov     w0, #~0                         // Grp1 interrupts
+        str     w0, [x1], #4
+        b.ne    2f                              // Only local interrupts for secondary CPUs
+        str     w0, [x1], #4
+        str     w0, [x1], #4
+
+2:      ldr     x1, =(GIC_BASE_ADDRESS+GIC_CR_OFFSET) // GICC_CTLR
+        ldr     w0, [x1]
+        mov     w0, #3                          // EnableGrp0 | EnableGrp1
+        str     w0, [x1]
+
+        mov     w0, #1 << 7                     // allow NS access to GICC_PMR
+        str     w0, [x1, #4]                    // GICC_PMR
+
+        msr     sctlr_el2, xzr
+
+        /*
+         * Prepare the switch to the EL2_SP1 mode from EL3
+         */
+        msr     elr_el3, x30                    // Return to desired function
+        mov     x1, #0x3c9                      // EL2_SP1 | D | A | I | F
+        msr     spsr_el3, x1
+        eret
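
enter_el2_mode above builds its two control values from individual bits: the
scr_el3 word from ORRs (the RES1 bits 0x30, then the non-secure, HVC-enable
and 64-bit-EL2 bits) and the spsr_el3 word as the single immediate 0x3c9. A
small C sketch, for illustration only and not part of the patch, showing how
those constants decompose:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* scr_el3: RES1 (bits 4-5) | NS (bit 0) | HCE (bit 8) | RW (bit 10) */
        uint32_t scr = 0x30 | (1u << 0) | (1u << 8) | (1u << 10);

        /* spsr_el3: M[3:0] = 0x9 (the series' PSR_MODE_EL2h) with D, A, I, F
         * (bits 9:6) all masked. */
        uint32_t spsr = 0x9 | (0xfu << 6);

        printf("scr_el3  = %#x\n", scr);   /* 0x531 */
        printf("spsr_el3 = %#x\n", spsr);  /* 0x3c9 */
        return 0;
    }
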
diff --git a/xen/arch/arm/xen.lds.S b/xen/arch/arm/xen.lds.S
index 410d7db..b1f0a78 100644
--- a/xen/arch/arm/xen.lds.S
+++ b/xen/arch/arm/xen.lds.S
@@ -11,7 +11,13 @@
 
 ENTRY(start)
 
-OUTPUT_ARCH(arm)
+#if defined(__arm__)
+#define FORMAT arm
+#elif defined(__aarch64__)
+#define FORMAT aarch64
+#endif
+
+OUTPUT_ARCH(FORMAT)
 
 PHDRS
 {
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index 9acd0af..e0a636f 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -38,6 +38,7 @@
  */
 #define MAIR0VAL 0xeeaa4400
 #define MAIR1VAL 0xff000004
+#define MAIRVAL (MAIR0VAL|MAIR1VAL<<32)
 
 /*
  * Attribute Indexes.
diff --git a/xen/include/public/arch-arm.h b/xen/include/public/arch-arm.h
index 8dd9062..dc12524 100644
--- a/xen/include/public/arch-arm.h
+++ b/xen/include/public/arch-arm.h
@@ -174,6 +174,8 @@ typedef uint64_t xen_callback_t;
 
 /* 0-4: Mode */
 #define PSR_MODE_MASK 0x1f
+
+/* 32 bit modes */
 #define PSR_MODE_USR 0x10
 #define PSR_MODE_FIQ 0x11
 #define PSR_MODE_IRQ 0x12
@@ -184,6 +186,18 @@ typedef uint64_t xen_callback_t;
 #define PSR_MODE_UND 0x1b
 #define PSR_MODE_SYS 0x1f
 
+/* 64 bit modes */
+#ifdef CONFIG_ARM_64
+#define PSR_MODE_BIT  0x10 /* Set iff AArch32 */
+#define PSR_MODE_EL3h 0x0d
+#define PSR_MODE_EL3t 0x0c
+#define PSR_MODE_EL2h 0x09
+#define PSR_MODE_EL2t 0x08
+#define PSR_MODE_EL1h 0x05
+#define PSR_MODE_EL1t 0x04
+#define PSR_MODE_EL0t 0x00
+#endif
+
 #define PSR_THUMB       (1<<5)        /* Thumb Mode enable */
 #define PSR_FIQ_MASK    (1<<6)        /* Fast Interrupt mask */
 #define PSR_IRQ_MASK    (1<<7)        /* Interrupt mask */
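
head.S uses these encodings when it checks which exception level it was
entered in, accepting either the 't' (SP_EL0) or the 'h' (SP_ELx) flavour of
EL3 and EL2. Expressed as loose C over a saved mode field -- a sketch for
illustration, not code from this series -- the same classification reads:

    #include <stdbool.h>
    #include <stdint.h>

    #define PSR_MODE_MASK 0x1f
    #define PSR_MODE_EL3h 0x0d
    #define PSR_MODE_EL3t 0x0c
    #define PSR_MODE_EL2h 0x09
    #define PSR_MODE_EL2t 0x08

    /* True if the mode bits denote EL2, in either stack-pointer flavour. */
    static bool mode_is_el2(uint64_t psr)
    {
        uint64_t m = psr & PSR_MODE_MASK;
        return m == PSR_MODE_EL2t || m == PSR_MODE_EL2h;
    }

    /* Likewise for EL3. */
    static bool mode_is_el3(uint64_t psr)
    {
        uint64_t m = psr & PSR_MODE_MASK;
        return m == PSR_MODE_EL3t || m == PSR_MODE_EL3h;
    }
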
diff --git a/xen/include/public/hvm/save.h b/xen/include/public/hvm/save.h
index 5538d8e..cc8b5fd 100644
--- a/xen/include/public/hvm/save.h
+++ b/xen/include/public/hvm/save.h
@@ -102,7 +102,7 @@ DECLARE_HVM_SAVE_TYPE(END, 0, struct hvm_save_end);
 
 #if defined(__i386__) || defined(__x86_64__)
 #include "../arch-x86/hvm/save.h"
-#elif defined(__arm__)
+#elif defined(__arm__) || defined(__aarch64__)
 #include "../arch-arm/hvm/save.h"
 #else
 #error "unsupported architecture"
diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h
index 846f446..a1927c0 100644
--- a/xen/include/public/xen.h
+++ b/xen/include/public/xen.h
@@ -31,7 +31,7 @@
 
 #if defined(__i386__) || defined(__x86_64__)
 #include "arch-x86/xen.h"
-#elif defined(__arm__)
+#elif defined(__arm__) || defined (__aarch64__)
 #include "arch-arm.h"
 #else
 #error "Unsupported architecture"
diff --git a/xen/include/xen/libelf.h b/xen/include/xen/libelf.h
index e8f6508..218bb18 100644
--- a/xen/include/xen/libelf.h
+++ b/xen/include/xen/libelf.h
@@ -23,7 +23,7 @@
 #ifndef __XEN_LIBELF_H__
 #define __XEN_LIBELF_H__
 
-#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
 #define XEN_ELF_LITTLE_ENDIAN
 #else
 #error define architectural endianness
-- 
1.7.2.5
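
A note on the secondary-CPU bring-up in head.S above: both wait loops spin on
the single smp_up_cpu word, using WFE to sleep and relying on whoever releases
them to issue SEV. Roughly, in hypothetical C (the release side is assumed
here; it lives in the SMP bring-up code, not in this patch):

    #include <stdint.h>

    extern volatile uint64_t smp_up_cpu;    /* id of the CPU allowed through */

    static inline void wfe(void) { __asm__ volatile("wfe" ::: "memory"); }
    static inline void sev(void) { __asm__ volatile("sev" ::: "memory"); }
    static inline void dsb_sy(void) { __asm__ volatile("dsb sy" ::: "memory"); }

    /* What each secondary effectively does before moving on. */
    static void wait_for_my_turn(uint64_t cpuid)
    {
        for ( ;; )
        {
            dsb_sy();
            if ( smp_up_cpu == cpuid )
                return;
            wfe();
        }
    }

    /* What the releasing CPU is assumed to do, one secondary at a time. */
    static void release_cpu(uint64_t cpuid)
    {
        smp_up_cpu = cpuid;
        dsb_sy();
        sev();
    }
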

^ permalink raw reply related	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 27/46] xen: arm: arm64 trap handling.
  2013-02-21 16:02         ` Ian Campbell
@ 2013-02-21 16:07           ` Tim Deegan
  2013-02-21 16:08             ` Ian Campbell
  0 siblings, 1 reply; 81+ messages in thread
From: Tim Deegan @ 2013-02-21 16:07 UTC (permalink / raw)
  To: Ian Campbell; +Cc: Stefano Stabellini, xen-devel

At 16:02 +0000 on 21 Feb (1361462564), Ian Campbell wrote:
> On Thu, 2013-02-21 at 15:36 +0000, Tim Deegan wrote:
> > At 15:25 +0000 on 21 Feb (1361460324), Ian Campbell wrote:
> > > On Thu, 2013-02-21 at 15:10 +0000, Tim Deegan wrote:
> > > > At 16:47 +0000 on 14 Feb (1360860461), Ian Campbell wrote:
> > > > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> > > > > ---
> > > > > v2: Call leave_hypervisor_tail on exit back to guest, disable interrupts while
> > > > >     restoring state.
> > > > 
> > > > You don't seem to have addressed my other comments on v1:
> > > 
> > > I've got them in v3; I noted that I hadn't addressed your comment on this
> > > patch in the #0/46.
> > 
> > So you did; I did read the 0/46, but for some reason all that stuck in
> > my head was the WFE stuff. 
> > 
> > AFAICS you just need to re-roll this and #25,
> 
> I don't think you mean #25? That is "xen: arm64: add guest type to
> domain field." which you've acked.

Sorry, I meant #5.

> When I'm applying my own patches I prefer to do it from the list rather
> than short-cutting them from my own tree; it keeps me honest and stops me
> making mistakes. How about I include an index of acked/unacked patches in the
> zeroeth mail? You ought to be able to just mark it all as read.

Fair enough -- no need to index them; I just keep all previous versions
of a series around so I can easily find the comments.  But this won't
need a v4, will it? :)

Tim.

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 05/46] xen: arm64: initial build + config changes, start of day code
  2013-02-21 16:03       ` Ian Campbell
@ 2013-02-21 16:08         ` Tim Deegan
  0 siblings, 0 replies; 81+ messages in thread
From: Tim Deegan @ 2013-02-21 16:08 UTC (permalink / raw)
  To: Ian Campbell; +Cc: Stefano Stabellini, xen-devel

At 16:03 +0000 on 21 Feb (1361462587), Ian Campbell wrote:
> On Thu, 2013-02-21 at 15:26 +0000, Ian Campbell wrote:
> > On Thu, 2013-02-21 at 14:56 +0000, Tim Deegan wrote:
> > > At 16:47 +0000 on 14 Feb (1360860439), Ian Campbell wrote:
> > > > +2:      PRINT("- Started in Hyp mode -\r\n")
> > > > +
> > > > +hyp:
> > > 
> > > I though we were going to use "EL3" instead of "Hyp".
> > 
> > Sorry, looks like I missed a few comments when I went through this one.
> 
> s/EL3/EL2/g
> 
> This is what I have now:
> 
> 8<-----------------------------------------
> 
> From e4587a06df0d04ccbfd04ec7cc371900fe7dabf4 Mon Sep 17 00:00:00 2001
> From: Ian Campbell <ian.campbell@citrix.com>
> Date: Mon, 10 Dec 2012 14:19:00 +0000
> Subject: [PATCH] xen: arm64: initial build + config changes, start of day code
> 
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>

Acked-by: Tim Deegan <tim@xen.org>

> ---
> v3: - s/hyp/el2/
>     - remove dead code
>     - fix comment formatting
> v2: - Add PSR_MODE definitions for 64-bit to arch-arm.h and use instead of
>       defining in head.S
>     - Nuke hard tabs in head.S and mode_switch.S with expand(1)
> ---
>  Config.mk                        |    2 +-
>  config/arm64.mk                  |   12 ++
>  xen/arch/arm/Makefile            |    1 +
>  xen/arch/arm/Rules.mk            |    6 +
>  xen/arch/arm/arm64/Makefile      |    1 +
>  xen/arch/arm/arm64/head.S        |  393 ++++++++++++++++++++++++++++++++++++++
>  xen/arch/arm/arm64/mode_switch.S |   83 ++++++++
>  xen/arch/arm/xen.lds.S           |    8 +-
>  xen/include/asm-arm/page.h       |    1 +
>  xen/include/public/arch-arm.h    |   14 ++
>  xen/include/public/hvm/save.h    |    2 +-
>  xen/include/public/xen.h         |    2 +-
>  xen/include/xen/libelf.h         |    2 +-
>  13 files changed, 522 insertions(+), 5 deletions(-)
>  create mode 100644 config/arm64.mk
>  create mode 100644 xen/arch/arm/arm64/Makefile
>  create mode 100644 xen/arch/arm/arm64/head.S
>  create mode 100644 xen/arch/arm/arm64/mode_switch.S
> 
> diff --git a/Config.mk b/Config.mk
> index 64541c8..ea64925 100644
> --- a/Config.mk
> +++ b/Config.mk
> @@ -15,7 +15,7 @@ debug_symbols ?= $(debug)
>  
>  XEN_COMPILE_ARCH    ?= $(shell uname -m | sed -e s/i.86/x86_32/ \
>                           -e s/i86pc/x86_32/ -e s/amd64/x86_64/ \
> -                         -e s/armv7.*/arm32/)
> +                         -e s/armv7.*/arm32/ -e s/armv8.*/arm64/)
>  
>  XEN_TARGET_ARCH     ?= $(XEN_COMPILE_ARCH)
>  XEN_OS              ?= $(shell uname -s)
> diff --git a/config/arm64.mk b/config/arm64.mk
> new file mode 100644
> index 0000000..b2457eb
> --- /dev/null
> +++ b/config/arm64.mk
> @@ -0,0 +1,12 @@
> +CONFIG_ARM := y
> +CONFIG_ARM_64 := y
> +CONFIG_ARM_$(XEN_OS) := y
> +
> +CFLAGS += #-marm -march= -mcpu= etc
> +
> +HAS_PL011 := y
> +
> +# Use only if calling $(LD) directly.
> +LDFLAGS_DIRECT += -maarch64elf
> +
> +CONFIG_LOAD_ADDRESS ?= 0x80000000
> diff --git a/xen/arch/arm/Makefile b/xen/arch/arm/Makefile
> index f2822f2..7ff67c7 100644
> --- a/xen/arch/arm/Makefile
> +++ b/xen/arch/arm/Makefile
> @@ -1,4 +1,5 @@
>  subdir-$(arm32) += arm32
> +subdir-$(arm64) += arm64
>  
>  obj-y += early_printk.o
>  obj-y += domain.o
> diff --git a/xen/arch/arm/Rules.mk b/xen/arch/arm/Rules.mk
> index 5b5768a..29b605d 100644
> --- a/xen/arch/arm/Rules.mk
> +++ b/xen/arch/arm/Rules.mk
> @@ -26,6 +26,12 @@ arm32 := y
>  arm64 := n
>  endif
>  
> +ifeq ($(TARGET_SUBARCH),arm64)
> +CFLAGS += -mcpu=generic
> +arm32 := n
> +arm64 := y
> +endif
> +
>  ifneq ($(call cc-option,$(CC),-fvisibility=hidden,n),n)
>  CFLAGS += -DGCC_HAS_VISIBILITY_ATTRIBUTE
>  endif
> diff --git a/xen/arch/arm/arm64/Makefile b/xen/arch/arm/arm64/Makefile
> new file mode 100644
> index 0000000..dffbeb1
> --- /dev/null
> +++ b/xen/arch/arm/arm64/Makefile
> @@ -0,0 +1 @@
> +obj-y += mode_switch.o
> diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
> new file mode 100644
> index 0000000..b7ab251
> --- /dev/null
> +++ b/xen/arch/arm/arm64/head.S
> @@ -0,0 +1,393 @@
> +/*
> + * xen/arch/arm/head.S
> + *
> + * Start-of-day code for an ARMv8.
> + *
> + * Ian Campbell <ian.campbell@citrix.com>
> + * Copyright (c) 2012 Citrix Systems.
> + *
> + * Based on ARMv7-A head.S by
> + * Tim Deegan <tim@xen.org>
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License as published by
> + * the Free Software Foundation; either version 2 of the License, or
> + * (at your option) any later version.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> + * GNU General Public License for more details.
> + */
> +
> +#include <asm/config.h>
> +#include <asm/page.h>
> +#include <asm/asm_defns.h>
> +
> +#define PT_PT     0xe7f /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=111 T=1 P=1 */
> +#define PT_MEM    0xe7d /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=111 T=0 P=1 */
> +#define PT_DEV    0xe71 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=0 P=1 */
> +#define PT_DEV_L3 0xe73 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=1 P=1 */
> +
> +/* Macro to print a string to the UART, if there is one.
> + * Clobbers x0-x3. */
> +#ifdef EARLY_UART_ADDRESS
> +#define PRINT(_s)       \
> +        adr   x0, 98f ; \
> +        bl    puts    ; \
> +        b     99f     ; \
> +98:     .asciz _s     ; \
> +        .align 2      ; \
> +99:
> +#else
> +#define PRINT(s)
> +#endif
> +
> +        /*.aarch64*/
> +
> +        /*
> +         * Kernel startup entry point.
> +         * ---------------------------
> +         *
> +         * The requirements are:
> +         *   MMU = off, D-cache = off, I-cache = on or off,
> +         *   x0 = physical address to the FDT blob.
> +         *
> +         * This must be the very first address in the loaded image.
> +         * It should be linked at XEN_VIRT_START, and loaded at any
> +         * 2MB-aligned address.  All of text+data+bss must fit in 2MB,
> +         * or the initial pagetable code below will need adjustment.
> +         */
> +
> +        .global start
> +start:
> +        /*
> +         * DO NOT MODIFY. Image header expected by Linux boot-loaders.
> +         */
> +        b       real_start           /* branch to kernel start, magic */
> +        .long   0                    /* reserved */
> +        .quad   0                    /* Image load offset from start of RAM */
> +        .quad   0                    /* reserved */
> +        .quad   0                    /* reserved */
> +
> +real_start:
> +        msr   DAIFSet, 0xf           /* Disable all interrupts */
> +
> +        /* Save the bootloader arguments in less-clobberable registers */
> +        mov   x21, x0                /* x21 := DTB, physical address  */
> +
> +        /* Find out where we are */
> +        ldr   x0, =start
> +        adr   x19, start             /* x19 := paddr (start) */
> +        sub   x20, x19, x0           /* x20 := phys-offset */
> +
> +        /* Using the DTB in the .dtb section? */
> +#ifdef CONFIG_DTB_FILE
> +        ldr   x21, =_sdtb
> +        add   x21, x21, x20          /* x21 := paddr(DTB) */
> +#endif
> +
> +        /* Are we the boot CPU? */
> +        mov   x22, #0                /* x22 := CPU ID */
> +        mrs   x0, mpidr_el1
> +        tbz   x0, 31, boot_cpu       /* Multiprocessor extension supported? */
> +        tbnz  x0, 30, boot_cpu       /* Uniprocessor system? */
> +
> +        mov   x13, #(0xff << 24)
> +        bics  x22, x0, x13           /* Mask out flags to get CPU ID */
> +        b.eq  boot_cpu               /* If we're CPU 0, boot now */
> +
> +        /* Non-boot CPUs wait here to be woken up one at a time. */
> +1:      dsb   sy
> +        ldr   x0, =smp_up_cpu        /* VA of gate */
> +        add   x0, x0, x20            /* PA of gate */
> +        ldr   x1, [x0]               /* Which CPU is being booted? */
> +        cmp   x1, x22                /* Is it us? */
> +        b.eq  2f
> +        wfe
> +        b     1b
> +2:
> +
> +boot_cpu:
> +#ifdef EARLY_UART_ADDRESS
> +        ldr   x23, =EARLY_UART_ADDRESS  /* x23 := UART base address */
> +        cbnz  x22, 1f
> +        bl    init_uart                 /* CPU 0 sets up the UART too */
> +1:      PRINT("- CPU ")
> +        mov   x0, x22
> +        bl    putn
> +        PRINT(" booting -\r\n")
> +#endif
> +
> +        PRINT("- Current EL ")
> +        mrs   x0, CurrentEL
> +        bl    putn
> +        PRINT(" -\r\n")
> +
> +        /* Are we in EL3 */
> +        mrs   x0, CurrentEL
> +        cmp   x0, #PSR_MODE_EL3t
> +        ccmp  x0, #PSR_MODE_EL3h, #0x4, ne
> +        b.eq  1f /* Yes */
> +
> +        /* Are we in EL2 */
> +        cmp   x0, #PSR_MODE_EL2t
> +        ccmp  x0, #PSR_MODE_EL2h, #0x4, ne
> +        b.eq  2f /* Yes */
> +
> +        /* Otherwise, it must have been EL0 or EL1 */
> +        PRINT("- CPU is not in EL3 or EL2 -\r\n")
> +        b     fail
> +
> +1:      PRINT("- Started in EL3 -\r\n- Entering EL2 -\r\n")
> +        ldr   x1, =enter_el2_mode    /* VA of function */
> +        add   x1, x1, x20            /* PA of function */
> +        adr   x30, el2               /* Set return address for call */
> +        br    x1                     /* Call function */
> +
> +2:      PRINT("- Started in EL2 mode -\r\n")
> +
> +el2:
> +        /* Zero BSS on the boot CPU to avoid nasty surprises */
> +        cbnz  x22, skip_bss
> +
> +        PRINT("- Zero BSS -\r\n")
> +        ldr   x0, =__bss_start       /* Load start & end of bss */
> +        ldr   x1, =__bss_end
> +        add   x0, x0, x20            /* Apply physical offset */
> +        add   x1, x1, x20
> +
> +1:      str   xzr, [x0], #8
> +        cmp   x0, x1
> +        b.lo  1b
> +
> +skip_bss:
> +
> +        PRINT("- Setting up control registers -\r\n")
> +
> +        /* Set up memory attribute type tables */
> +        ldr   x0, =MAIRVAL
> +        msr   mair_el2, x0
> +
> +        /* Set up the HTCR:
> +         * PASize -- 4G
> +         * Top byte is used
> +         * PT walks use Outer-Shareable accesses,
> +         * PT walks are write-back, no-write-allocate in both cache levels,
> +         * Full 64-bit address space goes through this table. */
> +        ldr   x0, =0x80802500
> +        msr   tcr_el2, x0
> +
> +        /* Set up the HSCTLR:
> +         * Exceptions in LE ARM,
> +         * Low-latency IRQs disabled,
> +         * Write-implies-XN disabled (for now),
> +         * D-cache disabled (for now),
> +         * I-cache enabled,
> +         * Alignment checking enabled,
> +         * MMU translation disabled (for now). */
> +        ldr   x0, =(HSCTLR_BASE|SCTLR_A)
> +        msr   SCTLR_EL2, x0
> +
> +        /* Write Xen's PT's paddr into the HTTBR */
> +        ldr   x4, =xen_pgtable
> +        add   x4, x4, x20            /* x4 := paddr (xen_pagetable) */
> +        msr   TTBR0_EL2, x4
> +
> +        /* Non-boot CPUs don't need to rebuild the pagetable */
> +        cbnz  x22, pt_ready
> +
> +        ldr   x1, =xen_first
> +        add   x1, x1, x20            /* x1 := paddr (xen_first) */
> +        mov   x3, #PT_PT             /* x2 := table map of xen_first */
> +        orr   x2, x1, x3             /* (+ rights for linear PT) */
> +        str   x2, [x4, #0]           /* Map it in slot 0 */
> +
> +        mov   x4, x1                 /* Next level into xen_first */
> +
> +       /* console fixmap */
> +#ifdef EARLY_UART_ADDRESS
> +        ldr   x1, =xen_fixmap
> +        add   x1, x1, x20            /* x1 := paddr (xen_fixmap) */
> +        lsr   x2, x23, #12
> +        lsl   x2, x2, #12            /* 4K aligned paddr of UART */
> +        mov   x3, #PT_DEV_L3
> +        orr   x2, x2, x3             /* x2 := 4K dev map including UART */
> +        str   x2, [x1, #(FIXMAP_CONSOLE*8)] /* Map it in the first fixmap's slot */
> +#endif
> +
> +        /* Build the baseline idle pagetable's first-level entries */
> +        ldr   x1, =xen_second
> +        add   x1, x1, x20            /* x1 := paddr (xen_second) */
> +        mov   x3, #PT_PT             /* x2 := table map of xen_second */
> +        orr   x2, x1, x3             /* (+ rights for linear PT) */
> +        str   x2, [x4, #0]           /* Map it in slot 0 */
> +        add   x2, x2, #0x1000
> +        str   x2, [x4, #8]           /* Map 2nd page in slot 1 */
> +        add   x2, x2, #0x1000
> +        str   x2, [x4, #16]          /* Map 3rd page in slot 2 */
> +        add   x2, x2, #0x1000
> +        str   x2, [x4, #24]          /* Map 4th page in slot 3 */
> +
> +        /* Now set up the second-level entries */
> +        mov   x3, #PT_MEM
> +        orr   x2, x19, x3            /* x2 := 2MB normal map of Xen */
> +        orr   x4, xzr, x19, lsr #18
> +        str   x2, [x1, x4]           /* Map Xen there */
> +        ldr   x4, =start
> +        lsr   x4, x4, #18            /* Slot for vaddr(start) */
> +        str   x2, [x1, x4]           /* Map Xen there too */
> +
> +        /* xen_fixmap pagetable */
> +        ldr   x2, =xen_fixmap
> +        add   x2, x2, x20            /* x2 := paddr (xen_fixmap) */
> +        mov   x3, #PT_PT
> +        orr   x2, x2, x3             /* x2 := table map of xen_fixmap */
> +        add   x4, x4, #8
> +        str   x2, [x1, x4]           /* Map it in the fixmap's slot */
> +
> +        lsr   x2, x21, #21
> +        lsl   x2, x2, #21            /* 2MB-aligned paddr of DTB */
> +        mov   x3, #PT_MEM            /* x2 := 2MB RAM incl. DTB */
> +        orr   x2, x2, x3
> +        add   x4, x4, #8
> +        str   x2, [x1, x4]           /* Map it in the early boot slot */
> +
> +pt_ready:
> +        PRINT("- Turning on paging -\r\n")
> +
> +        ldr   x1, =paging            /* Explicit vaddr, not PC-relative */
> +        mrs   x0, SCTLR_EL2
> +        orr   x0, x0, #SCTLR_M       /* Enable MMU */
> +        orr   x0, x0, #SCTLR_C       /* Enable D-cache */
> +        dsb   sy                     /* Flush PTE writes and finish reads */
> +        msr   SCTLR_EL2, x0          /* now paging is enabled */
> +        isb                          /* Now, flush the icache */
> +        br    x1                     /* Get a proper vaddr into PC */
> +paging:
> +
> +#ifdef EARLY_UART_ADDRESS
> +        /* Use a virtual address to access the UART. */
> +        ldr   x23, =FIXMAP_ADDR(FIXMAP_CONSOLE)
> +#endif
> +
> +        PRINT("- Ready -\r\n")
> +
> +        /* The boot CPU should go straight into C now */
> +        cbz   x22, launch
> +
> +        /* Non-boot CPUs need to move on to the relocated pagetables */
> +        ldr   x4, =boot_ttbr         /* VA of TTBR0_EL2 stashed by CPU 0 */
> +        add   x4, x4, x20            /* PA of it */
> +        ldr   x4, [x4]               /* Actual value */
> +        dsb   sy
> +        msr   TTBR0_EL2, x4
> +        dsb   sy
> +        isb
> +        tlbi  alle2
> +        dsb   sy                     /* Ensure completion of TLB flush */
> +        isb
> +
> +        /* Non-boot CPUs report that they've got this far */
> +        ldr   x0, =ready_cpus
> +1:      ldaxr x1, [x0]               /*            { read # of ready CPUs } */
> +        add   x1, x1, #1             /* Atomically { ++                   } */
> +        stlxr w2, x1, [x0]           /*            { writeback            } */
> +        cbnz  w2, 1b
> +        dsb   sy
> +        dc    cvac, x0               /* Flush D-Cache */
> +        dsb   sy
> +
> +        /* Here, the non-boot CPUs must wait again -- they're now running on
> +         * the boot CPU's pagetables so it's safe for the boot CPU to
> +         * overwrite the non-relocated copy of Xen.  Once it's done that,
> +         * and brought up the memory allocator, non-boot CPUs can get their
> +         * own stacks and enter C. */
> +1:      wfe
> +        dsb   sy
> +        ldr   x0, =smp_up_cpu
> +        ldr   x1, [x0]               /* Which CPU is being booted? */
> +        cmp   x1, x22                /* Is it us? */
> +        b.ne  1b
> +
> +launch:
> +        ldr   x0, =init_stack        /* Find the boot-time stack */
> +        ldr   x0, [x0]
> +        add   x0, x0, #STACK_SIZE    /* (which grows down from the top). */
> +        sub   x0, x0, #CPUINFO_sizeof /* Make room for CPU save record */
> +        mov   sp, x0
> +
> +        mov   x0, x20                /* Marshal args: - phys_offset */
> +        mov   x1, x21                /*               - FDT */
> +        mov   x2, x22                /*               - CPU ID */
> +        cbz   x22, start_xen         /* and disappear into the land of C */
> +        b     start_secondary        /* (to the appropriate entry point) */
> +
> +/* Fail-stop
> + * x0: string explaining why */
> +fail:   PRINT("- Boot failed -\r\n")
> +1:      wfe
> +        b     1b
> +
> +#ifdef EARLY_UART_ADDRESS
> +
> +/* Bring up the UART. Specific to the PL011 UART.
> + * Clobbers x0-x2 */
> +init_uart:
> +        mov   x1, #0x0
> +        strh  w1, [x23, #0x24]       /* -> UARTIBRD (Baud divisor fraction) */
> +        mov   x1, #0x4               /* 7.3728MHz / 0x4 == 16 * 115200 */
> +        strh  w1, [x23, #0x24]       /* -> UARTIBRD (Baud divisor integer) */
> +        mov   x1, #0x60              /* 8n1 */
> +        strh  w1, [x23, #0x24]       /* -> UARTLCR_H (Line control) */
> +        ldr   x1, =0x00000301        /* RXE | TXE | UARTEN */
> +        strh  w1, [x23, #0x30]       /* -> UARTCR (Control Register) */
> +        adr   x0, 1f
> +        b     puts
> +1:      .asciz "- UART enabled -\r\n"
> +        .align 4
> +
> +/* Print early debug messages.  Specific to the PL011 UART.
> + * x0: NUL-terminated string to print.
> + * Clobbers x0-x2 */
> +puts:
> +        ldrh  w2, [x23, #0x18]       /* <- UARTFR (Flag register) */
> +        tst   w2, #0x8               /* Check BUSY bit */
> +        b.ne  puts                   /* Wait for the UART to be ready */
> +        ldrb  w2, [x0], #1           /* Load next char */
> +        cbz   w2, 1f                 /* Exit on nul */
> +        str   w2, [x23]              /* -> UARTDR (Data Register) */
> +        b     puts
> +1:
> +        ret
> +
> +/* Print a 32-bit number in hex.  Specific to the PL011 UART.
> + * x0: Number to print.
> + * Clobbers x0-x3 */
> +putn:
> +        adr   x1, hex
> +        mov   x3, #8
> +1:      ldrh  w2, [x23, #0x18]       /* <- UARTFR (Flag register) */
> +        tst   w2, #0x8               /* Check BUSY bit */
> +        b.ne  1b                     /* Wait for the UART to be ready */
> +        and   x2, x0, #0xf0000000    /* Mask off the top nybble */
> +        lsr   x2, x2, #28
> +        ldrb  w2, [x1, x2]           /* Convert to a char */
> +        strb  w2, [x23]              /* -> UARTDR (Data Register) */
> +        lsl   x0, x0, #4             /* Roll it through one nybble at a time */
> +        subs  x3, x3, #1
> +        b.ne  1b
> +        ret
> +
> +hex:    .ascii "0123456789abcdef"
> +        .align 2
> +
> +#else  /* EARLY_UART_ADDRESS */
> +
> +init_uart:
> +.global early_puts
> +early_puts:
> +puts:
> +putn:   ret
> +
> +#endif /* EARLY_UART_ADDRESS */
> diff --git a/xen/arch/arm/arm64/mode_switch.S b/xen/arch/arm/arm64/mode_switch.S
> new file mode 100644
> index 0000000..4c38181
> --- /dev/null
> +++ b/xen/arch/arm/arm64/mode_switch.S
> @@ -0,0 +1,83 @@
> +/*
> + * xen/arch/arm/arm64/mode_switch.S
> + *
> + * Start-of-day code to take a CPU from EL3 to EL2. Largely taken from
> + * bootwrapper.
> + *
> + * Ian Campbell <ian.campbell@citrix.com>
> + * Copyright (c) 2012 Citrix Systems.
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License as published by
> + * the Free Software Foundation; either version 2 of the License, or
> + * (at your option) any later version.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> + * GNU General Public License for more details.
> + */
> +
> +#include <asm/config.h>
> +#include <asm/page.h>
> +#include <asm/asm_defns.h>
> +
> +/* Get up a CPU into EL2.  Clobbers x0-x3.
> + *
> + * Expects x22 == CPU number
> + * Expects x30  == EL2 entry point
> + *
> + * This code is specific to the VE model, and not intended to be used
> + * on production systems.  As such it's a bit hackier than the main
> + * boot code in head.S.  In future it will be replaced by better
> + * integration with the bootloader/firmware so that Xen always starts
> + * at EL2.
> + */
> +
> +.globl enter_el2_mode
> +enter_el2_mode:
> +        mov     x0, #0x30                       // RES1
> +        orr     x0, x0, #(1 << 0)               // Non-secure EL1
> +        orr     x0, x0, #(1 << 8)               // HVC enable
> +        orr     x0, x0, #(1 << 10)              // 64-bit EL2
> +        msr     scr_el3, x0
> +
> +        msr     cptr_el3, xzr                   // Disable copro. traps to EL3
> +
> +        ldr     x0, =0x01800000                 // 24Mhz
> +        msr     cntfrq_el0, x0
> +
> +        /*
> +         * Check for the primary CPU to avoid a race on the distributor
> +         * registers.
> +         */
> +        cbnz    x22, 1f
> +
> +        ldr     x1, =(GIC_BASE_ADDRESS+GIC_DR_OFFSET) // GICD_CTLR
> +        mov     w0, #3                          // EnableGrp0 | EnableGrp1
> +        str     w0, [x1]
> +
> +1:      ldr     x1, =(GIC_BASE_ADDRESS+GIC_DR_OFFSET+0x80) // GICD_IGROUPR
> +        mov     w0, #~0                         // Grp1 interrupts
> +        str     w0, [x1], #4
> +        b.ne    2f                              // Only local interrupts for secondary CPUs
> +        str     w0, [x1], #4
> +        str     w0, [x1], #4
> +
> +2:      ldr     x1, =(GIC_BASE_ADDRESS+GIC_CR_OFFSET) // GICC_CTLR
> +        ldr     w0, [x1]
> +        mov     w0, #3                          // EnableGrp0 | EnableGrp1
> +        str     w0, [x1]
> +
> +        mov     w0, #1 << 7                     // allow NS access to GICC_PMR
> +        str     w0, [x1, #4]                    // GICC_PMR
> +
> +        msr     sctlr_el2, xzr
> +
> +        /*
> +         * Prepare the switch to the EL2_SP1 mode from EL3
> +         */
> +        msr     elr_el3, x30                    // Return to desired function
> +        mov     x1, #0x3c9                      // EL2_SP1 | D | A | I | F
> +        msr     spsr_el3, x1
> +        eret
> diff --git a/xen/arch/arm/xen.lds.S b/xen/arch/arm/xen.lds.S
> index 410d7db..b1f0a78 100644
> --- a/xen/arch/arm/xen.lds.S
> +++ b/xen/arch/arm/xen.lds.S
> @@ -11,7 +11,13 @@
>  
>  ENTRY(start)
>  
> -OUTPUT_ARCH(arm)
> +#if defined(__arm__)
> +#define FORMAT arm
> +#elif defined(__aarch64__)
> +#define FORMAT aarch64
> +#endif
> +
> +OUTPUT_ARCH(FORMAT)
>  
>  PHDRS
>  {
> diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
> index 9acd0af..e0a636f 100644
> --- a/xen/include/asm-arm/page.h
> +++ b/xen/include/asm-arm/page.h
> @@ -38,6 +38,7 @@
>   */
>  #define MAIR0VAL 0xeeaa4400
>  #define MAIR1VAL 0xff000004
> +#define MAIRVAL (MAIR0VAL|MAIR1VAL<<32)
>  
>  /*
>   * Attribute Indexes.
> diff --git a/xen/include/public/arch-arm.h b/xen/include/public/arch-arm.h
> index 8dd9062..dc12524 100644
> --- a/xen/include/public/arch-arm.h
> +++ b/xen/include/public/arch-arm.h
> @@ -174,6 +174,8 @@ typedef uint64_t xen_callback_t;
>  
>  /* 0-4: Mode */
>  #define PSR_MODE_MASK 0x1f
> +
> +/* 32 bit modes */
>  #define PSR_MODE_USR 0x10
>  #define PSR_MODE_FIQ 0x11
>  #define PSR_MODE_IRQ 0x12
> @@ -184,6 +186,18 @@ typedef uint64_t xen_callback_t;
>  #define PSR_MODE_UND 0x1b
>  #define PSR_MODE_SYS 0x1f
>  
> +/* 64 bit modes */
> +#ifdef CONFIG_ARM_64
> +#define PSR_MODE_BIT  0x10 /* Set iff AArch32 */
> +#define PSR_MODE_EL3h 0x0d
> +#define PSR_MODE_EL3t 0x0c
> +#define PSR_MODE_EL2h 0x09
> +#define PSR_MODE_EL2t 0x08
> +#define PSR_MODE_EL1h 0x05
> +#define PSR_MODE_EL1t 0x04
> +#define PSR_MODE_EL0t 0x00
> +#endif
> +
>  #define PSR_THUMB       (1<<5)        /* Thumb Mode enable */
>  #define PSR_FIQ_MASK    (1<<6)        /* Fast Interrupt mask */
>  #define PSR_IRQ_MASK    (1<<7)        /* Interrupt mask */
> diff --git a/xen/include/public/hvm/save.h b/xen/include/public/hvm/save.h
> index 5538d8e..cc8b5fd 100644
> --- a/xen/include/public/hvm/save.h
> +++ b/xen/include/public/hvm/save.h
> @@ -102,7 +102,7 @@ DECLARE_HVM_SAVE_TYPE(END, 0, struct hvm_save_end);
>  
>  #if defined(__i386__) || defined(__x86_64__)
>  #include "../arch-x86/hvm/save.h"
> -#elif defined(__arm__)
> +#elif defined(__arm__) || defined(__aarch64__)
>  #include "../arch-arm/hvm/save.h"
>  #else
>  #error "unsupported architecture"
> diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h
> index 846f446..a1927c0 100644
> --- a/xen/include/public/xen.h
> +++ b/xen/include/public/xen.h
> @@ -31,7 +31,7 @@
>  
>  #if defined(__i386__) || defined(__x86_64__)
>  #include "arch-x86/xen.h"
> -#elif defined(__arm__)
> +#elif defined(__arm__) || defined (__aarch64__)
>  #include "arch-arm.h"
>  #else
>  #error "Unsupported architecture"
> diff --git a/xen/include/xen/libelf.h b/xen/include/xen/libelf.h
> index e8f6508..218bb18 100644
> --- a/xen/include/xen/libelf.h
> +++ b/xen/include/xen/libelf.h
> @@ -23,7 +23,7 @@
>  #ifndef __XEN_LIBELF_H__
>  #define __XEN_LIBELF_H__
>  
> -#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
> +#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
>  #define XEN_ELF_LITTLE_ENDIAN
>  #else
>  #error define architectural endianness
> -- 
> 1.7.2.5
> 
> 
> 
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xen.org
> http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 27/46] xen: arm: arm64 trap handling.
  2013-02-21 16:07           ` Tim Deegan
@ 2013-02-21 16:08             ` Ian Campbell
  0 siblings, 0 replies; 81+ messages in thread
From: Ian Campbell @ 2013-02-21 16:08 UTC (permalink / raw)
  To: Tim Deegan; +Cc: Stefano Stabellini, xen-devel

On Thu, 2013-02-21 at 16:07 +0000, Tim Deegan wrote:
> At 16:02 +0000 on 21 Feb (1361462564), Ian Campbell wrote:
> > On Thu, 2013-02-21 at 15:36 +0000, Tim Deegan wrote:
> > > At 15:25 +0000 on 21 Feb (1361460324), Ian Campbell wrote:
> > > > On Thu, 2013-02-21 at 15:10 +0000, Tim Deegan wrote:
> > > > > At 16:47 +0000 on 14 Feb (1360860461), Ian Campbell wrote:
> > > > > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> > > > > > ---
> > > > > > v2: Call leave_hypervisor_tail on exit back to guest, disable interrupts while
> > > > > >     restoring state.
> > > > > 
> > > > > You don't seem to have addressed my other comments on v1:
> > > > 
> > > > I've got them in v3; I noted that I hadn't addressed your comment on this
> > > > patch in the #0/46.
> > > 
> > > So you did; I did read the 0/46, but for some reason all that stuck in
> > > my head was the WFE stuff. 
> > > 
> > > AFAICS you just need to re-roll this and #25,
> > 
> > I don't think you mean #25? That is "xen: arm64: add guest type to
> > domain field." which you've acked.
> 
> Sorry, I meant #5.

Ah yes, I've just resent that one.

> > When I'm applying my own patches I prefer to do it from the list rather
> > than short-cutting them from my own tree; it keeps me honest and stops me
> > making mistakes. How about I include an index of acked/unacked patches in the
> > zeroeth mail? You ought to be able to just mark it all as read.
> 
> Fair enough -- no need to index them; I just keep all previous versions
> of a series around so I can easily find the comments.  But this won't
> need a v4, will it? :)

I hope not!

Ian.

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH V2 23/46] xen: arm: add register_t type, native register size for the hypervisor
  2013-02-21 16:01   ` Ian Campbell
@ 2013-02-21 16:23     ` Stefano Stabellini
  2013-02-21 16:46       ` Ian Campbell
  0 siblings, 1 reply; 81+ messages in thread
From: Stefano Stabellini @ 2013-02-21 16:23 UTC (permalink / raw)
  To: Ian Campbell; +Cc: Stefano Stabellini, Tim (Xen.org), xen-devel

On Thu, 21 Feb 2013, Ian Campbell wrote:
> On Thu, 2013-02-14 at 16:47 +0000, Ian Campbell wrote:
> > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> > Acked-by: Tim Deegan <tim@xen.org>
> > but:
> >         This is mostly a matter of coding taste, so I'd like Stefano's
> >         ack/nack here as well.
> 
> Stefano, any strong opinion?

Are there any concrete benefits in introducing register_t compared to
using unsigned long?
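
(For reference, the actual types.h hunk isn't quoted below; presumably it is
something along these lines -- a sketch of the idea, not the patch text:

    #include <stdint.h>

    /* Native register width for the hypervisor build. */
    #ifdef CONFIG_ARM_32
    typedef uint32_t register_t;
    #define PRIregister "x"
    #else   /* CONFIG_ARM_64 */
    typedef uint64_t register_t;
    #define PRIregister "lx"
    #endif

so the printk format changes below would follow from PRIregister tracking the
register width.)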


> >  xen/arch/arm/domain_build.c |    2 +-
> >  xen/arch/arm/smpboot.c      |    2 +-
> >  xen/arch/arm/traps.c        |   44 ++++++++++++++++++++++--------------------
> >  xen/arch/arm/vgic.c         |   18 ++++++++--------
> >  xen/arch/arm/vpl011.c       |    6 ++--
> >  xen/arch/arm/vtimer.c       |    6 ++--
> >  xen/include/asm-arm/regs.h  |    2 +-
> >  xen/include/asm-arm/types.h |    4 +++
> >  8 files changed, 45 insertions(+), 39 deletions(-)
> >
> > diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
> > index 7403f1a..30d014a 100644
> > --- a/xen/arch/arm/domain_build.c
> > +++ b/xen/arch/arm/domain_build.c
> > @@ -268,7 +268,7 @@ static int prepare_dtb(struct domain *d, struct kernel_info *kinfo)
> >
> >  static void dtb_load(struct kernel_info *kinfo)
> >  {
> > -    void * __user dtb_virt = (void *)(u32)kinfo->dtb_paddr;
> > +    void * __user dtb_virt = (void *)(register_t)kinfo->dtb_paddr;
> >
> >      raw_copy_to_guest(dtb_virt, kinfo->fdt, fdt_totalsize(kinfo->fdt));
> >      xfree(kinfo->fdt);
> > diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c
> > index 86379b7..d8eb5d3 100644
> > --- a/xen/arch/arm/smpboot.c
> > +++ b/xen/arch/arm/smpboot.c
> > @@ -142,7 +142,7 @@ void __cpuinit start_secondary(unsigned long boot_phys_offset,
> >      set_processor_id(cpuid);
> >
> >      /* Setup Hyp vector base */
> > -    WRITE_CP32((uint32_t) hyp_traps_vector, HVBAR);
> > +    WRITE_CP32((register_t) hyp_traps_vector, HVBAR);
> >
> >      mmu_init_secondary_cpu();
> >      enable_vfp();
> > diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
> > index eaf1f52..0299b33 100644
> > --- a/xen/arch/arm/traps.c
> > +++ b/xen/arch/arm/traps.c
> > @@ -68,7 +68,7 @@ static void print_xen_info(void)
> >             debug_build() ? 'y' : 'n', print_tainted(taint_str));
> >  }
> >
> > -uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg)
> > +register_t *select_user_reg(struct cpu_user_regs *regs, int reg)
> >  {
> >      BUG_ON( !guest_mode(regs) );
> >
> > @@ -81,20 +81,20 @@ uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg)
> >
> >      switch ( reg ) {
> >      case 0 ... 7: /* Unbanked registers */
> > -        BUILD_BUG_ON(REGOFFS(r0) + 7*sizeof(uint32_t) != REGOFFS(r7));
> > +        BUILD_BUG_ON(REGOFFS(r0) + 7*sizeof(register_t) != REGOFFS(r7));
> >          return &regs->r0 + reg;
> >      case 8 ... 12: /* Register banked in FIQ mode */
> > -        BUILD_BUG_ON(REGOFFS(r8_fiq) + 4*sizeof(uint32_t) != REGOFFS(r12_fiq));
> > +        BUILD_BUG_ON(REGOFFS(r8_fiq) + 4*sizeof(register_t) != REGOFFS(r12_fiq));
> >          if ( fiq_mode(regs) )
> >              return &regs->r8_fiq + reg - 8;
> >          else
> >              return &regs->r8 + reg - 8;
> >      case 13 ... 14: /* Banked SP + LR registers */
> > -        BUILD_BUG_ON(REGOFFS(sp_fiq) + 1*sizeof(uint32_t) != REGOFFS(lr_fiq));
> > -        BUILD_BUG_ON(REGOFFS(sp_irq) + 1*sizeof(uint32_t) != REGOFFS(lr_irq));
> > -        BUILD_BUG_ON(REGOFFS(sp_svc) + 1*sizeof(uint32_t) != REGOFFS(lr_svc));
> > -        BUILD_BUG_ON(REGOFFS(sp_abt) + 1*sizeof(uint32_t) != REGOFFS(lr_abt));
> > -        BUILD_BUG_ON(REGOFFS(sp_und) + 1*sizeof(uint32_t) != REGOFFS(lr_und));
> > +        BUILD_BUG_ON(REGOFFS(sp_fiq) + 1*sizeof(register_t) != REGOFFS(lr_fiq));
> > +        BUILD_BUG_ON(REGOFFS(sp_irq) + 1*sizeof(register_t) != REGOFFS(lr_irq));
> > +        BUILD_BUG_ON(REGOFFS(sp_svc) + 1*sizeof(register_t) != REGOFFS(lr_svc));
> > +        BUILD_BUG_ON(REGOFFS(sp_abt) + 1*sizeof(register_t) != REGOFFS(lr_abt));
> > +        BUILD_BUG_ON(REGOFFS(sp_und) + 1*sizeof(register_t) != REGOFFS(lr_und));
> >          switch ( regs->cpsr & PSR_MODE_MASK )
> >          {
> >          case PSR_MODE_USR:
> > @@ -315,11 +315,11 @@ static void show_guest_stack(struct cpu_user_regs *regs)
> >      printk("GUEST STACK GOES HERE\n");
> >  }
> >
> > -#define STACK_BEFORE_EXCEPTION(regs) ((uint32_t*)(regs)->sp)
> > +#define STACK_BEFORE_EXCEPTION(regs) ((register_t*)(regs)->sp)
> >
> >  static void show_trace(struct cpu_user_regs *regs)
> >  {
> > -    uint32_t *frame, next, addr, low, high;
> > +    register_t *frame, next, addr, low, high;
> >
> >      printk("Xen call trace:\n   ");
> >
> > @@ -327,7 +327,7 @@ static void show_trace(struct cpu_user_regs *regs)
> >      print_symbol(" %s\n   ", regs->pc);
> >
> >      /* Bounds for range of valid frame pointer. */
> > -    low  = (uint32_t)(STACK_BEFORE_EXCEPTION(regs)/* - 2*/);
> > +    low  = (register_t)(STACK_BEFORE_EXCEPTION(regs)/* - 2*/);
> >      high = (low & ~(STACK_SIZE - 1)) +
> >          (STACK_SIZE - sizeof(struct cpu_info));
> >
> > @@ -356,7 +356,7 @@ static void show_trace(struct cpu_user_regs *regs)
> >              break;
> >          {
> >              /* Ordinary stack frame. */
> > -            frame = (uint32_t *)next;
> > +            frame = (register_t *)next;
> >              next  = frame[-1];
> >              addr  = frame[0];
> >          }
> > @@ -364,7 +364,7 @@ static void show_trace(struct cpu_user_regs *regs)
> >          printk("[<%p>]", _p(addr));
> >          print_symbol(" %s\n   ", addr);
> >
> > -        low = (uint32_t)&frame[1];
> > +        low = (register_t)&frame[1];
> >      }
> >
> >      printk("\n");
> > @@ -372,7 +372,7 @@ static void show_trace(struct cpu_user_regs *regs)
> >
> >  void show_stack(struct cpu_user_regs *regs)
> >  {
> > -    uint32_t *stack = STACK_BEFORE_EXCEPTION(regs), addr;
> > +    register_t *stack = STACK_BEFORE_EXCEPTION(regs), addr;
> >      int i;
> >
> >      if ( guest_mode(regs) )
> > @@ -486,20 +486,22 @@ static arm_hypercall_t arm_hypercall_table[] = {
> >
> >  static void do_debug_trap(struct cpu_user_regs *regs, unsigned int code)
> >  {
> > -    uint32_t reg, *r;
> > +    register_t *r;
> > +    uint32_t reg;
> >      uint32_t domid = current->domain->domain_id;
> >      switch ( code ) {
> >      case 0xe0 ... 0xef:
> >          reg = code - 0xe0;
> >          r = select_user_reg(regs, reg);
> > -        printk("DOM%d: R%d = %#010"PRIx32" at %#010"PRIx32"\n",
> > +        printk("DOM%d: R%d = 0x%"PRIregister" at 0x%"PRIvaddr"\n",
> >                 domid, reg, *r, regs->pc);
> >          break;
> >      case 0xfd:
> > -        printk("DOM%d: Reached %#010"PRIx32"\n", domid, regs->pc);
> > +        printk("DOM%d: Reached %"PRIvaddr"\n", domid, regs->pc);
> >          break;
> >      case 0xfe:
> > -        printk("%c", (char)(regs->r0 & 0xff));
> > +        r = select_user_reg(regs, 0);
> > +        printk("%c", (char)(*r & 0xff));
> >          break;
> >      case 0xff:
> >          printk("DOM%d: DEBUG\n", domid);
> > @@ -561,7 +563,7 @@ static void do_cp15_32(struct cpu_user_regs *regs,
> >                         union hsr hsr)
> >  {
> >      struct hsr_cp32 cp32 = hsr.cp32;
> > -    uint32_t *r = select_user_reg(regs, cp32.reg);
> > +    uint32_t *r = (uint32_t*)select_user_reg(regs, cp32.reg);
> >
> >      if ( !cp32.ccvalid ) {
> >          dprintk(XENLOG_ERR, "cp_15(32): need to handle invalid condition codes\n");
> > @@ -607,7 +609,7 @@ static void do_cp15_32(struct cpu_user_regs *regs,
> >          BUG_ON(!vtimer_emulate(regs, hsr));
> >          break;
> >      default:
> > -        printk("%s p15, %d, r%d, cr%d, cr%d, %d @ %#08x\n",
> > +        printk("%s p15, %d, r%d, cr%d, cr%d, %d @ 0x%"PRIregister"\n",
> >                 cp32.read ? "mrc" : "mcr",
> >                 cp32.op1, cp32.reg, cp32.crn, cp32.crm, cp32.op2, regs->pc);
> >          panic("unhandled 32-bit CP15 access %#x\n", hsr.bits & HSR_CP32_REGS_MASK);
> > @@ -637,7 +639,7 @@ static void do_cp15_64(struct cpu_user_regs *regs,
> >          BUG_ON(!vtimer_emulate(regs, hsr));
> >          break;
> >      default:
> > -        printk("%s p15, %d, r%d, r%d, cr%d @ %#08x\n",
> > +        printk("%s p15, %d, r%d, r%d, cr%d @ 0x%"PRIregister"\n",
> >                 cp64.read ? "mrrc" : "mcrr",
> >                 cp64.op1, cp64.reg1, cp64.reg2, cp64.crm, regs->pc);
> >          panic("unhandled 64-bit CP15 access %#x\n", hsr.bits & HSR_CP64_REGS_MASK);
> > diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> > index 39b9775..57147d5 100644
> > --- a/xen/arch/arm/vgic.c
> > +++ b/xen/arch/arm/vgic.c
> > @@ -160,7 +160,7 @@ static int vgic_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
> >  {
> >      struct hsr_dabt dabt = info->dabt;
> >      struct cpu_user_regs *regs = guest_cpu_user_regs();
> > -    uint32_t *r = select_user_reg(regs, dabt.reg);
> > +    register_t *r = select_user_reg(regs, dabt.reg);
> >      struct vgic_irq_rank *rank;
> >      int offset = (int)(info->gpa - VGIC_DISTR_BASE_ADDRESS);
> >      int gicd_reg = REG(offset);
> > @@ -372,7 +372,7 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
> >  {
> >      struct hsr_dabt dabt = info->dabt;
> >      struct cpu_user_regs *regs = guest_cpu_user_regs();
> > -    uint32_t *r = select_user_reg(regs, dabt.reg);
> > +    register_t *r = select_user_reg(regs, dabt.reg);
> >      struct vgic_irq_rank *rank;
> >      int offset = (int)(info->gpa - VGIC_DISTR_BASE_ADDRESS);
> >      int gicd_reg = REG(offset);
> > @@ -421,13 +421,13 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
> >
> >      case GICD_ISPENDR ... GICD_ISPENDRN:
> >          if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width;
> > -        printk("vGICD: unhandled %s write %#"PRIx32" to ISPENDR%d\n",
> > +        printk("vGICD: unhandled %s write %#"PRIregister" to ISPENDR%d\n",
> >                 dabt.size ? "word" : "byte", *r, gicd_reg - GICD_ISPENDR);
> >          return 0;
> >
> >      case GICD_ICPENDR ... GICD_ICPENDRN:
> >          if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width;
> > -        printk("vGICD: unhandled %s write %#"PRIx32" to ICPENDR%d\n",
> > +        printk("vGICD: unhandled %s write %#"PRIregister" to ICPENDR%d\n",
> >                 dabt.size ? "word" : "byte", *r, gicd_reg - GICD_ICPENDR);
> >          return 0;
> >
> > @@ -499,19 +499,19 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
> >
> >      case GICD_SGIR:
> >          if ( dabt.size != 2 ) goto bad_width;
> > -        printk("vGICD: unhandled write %#"PRIx32" to ICFGR%d\n",
> > +        printk("vGICD: unhandled write %#"PRIregister" to ICFGR%d\n",
> >                 *r, gicd_reg - GICD_ICFGR);
> >          return 0;
> >
> >      case GICD_CPENDSGIR ... GICD_CPENDSGIRN:
> >          if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width;
> > -        printk("vGICD: unhandled %s write %#"PRIx32" to ICPENDSGIR%d\n",
> > +        printk("vGICD: unhandled %s write %#"PRIregister" to ICPENDSGIR%d\n",
> >                 dabt.size ? "word" : "byte", *r, gicd_reg - GICD_CPENDSGIR);
> >          return 0;
> >
> >      case GICD_SPENDSGIR ... GICD_SPENDSGIRN:
> >          if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width;
> > -        printk("vGICD: unhandled %s write %#"PRIx32" to ISPENDSGIR%d\n",
> > +        printk("vGICD: unhandled %s write %#"PRIregister" to ISPENDSGIR%d\n",
> >                 dabt.size ? "word" : "byte", *r, gicd_reg - GICD_SPENDSGIR);
> >          return 0;
> >
> > @@ -537,13 +537,13 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
> >          goto write_ignore;
> >
> >      default:
> > -        printk("vGICD: unhandled write r%d=%"PRIx32" offset %#08x\n",
> > +        printk("vGICD: unhandled write r%d=%"PRIregister" offset %#08x\n",
> >                 dabt.reg, *r, offset);
> >          return 0;
> >      }
> >
> >  bad_width:
> > -    printk("vGICD: bad write width %d r%d=%"PRIx32" offset %#08x\n",
> > +    printk("vGICD: bad write width %d r%d=%"PRIregister" offset %#08x\n",
> >             dabt.size, dabt.reg, *r, offset);
> >      domain_crash_synchronous();
> >      return 0;
> > diff --git a/xen/arch/arm/vpl011.c b/xen/arch/arm/vpl011.c
> > index 7dcee90..db5094e 100644
> > --- a/xen/arch/arm/vpl011.c
> > +++ b/xen/arch/arm/vpl011.c
> > @@ -92,7 +92,7 @@ static int uart0_mmio_read(struct vcpu *v, mmio_info_t *info)
> >  {
> >      struct hsr_dabt dabt = info->dabt;
> >      struct cpu_user_regs *regs = guest_cpu_user_regs();
> > -    uint32_t *r = select_user_reg(regs, dabt.reg);
> > +    register_t *r = select_user_reg(regs, dabt.reg);
> >      int offset = (int)(info->gpa - UART0_START);
> >
> >      switch ( offset )
> > @@ -114,7 +114,7 @@ static int uart0_mmio_write(struct vcpu *v, mmio_info_t *info)
> >  {
> >      struct hsr_dabt dabt = info->dabt;
> >      struct cpu_user_regs *regs = guest_cpu_user_regs();
> > -    uint32_t *r = select_user_reg(regs, dabt.reg);
> > +    register_t *r = select_user_reg(regs, dabt.reg);
> >      int offset = (int)(info->gpa - UART0_START);
> >
> >      switch ( offset )
> > @@ -127,7 +127,7 @@ static int uart0_mmio_write(struct vcpu *v, mmio_info_t *info)
> >          /* Silently ignore */
> >          return 1;
> >      default:
> > -        printk("VPL011: unhandled write r%d=%"PRIx32" offset %#08x\n",
> > +        printk("VPL011: unhandled write r%d=%"PRIregister" offset %#08x\n",
> >                 dabt.reg, *r, offset);
> >          domain_crash_synchronous();
> >      }
> > diff --git a/xen/arch/arm/vtimer.c b/xen/arch/arm/vtimer.c
> > index 85201b5..291b87e 100644
> > --- a/xen/arch/arm/vtimer.c
> > +++ b/xen/arch/arm/vtimer.c
> > @@ -99,7 +99,7 @@ static int vtimer_emulate_32(struct cpu_user_regs *regs, union hsr hsr)
> >  {
> >      struct vcpu *v = current;
> >      struct hsr_cp32 cp32 = hsr.cp32;
> > -    uint32_t *r = select_user_reg(regs, cp32.reg);
> > +    uint32_t *r = (uint32_t *)select_user_reg(regs, cp32.reg);
> >      s_time_t now;
> >
> >      switch ( hsr.bits & HSR_CP32_REGS_MASK )
> > @@ -151,8 +151,8 @@ static int vtimer_emulate_64(struct cpu_user_regs *regs, union hsr hsr)
> >  {
> >      struct vcpu *v = current;
> >      struct hsr_cp64 cp64 = hsr.cp64;
> > -    uint32_t *r1 = select_user_reg(regs, cp64.reg1);
> > -    uint32_t *r2 = select_user_reg(regs, cp64.reg2);
> > +    uint32_t *r1 = (uint32_t *)select_user_reg(regs, cp64.reg1);
> > +    uint32_t *r2 = (uint32_t *)select_user_reg(regs, cp64.reg2);
> >      uint64_t ticks;
> >      s_time_t now;
> >
> > diff --git a/xen/include/asm-arm/regs.h b/xen/include/asm-arm/regs.h
> > index 7486944..a723f92 100644
> > --- a/xen/include/asm-arm/regs.h
> > +++ b/xen/include/asm-arm/regs.h
> > @@ -34,7 +34,7 @@
> >   * Returns a pointer to the given register value in regs, taking the
> >   * processor mode (CPSR) into account.
> >   */
> > -extern uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg);
> > +extern register_t *select_user_reg(struct cpu_user_regs *regs, int reg);
> >
> >  #endif /* __ARM_REGS_H__ */
> >  /*
> > diff --git a/xen/include/asm-arm/types.h b/xen/include/asm-arm/types.h
> > index d3e16d8..9ca32f1 100644
> > --- a/xen/include/asm-arm/types.h
> > +++ b/xen/include/asm-arm/types.h
> > @@ -41,6 +41,8 @@ typedef u32 vaddr_t;
> >  typedef u64 paddr_t;
> >  #define INVALID_PADDR (~0ULL)
> >  #define PRIpaddr "016llx"
> > +typedef u32 register_t;
> > +#define PRIregister "x"
> >  #elif defined (CONFIG_ARM_64)
> >  typedef signed long s64;
> >  typedef unsigned long u64;
> > @@ -49,6 +51,8 @@ typedef u64 vaddr_t;
> >  typedef u64 paddr_t;
> >  #define INVALID_PADDR (~0UL)
> >  #define PRIpaddr "016lx"
> > +typedef u64 register_t;
> > +#define PRIregister "lx"
> >  #endif
> >
> >  typedef unsigned long size_t;
> > --
> > 1.7.2.5
> >
> 
> 
> 


* Re: [PATCH V2 20/46] xen: arm64: add to foreign struct checks
  2013-02-14 16:47 ` [PATCH V2 20/46] xen: arm64: add to foreign struct checks Ian Campbell
@ 2013-02-21 16:33   ` Stefano Stabellini
  0 siblings, 0 replies; 81+ messages in thread
From: Stefano Stabellini @ 2013-02-21 16:33 UTC (permalink / raw)
  To: Ian Campbell; +Cc: Stefano Stabellini, Tim (Xen.org), xen-devel

On Thu, 14 Feb 2013, Ian Campbell wrote:
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>

Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>


>  .gitignore                               |    1 +
>  tools/include/xen-foreign/Makefile       |    5 ++++-
>  tools/include/xen-foreign/mkheader.py    |   19 +++++++++++++++++++
>  tools/include/xen-foreign/reference.size |   20 ++++++++++----------
>  tools/include/xen-foreign/structs.py     |    1 +
>  5 files changed, 35 insertions(+), 11 deletions(-)
> 
> diff --git a/.gitignore b/.gitignore
> index 73c5b77..2242344 100644
> --- a/.gitignore
> +++ b/.gitignore
> @@ -364,6 +364,7 @@ tools/include/xen-foreign/structs.pyc
>  tools/include/xen-foreign/x86_32.h
>  tools/include/xen-foreign/x86_64.h
>  tools/include/xen-foreign/arm32.h
> +tools/include/xen-foreign/arm64.h
>  
>  .git
>  tools/misc/xen-hptool
> diff --git a/tools/include/xen-foreign/Makefile b/tools/include/xen-foreign/Makefile
> index 53cc6b4..06b844c 100644
> --- a/tools/include/xen-foreign/Makefile
> +++ b/tools/include/xen-foreign/Makefile
> @@ -3,7 +3,7 @@ include $(XEN_ROOT)/tools/Rules.mk
>  
>  ROOT = $(XEN_ROOT)/xen/include/public
>  
> -architectures := arm32 x86_32 x86_64
> +architectures := arm32 arm64 x86_32 x86_64
>  headers := $(patsubst %, %.h, $(architectures))
>  
>  .PHONY: all clean check-headers
> @@ -25,6 +25,9 @@ check-headers: checker
>  arm32.h: mkheader.py structs.py $(ROOT)/arch-arm.h $(ROOT)/xen.h
>  	$(PYTHON) $< $* $@ $(filter %.h,$^)
>  
> +arm64.h: mkheader.py structs.py $(ROOT)/arch-arm.h $(ROOT)/xen.h
> +	$(PYTHON) $< $* $@ $(filter %.h,$^)
> +
>  x86_32.h: mkheader.py structs.py $(ROOT)/arch-x86/xen-x86_32.h $(ROOT)/arch-x86/xen.h $(ROOT)/xen.h
>  	$(PYTHON) $< $* $@ $(filter %.h,$^)
>  
> diff --git a/tools/include/xen-foreign/mkheader.py b/tools/include/xen-foreign/mkheader.py
> index b7c34b1..4858687 100644
> --- a/tools/include/xen-foreign/mkheader.py
> +++ b/tools/include/xen-foreign/mkheader.py
> @@ -26,6 +26,22 @@ inttypes["arm32"] = {
>  header["arm32"] = """
>  #define __arm___ARM32 1
>  """;
> +footer["arm32"] = """
> +#undef __DECL_REG
> +"""
> +
> +inttypes["arm64"] = {
> +    "unsigned long" : "__danger_unsigned_long_on_arm64",
> +    "long"          : "__danger_long_on_arm64",
> +    "xen_pfn_t"     : "uint64_t",
> +    "xen_ulong_t"   : "uint64_t",
> +};
> +header["arm64"] = """
> +#define __aarch64___ARM64 1
> +""";
> +footer["arm64"] = """
> +#undef __DECL_REG
> +"""
>  
>  # x86_32
>  inttypes["x86_32"] = {
> @@ -59,6 +75,9 @@ header["x86_64"] = """
>  #endif
>  #define __x86_64___X86_64 1
>  """;
> +footer["x86_64"] = """
> +#undef __DECL_REG
> +"""
>  
>  ###########################################################################
>  # main
> diff --git a/tools/include/xen-foreign/reference.size b/tools/include/xen-foreign/reference.size
> index 0e5529d..7659c64 100644
> --- a/tools/include/xen-foreign/reference.size
> +++ b/tools/include/xen-foreign/reference.size
> @@ -1,13 +1,13 @@
>  
> -structs                   |   arm32  x86_32  x86_64
> +structs                   |   arm32   arm64  x86_32  x86_64
>  
> -start_info                |       -    1112    1168
> -trap_info                 |       -       8      16
> -cpu_user_regs             |     160      68     200
> -vcpu_guest_context        |     180    2800    5168
> -arch_vcpu_info            |       0      24      16
> -vcpu_time_info            |      32      32      32
> -vcpu_info                 |      48      64      64
> -arch_shared_info          |       0     268     280
> -shared_info               |    1088    2584    3368
> +start_info                |       -       -    1112    1168
> +trap_info                 |       -       -       8      16
> +cpu_user_regs             |     160     160      68     200
> +vcpu_guest_context        |     180     180    2800    5168
> +arch_vcpu_info            |       0       0      24      16
> +vcpu_time_info            |      32      32      32      32
> +vcpu_info                 |      48      48      64      64
> +arch_shared_info          |       0       0     268     280
> +shared_info               |    1088    1088    2584    3368
>  
> diff --git a/tools/include/xen-foreign/structs.py b/tools/include/xen-foreign/structs.py
> index 51a77c0..5aec2c5 100644
> --- a/tools/include/xen-foreign/structs.py
> +++ b/tools/include/xen-foreign/structs.py
> @@ -14,6 +14,7 @@ structs = [ "start_info",
>              "shared_info" ];
>  
>  defines = [ "__arm__",
> +            "__aarch64__",
>              "__i386__",
>              "__x86_64__",
>  
> -- 
> 1.7.2.5
> 


* Re: [PATCH V2 23/46] xen: arm: add register_t type, native register size for the hypervisor
  2013-02-21 16:23     ` Stefano Stabellini
@ 2013-02-21 16:46       ` Ian Campbell
  2013-02-21 16:49         ` Stefano Stabellini
  0 siblings, 1 reply; 81+ messages in thread
From: Ian Campbell @ 2013-02-21 16:46 UTC (permalink / raw)
  To: Stefano Stabellini; +Cc: Tim (Xen.org), xen-devel

On Thu, 2013-02-21 at 16:23 +0000, Stefano Stabellini wrote:
> On Thu, 21 Feb 2013, Ian Campbell wrote:
> > On Thu, 2013-02-14 at 16:47 +0000, Ian Campbell wrote:
> > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> > > Acked-by: Tim Deegan <tim@xen.org>
> > > but:
> > >         This is mostly a matter of coding taste, so I'd like Stefano's
> > >         ack/nack here as well.
> >
> > Stefano, any strong opinion?
> 
> Are there any concrete benefits in introducing register_t compared to
> using unsigned long?

It decouples us from assuming a compiler where unsigned long is the size
of a register ;-)

In the ARM port we have mostly been trying to define and use fixed size
and/or semantic types (uintXX_t, paddr_t etc) rather than compiler
variant things like int and long.

Ian.
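
To make the pattern concrete, here is a minimal standalone sketch. It uses
the standard <stdint.h>/<inttypes.h> names rather than Xen's internal
u32/u64 types, so it is illustrative only and not the hypervisor's actual
code:

    /* A register-width typedef plus a matching format macro lets the same
     * printf/printk call site build and print correctly on both 32-bit and
     * 64-bit targets. */
    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    #if UINTPTR_MAX == 0xffffffffu          /* 32-bit build */
    typedef uint32_t register_t;
    #define PRIregister PRIx32
    #else                                   /* 64-bit build */
    typedef uint64_t register_t;
    #define PRIregister PRIx64
    #endif

    int main(void)
    {
        register_t r = (register_t)0xdeadbeef;
        /* One format string regardless of the native register width. */
        printf("r0 = 0x%" PRIregister "\n", r);
        return 0;
    }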
> 
> 
> > >  xen/arch/arm/domain_build.c |    2 +-
> > >  xen/arch/arm/smpboot.c      |    2 +-
> > >  xen/arch/arm/traps.c        |   44 ++++++++++++++++++++++--------------------
> > >  xen/arch/arm/vgic.c         |   18 ++++++++--------
> > >  xen/arch/arm/vpl011.c       |    6 ++--
> > >  xen/arch/arm/vtimer.c       |    6 ++--
> > >  xen/include/asm-arm/regs.h  |    2 +-
> > >  xen/include/asm-arm/types.h |    4 +++
> > >  8 files changed, 45 insertions(+), 39 deletions(-)
> > >
> > > diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
> > > index 7403f1a..30d014a 100644
> > > --- a/xen/arch/arm/domain_build.c
> > > +++ b/xen/arch/arm/domain_build.c
> > > @@ -268,7 +268,7 @@ static int prepare_dtb(struct domain *d, struct kernel_info *kinfo)
> > >
> > >  static void dtb_load(struct kernel_info *kinfo)
> > >  {
> > > -    void * __user dtb_virt = (void *)(u32)kinfo->dtb_paddr;
> > > +    void * __user dtb_virt = (void *)(register_t)kinfo->dtb_paddr;
> > >
> > >      raw_copy_to_guest(dtb_virt, kinfo->fdt, fdt_totalsize(kinfo->fdt));
> > >      xfree(kinfo->fdt);
> > > diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c
> > > index 86379b7..d8eb5d3 100644
> > > --- a/xen/arch/arm/smpboot.c
> > > +++ b/xen/arch/arm/smpboot.c
> > > @@ -142,7 +142,7 @@ void __cpuinit start_secondary(unsigned long boot_phys_offset,
> > >      set_processor_id(cpuid);
> > >
> > >      /* Setup Hyp vector base */
> > > -    WRITE_CP32((uint32_t) hyp_traps_vector, HVBAR);
> > > +    WRITE_CP32((register_t) hyp_traps_vector, HVBAR);
> > >
> > >      mmu_init_secondary_cpu();
> > >      enable_vfp();
> > > diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
> > > index eaf1f52..0299b33 100644
> > > --- a/xen/arch/arm/traps.c
> > > +++ b/xen/arch/arm/traps.c
> > > @@ -68,7 +68,7 @@ static void print_xen_info(void)
> > >             debug_build() ? 'y' : 'n', print_tainted(taint_str));
> > >  }
> > >
> > > -uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg)
> > > +register_t *select_user_reg(struct cpu_user_regs *regs, int reg)
> > >  {
> > >      BUG_ON( !guest_mode(regs) );
> > >
> > > @@ -81,20 +81,20 @@ uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg)
> > >
> > >      switch ( reg ) {
> > >      case 0 ... 7: /* Unbanked registers */
> > > -        BUILD_BUG_ON(REGOFFS(r0) + 7*sizeof(uint32_t) != REGOFFS(r7));
> > > +        BUILD_BUG_ON(REGOFFS(r0) + 7*sizeof(register_t) != REGOFFS(r7));
> > >          return &regs->r0 + reg;
> > >      case 8 ... 12: /* Register banked in FIQ mode */
> > > -        BUILD_BUG_ON(REGOFFS(r8_fiq) + 4*sizeof(uint32_t) != REGOFFS(r12_fiq));
> > > +        BUILD_BUG_ON(REGOFFS(r8_fiq) + 4*sizeof(register_t) != REGOFFS(r12_fiq));
> > >          if ( fiq_mode(regs) )
> > >              return &regs->r8_fiq + reg - 8;
> > >          else
> > >              return &regs->r8 + reg - 8;
> > >      case 13 ... 14: /* Banked SP + LR registers */
> > > -        BUILD_BUG_ON(REGOFFS(sp_fiq) + 1*sizeof(uint32_t) != REGOFFS(lr_fiq));
> > > -        BUILD_BUG_ON(REGOFFS(sp_irq) + 1*sizeof(uint32_t) != REGOFFS(lr_irq));
> > > -        BUILD_BUG_ON(REGOFFS(sp_svc) + 1*sizeof(uint32_t) != REGOFFS(lr_svc));
> > > -        BUILD_BUG_ON(REGOFFS(sp_abt) + 1*sizeof(uint32_t) != REGOFFS(lr_abt));
> > > -        BUILD_BUG_ON(REGOFFS(sp_und) + 1*sizeof(uint32_t) != REGOFFS(lr_und));
> > > +        BUILD_BUG_ON(REGOFFS(sp_fiq) + 1*sizeof(register_t) != REGOFFS(lr_fiq));
> > > +        BUILD_BUG_ON(REGOFFS(sp_irq) + 1*sizeof(register_t) != REGOFFS(lr_irq));
> > > +        BUILD_BUG_ON(REGOFFS(sp_svc) + 1*sizeof(register_t) != REGOFFS(lr_svc));
> > > +        BUILD_BUG_ON(REGOFFS(sp_abt) + 1*sizeof(register_t) != REGOFFS(lr_abt));
> > > +        BUILD_BUG_ON(REGOFFS(sp_und) + 1*sizeof(register_t) != REGOFFS(lr_und));
> > >          switch ( regs->cpsr & PSR_MODE_MASK )
> > >          {
> > >          case PSR_MODE_USR:
> > > @@ -315,11 +315,11 @@ static void show_guest_stack(struct cpu_user_regs *regs)
> > >      printk("GUEST STACK GOES HERE\n");
> > >  }
> > >
> > > -#define STACK_BEFORE_EXCEPTION(regs) ((uint32_t*)(regs)->sp)
> > > +#define STACK_BEFORE_EXCEPTION(regs) ((register_t*)(regs)->sp)
> > >
> > >  static void show_trace(struct cpu_user_regs *regs)
> > >  {
> > > -    uint32_t *frame, next, addr, low, high;
> > > +    register_t *frame, next, addr, low, high;
> > >
> > >      printk("Xen call trace:\n   ");
> > >
> > > @@ -327,7 +327,7 @@ static void show_trace(struct cpu_user_regs *regs)
> > >      print_symbol(" %s\n   ", regs->pc);
> > >
> > >      /* Bounds for range of valid frame pointer. */
> > > -    low  = (uint32_t)(STACK_BEFORE_EXCEPTION(regs)/* - 2*/);
> > > +    low  = (register_t)(STACK_BEFORE_EXCEPTION(regs)/* - 2*/);
> > >      high = (low & ~(STACK_SIZE - 1)) +
> > >          (STACK_SIZE - sizeof(struct cpu_info));
> > >
> > > @@ -356,7 +356,7 @@ static void show_trace(struct cpu_user_regs *regs)
> > >              break;
> > >          {
> > >              /* Ordinary stack frame. */
> > > -            frame = (uint32_t *)next;
> > > +            frame = (register_t *)next;
> > >              next  = frame[-1];
> > >              addr  = frame[0];
> > >          }
> > > @@ -364,7 +364,7 @@ static void show_trace(struct cpu_user_regs *regs)
> > >          printk("[<%p>]", _p(addr));
> > >          print_symbol(" %s\n   ", addr);
> > >
> > > -        low = (uint32_t)&frame[1];
> > > +        low = (register_t)&frame[1];
> > >      }
> > >
> > >      printk("\n");
> > > @@ -372,7 +372,7 @@ static void show_trace(struct cpu_user_regs *regs)
> > >
> > >  void show_stack(struct cpu_user_regs *regs)
> > >  {
> > > -    uint32_t *stack = STACK_BEFORE_EXCEPTION(regs), addr;
> > > +    register_t *stack = STACK_BEFORE_EXCEPTION(regs), addr;
> > >      int i;
> > >
> > >      if ( guest_mode(regs) )
> > > @@ -486,20 +486,22 @@ static arm_hypercall_t arm_hypercall_table[] = {
> > >
> > >  static void do_debug_trap(struct cpu_user_regs *regs, unsigned int code)
> > >  {
> > > -    uint32_t reg, *r;
> > > +    register_t *r;
> > > +    uint32_t reg;
> > >      uint32_t domid = current->domain->domain_id;
> > >      switch ( code ) {
> > >      case 0xe0 ... 0xef:
> > >          reg = code - 0xe0;
> > >          r = select_user_reg(regs, reg);
> > > -        printk("DOM%d: R%d = %#010"PRIx32" at %#010"PRIx32"\n",
> > > +        printk("DOM%d: R%d = 0x%"PRIregister" at 0x%"PRIvaddr"\n",
> > >                 domid, reg, *r, regs->pc);
> > >          break;
> > >      case 0xfd:
> > > -        printk("DOM%d: Reached %#010"PRIx32"\n", domid, regs->pc);
> > > +        printk("DOM%d: Reached %"PRIvaddr"\n", domid, regs->pc);
> > >          break;
> > >      case 0xfe:
> > > -        printk("%c", (char)(regs->r0 & 0xff));
> > > +        r = select_user_reg(regs, 0);
> > > +        printk("%c", (char)(*r & 0xff));
> > >          break;
> > >      case 0xff:
> > >          printk("DOM%d: DEBUG\n", domid);
> > > @@ -561,7 +563,7 @@ static void do_cp15_32(struct cpu_user_regs *regs,
> > >                         union hsr hsr)
> > >  {
> > >      struct hsr_cp32 cp32 = hsr.cp32;
> > > -    uint32_t *r = select_user_reg(regs, cp32.reg);
> > > +    uint32_t *r = (uint32_t*)select_user_reg(regs, cp32.reg);
> > >
> > >      if ( !cp32.ccvalid ) {
> > >          dprintk(XENLOG_ERR, "cp_15(32): need to handle invalid condition codes\n");
> > > @@ -607,7 +609,7 @@ static void do_cp15_32(struct cpu_user_regs *regs,
> > >          BUG_ON(!vtimer_emulate(regs, hsr));
> > >          break;
> > >      default:
> > > -        printk("%s p15, %d, r%d, cr%d, cr%d, %d @ %#08x\n",
> > > +        printk("%s p15, %d, r%d, cr%d, cr%d, %d @ 0x%"PRIregister"\n",
> > >                 cp32.read ? "mrc" : "mcr",
> > >                 cp32.op1, cp32.reg, cp32.crn, cp32.crm, cp32.op2, regs->pc);
> > >          panic("unhandled 32-bit CP15 access %#x\n", hsr.bits & HSR_CP32_REGS_MASK);
> > > @@ -637,7 +639,7 @@ static void do_cp15_64(struct cpu_user_regs *regs,
> > >          BUG_ON(!vtimer_emulate(regs, hsr));
> > >          break;
> > >      default:
> > > -        printk("%s p15, %d, r%d, r%d, cr%d @ %#08x\n",
> > > +        printk("%s p15, %d, r%d, r%d, cr%d @ 0x%"PRIregister"\n",
> > >                 cp64.read ? "mrrc" : "mcrr",
> > >                 cp64.op1, cp64.reg1, cp64.reg2, cp64.crm, regs->pc);
> > >          panic("unhandled 64-bit CP15 access %#x\n", hsr.bits & HSR_CP64_REGS_MASK);
> > > diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> > > index 39b9775..57147d5 100644
> > > --- a/xen/arch/arm/vgic.c
> > > +++ b/xen/arch/arm/vgic.c
> > > @@ -160,7 +160,7 @@ static int vgic_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
> > >  {
> > >      struct hsr_dabt dabt = info->dabt;
> > >      struct cpu_user_regs *regs = guest_cpu_user_regs();
> > > -    uint32_t *r = select_user_reg(regs, dabt.reg);
> > > +    register_t *r = select_user_reg(regs, dabt.reg);
> > >      struct vgic_irq_rank *rank;
> > >      int offset = (int)(info->gpa - VGIC_DISTR_BASE_ADDRESS);
> > >      int gicd_reg = REG(offset);
> > > @@ -372,7 +372,7 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
> > >  {
> > >      struct hsr_dabt dabt = info->dabt;
> > >      struct cpu_user_regs *regs = guest_cpu_user_regs();
> > > -    uint32_t *r = select_user_reg(regs, dabt.reg);
> > > +    register_t *r = select_user_reg(regs, dabt.reg);
> > >      struct vgic_irq_rank *rank;
> > >      int offset = (int)(info->gpa - VGIC_DISTR_BASE_ADDRESS);
> > >      int gicd_reg = REG(offset);
> > > @@ -421,13 +421,13 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
> > >
> > >      case GICD_ISPENDR ... GICD_ISPENDRN:
> > >          if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width;
> > > -        printk("vGICD: unhandled %s write %#"PRIx32" to ISPENDR%d\n",
> > > +        printk("vGICD: unhandled %s write %#"PRIregister" to ISPENDR%d\n",
> > >                 dabt.size ? "word" : "byte", *r, gicd_reg - GICD_ISPENDR);
> > >          return 0;
> > >
> > >      case GICD_ICPENDR ... GICD_ICPENDRN:
> > >          if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width;
> > > -        printk("vGICD: unhandled %s write %#"PRIx32" to ICPENDR%d\n",
> > > +        printk("vGICD: unhandled %s write %#"PRIregister" to ICPENDR%d\n",
> > >                 dabt.size ? "word" : "byte", *r, gicd_reg - GICD_ICPENDR);
> > >          return 0;
> > >
> > > @@ -499,19 +499,19 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
> > >
> > >      case GICD_SGIR:
> > >          if ( dabt.size != 2 ) goto bad_width;
> > > -        printk("vGICD: unhandled write %#"PRIx32" to ICFGR%d\n",
> > > +        printk("vGICD: unhandled write %#"PRIregister" to ICFGR%d\n",
> > >                 *r, gicd_reg - GICD_ICFGR);
> > >          return 0;
> > >
> > >      case GICD_CPENDSGIR ... GICD_CPENDSGIRN:
> > >          if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width;
> > > -        printk("vGICD: unhandled %s write %#"PRIx32" to ICPENDSGIR%d\n",
> > > +        printk("vGICD: unhandled %s write %#"PRIregister" to ICPENDSGIR%d\n",
> > >                 dabt.size ? "word" : "byte", *r, gicd_reg - GICD_CPENDSGIR);
> > >          return 0;
> > >
> > >      case GICD_SPENDSGIR ... GICD_SPENDSGIRN:
> > >          if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width;
> > > -        printk("vGICD: unhandled %s write %#"PRIx32" to ISPENDSGIR%d\n",
> > > +        printk("vGICD: unhandled %s write %#"PRIregister" to ISPENDSGIR%d\n",
> > >                 dabt.size ? "word" : "byte", *r, gicd_reg - GICD_SPENDSGIR);
> > >          return 0;
> > >
> > > @@ -537,13 +537,13 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
> > >          goto write_ignore;
> > >
> > >      default:
> > > -        printk("vGICD: unhandled write r%d=%"PRIx32" offset %#08x\n",
> > > +        printk("vGICD: unhandled write r%d=%"PRIregister" offset %#08x\n",
> > >                 dabt.reg, *r, offset);
> > >          return 0;
> > >      }
> > >
> > >  bad_width:
> > > -    printk("vGICD: bad write width %d r%d=%"PRIx32" offset %#08x\n",
> > > +    printk("vGICD: bad write width %d r%d=%"PRIregister" offset %#08x\n",
> > >             dabt.size, dabt.reg, *r, offset);
> > >      domain_crash_synchronous();
> > >      return 0;
> > > diff --git a/xen/arch/arm/vpl011.c b/xen/arch/arm/vpl011.c
> > > index 7dcee90..db5094e 100644
> > > --- a/xen/arch/arm/vpl011.c
> > > +++ b/xen/arch/arm/vpl011.c
> > > @@ -92,7 +92,7 @@ static int uart0_mmio_read(struct vcpu *v, mmio_info_t *info)
> > >  {
> > >      struct hsr_dabt dabt = info->dabt;
> > >      struct cpu_user_regs *regs = guest_cpu_user_regs();
> > > -    uint32_t *r = select_user_reg(regs, dabt.reg);
> > > +    register_t *r = select_user_reg(regs, dabt.reg);
> > >      int offset = (int)(info->gpa - UART0_START);
> > >
> > >      switch ( offset )
> > > @@ -114,7 +114,7 @@ static int uart0_mmio_write(struct vcpu *v, mmio_info_t *info)
> > >  {
> > >      struct hsr_dabt dabt = info->dabt;
> > >      struct cpu_user_regs *regs = guest_cpu_user_regs();
> > > -    uint32_t *r = select_user_reg(regs, dabt.reg);
> > > +    register_t *r = select_user_reg(regs, dabt.reg);
> > >      int offset = (int)(info->gpa - UART0_START);
> > >
> > >      switch ( offset )
> > > @@ -127,7 +127,7 @@ static int uart0_mmio_write(struct vcpu *v, mmio_info_t *info)
> > >          /* Silently ignore */
> > >          return 1;
> > >      default:
> > > -        printk("VPL011: unhandled write r%d=%"PRIx32" offset %#08x\n",
> > > +        printk("VPL011: unhandled write r%d=%"PRIregister" offset %#08x\n",
> > >                 dabt.reg, *r, offset);
> > >          domain_crash_synchronous();
> > >      }
> > > diff --git a/xen/arch/arm/vtimer.c b/xen/arch/arm/vtimer.c
> > > index 85201b5..291b87e 100644
> > > --- a/xen/arch/arm/vtimer.c
> > > +++ b/xen/arch/arm/vtimer.c
> > > @@ -99,7 +99,7 @@ static int vtimer_emulate_32(struct cpu_user_regs *regs, union hsr hsr)
> > >  {
> > >      struct vcpu *v = current;
> > >      struct hsr_cp32 cp32 = hsr.cp32;
> > > -    uint32_t *r = select_user_reg(regs, cp32.reg);
> > > +    uint32_t *r = (uint32_t *)select_user_reg(regs, cp32.reg);
> > >      s_time_t now;
> > >
> > >      switch ( hsr.bits & HSR_CP32_REGS_MASK )
> > > @@ -151,8 +151,8 @@ static int vtimer_emulate_64(struct cpu_user_regs *regs, union hsr hsr)
> > >  {
> > >      struct vcpu *v = current;
> > >      struct hsr_cp64 cp64 = hsr.cp64;
> > > -    uint32_t *r1 = select_user_reg(regs, cp64.reg1);
> > > -    uint32_t *r2 = select_user_reg(regs, cp64.reg2);
> > > +    uint32_t *r1 = (uint32_t *)select_user_reg(regs, cp64.reg1);
> > > +    uint32_t *r2 = (uint32_t *)select_user_reg(regs, cp64.reg2);
> > >      uint64_t ticks;
> > >      s_time_t now;
> > >
> > > diff --git a/xen/include/asm-arm/regs.h b/xen/include/asm-arm/regs.h
> > > index 7486944..a723f92 100644
> > > --- a/xen/include/asm-arm/regs.h
> > > +++ b/xen/include/asm-arm/regs.h
> > > @@ -34,7 +34,7 @@
> > >   * Returns a pointer to the given register value in regs, taking the
> > >   * processor mode (CPSR) into account.
> > >   */
> > > -extern uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg);
> > > +extern register_t *select_user_reg(struct cpu_user_regs *regs, int reg);
> > >
> > >  #endif /* __ARM_REGS_H__ */
> > >  /*
> > > diff --git a/xen/include/asm-arm/types.h b/xen/include/asm-arm/types.h
> > > index d3e16d8..9ca32f1 100644
> > > --- a/xen/include/asm-arm/types.h
> > > +++ b/xen/include/asm-arm/types.h
> > > @@ -41,6 +41,8 @@ typedef u32 vaddr_t;
> > >  typedef u64 paddr_t;
> > >  #define INVALID_PADDR (~0ULL)
> > >  #define PRIpaddr "016llx"
> > > +typedef u32 register_t;
> > > +#define PRIregister "x"
> > >  #elif defined (CONFIG_ARM_64)
> > >  typedef signed long s64;
> > >  typedef unsigned long u64;
> > > @@ -49,6 +51,8 @@ typedef u64 vaddr_t;
> > >  typedef u64 paddr_t;
> > >  #define INVALID_PADDR (~0UL)
> > >  #define PRIpaddr "016lx"
> > > +typedef u64 register_t;
> > > +#define PRIregister "lx"
> > >  #endif
> > >
> > >  typedef unsigned long size_t;
> > > --
> > > 1.7.2.5
> > >
> >
> >
> >


* Re: [PATCH V2 23/46] xen: arm: add register_t type, native register size for the hypervisor
  2013-02-21 16:46       ` Ian Campbell
@ 2013-02-21 16:49         ` Stefano Stabellini
  0 siblings, 0 replies; 81+ messages in thread
From: Stefano Stabellini @ 2013-02-21 16:49 UTC (permalink / raw)
  To: Ian Campbell; +Cc: xen-devel, Tim (Xen.org), Stefano Stabellini

On Thu, 21 Feb 2013, Ian Campbell wrote:
> On Thu, 2013-02-21 at 16:23 +0000, Stefano Stabellini wrote:
> > On Thu, 21 Feb 2013, Ian Campbell wrote:
> > > On Thu, 2013-02-14 at 16:47 +0000, Ian Campbell wrote:
> > > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> > > > Acked-by: Tim Deegan <tim@xen.org>
> > > > but:
> > > >         This is mostly a matter of coding taste, so I'd like Stefano's
> > > >         ack/nack here as well.
> > >
> > > Stefano, any strong opinion?
> >
> > Are there any concrete benefits in introducing register_t compared to
> > using unsigned long?
> 
> It decouples us from assuming a compiler where unsigned long is the size
> of a register ;-)
> 
> In the ARM port we have mostly been trying to define and use fixed size
> and/or semantic types (uintXX_t, paddr_t etc) rather than compiler
> variant things like int and long.

OK.

Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>


> >
> >
> > > >  xen/arch/arm/domain_build.c |    2 +-
> > > >  xen/arch/arm/smpboot.c      |    2 +-
> > > >  xen/arch/arm/traps.c        |   44 ++++++++++++++++++++++--------------------
> > > >  xen/arch/arm/vgic.c         |   18 ++++++++--------
> > > >  xen/arch/arm/vpl011.c       |    6 ++--
> > > >  xen/arch/arm/vtimer.c       |    6 ++--
> > > >  xen/include/asm-arm/regs.h  |    2 +-
> > > >  xen/include/asm-arm/types.h |    4 +++
> > > >  8 files changed, 45 insertions(+), 39 deletions(-)
> > > >
> > > > diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
> > > > index 7403f1a..30d014a 100644
> > > > --- a/xen/arch/arm/domain_build.c
> > > > +++ b/xen/arch/arm/domain_build.c
> > > > @@ -268,7 +268,7 @@ static int prepare_dtb(struct domain *d, struct kernel_info *kinfo)
> > > >
> > > >  static void dtb_load(struct kernel_info *kinfo)
> > > >  {
> > > > -    void * __user dtb_virt = (void *)(u32)kinfo->dtb_paddr;
> > > > +    void * __user dtb_virt = (void *)(register_t)kinfo->dtb_paddr;
> > > >
> > > >      raw_copy_to_guest(dtb_virt, kinfo->fdt, fdt_totalsize(kinfo->fdt));
> > > >      xfree(kinfo->fdt);
> > > > diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c
> > > > index 86379b7..d8eb5d3 100644
> > > > --- a/xen/arch/arm/smpboot.c
> > > > +++ b/xen/arch/arm/smpboot.c
> > > > @@ -142,7 +142,7 @@ void __cpuinit start_secondary(unsigned long boot_phys_offset,
> > > >      set_processor_id(cpuid);
> > > >
> > > >      /* Setup Hyp vector base */
> > > > -    WRITE_CP32((uint32_t) hyp_traps_vector, HVBAR);
> > > > +    WRITE_CP32((register_t) hyp_traps_vector, HVBAR);
> > > >
> > > >      mmu_init_secondary_cpu();
> > > >      enable_vfp();
> > > > diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
> > > > index eaf1f52..0299b33 100644
> > > > --- a/xen/arch/arm/traps.c
> > > > +++ b/xen/arch/arm/traps.c
> > > > @@ -68,7 +68,7 @@ static void print_xen_info(void)
> > > >             debug_build() ? 'y' : 'n', print_tainted(taint_str));
> > > >  }
> > > >
> > > > -uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg)
> > > > +register_t *select_user_reg(struct cpu_user_regs *regs, int reg)
> > > >  {
> > > >      BUG_ON( !guest_mode(regs) );
> > > >
> > > > @@ -81,20 +81,20 @@ uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg)
> > > >
> > > >      switch ( reg ) {
> > > >      case 0 ... 7: /* Unbanked registers */
> > > > -        BUILD_BUG_ON(REGOFFS(r0) + 7*sizeof(uint32_t) != REGOFFS(r7));
> > > > +        BUILD_BUG_ON(REGOFFS(r0) + 7*sizeof(register_t) != REGOFFS(r7));
> > > >          return &regs->r0 + reg;
> > > >      case 8 ... 12: /* Register banked in FIQ mode */
> > > > -        BUILD_BUG_ON(REGOFFS(r8_fiq) + 4*sizeof(uint32_t) != REGOFFS(r12_fiq));
> > > > +        BUILD_BUG_ON(REGOFFS(r8_fiq) + 4*sizeof(register_t) != REGOFFS(r12_fiq));
> > > >          if ( fiq_mode(regs) )
> > > >              return &regs->r8_fiq + reg - 8;
> > > >          else
> > > >              return &regs->r8 + reg - 8;
> > > >      case 13 ... 14: /* Banked SP + LR registers */
> > > > -        BUILD_BUG_ON(REGOFFS(sp_fiq) + 1*sizeof(uint32_t) != REGOFFS(lr_fiq));
> > > > -        BUILD_BUG_ON(REGOFFS(sp_irq) + 1*sizeof(uint32_t) != REGOFFS(lr_irq));
> > > > -        BUILD_BUG_ON(REGOFFS(sp_svc) + 1*sizeof(uint32_t) != REGOFFS(lr_svc));
> > > > -        BUILD_BUG_ON(REGOFFS(sp_abt) + 1*sizeof(uint32_t) != REGOFFS(lr_abt));
> > > > -        BUILD_BUG_ON(REGOFFS(sp_und) + 1*sizeof(uint32_t) != REGOFFS(lr_und));
> > > > +        BUILD_BUG_ON(REGOFFS(sp_fiq) + 1*sizeof(register_t) != REGOFFS(lr_fiq));
> > > > +        BUILD_BUG_ON(REGOFFS(sp_irq) + 1*sizeof(register_t) != REGOFFS(lr_irq));
> > > > +        BUILD_BUG_ON(REGOFFS(sp_svc) + 1*sizeof(register_t) != REGOFFS(lr_svc));
> > > > +        BUILD_BUG_ON(REGOFFS(sp_abt) + 1*sizeof(register_t) != REGOFFS(lr_abt));
> > > > +        BUILD_BUG_ON(REGOFFS(sp_und) + 1*sizeof(register_t) != REGOFFS(lr_und));
> > > >          switch ( regs->cpsr & PSR_MODE_MASK )
> > > >          {
> > > >          case PSR_MODE_USR:
> > > > @@ -315,11 +315,11 @@ static void show_guest_stack(struct cpu_user_regs *regs)
> > > >      printk("GUEST STACK GOES HERE\n");
> > > >  }
> > > >
> > > > -#define STACK_BEFORE_EXCEPTION(regs) ((uint32_t*)(regs)->sp)
> > > > +#define STACK_BEFORE_EXCEPTION(regs) ((register_t*)(regs)->sp)
> > > >
> > > >  static void show_trace(struct cpu_user_regs *regs)
> > > >  {
> > > > -    uint32_t *frame, next, addr, low, high;
> > > > +    register_t *frame, next, addr, low, high;
> > > >
> > > >      printk("Xen call trace:\n   ");
> > > >
> > > > @@ -327,7 +327,7 @@ static void show_trace(struct cpu_user_regs *regs)
> > > >      print_symbol(" %s\n   ", regs->pc);
> > > >
> > > >      /* Bounds for range of valid frame pointer. */
> > > > -    low  = (uint32_t)(STACK_BEFORE_EXCEPTION(regs)/* - 2*/);
> > > > +    low  = (register_t)(STACK_BEFORE_EXCEPTION(regs)/* - 2*/);
> > > >      high = (low & ~(STACK_SIZE - 1)) +
> > > >          (STACK_SIZE - sizeof(struct cpu_info));
> > > >
> > > > @@ -356,7 +356,7 @@ static void show_trace(struct cpu_user_regs *regs)
> > > >              break;
> > > >          {
> > > >              /* Ordinary stack frame. */
> > > > -            frame = (uint32_t *)next;
> > > > +            frame = (register_t *)next;
> > > >              next  = frame[-1];
> > > >              addr  = frame[0];
> > > >          }
> > > > @@ -364,7 +364,7 @@ static void show_trace(struct cpu_user_regs *regs)
> > > >          printk("[<%p>]", _p(addr));
> > > >          print_symbol(" %s\n   ", addr);
> > > >
> > > > -        low = (uint32_t)&frame[1];
> > > > +        low = (register_t)&frame[1];
> > > >      }
> > > >
> > > >      printk("\n");
> > > > @@ -372,7 +372,7 @@ static void show_trace(struct cpu_user_regs *regs)
> > > >
> > > >  void show_stack(struct cpu_user_regs *regs)
> > > >  {
> > > > -    uint32_t *stack = STACK_BEFORE_EXCEPTION(regs), addr;
> > > > +    register_t *stack = STACK_BEFORE_EXCEPTION(regs), addr;
> > > >      int i;
> > > >
> > > >      if ( guest_mode(regs) )
> > > > @@ -486,20 +486,22 @@ static arm_hypercall_t arm_hypercall_table[] = {
> > > >
> > > >  static void do_debug_trap(struct cpu_user_regs *regs, unsigned int code)
> > > >  {
> > > > -    uint32_t reg, *r;
> > > > +    register_t *r;
> > > > +    uint32_t reg;
> > > >      uint32_t domid = current->domain->domain_id;
> > > >      switch ( code ) {
> > > >      case 0xe0 ... 0xef:
> > > >          reg = code - 0xe0;
> > > >          r = select_user_reg(regs, reg);
> > > > -        printk("DOM%d: R%d = %#010"PRIx32" at %#010"PRIx32"\n",
> > > > +        printk("DOM%d: R%d = 0x%"PRIregister" at 0x%"PRIvaddr"\n",
> > > >                 domid, reg, *r, regs->pc);
> > > >          break;
> > > >      case 0xfd:
> > > > -        printk("DOM%d: Reached %#010"PRIx32"\n", domid, regs->pc);
> > > > +        printk("DOM%d: Reached %"PRIvaddr"\n", domid, regs->pc);
> > > >          break;
> > > >      case 0xfe:
> > > > -        printk("%c", (char)(regs->r0 & 0xff));
> > > > +        r = select_user_reg(regs, 0);
> > > > +        printk("%c", (char)(*r & 0xff));
> > > >          break;
> > > >      case 0xff:
> > > >          printk("DOM%d: DEBUG\n", domid);
> > > > @@ -561,7 +563,7 @@ static void do_cp15_32(struct cpu_user_regs *regs,
> > > >                         union hsr hsr)
> > > >  {
> > > >      struct hsr_cp32 cp32 = hsr.cp32;
> > > > -    uint32_t *r = select_user_reg(regs, cp32.reg);
> > > > +    uint32_t *r = (uint32_t*)select_user_reg(regs, cp32.reg);
> > > >
> > > >      if ( !cp32.ccvalid ) {
> > > >          dprintk(XENLOG_ERR, "cp_15(32): need to handle invalid condition codes\n");
> > > > @@ -607,7 +609,7 @@ static void do_cp15_32(struct cpu_user_regs *regs,
> > > >          BUG_ON(!vtimer_emulate(regs, hsr));
> > > >          break;
> > > >      default:
> > > > -        printk("%s p15, %d, r%d, cr%d, cr%d, %d @ %#08x\n",
> > > > +        printk("%s p15, %d, r%d, cr%d, cr%d, %d @ 0x%"PRIregister"\n",
> > > >                 cp32.read ? "mrc" : "mcr",
> > > >                 cp32.op1, cp32.reg, cp32.crn, cp32.crm, cp32.op2, regs->pc);
> > > >          panic("unhandled 32-bit CP15 access %#x\n", hsr.bits & HSR_CP32_REGS_MASK);
> > > > @@ -637,7 +639,7 @@ static void do_cp15_64(struct cpu_user_regs *regs,
> > > >          BUG_ON(!vtimer_emulate(regs, hsr));
> > > >          break;
> > > >      default:
> > > > -        printk("%s p15, %d, r%d, r%d, cr%d @ %#08x\n",
> > > > +        printk("%s p15, %d, r%d, r%d, cr%d @ 0x%"PRIregister"\n",
> > > >                 cp64.read ? "mrrc" : "mcrr",
> > > >                 cp64.op1, cp64.reg1, cp64.reg2, cp64.crm, regs->pc);
> > > >          panic("unhandled 64-bit CP15 access %#x\n", hsr.bits & HSR_CP64_REGS_MASK);
> > > > diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> > > > index 39b9775..57147d5 100644
> > > > --- a/xen/arch/arm/vgic.c
> > > > +++ b/xen/arch/arm/vgic.c
> > > > @@ -160,7 +160,7 @@ static int vgic_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
> > > >  {
> > > >      struct hsr_dabt dabt = info->dabt;
> > > >      struct cpu_user_regs *regs = guest_cpu_user_regs();
> > > > -    uint32_t *r = select_user_reg(regs, dabt.reg);
> > > > +    register_t *r = select_user_reg(regs, dabt.reg);
> > > >      struct vgic_irq_rank *rank;
> > > >      int offset = (int)(info->gpa - VGIC_DISTR_BASE_ADDRESS);
> > > >      int gicd_reg = REG(offset);
> > > > @@ -372,7 +372,7 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
> > > >  {
> > > >      struct hsr_dabt dabt = info->dabt;
> > > >      struct cpu_user_regs *regs = guest_cpu_user_regs();
> > > > -    uint32_t *r = select_user_reg(regs, dabt.reg);
> > > > +    register_t *r = select_user_reg(regs, dabt.reg);
> > > >      struct vgic_irq_rank *rank;
> > > >      int offset = (int)(info->gpa - VGIC_DISTR_BASE_ADDRESS);
> > > >      int gicd_reg = REG(offset);
> > > > @@ -421,13 +421,13 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
> > > >
> > > >      case GICD_ISPENDR ... GICD_ISPENDRN:
> > > >          if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width;
> > > > -        printk("vGICD: unhandled %s write %#"PRIx32" to ISPENDR%d\n",
> > > > +        printk("vGICD: unhandled %s write %#"PRIregister" to ISPENDR%d\n",
> > > >                 dabt.size ? "word" : "byte", *r, gicd_reg - GICD_ISPENDR);
> > > >          return 0;
> > > >
> > > >      case GICD_ICPENDR ... GICD_ICPENDRN:
> > > >          if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width;
> > > > -        printk("vGICD: unhandled %s write %#"PRIx32" to ICPENDR%d\n",
> > > > +        printk("vGICD: unhandled %s write %#"PRIregister" to ICPENDR%d\n",
> > > >                 dabt.size ? "word" : "byte", *r, gicd_reg - GICD_ICPENDR);
> > > >          return 0;
> > > >
> > > > @@ -499,19 +499,19 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
> > > >
> > > >      case GICD_SGIR:
> > > >          if ( dabt.size != 2 ) goto bad_width;
> > > > -        printk("vGICD: unhandled write %#"PRIx32" to ICFGR%d\n",
> > > > +        printk("vGICD: unhandled write %#"PRIregister" to ICFGR%d\n",
> > > >                 *r, gicd_reg - GICD_ICFGR);
> > > >          return 0;
> > > >
> > > >      case GICD_CPENDSGIR ... GICD_CPENDSGIRN:
> > > >          if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width;
> > > > -        printk("vGICD: unhandled %s write %#"PRIx32" to ICPENDSGIR%d\n",
> > > > +        printk("vGICD: unhandled %s write %#"PRIregister" to ICPENDSGIR%d\n",
> > > >                 dabt.size ? "word" : "byte", *r, gicd_reg - GICD_CPENDSGIR);
> > > >          return 0;
> > > >
> > > >      case GICD_SPENDSGIR ... GICD_SPENDSGIRN:
> > > >          if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width;
> > > > -        printk("vGICD: unhandled %s write %#"PRIx32" to ISPENDSGIR%d\n",
> > > > +        printk("vGICD: unhandled %s write %#"PRIregister" to ISPENDSGIR%d\n",
> > > >                 dabt.size ? "word" : "byte", *r, gicd_reg - GICD_SPENDSGIR);
> > > >          return 0;
> > > >
> > > > @@ -537,13 +537,13 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
> > > >          goto write_ignore;
> > > >
> > > >      default:
> > > > -        printk("vGICD: unhandled write r%d=%"PRIx32" offset %#08x\n",
> > > > +        printk("vGICD: unhandled write r%d=%"PRIregister" offset %#08x\n",
> > > >                 dabt.reg, *r, offset);
> > > >          return 0;
> > > >      }
> > > >
> > > >  bad_width:
> > > > -    printk("vGICD: bad write width %d r%d=%"PRIx32" offset %#08x\n",
> > > > +    printk("vGICD: bad write width %d r%d=%"PRIregister" offset %#08x\n",
> > > >             dabt.size, dabt.reg, *r, offset);
> > > >      domain_crash_synchronous();
> > > >      return 0;
> > > > diff --git a/xen/arch/arm/vpl011.c b/xen/arch/arm/vpl011.c
> > > > index 7dcee90..db5094e 100644
> > > > --- a/xen/arch/arm/vpl011.c
> > > > +++ b/xen/arch/arm/vpl011.c
> > > > @@ -92,7 +92,7 @@ static int uart0_mmio_read(struct vcpu *v, mmio_info_t *info)
> > > >  {
> > > >      struct hsr_dabt dabt = info->dabt;
> > > >      struct cpu_user_regs *regs = guest_cpu_user_regs();
> > > > -    uint32_t *r = select_user_reg(regs, dabt.reg);
> > > > +    register_t *r = select_user_reg(regs, dabt.reg);
> > > >      int offset = (int)(info->gpa - UART0_START);
> > > >
> > > >      switch ( offset )
> > > > @@ -114,7 +114,7 @@ static int uart0_mmio_write(struct vcpu *v, mmio_info_t *info)
> > > >  {
> > > >      struct hsr_dabt dabt = info->dabt;
> > > >      struct cpu_user_regs *regs = guest_cpu_user_regs();
> > > > -    uint32_t *r = select_user_reg(regs, dabt.reg);
> > > > +    register_t *r = select_user_reg(regs, dabt.reg);
> > > >      int offset = (int)(info->gpa - UART0_START);
> > > >
> > > >      switch ( offset )
> > > > @@ -127,7 +127,7 @@ static int uart0_mmio_write(struct vcpu *v, mmio_info_t *info)
> > > >          /* Silently ignore */
> > > >          return 1;
> > > >      default:
> > > > -        printk("VPL011: unhandled write r%d=%"PRIx32" offset %#08x\n",
> > > > +        printk("VPL011: unhandled write r%d=%"PRIregister" offset %#08x\n",
> > > >                 dabt.reg, *r, offset);
> > > >          domain_crash_synchronous();
> > > >      }
> > > > diff --git a/xen/arch/arm/vtimer.c b/xen/arch/arm/vtimer.c
> > > > index 85201b5..291b87e 100644
> > > > --- a/xen/arch/arm/vtimer.c
> > > > +++ b/xen/arch/arm/vtimer.c
> > > > @@ -99,7 +99,7 @@ static int vtimer_emulate_32(struct cpu_user_regs *regs, union hsr hsr)
> > > >  {
> > > >      struct vcpu *v = current;
> > > >      struct hsr_cp32 cp32 = hsr.cp32;
> > > > -    uint32_t *r = select_user_reg(regs, cp32.reg);
> > > > +    uint32_t *r = (uint32_t *)select_user_reg(regs, cp32.reg);
> > > >      s_time_t now;
> > > >
> > > >      switch ( hsr.bits & HSR_CP32_REGS_MASK )
> > > > @@ -151,8 +151,8 @@ static int vtimer_emulate_64(struct cpu_user_regs *regs, union hsr hsr)
> > > >  {
> > > >      struct vcpu *v = current;
> > > >      struct hsr_cp64 cp64 = hsr.cp64;
> > > > -    uint32_t *r1 = select_user_reg(regs, cp64.reg1);
> > > > -    uint32_t *r2 = select_user_reg(regs, cp64.reg2);
> > > > +    uint32_t *r1 = (uint32_t *)select_user_reg(regs, cp64.reg1);
> > > > +    uint32_t *r2 = (uint32_t *)select_user_reg(regs, cp64.reg2);
> > > >      uint64_t ticks;
> > > >      s_time_t now;
> > > >
> > > > diff --git a/xen/include/asm-arm/regs.h b/xen/include/asm-arm/regs.h
> > > > index 7486944..a723f92 100644
> > > > --- a/xen/include/asm-arm/regs.h
> > > > +++ b/xen/include/asm-arm/regs.h
> > > > @@ -34,7 +34,7 @@
> > > >   * Returns a pointer to the given register value in regs, taking the
> > > >   * processor mode (CPSR) into account.
> > > >   */
> > > > -extern uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg);
> > > > +extern register_t *select_user_reg(struct cpu_user_regs *regs, int reg);
> > > >
> > > >  #endif /* __ARM_REGS_H__ */
> > > >  /*
> > > > diff --git a/xen/include/asm-arm/types.h b/xen/include/asm-arm/types.h
> > > > index d3e16d8..9ca32f1 100644
> > > > --- a/xen/include/asm-arm/types.h
> > > > +++ b/xen/include/asm-arm/types.h
> > > > @@ -41,6 +41,8 @@ typedef u32 vaddr_t;
> > > >  typedef u64 paddr_t;
> > > >  #define INVALID_PADDR (~0ULL)
> > > >  #define PRIpaddr "016llx"
> > > > +typedef u32 register_t;
> > > > +#define PRIregister "x"
> > > >  #elif defined (CONFIG_ARM_64)
> > > >  typedef signed long s64;
> > > >  typedef unsigned long u64;
> > > > @@ -49,6 +51,8 @@ typedef u64 vaddr_t;
> > > >  typedef u64 paddr_t;
> > > >  #define INVALID_PADDR (~0UL)
> > > >  #define PRIpaddr "016lx"
> > > > +typedef u64 register_t;
> > > > +#define PRIregister "lx"
> > > >  #endif
> > > >
> > > >  typedef unsigned long size_t;
> > > > --
> > > > 1.7.2.5
> > > >
> > >
> > >
> > >
> 
> 
> 


* Re: [PATCH] xen: arm: implement cpuinfo
  2013-02-15 12:06 ` [PATCH] xen: arm: implement cpuinfo Ian Campbell
@ 2013-02-21 17:19   ` Tim Deegan
  0 siblings, 0 replies; 81+ messages in thread
From: Tim Deegan @ 2013-02-21 17:19 UTC (permalink / raw)
  To: Ian Campbell; +Cc: Stefano Stabellini, xen-devel

At 12:06 +0000 on 15 Feb (1360930007), Ian Campbell wrote:
> On Thu, 2013-02-14 at 16:47 +0000, Ian Campbell wrote:
> > You can also run 32-bit on the V8 model (using -C
> > cluster.cpu0.CONFIG64=0) if you comment out the ThumbEE handling in
> > ctxt_switch_from and ctxt_switch_to (making this dynamic is on my TODO
> > list). 
> 
> 8<-----------------------------------------------
> 
> From e45c4e4f45e72e404052629c619af8810dadd76f Mon Sep 17 00:00:00 2001
> From: Ian Campbell <ian.campbell@citrix.com>
> Date: Fri, 15 Feb 2013 10:30:48 +0000
> Subject: [PATCH] xen: arm: implement cpuinfo
> 
> Use it to:
> 
>  - Only context switch ThumbEE state if the processor implements it. In
>    particular the ARMv8 FastModels do not.
>  - Detect the generic timer, and therefore call identify_cpu before
>    init_xen_time.
> 
> Also improve the boot time messages a bit.
> 
> I haven't added decoding for all of the CPUID words; it seems like overkill
> for the moment.
> 
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>

Acked-by: Tim Deegan <tim@xen.org>
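
For readers unfamiliar with the ThumbEE gating mentioned in the commit
message, the shape of the approach is to probe the CPU ID registers once at
boot and only save/restore ThumbEE state when it is actually implemented.
A rough sketch follows; the helper names are hypothetical and not the
patch's actual code (ID_PFR0 bits [15:12] are the architectural "State3"
field, which is non-zero when ThumbEE is present):

    /* Feature-gated ThumbEE context switching, sketched with hypothetical
     * helpers (read_id_pfr0, save_thumbee, restore_thumbee). */
    #include <stdbool.h>
    #include <stdint.h>

    extern uint32_t read_id_pfr0(void);          /* assumed MRC p15,0,<Rt>,c0,c1,0 wrapper */
    extern void save_thumbee(void *prev_state);  /* assumed TEECR/TEEHBR save */
    extern void restore_thumbee(const void *next_state);

    static bool cpu_has_thumbee;

    /* Run once per CPU at boot, before the first context switch. */
    void identify_cpu_sketch(void)
    {
        /* ID_PFR0[15:12] ("State3") is non-zero when ThumbEE is implemented. */
        cpu_has_thumbee = ((read_id_pfr0() >> 12) & 0xf) != 0;
    }

    void ctxt_switch_sketch(void *prev, void *next)
    {
        if ( cpu_has_thumbee )
        {
            save_thumbee(prev);
            restore_thumbee(next);
        }
    }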

Thread overview: 81+ messages
2013-02-14 16:47 [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
2013-02-14 16:47 ` [PATCH V2 01/46] xen: arm32: Don't bother with the bootloader provided ARM-Linux machine type Ian Campbell
2013-02-15 13:36   ` Ian Campbell
2013-02-14 16:47 ` [PATCH V2 02/46] xen: arm: rename atag_paddr argument fdt_paddr Ian Campbell
2013-02-15 13:36   ` Ian Campbell
2013-02-14 16:47 ` [PATCH V2 03/46] xen: arm: do not pass a machine ID to dom0 Ian Campbell
2013-02-15 13:37   ` Ian Campbell
2013-02-14 16:47 ` [PATCH V2 04/46] arm: avoid inline asm for dsb, isb, wfi and sev Ian Campbell
2013-02-21 14:51   ` Tim Deegan
2013-02-14 16:47 ` [PATCH V2 05/46] xen: arm64: initial build + config changes, start of day code Ian Campbell
2013-02-21 14:56   ` Tim Deegan
2013-02-21 15:26     ` Ian Campbell
2013-02-21 16:03       ` Ian Campbell
2013-02-21 16:08         ` Tim Deegan
2013-02-14 16:47 ` [PATCH V2 06/46] xen: arm64: basic config and types headers Ian Campbell
2013-02-14 16:47 ` [PATCH V2 07/46] xen: arm64: spinlocks Ian Campbell
2013-02-14 16:47 ` [PATCH V2 08/46] xen: arm64: atomics Ian Campbell
2013-02-21 14:57   ` Tim Deegan
2013-02-14 16:47 ` [PATCH V2 09/46] xen: arm: refactor co-pro and sysreg reg handling Ian Campbell
2013-02-14 16:47 ` [PATCH V2 10/46] xen: arm64: TLB flushes Ian Campbell
2013-02-21 15:00   ` Tim Deegan
2013-02-14 16:47 ` [PATCH V2 11/46] xen: arm64: PTE handling Ian Campbell
2013-02-14 16:47 ` [PATCH V2 12/46] xen: arm64: dcache flush Ian Campbell
2013-02-14 16:47 ` [PATCH V2 13/46] xen: arm64: address translation Ian Campbell
2013-02-14 16:47 ` [PATCH V2 14/46] xen: arm64: barriers and wait for interrupts/events Ian Campbell
2013-02-21 15:01   ` Tim Deegan
2013-02-21 15:27     ` Ian Campbell
2013-02-21 15:58       ` Ian Campbell
2013-02-14 16:47 ` [PATCH V2 15/46] xen: arm64: xchg and cmpxchg Ian Campbell
2013-02-14 16:47 ` [PATCH V2 16/46] xen: arm64: interrupt/abort mask/unmask Ian Campbell
2013-02-14 16:47 ` [PATCH V2 17/46] xen: arm64: div64 Ian Campbell
2013-02-14 16:47 ` [PATCH V2 18/46] xen: arm64: start of day changes to setup.c Ian Campbell
2013-02-14 16:47 ` [PATCH V2 19/46] xen: arm64: changes to setup_pagetables and mm.c Ian Campbell
2013-02-21 15:04   ` Tim Deegan
2013-02-14 16:47 ` [PATCH V2 20/46] xen: arm64: add to foreign struct checks Ian Campbell
2013-02-21 16:33   ` Stefano Stabellini
2013-02-14 16:47 ` [PATCH V2 21/46] xen: arm: extend HSR struct definitions to 64-bit Ian Campbell
2013-02-14 16:47 ` [PATCH V2 22/46] xen: arm: use vaddr_t more widely Ian Campbell
2013-02-14 16:47 ` [PATCH V2 23/46] xen: arm: add register_t type, native register size for the hypervisor Ian Campbell
2013-02-21 16:01   ` Ian Campbell
2013-02-21 16:23     ` Stefano Stabellini
2013-02-21 16:46       ` Ian Campbell
2013-02-21 16:49         ` Stefano Stabellini
2013-02-14 16:47 ` [PATCH V2 24/46] xen: arm: separate guest user regs from internal guest state Ian Campbell
2013-02-14 16:47 ` [PATCH V2 25/46] xen: arm64: add guest type to domain field Ian Campbell
2013-02-21 15:05   ` Tim Deegan
2013-02-14 16:47 ` [PATCH V2 26/46] xen: arm: move arm32 specific trap handlers to xen/arch/arm/arm32 Ian Campbell
2013-02-14 16:47 ` [PATCH V2 27/46] xen: arm: arm64 trap handling Ian Campbell
2013-02-21 15:10   ` Tim Deegan
2013-02-21 15:25     ` Ian Campbell
2013-02-21 15:36       ` Tim Deegan
2013-02-21 16:02         ` Ian Campbell
2013-02-21 16:07           ` Tim Deegan
2013-02-21 16:08             ` Ian Campbell
2013-02-21 15:49       ` Ian Campbell
2013-02-21 15:53         ` Tim Deegan
2013-02-14 16:47 ` [PATCH V2 28/46] xen: arm: pcpu context switch Ian Campbell
2013-02-14 16:47 ` [PATCH V2 29/46] xen: arm64: percpu variable support Ian Campbell
2013-02-14 16:47 ` [PATCH V2 30/46] xen: arm: guest context switching Ian Campbell
2013-02-14 16:47 ` [PATCH V2 31/46] xen: arm: show_registers() support for 64-bit Ian Campbell
2013-02-21 15:11   ` Tim Deegan
2013-02-14 16:47 ` [PATCH V2 32/46] xen: arm: make dom0 builder work on 64-bit hypervisor Ian Campbell
2013-02-14 16:47 ` [PATCH V2 33/46] xen: arm: gic: use 64-bit compatible registers Ian Campbell
2013-02-14 16:47 ` [PATCH V2 34/46] xen: arm: time: " Ian Campbell
2013-02-14 16:47 ` [PATCH V2 35/46] xen: arm: p2m: " Ian Campbell
2013-02-14 16:47 ` [PATCH V2 36/46] xen: arm: Use 64-bit compatible registers in vtimer Ian Campbell
2013-02-14 16:47 ` [PATCH V2 37/46] xen: arm: select_user_reg support for 64-bit hypervisor Ian Campbell
2013-02-14 16:47 ` [PATCH V2 38/46] xen: arm: handle 32-bit guest CP register traps on " Ian Campbell
2013-02-14 16:47 ` [PATCH V2 39/46] xen: arm: guest stage 1 walks " Ian Campbell
2013-02-14 16:47 ` [PATCH V2 40/46] xen: arm: implement do_multicall_call for both 32 and 64-bit Ian Campbell
2013-02-14 16:47 ` [PATCH V2 41/46] xen: arm: Enable VFP is a nop on 64-bit Ian Campbell
2013-02-14 16:47 ` [PATCH V2 42/46] xen: arm: Use generic mem{cpy, move, set, zero} " Ian Campbell
2013-02-14 16:47 ` [PATCH V2 43/46] xen: arm: Explicitly setup VPIDR & VMPIDR at start of day Ian Campbell
2013-02-14 16:47 ` [PATCH V2 44/46] xen: arm: print arm64 not arm32 in xen info when appropriate Ian Campbell
2013-02-21 15:12   ` Tim Deegan
2013-02-14 16:47 ` [PATCH V2 45/46] xen: arm: Fix guest mode for 64-bit Ian Campbell
2013-02-21 15:18   ` Tim Deegan
2013-02-14 16:48 ` [PATCH V2 46/46] xen: arm: skanky "appended kernel" option Ian Campbell
2013-02-14 16:59 ` [PATCH 00/46] initial arm v8 (64-bit) support Ian Campbell
2013-02-15 12:06 ` [PATCH] xen: arm: implement cpuinfo Ian Campbell
2013-02-21 17:19   ` Tim Deegan
