From: Tamas K Lengyel <tklengyel@sec.in.tum.de>
To: xen-devel@lists.xen.org
Cc: ian.campbell@citrix.com, tim@xen.org, julien.grall@linaro.org,
	ian.jackson@eu.citrix.com, stefano.stabellini@citrix.com,
	andres@lagarcavilla.org, jbeulich@suse.com,
	dgdegra@tycho.nsa.gov, Tamas K Lengyel <tklengyel@sec.in.tum.de>
Subject: [PATCH v3 10/15] xen/arm: Data abort exception (R/W) mem_events.
Date: Mon,  1 Sep 2014 16:22:04 +0200
Message-ID: <1409581329-2607-11-git-send-email-tklengyel@sec.in.tum.de>
In-Reply-To: <1409581329-2607-1-git-send-email-tklengyel@sec.in.tum.de>

This patch enables storing, setting, checking and delivering LPAE R/W mem_events.

Signed-off-by: Tamas K Lengyel <tklengyel@sec.in.tum.de>
---
v3: - Add a new function for updating PTE entries, p2m_set_entry.
    - Use the new struct npfec to pass violation information.
    - Implement n2rwx, rx2rw and listener required routines.

v2: - Patch has been split to ease the review process.
    - Add definitions of the data abort fault status codes (enum dabt_dfsc)
      and only call p2m_mem_access_check for traps caused by permission violations.
    - Only call p2m_write_pte in p2m_lookup if the PTE permission actually changed.
    - Properly save settings in the Radix tree and pause the VCPU with
      mem_event_vcpu_pause.
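
Note for reviewers: a minimal sketch of the int-in-pointer round trip that
p2m_set_mem_access()/p2m_get_mem_access() use to keep per-pfn settings in
the Radix tree (illustrative only; it assumes the radix_tree_int_to_ptr()/
radix_tree_ptr_to_int() and radix_tree_init() helpers from
xen/radix-tree.h):

    struct radix_tree_root settings;
    void *ptr;
    int rc;

    radix_tree_init(&settings);

    /* Save a setting for pfn 0x1000, as p2m_set_mem_access() does. */
    rc = radix_tree_insert(&settings, 0x1000,
                           radix_tree_int_to_ptr(p2m_access_r));
    if ( rc == -EEXIST )
        /* A setting already exists: replace the slot in place. */
        radix_tree_replace_slot(radix_tree_lookup_slot(&settings, 0x1000),
                                radix_tree_int_to_ptr(p2m_access_r));

    /* Look it up again, as p2m_get_mem_access() does; NULL means no
     * setting was ever stored for this pfn. */
    ptr = radix_tree_lookup(&settings, 0x1000);
    if ( ptr )
        rc = radix_tree_ptr_to_int(ptr); /* == p2m_access_r */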
---
 xen/arch/arm/p2m.c              | 439 ++++++++++++++++++++++++++++++++++++----
 xen/arch/arm/traps.c            |  31 ++-
 xen/include/asm-arm/p2m.h       |  20 ++
 xen/include/asm-arm/processor.h |  30 +++
 4 files changed, 481 insertions(+), 39 deletions(-)
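
As context for the hunks below, a hypothetical listener-side snippet showing
how these paths get exercised once the libxc plumbing from later patches in
this series is in place (the xc_set_mem_access()/xc_get_mem_access()
signatures are assumptions based on xenctrl.h of this era, not something
this patch adds):

    #include <xenctrl.h>

    static int restrict_pfn(xc_interface *xch, domid_t domid, uint64_t pfn)
    {
        xenmem_access_t access;

        /* Revoke write access: a subsequent guest write takes a permission
         * fault, which do_trap_data_abort_guest() routes into
         * p2m_mem_access_check(). */
        if ( xc_set_mem_access(xch, domid, XENMEM_access_r, pfn, 1) )
            return -1;

        /* Read the stored setting back out of the Radix tree. */
        if ( xc_get_mem_access(xch, domid, pfn, &access) )
            return -1;

        return access == XENMEM_access_r ? 0 : -1;
    }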

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index a6dea5b..e8f5671 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -10,6 +10,7 @@
 #include <asm/event.h>
 #include <asm/hardirq.h>
 #include <asm/page.h>
+#include <xen/radix-tree.h>
 #include <xen/mem_event.h>
 #include <public/mem_event.h>
 #include <xen/mem_access.h>
@@ -148,6 +149,74 @@ static lpae_t *p2m_map_first(struct p2m_domain *p2m, paddr_t addr)
     return __map_domain_page(page);
 }
 
+static void p2m_set_permission(lpae_t *e, p2m_type_t t, p2m_access_t a)
+{
+    /* First apply type permissions */
+    switch (t)
+    {
+    case p2m_ram_rw:
+        e->p2m.xn = 0;
+        e->p2m.write = 1;
+        break;
+
+    case p2m_ram_ro:
+        e->p2m.xn = 0;
+        e->p2m.write = 0;
+        break;
+
+    case p2m_iommu_map_rw:
+    case p2m_map_foreign:
+    case p2m_grant_map_rw:
+    case p2m_mmio_direct:
+        e->p2m.xn = 1;
+        e->p2m.write = 1;
+        break;
+
+    case p2m_iommu_map_ro:
+    case p2m_grant_map_ro:
+    case p2m_invalid:
+        e->p2m.xn = 1;
+        e->p2m.write = 0;
+        break;
+
+    case p2m_max_real_type:
+        BUG();
+        break;
+    }
+
+    /* Then restrict with access permissions */
+    switch ( a )
+    {
+    case p2m_access_n:
+        e->p2m.read = e->p2m.write = 0;
+        e->p2m.xn = 1;
+        break;
+    case p2m_access_r:
+        e->p2m.write = 0;
+        e->p2m.xn = 1;
+        break;
+    case p2m_access_x:
+        e->p2m.write = 0;
+        e->p2m.read = 0;
+        break;
+    case p2m_access_rx:
+        e->p2m.write = 0;
+        break;
+    case p2m_access_w:
+        e->p2m.read = 0;
+        e->p2m.xn = 1;
+        break;
+    case p2m_access_rw:
+        e->p2m.xn = 1;
+        break;
+    case p2m_access_wx:
+        e->p2m.read = 0;
+        break;
+    case p2m_access_rwx:
+        break;
+    }
+}
+
 /*
  * Lookup the MFN corresponding to a domain's PFN.
  *
@@ -228,7 +297,7 @@ int p2m_pod_decrease_reservation(struct domain *d,
 }
 
 static lpae_t mfn_to_p2m_entry(unsigned long mfn, unsigned int mattr,
-                               p2m_type_t t)
+                               p2m_type_t t, p2m_access_t a)
 {
     paddr_t pa = ((paddr_t) mfn) << PAGE_SHIFT;
     /* sh, xn and write bit will be defined in the following switches
@@ -258,37 +327,7 @@ static lpae_t mfn_to_p2m_entry(unsigned long mfn, unsigned int mattr,
         break;
     }
 
-    switch (t)
-    {
-    case p2m_ram_rw:
-        e.p2m.xn = 0;
-        e.p2m.write = 1;
-        break;
-
-    case p2m_ram_ro:
-        e.p2m.xn = 0;
-        e.p2m.write = 0;
-        break;
-
-    case p2m_iommu_map_rw:
-    case p2m_map_foreign:
-    case p2m_grant_map_rw:
-    case p2m_mmio_direct:
-        e.p2m.xn = 1;
-        e.p2m.write = 1;
-        break;
-
-    case p2m_iommu_map_ro:
-    case p2m_grant_map_ro:
-    case p2m_invalid:
-        e.p2m.xn = 1;
-        e.p2m.write = 0;
-        break;
-
-    case p2m_max_real_type:
-        BUG();
-        break;
-    }
+    p2m_set_permission(&e, t, a);
 
     ASSERT(!(pa & ~PAGE_MASK));
     ASSERT(!(pa & ~PADDR_MASK));
@@ -346,7 +385,7 @@ static int p2m_create_table(struct domain *d, lpae_t *entry,
          for ( i=0 ; i < LPAE_ENTRIES; i++ )
          {
              pte = mfn_to_p2m_entry(base_pfn + (i<<(level_shift-LPAE_SHIFT)),
-                                    MATTR_MEM, t);
+                                    MATTR_MEM, t, p2m->default_access);
 
              /*
               * First and second level super pages set p2m.table = 0, but
@@ -366,7 +405,7 @@ static int p2m_create_table(struct domain *d, lpae_t *entry,
 
     unmap_domain_page(p);
 
-    pte = mfn_to_p2m_entry(page_to_mfn(page), MATTR_MEM, p2m_invalid);
+    pte = mfn_to_p2m_entry(page_to_mfn(page), MATTR_MEM, p2m_invalid, p2m->default_access);
 
     p2m_write_pte(entry, pte, flush_cache);
 
@@ -498,7 +537,7 @@ static int apply_one_level(struct domain *d,
             page = alloc_domheap_pages(d, level_shift - PAGE_SHIFT, 0);
             if ( page )
             {
-                pte = mfn_to_p2m_entry(page_to_mfn(page), mattr, t);
+                pte = mfn_to_p2m_entry(page_to_mfn(page), mattr, t, a);
                 if ( level < 3 )
                     pte.p2m.table = 0;
                 p2m_write_pte(entry, pte, flush_cache);
@@ -533,7 +572,7 @@ static int apply_one_level(struct domain *d,
              (level == 3 || !p2m_table(orig_pte)) )
         {
             /* New mapping is superpage aligned, make it */
-            pte = mfn_to_p2m_entry(*maddr >> PAGE_SHIFT, mattr, t);
+            pte = mfn_to_p2m_entry(*maddr >> PAGE_SHIFT, mattr, t, a);
             if ( level < 3 )
                 pte.p2m.table = 0; /* Superpage entry */
 
@@ -640,6 +679,7 @@ static int apply_one_level(struct domain *d,
 
         memset(&pte, 0x00, sizeof(pte));
         p2m_write_pte(entry, pte, flush_cache);
+        radix_tree_delete(&p2m->mem_access_settings, paddr_to_pfn(*addr));
 
         *addr += level_size;
 
@@ -1080,6 +1120,333 @@ err:
     return page;
 }
 
+static bool_t p2m_set_entry(struct domain *d, paddr_t paddr, p2m_access_t p2ma)
+{
+    bool_t rc = 0;
+    struct p2m_domain *p2m = &d->arch.p2m;
+    lpae_t *pte, *first = NULL, *second = NULL, *third = NULL;
+
+    first = p2m_map_first(p2m, paddr);
+    if ( !first )
+        goto err;
+
+    pte = &first[first_table_offset(paddr)];
+    if ( !p2m_table(*pte) )
+        goto done;
+
+    second = map_domain_page(pte->p2m.base);
+    pte = &second[second_table_offset(paddr)];
+    if ( !p2m_table(*pte) )
+        goto done;
+
+    third = map_domain_page(pte->p2m.base);
+    pte = &third[third_table_offset(paddr)];
+
+    BUILD_BUG_ON(THIRD_MASK != PAGE_MASK);
+
+    /* This bit must be one in the level 3 entry */
+    if ( !p2m_table(*pte) )
+        pte->bits = 0;
+
+done:
+    if ( p2m_valid(*pte) )
+    {
+        ASSERT(pte->p2m.type != p2m_invalid);
+
+        p2m_set_permission(pte, pte->p2m.type, p2ma);
+        p2m_write_pte(pte, *pte, 1);
+
+        rc = 1;
+    }
+
+    if ( third ) unmap_domain_page(third);
+    if ( second ) unmap_domain_page(second);
+    if ( first ) unmap_domain_page(first);
+
+err:
+    return rc;
+}
+
+int p2m_mem_access_check(paddr_t gpa, vaddr_t gla, struct npfec npfec)
+{
+    struct vcpu *v = current;
+    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
+    mem_event_request_t *req = NULL;
+    xenmem_access_t xma;
+    bool_t violation;
+    int rc;
+
+    rc = p2m_get_mem_access(v->domain, paddr_to_pfn(gpa), &xma);
+    if ( rc )
+    {
+        /* No setting was found, reinject */
+        return 1;
+    }
+    else
+    {
+        /* First, handle rx2rw and n2rwx conversion automatically. */
+        if ( npfec.write_access && xma == XENMEM_access_rx2rw )
+        {
+            rc = p2m_set_mem_access(v->domain, paddr_to_pfn(gpa), 1,
+                                    0, ~0, XENMEM_access_rw);
+            ASSERT(rc == 0);
+            return 0;
+        }
+        else if ( xma == XENMEM_access_n2rwx )
+        {
+            rc = p2m_set_mem_access(v->domain, paddr_to_pfn(gpa), 1,
+                                    0, ~0, XENMEM_access_rwx);
+            ASSERT(rc == 0);
+        }
+    }
+
+    /* Otherwise, check if there is a memory event listener, and send the message along */
+    if ( !mem_event_check_ring(&v->domain->mem_event->access) )
+    {
+        /* No listener */
+        if ( p2m->access_required )
+        {
+            gdprintk(XENLOG_INFO, "Memory access permissions failure, "
+                                  "no mem_event listener VCPU %d, dom %d\n",
+                                  v->vcpu_id, v->domain->domain_id);
+            domain_crash(v->domain);
+        }
+        else
+        {
+            /* n2rwx was already handled */
+            if ( xma != XENMEM_access_n2rwx )
+            {
+                /* A listener is not required, so clear the access
+                 * restrictions. */
+                rc = p2m_set_mem_access(v->domain, paddr_to_pfn(gpa), 1,
+                                        0, ~0, XENMEM_access_rwx);
+                ASSERT(rc == 0);
+            }
+        }
+
+        /* No need to reinject */
+        return 0;
+    }
+
+    switch ( xma )
+    {
+    default:
+    case XENMEM_access_n:
+        violation = npfec.read_access || npfec.write_access || npfec.insn_fetch;
+        break;
+    case XENMEM_access_r:
+        violation = npfec.write_access || npfec.insn_fetch;
+        break;
+    case XENMEM_access_w:
+        violation = npfec.read_access || npfec.insn_fetch;
+        break;
+    case XENMEM_access_x:
+        violation = npfec.read_access || npfec.write_access;
+        break;
+    case XENMEM_access_rx:
+        violation = npfec.write_access;
+        break;
+    case XENMEM_access_wx:
+        violation = npfec.read_access;
+        break;
+    case XENMEM_access_rw:
+        violation = npfec.insn_fetch;
+        break;
+    case XENMEM_access_rwx:
+        violation = 0;
+        break;
+    }
+
+    if ( !violation )
+        return 1;
+
+    req = xzalloc(mem_event_request_t);
+    if ( req )
+    {
+        req->reason = MEM_EVENT_REASON_VIOLATION;
+        if ( xma != XENMEM_access_n2rwx )
+            req->flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
+        req->gfn = gpa >> PAGE_SHIFT;
+        req->offset = gpa & ((1 << PAGE_SHIFT) - 1);
+        req->gla = gla;
+        req->gla_valid = npfec.gla_valid;
+        req->access_r = npfec.read_access;
+        req->access_w = npfec.write_access;
+        req->access_x = npfec.insn_fetch;
+        if ( npfec_kind_in_gpt == npfec.kind )
+            req->fault_in_gpt = 1;
+        if ( npfec_kind_with_gla == npfec.kind )
+            req->fault_with_gla = 1;
+        req->vcpu_id = v->vcpu_id;
+
+        mem_access_send_req(v->domain, req);
+        xfree(req);
+    }
+
+    /* Pause the current VCPU */
+    if ( xma != XENMEM_access_n2rwx )
+        mem_event_vcpu_pause(v);
+
+    return 0;
+}
+
+void p2m_mem_access_resume(struct domain *d)
+{
+    mem_event_response_t rsp;
+
+    /* Pull all responses off the ring */
+    while ( mem_event_get_response(d, &d->mem_event->access, &rsp) )
+    {
+        struct vcpu *v;
+
+        if ( rsp.flags & MEM_EVENT_FLAG_DUMMY )
+            continue;
+
+        /* Validate the vcpu_id in the response. */
+        if ( (rsp.vcpu_id >= d->max_vcpus) || !d->vcpu[rsp.vcpu_id] )
+            continue;
+
+        v = d->vcpu[rsp.vcpu_id];
+
+        /* Unpause domain */
+        if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
+            mem_event_vcpu_unpause(v);
+    }
+}
+
+/* Set access type for a region of pfns.
+ * If pfn == -1ul, sets the default access type */
+long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
+                        uint32_t start, uint32_t mask, xenmem_access_t access)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    p2m_access_t a;
+    long rc = 0;
+
+    static const p2m_access_t memaccess[] = {
+#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
+        ACCESS(n),
+        ACCESS(r),
+        ACCESS(w),
+        ACCESS(rw),
+        ACCESS(x),
+        ACCESS(rx),
+        ACCESS(wx),
+        ACCESS(rwx),
+#undef ACCESS
+    };
+
+    switch ( access )
+    {
+    case 0 ... ARRAY_SIZE(memaccess) - 1:
+        a = memaccess[access];
+        break;
+    case XENMEM_access_default:
+        a = p2m->default_access;
+        break;
+    default:
+        return -EINVAL;
+    }
+
+    /* If request to set default access */
+    if ( pfn == ~0ul )
+    {
+        p2m->default_access = a;
+        return 0;
+    }
+
+    spin_lock(&p2m->lock);
+    for ( pfn += start; nr > start; ++pfn )
+    {
+
+        bool_t pte_update = p2m_set_entry(d, pfn_to_paddr(pfn), a);
+
+        if ( !pte_update )
+            break;
+
+        rc = radix_tree_insert(&p2m->mem_access_settings, pfn,
+                               radix_tree_int_to_ptr(a));
+
+        switch ( rc )
+        {
+        case 0:
+            /* Nothing to do, setting saved successfully */
+            break;
+        case -EEXIST:
+            /* If a setting existed already, change it to the new one */
+            radix_tree_replace_slot(
+                radix_tree_lookup_slot(
+                    &p2m->mem_access_settings, pfn),
+                radix_tree_int_to_ptr(a));
+            rc = 0;
+            break;
+        default:
+            /* If we fail to save the setting in the Radix tree, we
+             * need to reset the PTE permissions to default. */
+            p2m_set_entry(d, pfn_to_paddr(pfn), p2m->default_access);
+            break;
+        }
+
+        /* Check for continuation if it's not the last iteration. */
+        if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
+        {
+            rc = start;
+            break;
+        }
+    }
+
+    /* Flush the TLB of the domain to ensure consistency */
+    flush_tlb_domain(d);
+
+    spin_unlock(&p2m->lock);
+    return rc;
+}
+
+int p2m_get_mem_access(struct domain *d, unsigned long gpfn,
+                       xenmem_access_t *access)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    void *i;
+    int index;
+
+    static const xenmem_access_t memaccess[] = {
+#define ACCESS(ac) [XENMEM_access_##ac] = XENMEM_access_##ac
+            ACCESS(n),
+            ACCESS(r),
+            ACCESS(w),
+            ACCESS(rw),
+            ACCESS(x),
+            ACCESS(rx),
+            ACCESS(wx),
+            ACCESS(rwx),
+#undef ACCESS
+    };
+
+    /* If request to get default access */
+    if ( gpfn == ~0ul )
+    {
+        *access = memaccess[p2m->default_access];
+        return 0;
+    }
+
+    spin_lock(&p2m->lock);
+
+    i = radix_tree_lookup(&p2m->mem_access_settings, gpfn);
+
+    spin_unlock(&p2m->lock);
+
+    if ( !i )
+        return -ESRCH;
+
+    index = radix_tree_ptr_to_int(i);
+
+    if ( (unsigned) index >= ARRAY_SIZE(memaccess) )
+        return -ERANGE;
+
+    *access = memaccess[index];
+    return 0;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 019991f..7eb875a 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -1852,13 +1852,38 @@ static void do_trap_data_abort_guest(struct cpu_user_regs *regs,
     info.gva = READ_SYSREG64(FAR_EL2);
 #endif
 
-    if (dabt.s1ptw)
-        goto bad_data_abort;
-
     rc = gva_to_ipa(info.gva, &info.gpa);
     if ( rc == -EFAULT )
         goto bad_data_abort;
 
+    switch ( dabt.dfsc )
+    {
+    case DABT_DFSC_PERMISSION_1:
+    case DABT_DFSC_PERMISSION_2:
+    case DABT_DFSC_PERMISSION_3:
+    {
+        struct npfec npfec = {
+            .read_access = 1,
+            .write_access = dabt.write,
+            .gla_valid = 1,
+            .kind = dabt.s1ptw ? npfec_kind_in_gpt : npfec_kind_with_gla
+        };
+
+        rc = p2m_mem_access_check(info.gpa, info.gva, npfec);
+
+        /* Trap was triggered by mem_access, work here is done */
+        if ( !rc )
+            return;
+    }
+    break;
+
+    default:
+        break;
+    }
+
+    if ( dabt.s1ptw )
+        goto bad_data_abort;
+
     /* XXX: Decode the instruction if ISS is not valid */
     if ( !dabt.valid )
         goto bad_data_abort;
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index afdbf84..2a9e0a3 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -233,6 +233,26 @@ static inline int get_page_and_type(struct page_info *page,
     return rc;
 }
 
+/* get host p2m table */
+#define p2m_get_hostp2m(d) (&((d)->arch.p2m))
+
+/* Send mem event based on the access (gla is -1ull if not available). Boolean
+ * return value indicates whether the trap needs to be injected into the guest. */
+int p2m_mem_access_check(paddr_t gpa, vaddr_t gla, struct npfec npfec);
+
+/* Resumes the running of the VCPU, restarting the last instruction */
+void p2m_mem_access_resume(struct domain *d);
+
+/* Set access type for a region of pfns.
+ * If start_pfn == -1ul, sets the default access type */
+long p2m_set_mem_access(struct domain *d, unsigned long start_pfn, uint32_t nr,
+                        uint32_t start, uint32_t mask, xenmem_access_t access);
+
+/* Get access type for a pfn
+ * If pfn == -1ul, gets the default access type */
+int p2m_get_mem_access(struct domain *d, unsigned long pfn,
+                       xenmem_access_t *access);
+
 #endif /* _XEN_P2M_H */
 
 /*
diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h
index 0cc5b6d..b844f1d 100644
--- a/xen/include/asm-arm/processor.h
+++ b/xen/include/asm-arm/processor.h
@@ -262,6 +262,36 @@ enum dabt_size {
     DABT_DOUBLE_WORD = 3,
 };
 
+/* Data abort fault status codes (DFSC) */
+enum dabt_dfsc {
+    DABT_DFSC_ADDR_SIZE_0       = 0b000000,
+    DABT_DFSC_ADDR_SIZE_1       = 0b000001,
+    DABT_DFSC_ADDR_SIZE_2       = 0b000010,
+    DABT_DFSC_ADDR_SIZE_3       = 0b000011,
+    DABT_DFSC_TRANSLATION_0     = 0b000100,
+    DABT_DFSC_TRANSLATION_1     = 0b000101,
+    DABT_DFSC_TRANSLATION_2     = 0b000110,
+    DABT_DFSC_TRANSLATION_3     = 0b000111,
+    DABT_DFSC_ACCESS_1          = 0b001001,
+    DABT_DFSC_ACCESS_2          = 0b001010,
+    DABT_DFSC_ACCESS_3          = 0b001011,
+    DABT_DFSC_PERMISSION_1      = 0b001101,
+    DABT_DFSC_PERMISSION_2      = 0b001110,
+    DABT_DFSC_PERMISSION_3      = 0b001111,
+    DABT_DFSC_SYNC_EXT          = 0b010000,
+    DABT_DFSC_SYNC_PARITY       = 0b011000,
+    DABT_DFSC_SYNC_EXT_TTW_0    = 0b010100,
+    DABT_DFSC_SYNC_EXT_TTW_1    = 0b010101,
+    DABT_DFSC_SYNC_EXT_TTW_2    = 0b010110,
+    DABT_DFSC_SYNC_EXT_TTW_3    = 0b010111,
+    DABT_DFSC_SYNC_PARITY_TTW_0 = 0b011100,
+    DABT_DFSC_SYNC_PARITY_TTW_1 = 0b011101,
+    DABT_DFSC_SYNC_PARITY_TTW_2 = 0b011110,
+    DABT_DFSC_SYNC_PARITY_TTW_3 = 0b011111,
+    DABT_DFSC_ALIGNMENT         = 0b100001,
+    DABT_DFSC_TLB_CONFLICT      = 0b110000,
+};
+
 union hsr {
     uint32_t bits;
     struct {
-- 
2.1.0.rc1
