Subject: [PATCH V2 3/4] amd iommu: Implement p2m sharing
From: Wei Wang2 @ 2011-04-15 12:13 UTC
To: Tim Deegan
Cc: xen-devel, Keir Fraser

[-- Attachment #1: Type: text/plain, Size: 201 bytes --]

--
Advanced Micro Devices GmbH
Registered office (Sitz): Dornach, Gemeinde Aschheim, Landkreis München
Court of registration (Registergericht): München, HRB Nr. 43632
WEEE-Reg-Nr: DE 12919551
Managing directors (Geschäftsführer): Alberto Bozzo, Andrew Bowd

[-- Attachment #2: amd_iommu_p2m_3.patch --]
[-- Type: text/x-diff, Size: 11496 bytes --]

# HG changeset patch
# User Wei Wang <wei.wang2@amd.com>
# Node ID 1d9b0e45566ec3cd0e293212d9a454920116b2b1
# Parent  c7359ee0b22f8b81841ed495ba58957e0714646e
Implement p2m table sharing for AMD IOMMU.

With sharing enabled, the IOMMU walks the HAP p2m table directly instead
of maintaining its own copy: amd_iommu_share_p2m() frees the private
IOMMU page table, points hd->root_table at the p2m root and sets
iommu_hap_pt_share. amd_iommu_map_page() and amd_iommu_unmap_page()
become no-ops for HVM guests; instead, p2m_set_entry() records the old
MFN of each entry it overwrites and flushes the IOTLB via the new
amd_iommu_flush_pages() whenever an existing translation is replaced.
In the non-shared case, set_iommu_l1e_present() now reports whether it
replaced a valid entry, so amd_iommu_map_page() flushes stale IOTLB
entries as well. invalidate_all_iommu_pages() is renamed to
amd_iommu_flush_all_pages() and reimplemented on top of
amd_iommu_flush_pages(); its gfn of 0x7FFFFFFFFFFFF shifts to
0x7FFFFFFFFFFFF000, the same flush-all address the old code passed to
invalidate_iommu_pages().

Signed-off-by: Wei Wang <wei.wang2@amd.com>
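---
For reference, a short sketch of how the pieces introduced here fit
together. The caller that actually enables sharing from the HAP setup
path is not part of this patch, so hap_setup_hook() below is a
hypothetical placeholder:

/* Sketch only; hap_setup_hook() is a hypothetical caller. */
static void hap_setup_hook(struct domain *d)
{
    /* Reuse the HAP p2m root as the IOMMU page table and set
     * iommu_hap_pt_share; frees the private IOMMU table if present. */
    amd_iommu_share_p2m(d);
}

static void p2m_update_example(struct domain *d, unsigned long gfn,
                               unsigned long old_mfn, unsigned long new_mfn)
{
    /* With sharing active, explicit (un)mapping is a no-op for HVM
     * guests -- amd_iommu_map_page()/amd_iommu_unmap_page() return
     * early -- so a p2m update only requires an IOTLB flush, and only
     * when an existing translation was replaced: */
    if ( old_mfn && old_mfn != new_mfn )
        amd_iommu_flush_pages(d, gfn, 0 /* order 0 = one 4K page */);
}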

diff -r c7359ee0b22f -r 1d9b0e45566e xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c	Fri Apr 15 12:04:47 2011 +0200
+++ b/xen/arch/x86/mm/p2m.c	Fri Apr 15 12:10:39 2011 +0200
@@ -35,6 +35,7 @@
 #include <asm/mem_sharing.h>
 #include <xen/event.h>
 #include <asm/hvm/nestedhvm.h>
+#include <asm/hvm/svm/amd-iommu-proto.h>
 
 /* Debugging and auditing of the P2M code? */
 #define P2M_AUDIT     0
@@ -1418,6 +1419,7 @@ p2m_set_entry(struct p2m_domain *p2m, un
     unsigned int iommu_pte_flags = (p2mt == p2m_ram_rw) ?
                                    IOMMUF_readable|IOMMUF_writable:
                                    0; 
+    unsigned long old_mfn = 0;
 
     if ( tb_init_done )
     {
@@ -1468,7 +1470,10 @@ p2m_set_entry(struct p2m_domain *p2m, un
         entry_content.l1 = l3e_content.l3;
 
         if ( entry_content.l1 != 0 )
+        {
             p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
+            old_mfn = l1e_get_pfn(*p2m_entry);
+        }
 
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, table_mfn, entry_content, 3);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
@@ -1510,8 +1515,10 @@ p2m_set_entry(struct p2m_domain *p2m, un
             entry_content = l1e_empty();
 
         if ( entry_content.l1 != 0 )
+        {
             p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
-
+            old_mfn = l1e_get_pfn(*p2m_entry);
+        }
         /* level 1 entry */
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, table_mfn, entry_content, 1);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
@@ -1544,7 +1551,10 @@ p2m_set_entry(struct p2m_domain *p2m, un
         entry_content.l1 = l2e_content.l2;
 
         if ( entry_content.l1 != 0 )
+        {
             p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
+            old_mfn = l1e_get_pfn(*p2m_entry);
+        }
 
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, table_mfn, entry_content, 2);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
@@ -1561,13 +1571,21 @@ p2m_set_entry(struct p2m_domain *p2m, un
 
     if ( iommu_enabled && need_iommu(p2m->domain) )
     {
-        if ( p2mt == p2m_ram_rw )
-            for ( i = 0; i < (1UL << page_order); i++ )
-                iommu_map_page(p2m->domain, gfn+i, mfn_x(mfn)+i,
-                               IOMMUF_readable|IOMMUF_writable);
+        if ( iommu_hap_pt_share )
+        {
+            if ( old_mfn && (old_mfn != mfn_x(mfn)) )
+                amd_iommu_flush_pages(p2m->domain, gfn, page_order);
+        }
         else
-            for ( int i = 0; i < (1UL << page_order); i++ )
-                iommu_unmap_page(p2m->domain, gfn+i);
+        {
+            if ( p2mt == p2m_ram_rw )
+                for ( i = 0; i < (1UL << page_order); i++ )
+                    iommu_map_page(p2m->domain, gfn+i, mfn_x(mfn)+i,
+                                   IOMMUF_readable|IOMMUF_writable);
+            else
+                for ( i = 0; i < (1UL << page_order); i++ )
+                    iommu_unmap_page(p2m->domain, gfn+i);
+        }
     }
 
     /* Success */
diff -r c7359ee0b22f -r 1d9b0e45566e xen/drivers/passthrough/amd/iommu_init.c
--- a/xen/drivers/passthrough/amd/iommu_init.c	Fri Apr 15 12:04:47 2011 +0200
+++ b/xen/drivers/passthrough/amd/iommu_init.c	Fri Apr 15 12:10:39 2011 +0200
@@ -889,7 +889,7 @@ static void invalidate_all_domain_pages(
 {
     struct domain *d;
     for_each_domain( d )
-        invalidate_all_iommu_pages(d);
+        amd_iommu_flush_all_pages(d);
 }
 
 static void invalidate_all_devices(void)
diff -r c7359ee0b22f -r 1d9b0e45566e xen/drivers/passthrough/amd/iommu_map.c
--- a/xen/drivers/passthrough/amd/iommu_map.c	Fri Apr 15 12:04:47 2011 +0200
+++ b/xen/drivers/passthrough/amd/iommu_map.c	Fri Apr 15 12:10:39 2011 +0200
@@ -19,6 +19,7 @@
  */
 
 #include <xen/sched.h>
+#include <asm/p2m.h>
 #include <xen/hvm/iommu.h>
 #include <asm/amd-iommu.h>
 #include <asm/hvm/svm/amd-iommu-proto.h>
@@ -184,19 +185,32 @@ static void clear_iommu_l1e_present(u64 
     unmap_domain_page(l1_table);
 }
 
-static void set_iommu_l1e_present(u64 l2e, unsigned long gfn,
+static int set_iommu_l1e_present(u64 l2e, unsigned long gfn,
                                  u64 maddr, int iw, int ir)
 {
-    u64 addr_lo, addr_hi;
+    u64 addr_lo, addr_hi, maddr_old;
     u32 entry;
     void *l1_table;
     int offset;
     u32 *l1e;
+    int need_flush = 0;
 
     l1_table = map_domain_page(l2e >> PAGE_SHIFT);
 
     offset = gfn & (~PTE_PER_TABLE_MASK);
     l1e = (u32*)((u8*)l1_table + (offset * IOMMU_PAGE_TABLE_ENTRY_SIZE));
+
+    addr_hi = get_field_from_reg_u32(l1e[1],
+                                     IOMMU_PTE_ADDR_HIGH_MASK,
+                                     IOMMU_PTE_ADDR_HIGH_SHIFT);
+    addr_lo = get_field_from_reg_u32(l1e[0],
+                                     IOMMU_PTE_ADDR_LOW_MASK,
+                                     IOMMU_PTE_ADDR_LOW_SHIFT);
+
+    maddr_old = ((addr_hi << 32) | addr_lo) << PAGE_SHIFT;
+
+    if ( maddr_old && (maddr_old != maddr) )
+        need_flush = 1;
 
     addr_lo = maddr & DMA_32BIT_MASK;
     addr_hi = maddr >> 32;
@@ -226,6 +240,7 @@ static void set_iommu_l1e_present(u64 l2
     l1e[0] = entry;
 
     unmap_domain_page(l1_table);
+    return need_flush;
 }
 
 static void amd_iommu_set_page_directory_entry(u32 *pde, 
@@ -551,7 +566,7 @@ static int update_paging_mode(struct dom
         }
 
         /* For safety, invalidate all entries */
-        invalidate_all_iommu_pages(d);
+        amd_iommu_flush_all_pages(d);
     }
     return 0;
 }
@@ -560,9 +575,13 @@ int amd_iommu_map_page(struct domain *d,
                        unsigned int flags)
 {
     u64 iommu_l2e;
+    int need_flush = 0;
     struct hvm_iommu *hd = domain_hvm_iommu(d);
 
     BUG_ON( !hd->root_table );
+
+    if ( iommu_hap_pt_share && is_hvm_domain(d) )
+        return 0;
 
     spin_lock(&hd->mapping_lock);
 
@@ -587,9 +606,11 @@ int amd_iommu_map_page(struct domain *d,
         return -EFAULT;
     }
 
-    set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT,
-                          !!(flags & IOMMUF_writable),
-                          !!(flags & IOMMUF_readable));
+    need_flush = set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT,
+                                       !!(flags & IOMMUF_writable),
+                                       !!(flags & IOMMUF_readable));
+    if ( need_flush )
+        amd_iommu_flush_pages(d, gfn, 0);
 
     spin_unlock(&hd->mapping_lock);
     return 0;
@@ -598,11 +619,12 @@ int amd_iommu_unmap_page(struct domain *
 int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
 {
     u64 iommu_l2e;
-    unsigned long flags;
-    struct amd_iommu *iommu;
     struct hvm_iommu *hd = domain_hvm_iommu(d);
 
     BUG_ON( !hd->root_table );
+
+    if ( iommu_hap_pt_share && is_hvm_domain(d) )
+        return 0;
 
     spin_lock(&hd->mapping_lock);
 
@@ -632,14 +654,7 @@ int amd_iommu_unmap_page(struct domain *
     clear_iommu_l1e_present(iommu_l2e, gfn);
     spin_unlock(&hd->mapping_lock);
 
-    /* send INVALIDATE_IOMMU_PAGES command */
-    for_each_amd_iommu ( iommu )
-    {
-        spin_lock_irqsave(&iommu->lock, flags);
-        invalidate_iommu_pages(iommu, (u64)gfn << PAGE_SHIFT, hd->domain_id, 0);
-        flush_command_buffer(iommu);
-        spin_unlock_irqrestore(&iommu->lock, flags);
-    }
+    amd_iommu_flush_pages(d, gfn, 0);
 
     return 0;
 }
@@ -667,17 +682,53 @@ int amd_iommu_reserve_domain_unity_map(s
     return 0;
 }
 
-void invalidate_all_iommu_pages(struct domain *d)
+void amd_iommu_flush_all_pages(struct domain *d)
+{
+    amd_iommu_flush_pages(d, 0x7FFFFFFFFFFFFULL, 0);
+}
+
+/* Flush IOTLB entries for a gfn range after a p2m change. */
+void amd_iommu_flush_pages(struct domain *d,
+                           unsigned long gfn, unsigned int order)
 {
     unsigned long flags;
     struct amd_iommu *iommu;
-
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+    unsigned int dom_id = hd->domain_id;
+    u64 gaddr = (u64)gfn << PAGE_SHIFT;
+
+    /* send INVALIDATE_IOMMU_PAGES command */
     for_each_amd_iommu ( iommu )
     {
         spin_lock_irqsave(&iommu->lock, flags);
-        invalidate_iommu_pages(iommu, 0x7FFFFFFFFFFFF000ULL,
-                               d->domain_id, 0);
+        invalidate_iommu_pages(iommu, gaddr, dom_id, order);
         flush_command_buffer(iommu);
         spin_unlock_irqrestore(&iommu->lock, flags);
     }
 }
+
+/* Share p2m table with iommu. */
+void amd_iommu_share_p2m(struct domain *d)
+{
+    struct hvm_iommu *hd  = domain_hvm_iommu(d);
+    struct page_info *p2m_table;
+    mfn_t pgd_mfn;
+
+    ASSERT( is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled );
+
+    pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
+    p2m_table = mfn_to_page(mfn_x(pgd_mfn));
+
+    if ( hd->root_table != p2m_table )
+    {
+        free_amd_iommu_pgtable(hd->root_table);
+        hd->root_table = p2m_table;
+
+        /* The shared HAP p2m table is 4 levels deep, so use paging mode 4 */
+        hd->paging_mode = IOMMU_PAGING_MODE_LEVEL_4;
+        iommu_hap_pt_share = 1;
+
+        AMD_IOMMU_DEBUG("Share p2m table with iommu: p2m table = 0x%lx\n",
+                        mfn_x(pgd_mfn));
+    }
+}
diff -r c7359ee0b22f -r 1d9b0e45566e xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c	Fri Apr 15 12:04:47 2011 +0200
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c	Fri Apr 15 12:10:39 2011 +0200
@@ -362,6 +362,9 @@ static void deallocate_iommu_page_tables
 {
     struct hvm_iommu *hd  = domain_hvm_iommu(d);
 
+    if ( iommu_hap_pt_share )
+        return;
+
     spin_lock(&hd->mapping_lock);
     if ( hd->root_table )
     {
@@ -375,7 +378,7 @@ static void amd_iommu_domain_destroy(str
 static void amd_iommu_domain_destroy(struct domain *d)
 {
     deallocate_iommu_page_tables(d);
-    invalidate_all_iommu_pages(d);
+    amd_iommu_flush_all_pages(d);
 }
 
 static int amd_iommu_return_device(
diff -r c7359ee0b22f -r 1d9b0e45566e xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Fri Apr 15 12:04:47 2011 +0200
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Fri Apr 15 12:10:39 2011 +0200
@@ -51,10 +51,17 @@ int amd_iommu_map_page(struct domain *d,
 int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
                        unsigned int flags);
 int amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
+void amd_iommu_flush_pages(struct domain *d, unsigned long gfn,
+                           unsigned int order);
+void amd_iommu_flush_all_pages(struct domain *d);
+
 u64 amd_iommu_get_next_table_from_pte(u32 *entry);
 int amd_iommu_reserve_domain_unity_map(struct domain *domain,
-        u64 phys_addr, unsigned long size, int iw, int ir);
-void invalidate_all_iommu_pages(struct domain *d);
+                                       u64 phys_addr, unsigned long size,
+                                       int iw, int ir);
+
+/* Share p2m table with iommu */
+void amd_iommu_share_p2m(struct domain *d);
 
 /* device table functions */
 int get_dma_requestor_id(u16 bdf);

[-- Attachment #3: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
