All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v2 00/11] tmem: some basic cleanup
@ 2013-11-08  1:03 Bob Liu
  2013-11-08  1:03 ` [PATCH v2 01/11] tmem: cleanup: drop COMPARE_COPY_PAGE_SSE2 Bob Liu
                   ` (10 more replies)
  0 siblings, 11 replies; 15+ messages in thread
From: Bob Liu @ 2013-11-08  1:03 UTC (permalink / raw)
  To: xen-devel; +Cc: Bob Liu, keir, ian.campbell, JBeulich

When I tried to read the tmem source code, I found it's not very straightforward.
There are too many typedefs and functions referenced only once in tmem; perhaps
the reason is that tmem was designed so it could be ported to other hypervisors
easily. But I don't think that's really necessary nowadays, so this patchset
tries to clean it up. And I will continue trying to make tmem more readable and
better.

Changelog v2:
Forgot to set client->domain in [patch 07/11].

Bob Liu (11):
  tmem: cleanup: drop COMPARE_COPY_PAGE_SSE2
  tmem: cleanup: drop pfp_t typedef
  tmem: cleanup: drop tmem_cli_mfn_t typedef
  tmem: cleanup: rename 'tmh_' with 'tmem_'
  tmem: cleanup: drop most of the typedefs
  tmem: cleanup: drop tmem_alloc/free_infra
  tmem: cleanup: drop tmem_client_t
  tmem: cleanup: drop useless wrap function
  tmem: cleanup: drop unused function 'domain_fully_allocated'
  tmem: cleanup: drop useless _subpage function wrap
  tmem: cleanup: drop unneeded functions

 xen/common/tmem.c          |  851 ++++++++++++++++++++++----------------------
 xen/common/tmem_xen.c      |  171 +++------
 xen/include/public/tmem.h  |    3 +-
 xen/include/xen/tmem_xen.h |  252 ++++---------
 4 files changed, 541 insertions(+), 736 deletions(-)

-- 
1.7.10.4

^ permalink raw reply	[flat|nested] 15+ messages in thread

* [PATCH v2 01/11] tmem: cleanup: drop COMPARE_COPY_PAGE_SSE2
  2013-11-08  1:03 [PATCH v2 00/11] tmem: some basic cleanup Bob Liu
@ 2013-11-08  1:03 ` Bob Liu
  2013-11-08  1:03 ` [PATCH v2 02/11] tmem: cleanup: drop typedef pfp_t Bob Liu
                   ` (9 subsequent siblings)
  10 siblings, 0 replies; 15+ messages in thread
From: Bob Liu @ 2013-11-08  1:03 UTC (permalink / raw)
  To: xen-devel; +Cc: Bob Liu, keir, ian.campbell, JBeulich

COMPARE_COPY_PAGE_SSE2 is never used anywhere else; memcpy() is enough.

Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
 xen/common/tmem.c     |   14 --------------
 xen/common/tmem_xen.c |   35 -----------------------------------
 2 files changed, 49 deletions(-)

diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index a122651..215722b 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -95,14 +95,7 @@ DECL_CYC_COUNTER(non_succ_get);
 DECL_CYC_COUNTER(non_succ_put);
 DECL_CYC_COUNTER(flush);
 DECL_CYC_COUNTER(flush_obj);
-#ifdef COMPARE_COPY_PAGE_SSE2
-EXTERN_CYC_COUNTER(pg_copy1);
-EXTERN_CYC_COUNTER(pg_copy2);
-EXTERN_CYC_COUNTER(pg_copy3);
-EXTERN_CYC_COUNTER(pg_copy4);
-#else
 EXTERN_CYC_COUNTER(pg_copy);
-#endif
 DECL_CYC_COUNTER(compress);
 DECL_CYC_COUNTER(decompress);
 
@@ -2172,14 +2165,7 @@ static int tmemc_list_global_perf(tmem_cli_va_param_t buf, int off,
     n += SCNPRINTF_CYC_COUNTER(info+n,BSIZE-n,non_succ_put,"p");
     n += SCNPRINTF_CYC_COUNTER(info+n,BSIZE-n,flush,"F");
     n += SCNPRINTF_CYC_COUNTER(info+n,BSIZE-n,flush_obj,"O");
-#ifdef COMPARE_COPY_PAGE_SSE2
-    n += SCNPRINTF_CYC_COUNTER(info+n,BSIZE-n,pg_copy1,"1");
-    n += SCNPRINTF_CYC_COUNTER(info+n,BSIZE-n,pg_copy2,"2");
-    n += SCNPRINTF_CYC_COUNTER(info+n,BSIZE-n,pg_copy3,"3");
-    n += SCNPRINTF_CYC_COUNTER(info+n,BSIZE-n,pg_copy4,"4");
-#else
     n += SCNPRINTF_CYC_COUNTER(info+n,BSIZE-n,pg_copy,"C");
-#endif
     n += SCNPRINTF_CYC_COUNTER(info+n,BSIZE-n,compress,"c");
     n += SCNPRINTF_CYC_COUNTER(info+n,BSIZE-n,decompress,"d");
     n--; /* overwrite trailing comma */
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index 54ec09f..4725558 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -36,14 +36,7 @@ integer_param("tmem_lock", opt_tmem_lock);
 
 EXPORT atomic_t freeable_page_count = ATOMIC_INIT(0);
 
-#ifdef COMPARE_COPY_PAGE_SSE2
-DECL_CYC_COUNTER(pg_copy1);
-DECL_CYC_COUNTER(pg_copy2);
-DECL_CYC_COUNTER(pg_copy3);
-DECL_CYC_COUNTER(pg_copy4);
-#else
 DECL_CYC_COUNTER(pg_copy);
-#endif
 
 /* these are a concurrency bottleneck, could be percpu and dynamically
  * allocated iff opt_tmem_compress */
@@ -53,40 +46,12 @@ static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, workmem);
 static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, dstmem);
 static DEFINE_PER_CPU_READ_MOSTLY(void *, scratch_page);
 
-#ifdef COMPARE_COPY_PAGE_SSE2
-#include <asm/flushtlb.h>  /* REMOVE ME AFTER TEST */
-#include <asm/page.h>  /* REMOVE ME AFTER TEST */
-#endif
 void tmh_copy_page(char *to, char*from)
 {
-#ifdef COMPARE_COPY_PAGE_SSE2
-    DECL_LOCAL_CYC_COUNTER(pg_copy1);
-    DECL_LOCAL_CYC_COUNTER(pg_copy2);
-    DECL_LOCAL_CYC_COUNTER(pg_copy3);
-    DECL_LOCAL_CYC_COUNTER(pg_copy4);
-    *to = *from;  /* don't measure TLB misses */
-    flush_area_local(to,FLUSH_CACHE|FLUSH_ORDER(0));
-    flush_area_local(from,FLUSH_CACHE|FLUSH_ORDER(0));
-    START_CYC_COUNTER(pg_copy1);
-    copy_page_sse2(to, from);  /* cold cache */
-    END_CYC_COUNTER(pg_copy1);
-    START_CYC_COUNTER(pg_copy2);
-    copy_page_sse2(to, from);  /* hot cache */
-    END_CYC_COUNTER(pg_copy2);
-    flush_area_local(to,FLUSH_CACHE|FLUSH_ORDER(0));
-    flush_area_local(from,FLUSH_CACHE|FLUSH_ORDER(0));
-    START_CYC_COUNTER(pg_copy3);
-    memcpy(to, from, PAGE_SIZE);  /* cold cache */
-    END_CYC_COUNTER(pg_copy3);
-    START_CYC_COUNTER(pg_copy4);
-    memcpy(to, from, PAGE_SIZE); /* hot cache */
-    END_CYC_COUNTER(pg_copy4);
-#else
     DECL_LOCAL_CYC_COUNTER(pg_copy);
     START_CYC_COUNTER(pg_copy);
     memcpy(to, from, PAGE_SIZE);
     END_CYC_COUNTER(pg_copy);
-#endif
 }
 
 #if defined(CONFIG_ARM)
-- 
1.7.10.4

^ permalink raw reply related	[flat|nested] 15+ messages in thread

* [PATCH v2 02/11] tmem: cleanup: drop typedef pfp_t
  2013-11-08  1:03 [PATCH v2 00/11] tmem: some basic cleanup Bob Liu
  2013-11-08  1:03 ` [PATCH v2 01/11] tmem: cleanup: drop COMPARE_COPY_PAGE_SSE2 Bob Liu
@ 2013-11-08  1:03 ` Bob Liu
  2013-11-08  1:03 ` [PATCH v2 03/11] tmem: cleanup: drop typedef tmem_cli_mfn_t Bob Liu
                   ` (8 subsequent siblings)
  10 siblings, 0 replies; 15+ messages in thread
From: Bob Liu @ 2013-11-08  1:03 UTC (permalink / raw)
  To: xen-devel; +Cc: Bob Liu, keir, ian.campbell, JBeulich

Use 'struct page_info' directly instead of 'pfp_t' to make the code more
straightforward and readable.

Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
 xen/common/tmem.c          |   14 +++++++-------
 xen/common/tmem_xen.c      |   22 +++++++++++-----------
 xen/include/xen/tmem_xen.h |   17 ++++++++---------
 3 files changed, 26 insertions(+), 27 deletions(-)

diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index 215722b..4a278f2 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -232,7 +232,7 @@ struct tmem_page_descriptor {
     bool_t eviction_attempted;  /* CHANGE TO lifetimes? (settable) */
     struct list_head pcd_siblings;
     union {
-        pfp_t *pfp;  /* page frame pointer */
+        struct page_info *pfp;  /* page frame pointer */
         char *cdata; /* compressed data */
         struct tmem_page_content_descriptor *pcd; /* page dedup */
     };
@@ -248,7 +248,7 @@ typedef struct tmem_page_descriptor pgp_t;
 
 struct tmem_page_content_descriptor {
     union {
-        pfp_t *pfp;  /* page frame pointer */
+        struct page_info *pfp;  /* page frame pointer */
         char *cdata; /* if compression_enabled */
         char *tze; /* if !compression_enabled, trailing zeroes eliminated */
     };
@@ -341,9 +341,9 @@ static NOINLINE void tmem_free(void *p, size_t size, pool_t *pool)
         tmh_free_subpage_thispool(pool,p,size);
 }
 
-static NOINLINE pfp_t *tmem_page_alloc(pool_t *pool)
+static NOINLINE struct page_info *tmem_page_alloc(pool_t *pool)
 {
-    pfp_t *pfp = NULL;
+    struct page_info *pfp = NULL;
 
     if ( pool != NULL && is_persistent(pool) )
         pfp = tmh_alloc_page_thispool(pool);
@@ -356,7 +356,7 @@ static NOINLINE pfp_t *tmem_page_alloc(pool_t *pool)
     return pfp;
 }
 
-static NOINLINE void tmem_page_free(pool_t *pool, pfp_t *pfp)
+static NOINLINE void tmem_page_free(pool_t *pool, struct page_info *pfp)
 {
     ASSERT(pfp);
     if ( pool == NULL || !is_persistent(pool) )
@@ -397,7 +397,7 @@ static NOINLINE int pcd_copy_to_client(tmem_cli_mfn_t cmfn, pgp_t *pgp)
 static NOINLINE void pcd_disassociate(pgp_t *pgp, pool_t *pool, bool_t have_pcd_rwlock)
 {
     pcd_t *pcd = pgp->pcd;
-    pfp_t *pfp = pgp->pcd->pfp;
+    struct page_info *pfp = pgp->pcd->pfp;
     uint16_t firstbyte = pgp->firstbyte;
     char *pcd_tze = pgp->pcd->tze;
     pagesize_t pcd_size = pcd->size;
@@ -2873,7 +2873,7 @@ EXPORT void tmem_freeze_all(unsigned char key)
 
 EXPORT void *tmem_relinquish_pages(unsigned int order, unsigned int memflags)
 {
-    pfp_t *pfp;
+    struct page_info *pfp;
     unsigned long evicts_per_relinq = 0;
     int max_evictions = 10;
 
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index 4725558..cbcdb1a 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -56,13 +56,13 @@ void tmh_copy_page(char *to, char*from)
 
 #if defined(CONFIG_ARM)
 static inline void *cli_get_page(tmem_cli_mfn_t cmfn, unsigned long *pcli_mfn,
-                                 pfp_t **pcli_pfp, bool_t cli_write)
+                                 struct page_info **pcli_pfp, bool_t cli_write)
 {
     ASSERT(0);
     return NULL;
 }
 
-static inline void cli_put_page(void *cli_va, pfp_t *cli_pfp,
+static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp,
                                 unsigned long cli_mfn, bool_t mark_dirty)
 {
     ASSERT(0);
@@ -71,7 +71,7 @@ static inline void cli_put_page(void *cli_va, pfp_t *cli_pfp,
 #include <asm/p2m.h>
 
 static inline void *cli_get_page(tmem_cli_mfn_t cmfn, unsigned long *pcli_mfn,
-                                 pfp_t **pcli_pfp, bool_t cli_write)
+                                 struct page_info **pcli_pfp, bool_t cli_write)
 {
     p2m_type_t t;
     struct page_info *page;
@@ -95,7 +95,7 @@ static inline void *cli_get_page(tmem_cli_mfn_t cmfn, unsigned long *pcli_mfn,
     return map_domain_page(*pcli_mfn);
 }
 
-static inline void cli_put_page(void *cli_va, pfp_t *cli_pfp,
+static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp,
                                 unsigned long cli_mfn, bool_t mark_dirty)
 {
     if ( mark_dirty )
@@ -109,13 +109,13 @@ static inline void cli_put_page(void *cli_va, pfp_t *cli_pfp,
 }
 #endif
 
-EXPORT int tmh_copy_from_client(pfp_t *pfp,
+EXPORT int tmh_copy_from_client(struct page_info *pfp,
     tmem_cli_mfn_t cmfn, pagesize_t tmem_offset,
     pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t clibuf)
 {
     unsigned long tmem_mfn, cli_mfn = 0;
     char *tmem_va, *cli_va = NULL;
-    pfp_t *cli_pfp = NULL;
+    struct page_info *cli_pfp = NULL;
     int rc = 1;
 
     if ( tmem_offset > PAGE_SIZE || pfn_offset > PAGE_SIZE || len > PAGE_SIZE )
@@ -165,7 +165,7 @@ EXPORT int tmh_compress_from_client(tmem_cli_mfn_t cmfn,
     unsigned char *dmem = this_cpu(dstmem);
     unsigned char *wmem = this_cpu(workmem);
     char *scratch = this_cpu(scratch_page);
-    pfp_t *cli_pfp = NULL;
+    struct page_info *cli_pfp = NULL;
     unsigned long cli_mfn = 0;
     void *cli_va = NULL;
 
@@ -190,13 +190,13 @@ EXPORT int tmh_compress_from_client(tmem_cli_mfn_t cmfn,
     return 1;
 }
 
-EXPORT int tmh_copy_to_client(tmem_cli_mfn_t cmfn, pfp_t *pfp,
+EXPORT int tmh_copy_to_client(tmem_cli_mfn_t cmfn, struct page_info *pfp,
     pagesize_t tmem_offset, pagesize_t pfn_offset, pagesize_t len,
     tmem_cli_va_param_t clibuf)
 {
     unsigned long tmem_mfn, cli_mfn = 0;
     char *tmem_va, *cli_va = NULL;
-    pfp_t *cli_pfp = NULL;
+    struct page_info *cli_pfp = NULL;
     int rc = 1;
 
     if ( tmem_offset > PAGE_SIZE || pfn_offset > PAGE_SIZE || len > PAGE_SIZE )
@@ -233,7 +233,7 @@ EXPORT int tmh_decompress_to_client(tmem_cli_mfn_t cmfn, void *tmem_va,
                                     size_t size, tmem_cli_va_param_t clibuf)
 {
     unsigned long cli_mfn = 0;
-    pfp_t *cli_pfp = NULL;
+    struct page_info *cli_pfp = NULL;
     void *cli_va = NULL;
     char *scratch = this_cpu(scratch_page);
     size_t out_len = PAGE_SIZE;
@@ -263,7 +263,7 @@ EXPORT int tmh_copy_tze_to_client(tmem_cli_mfn_t cmfn, void *tmem_va,
 {
     void *cli_va;
     unsigned long cli_mfn;
-    pfp_t *cli_pfp = NULL;
+    struct page_info *cli_pfp = NULL;
 
     ASSERT(!(len & (sizeof(uint64_t)-1)));
     ASSERT(len <= PAGE_SIZE);
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index ad1ddd5..f05d0f5 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -277,7 +277,6 @@ static inline void tmh_free_infra(void *p)
 struct client;
 typedef domid_t cli_id_t;
 typedef struct domain tmh_cli_ptr_t;
-typedef struct page_info pfp_t;
 
 extern tmh_client_t *tmh_client_init(cli_id_t);
 extern void tmh_client_destroy(tmh_client_t *);
@@ -337,14 +336,14 @@ static inline bool_t tmh_current_is_privileged(void)
     return !xsm_tmem_control(XSM_PRIV);
 }
 
-static inline uint8_t tmh_get_first_byte(pfp_t *pfp)
+static inline uint8_t tmh_get_first_byte(struct page_info *pfp)
 {
     void *p = __map_domain_page(pfp);
 
     return (uint8_t)(*(char *)p);
 }
 
-static inline int tmh_page_cmp(pfp_t *pfp1, pfp_t *pfp2)
+static inline int tmh_page_cmp(struct page_info *pfp1, struct page_info *pfp2)
 {
     const uint64_t *p1 = (uint64_t *)__map_domain_page(pfp1);
     const uint64_t *p2 = (uint64_t *)__map_domain_page(pfp2);
@@ -382,14 +381,14 @@ static inline int tmh_pcd_cmp(void *va1, pagesize_t len1, void *va2, pagesize_t
     return 1;
 }
 
-static inline int tmh_tze_pfp_cmp(pfp_t *pfp1, pagesize_t pfp_len, void *tva, pagesize_t tze_len)
+static inline int tmh_tze_pfp_cmp(struct page_info *pfp1, pagesize_t pfp_len, void *tva, pagesize_t tze_len)
 {
     const uint64_t *p1 = (uint64_t *)__map_domain_page(pfp1);
     const uint64_t *p2;
     pagesize_t i;
 
     if ( tze_len == PAGE_SIZE )
-       p2 = (uint64_t *)__map_domain_page((pfp_t *)tva);
+       p2 = (uint64_t *)__map_domain_page((struct page_info *)tva);
     else
        p2 = (uint64_t *)tva;
     ASSERT(pfp_len <= PAGE_SIZE);
@@ -411,7 +410,7 @@ static inline int tmh_tze_pfp_cmp(pfp_t *pfp1, pagesize_t pfp_len, void *tva, pa
 
 /* return the size of the data in the pfp, ignoring trailing zeroes and
  * rounded up to the nearest multiple of 8 */
-static inline pagesize_t tmh_tze_pfp_scan(pfp_t *pfp)
+static inline pagesize_t tmh_tze_pfp_scan(struct page_info *pfp)
 {
     const uint64_t *p = (uint64_t *)__map_domain_page(pfp);
     pagesize_t bytecount = PAGE_SIZE;
@@ -422,7 +421,7 @@ static inline pagesize_t tmh_tze_pfp_scan(pfp_t *pfp)
     return bytecount;
 }
 
-static inline void tmh_tze_copy_from_pfp(void *tva, pfp_t *pfp, pagesize_t len)
+static inline void tmh_tze_copy_from_pfp(void *tva, struct page_info *pfp, pagesize_t len)
 {
     uint64_t *p1 = (uint64_t *)tva;
     const uint64_t *p2 = (uint64_t *)__map_domain_page(pfp);
@@ -496,10 +495,10 @@ int tmh_decompress_to_client(tmem_cli_mfn_t, void *, size_t,
 int tmh_compress_from_client(tmem_cli_mfn_t, void **, size_t *,
 			     tmem_cli_va_param_t);
 
-int tmh_copy_from_client(pfp_t *, tmem_cli_mfn_t, pagesize_t tmem_offset,
+int tmh_copy_from_client(struct page_info *, tmem_cli_mfn_t, pagesize_t tmem_offset,
     pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t);
 
-int tmh_copy_to_client(tmem_cli_mfn_t, pfp_t *, pagesize_t tmem_offset,
+int tmh_copy_to_client(tmem_cli_mfn_t, struct page_info *, pagesize_t tmem_offset,
     pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t);
 
 extern int tmh_copy_tze_to_client(tmem_cli_mfn_t cmfn, void *tmem_va, pagesize_t len);
-- 
1.7.10.4

^ permalink raw reply related	[flat|nested] 15+ messages in thread

* [PATCH v2 03/11] tmem: cleanup: drop typedef tmem_cli_mfn_t
  2013-11-08  1:03 [PATCH v2 00/11] tmem: some basic cleanup Bob Liu
  2013-11-08  1:03 ` [PATCH v2 01/11] tmem: cleanup: drop COMPARE_COPY_PAGE_SSE2 Bob Liu
  2013-11-08  1:03 ` [PATCH v2 02/11] tmem: cleanup: drop typedef pfp_t Bob Liu
@ 2013-11-08  1:03 ` Bob Liu
  2013-11-08  1:03 ` [PATCH v2 04/11] tmem: cleanup: rename 'tmh_' with 'tmem_' Bob Liu
                   ` (7 subsequent siblings)
  10 siblings, 0 replies; 15+ messages in thread
From: Bob Liu @ 2013-11-08  1:03 UTC (permalink / raw)
  To: xen-devel; +Cc: Bob Liu, keir, ian.campbell, JBeulich

Use 'xen_pfn_t' directly instead of 'tmem_cli_mfn_t' to make the code more
readable.

Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
 xen/common/tmem.c          |   10 +++++-----
 xen/common/tmem_xen.c      |   14 +++++++-------
 xen/include/public/tmem.h  |    3 +--
 xen/include/xen/tmem_xen.h |   10 +++++-----
 4 files changed, 18 insertions(+), 19 deletions(-)

diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index 4a278f2..f3a0d91 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -370,7 +370,7 @@ static NOINLINE void tmem_page_free(pool_t *pool, struct page_info *pfp)
 
 #define NOT_SHAREABLE ((uint16_t)-1UL)
 
-static NOINLINE int pcd_copy_to_client(tmem_cli_mfn_t cmfn, pgp_t *pgp)
+static NOINLINE int pcd_copy_to_client(xen_pfn_t cmfn, pgp_t *pgp)
 {
     uint8_t firstbyte = pgp->firstbyte;
     pcd_t *pcd;
@@ -1436,7 +1436,7 @@ static inline void tmem_ensure_avail_pages(void)
 
 /************ TMEM CORE OPERATIONS ************************************/
 
-static NOINLINE int do_tmem_put_compress(pgp_t *pgp, tmem_cli_mfn_t cmfn,
+static NOINLINE int do_tmem_put_compress(pgp_t *pgp, xen_pfn_t cmfn,
                                          tmem_cli_va_param_t clibuf)
 {
     void *dst, *p;
@@ -1479,7 +1479,7 @@ out:
     return ret;
 }
 
-static NOINLINE int do_tmem_dup_put(pgp_t *pgp, tmem_cli_mfn_t cmfn,
+static NOINLINE int do_tmem_dup_put(pgp_t *pgp, xen_pfn_t cmfn,
        pagesize_t tmem_offset, pagesize_t pfn_offset, pagesize_t len,
        tmem_cli_va_param_t clibuf)
 {
@@ -1571,7 +1571,7 @@ cleanup:
 
 static NOINLINE int do_tmem_put(pool_t *pool,
               OID *oidp, uint32_t index,
-              tmem_cli_mfn_t cmfn, pagesize_t tmem_offset,
+              xen_pfn_t cmfn, pagesize_t tmem_offset,
               pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t clibuf)
 {
     obj_t *obj = NULL, *objfound = NULL, *objnew = NULL;
@@ -1714,7 +1714,7 @@ free:
 }
 
 static NOINLINE int do_tmem_get(pool_t *pool, OID *oidp, uint32_t index,
-              tmem_cli_mfn_t cmfn, pagesize_t tmem_offset,
+              xen_pfn_t cmfn, pagesize_t tmem_offset,
               pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t clibuf)
 {
     obj_t *obj;
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index cbcdb1a..e1e83d2 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -55,7 +55,7 @@ void tmh_copy_page(char *to, char*from)
 }
 
 #if defined(CONFIG_ARM)
-static inline void *cli_get_page(tmem_cli_mfn_t cmfn, unsigned long *pcli_mfn,
+static inline void *cli_get_page(xen_pfn_t cmfn, unsigned long *pcli_mfn,
                                  struct page_info **pcli_pfp, bool_t cli_write)
 {
     ASSERT(0);
@@ -70,7 +70,7 @@ static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp,
 #else
 #include <asm/p2m.h>
 
-static inline void *cli_get_page(tmem_cli_mfn_t cmfn, unsigned long *pcli_mfn,
+static inline void *cli_get_page(xen_pfn_t cmfn, unsigned long *pcli_mfn,
                                  struct page_info **pcli_pfp, bool_t cli_write)
 {
     p2m_type_t t;
@@ -110,7 +110,7 @@ static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp,
 #endif
 
 EXPORT int tmh_copy_from_client(struct page_info *pfp,
-    tmem_cli_mfn_t cmfn, pagesize_t tmem_offset,
+    xen_pfn_t cmfn, pagesize_t tmem_offset,
     pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t clibuf)
 {
     unsigned long tmem_mfn, cli_mfn = 0;
@@ -158,7 +158,7 @@ EXPORT int tmh_copy_from_client(struct page_info *pfp,
     return rc;
 }
 
-EXPORT int tmh_compress_from_client(tmem_cli_mfn_t cmfn,
+EXPORT int tmh_compress_from_client(xen_pfn_t cmfn,
     void **out_va, size_t *out_len, tmem_cli_va_param_t clibuf)
 {
     int ret = 0;
@@ -190,7 +190,7 @@ EXPORT int tmh_compress_from_client(tmem_cli_mfn_t cmfn,
     return 1;
 }
 
-EXPORT int tmh_copy_to_client(tmem_cli_mfn_t cmfn, struct page_info *pfp,
+EXPORT int tmh_copy_to_client(xen_pfn_t cmfn, struct page_info *pfp,
     pagesize_t tmem_offset, pagesize_t pfn_offset, pagesize_t len,
     tmem_cli_va_param_t clibuf)
 {
@@ -229,7 +229,7 @@ EXPORT int tmh_copy_to_client(tmem_cli_mfn_t cmfn, struct page_info *pfp,
     return rc;
 }
 
-EXPORT int tmh_decompress_to_client(tmem_cli_mfn_t cmfn, void *tmem_va,
+EXPORT int tmh_decompress_to_client(xen_pfn_t cmfn, void *tmem_va,
                                     size_t size, tmem_cli_va_param_t clibuf)
 {
     unsigned long cli_mfn = 0;
@@ -258,7 +258,7 @@ EXPORT int tmh_decompress_to_client(tmem_cli_mfn_t cmfn, void *tmem_va,
     return 1;
 }
 
-EXPORT int tmh_copy_tze_to_client(tmem_cli_mfn_t cmfn, void *tmem_va,
+EXPORT int tmh_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va,
                                     pagesize_t len)
 {
     void *cli_va;
diff --git a/xen/include/public/tmem.h b/xen/include/public/tmem.h
index bf53798..dd685ee 100644
--- a/xen/include/public/tmem.h
+++ b/xen/include/public/tmem.h
@@ -95,7 +95,6 @@
 
 
 #ifndef __ASSEMBLY__
-typedef xen_pfn_t tmem_cli_mfn_t;
 typedef XEN_GUEST_HANDLE(char) tmem_cli_va_t;
 struct tmem_op {
     uint32_t cmd;
@@ -121,7 +120,7 @@ struct tmem_op {
             uint32_t tmem_offset;
             uint32_t pfn_offset;
             uint32_t len;
-            tmem_cli_mfn_t cmfn; /* client machine page frame */
+            xen_pfn_t cmfn; /* client machine page frame */
         } gen; /* for all other cmd ("generic") */
     } u;
 };
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index f05d0f5..b24246c 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -489,19 +489,19 @@ static inline void tmh_copy_to_client_buf_offset(tmem_cli_va_param_t clibuf,
 #define tmh_cli_id_str "domid"
 #define tmh_client_str "domain"
 
-int tmh_decompress_to_client(tmem_cli_mfn_t, void *, size_t,
+int tmh_decompress_to_client(xen_pfn_t, void *, size_t,
 			     tmem_cli_va_param_t);
 
-int tmh_compress_from_client(tmem_cli_mfn_t, void **, size_t *,
+int tmh_compress_from_client(xen_pfn_t, void **, size_t *,
 			     tmem_cli_va_param_t);
 
-int tmh_copy_from_client(struct page_info *, tmem_cli_mfn_t, pagesize_t tmem_offset,
+int tmh_copy_from_client(struct page_info *, xen_pfn_t, pagesize_t tmem_offset,
     pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t);
 
-int tmh_copy_to_client(tmem_cli_mfn_t, struct page_info *, pagesize_t tmem_offset,
+int tmh_copy_to_client(xen_pfn_t, struct page_info *, pagesize_t tmem_offset,
     pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t);
 
-extern int tmh_copy_tze_to_client(tmem_cli_mfn_t cmfn, void *tmem_va, pagesize_t len);
+extern int tmh_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va, pagesize_t len);
 
 #define tmh_client_err(fmt, args...)  printk(XENLOG_G_ERR fmt, ##args)
 #define tmh_client_warn(fmt, args...) printk(XENLOG_G_WARNING fmt, ##args)
-- 
1.7.10.4

^ permalink raw reply related	[flat|nested] 15+ messages in thread

* [PATCH v2 04/11] tmem: cleanup: rename 'tmh_' with 'tmem_'
  2013-11-08  1:03 [PATCH v2 00/11] tmem: some basic cleanup Bob Liu
                   ` (2 preceding siblings ...)
  2013-11-08  1:03 ` [PATCH v2 03/11] tmem: cleanup: drop typedef tmem_cli_mfn_t Bob Liu
@ 2013-11-08  1:03 ` Bob Liu
  2013-11-08  1:03 ` [PATCH v2 05/11] tmem: cleanup: drop most of the typedefs Bob Liu
                   ` (6 subsequent siblings)
  10 siblings, 0 replies; 15+ messages in thread
From: Bob Liu @ 2013-11-08  1:03 UTC (permalink / raw)
  To: xen-devel; +Cc: Bob Liu, keir, ian.campbell, JBeulich

tmem was designed so it could easily be ported to platforms besides Xen, but I
don't think anybody will port tmem to another platform. This flexibility made
tmem hard to understand: there are too many 'tmh_' and 'tmem_' functions and
variables.

This patch replaces all 'tmh_' functions/variables with 'tmem_' to make the
code more readable.

Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
 xen/common/tmem.c          |  416 ++++++++++++++++++++++----------------------
 xen/common/tmem_xen.c      |  102 +++++------
 xen/include/xen/tmem_xen.h |  210 +++++++++++-----------
 3 files changed, 356 insertions(+), 372 deletions(-)

diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index f3a0d91..7d22e0c 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -29,12 +29,6 @@
 
 #define TMEM_SPEC_VERSION 1
 
-/************  INTERFACE TO TMEM HOST-DEPENDENT (tmh) CODE ************/
-
-#define CLI_ID_NULL TMH_CLI_ID_NULL
-#define cli_id_str  tmh_cli_id_str
-#define client_str  tmh_client_str
-
 /************ DEBUG and STATISTICS (+ some compression testing) *******/
 
 #ifndef NDEBUG
@@ -110,7 +104,7 @@ struct tmem_page_content_descriptor;
 struct client {
     struct list_head client_list;
     struct tm_pool *pools[MAX_POOLS_PER_DOMAIN];
-    tmh_client_t *tmh;
+    tmem_client_t *tmem;
     struct list_head ephemeral_page_list;
     long eph_count, eph_count_max;
     cli_id_t cli_id;
@@ -275,22 +269,22 @@ static int tmem_initialized = 0;
 
 /************ CONCURRENCY  ***********************************************/
 
-EXPORT DEFINE_SPINLOCK(tmem_spinlock);  /* used iff tmh_lock_all */
-EXPORT DEFINE_RWLOCK(tmem_rwlock);      /* used iff !tmh_lock_all */
+EXPORT DEFINE_SPINLOCK(tmem_spinlock);  /* used iff tmem_lock_all */
+EXPORT DEFINE_RWLOCK(tmem_rwlock);      /* used iff !tmem_lock_all */
 static DEFINE_SPINLOCK(eph_lists_spinlock); /* protects global AND clients */
 static DEFINE_SPINLOCK(pers_lists_spinlock);
 
-#define tmem_spin_lock(_l)  do {if (!tmh_lock_all) spin_lock(_l);}while(0)
-#define tmem_spin_unlock(_l)  do {if (!tmh_lock_all) spin_unlock(_l);}while(0)
-#define tmem_read_lock(_l)  do {if (!tmh_lock_all) read_lock(_l);}while(0)
-#define tmem_read_unlock(_l)  do {if (!tmh_lock_all) read_unlock(_l);}while(0)
-#define tmem_write_lock(_l)  do {if (!tmh_lock_all) write_lock(_l);}while(0)
-#define tmem_write_unlock(_l)  do {if (!tmh_lock_all) write_unlock(_l);}while(0)
-#define tmem_write_trylock(_l)  ((tmh_lock_all)?1:write_trylock(_l))
-#define tmem_spin_trylock(_l)  (tmh_lock_all?1:spin_trylock(_l))
+#define tmem_spin_lock(_l)  do {if (!tmem_lock_all) spin_lock(_l);}while(0)
+#define tmem_spin_unlock(_l)  do {if (!tmem_lock_all) spin_unlock(_l);}while(0)
+#define tmem_read_lock(_l)  do {if (!tmem_lock_all) read_lock(_l);}while(0)
+#define tmem_read_unlock(_l)  do {if (!tmem_lock_all) read_unlock(_l);}while(0)
+#define tmem_write_lock(_l)  do {if (!tmem_lock_all) write_lock(_l);}while(0)
+#define tmem_write_unlock(_l)  do {if (!tmem_lock_all) write_unlock(_l);}while(0)
+#define tmem_write_trylock(_l)  ((tmem_lock_all)?1:write_trylock(_l))
+#define tmem_spin_trylock(_l)  (tmem_lock_all?1:spin_trylock(_l))
 
-#define ASSERT_SPINLOCK(_l) ASSERT(tmh_lock_all || spin_is_locked(_l))
-#define ASSERT_WRITELOCK(_l) ASSERT(tmh_lock_all || rw_is_write_locked(_l))
+#define ASSERT_SPINLOCK(_l) ASSERT(tmem_lock_all || spin_is_locked(_l))
+#define ASSERT_WRITELOCK(_l) ASSERT(tmem_lock_all || rw_is_write_locked(_l))
 
 /* global counters (should use long_atomic_t access) */
 static long global_eph_count = 0; /* atomicity depends on eph_lists_spinlock */
@@ -325,9 +319,9 @@ static NOINLINE void *_tmem_malloc(size_t size, size_t align, pool_t *pool)
     void *v;
 
     if ( (pool != NULL) && is_persistent(pool) )
-        v = tmh_alloc_subpage_thispool(pool,size,align);
+        v = tmem_alloc_subpage_thispool(pool,size,align);
     else
-        v = tmh_alloc_subpage(pool, size, align);
+        v = tmem_alloc_subpage(pool, size, align);
     if ( v == NULL )
         alloc_failed++;
     return v;
@@ -336,9 +330,9 @@ static NOINLINE void *_tmem_malloc(size_t size, size_t align, pool_t *pool)
 static NOINLINE void tmem_free(void *p, size_t size, pool_t *pool)
 {
     if ( pool == NULL || !is_persistent(pool) )
-        tmh_free_subpage(p,size);
+        tmem_free_subpage(p,size);
     else
-        tmh_free_subpage_thispool(pool,p,size);
+        tmem_free_subpage_thispool(pool,p,size);
 }
 
 static NOINLINE struct page_info *tmem_page_alloc(pool_t *pool)
@@ -346,9 +340,9 @@ static NOINLINE struct page_info *tmem_page_alloc(pool_t *pool)
     struct page_info *pfp = NULL;
 
     if ( pool != NULL && is_persistent(pool) )
-        pfp = tmh_alloc_page_thispool(pool);
+        pfp = tmem_alloc_page_thispool(pool);
     else
-        pfp = tmh_alloc_page(pool,0);
+        pfp = tmem_alloc_page(pool,0);
     if ( pfp == NULL )
         alloc_page_failed++;
     else
@@ -360,9 +354,9 @@ static NOINLINE void tmem_page_free(pool_t *pool, struct page_info *pfp)
 {
     ASSERT(pfp);
     if ( pool == NULL || !is_persistent(pool) )
-        tmh_free_page(pfp);
+        tmem_free_page(pfp);
     else
-        tmh_free_page_thispool(pool,pfp);
+        tmem_free_page_thispool(pool,pfp);
     atomic_dec_and_assert(global_page_count);
 }
 
@@ -376,18 +370,18 @@ static NOINLINE int pcd_copy_to_client(xen_pfn_t cmfn, pgp_t *pgp)
     pcd_t *pcd;
     int ret;
 
-    ASSERT(tmh_dedup_enabled());
+    ASSERT(tmem_dedup_enabled());
     tmem_read_lock(&pcd_tree_rwlocks[firstbyte]);
     pcd = pgp->pcd;
     if ( pgp->size < PAGE_SIZE && pgp->size != 0 &&
          pcd->size < PAGE_SIZE && pcd->size != 0 )
-        ret = tmh_decompress_to_client(cmfn, pcd->cdata, pcd->size,
-                                       tmh_cli_buf_null);
-    else if ( tmh_tze_enabled() && pcd->size < PAGE_SIZE )
-        ret = tmh_copy_tze_to_client(cmfn, pcd->tze, pcd->size);
+        ret = tmem_decompress_to_client(cmfn, pcd->cdata, pcd->size,
+                                       tmem_cli_buf_null);
+    else if ( tmem_tze_enabled() && pcd->size < PAGE_SIZE )
+        ret = tmem_copy_tze_to_client(cmfn, pcd->tze, pcd->size);
     else
-        ret = tmh_copy_to_client(cmfn, pcd->pfp, 0, 0, PAGE_SIZE,
-                                 tmh_cli_buf_null);
+        ret = tmem_copy_to_client(cmfn, pcd->pfp, 0, 0, PAGE_SIZE,
+                                 tmem_cli_buf_null);
     tmem_read_unlock(&pcd_tree_rwlocks[firstbyte]);
     return ret;
 }
@@ -405,7 +399,7 @@ static NOINLINE void pcd_disassociate(pgp_t *pgp, pool_t *pool, bool_t have_pcd_
     char *pcd_cdata = pgp->pcd->cdata;
     pagesize_t pcd_csize = pgp->pcd->size;
 
-    ASSERT(tmh_dedup_enabled());
+    ASSERT(tmem_dedup_enabled());
     ASSERT(firstbyte != NOT_SHAREABLE);
     ASSERT(firstbyte < 256);
 
@@ -447,9 +441,9 @@ static NOINLINE void pcd_disassociate(pgp_t *pgp, pool_t *pool, bool_t have_pcd_
             tmem_free(pcd_tze,pcd_size,pool);
     } else {
         /* real physical page */
-        if ( tmh_tze_enabled() )
+        if ( tmem_tze_enabled() )
             pcd_tot_tze_size -= PAGE_SIZE;
-        if ( tmh_compression_enabled() )
+        if ( tmem_compression_enabled() )
             pcd_tot_csize -= PAGE_SIZE;
         tmem_page_free(pool,pfp);
     }
@@ -464,10 +458,10 @@ static NOINLINE int pcd_associate(pgp_t *pgp, char *cdata, pagesize_t csize)
     pcd_t *pcd;
     int cmp;
     pagesize_t pfp_size = 0;
-    uint8_t firstbyte = (cdata == NULL) ? tmh_get_first_byte(pgp->pfp) : *cdata;
+    uint8_t firstbyte = (cdata == NULL) ? tmem_get_first_byte(pgp->pfp) : *cdata;
     int ret = 0;
 
-    if ( !tmh_dedup_enabled() )
+    if ( !tmem_dedup_enabled() )
         return 0;
     ASSERT(pgp->us.obj != NULL);
     ASSERT(pgp->us.obj->pool != NULL);
@@ -476,9 +470,9 @@ static NOINLINE int pcd_associate(pgp_t *pgp, char *cdata, pagesize_t csize)
     {
         ASSERT(pgp->pfp != NULL);
         pfp_size = PAGE_SIZE;
-        if ( tmh_tze_enabled() )
+        if ( tmem_tze_enabled() )
         {
-            pfp_size = tmh_tze_pfp_scan(pgp->pfp);
+            pfp_size = tmem_tze_pfp_scan(pgp->pfp);
             if ( pfp_size > PCD_TZE_MAX_SIZE )
                 pfp_size = PAGE_SIZE;
         }
@@ -499,25 +493,25 @@ static NOINLINE int pcd_associate(pgp_t *pgp, char *cdata, pagesize_t csize)
         {
             if ( pcd->size < PAGE_SIZE )
                 /* both new entry and rbtree entry are compressed */
-                cmp = tmh_pcd_cmp(cdata,csize,pcd->cdata,pcd->size);
+                cmp = tmem_pcd_cmp(cdata,csize,pcd->cdata,pcd->size);
             else
                 /* new entry is compressed, rbtree entry is not */
                 cmp = -1;
         } else if ( pcd->size < PAGE_SIZE )
             /* rbtree entry is compressed, rbtree entry is not */
             cmp = 1;
-        else if ( tmh_tze_enabled() ) {
+        else if ( tmem_tze_enabled() ) {
             if ( pcd->size < PAGE_SIZE )
                 /* both new entry and rbtree entry are trailing zero */
-                cmp = tmh_tze_pfp_cmp(pgp->pfp,pfp_size,pcd->tze,pcd->size);
+                cmp = tmem_tze_pfp_cmp(pgp->pfp,pfp_size,pcd->tze,pcd->size);
             else
                 /* new entry is trailing zero, rbtree entry is not */
-                cmp = tmh_tze_pfp_cmp(pgp->pfp,pfp_size,pcd->pfp,PAGE_SIZE);
+                cmp = tmem_tze_pfp_cmp(pgp->pfp,pfp_size,pcd->pfp,PAGE_SIZE);
         } else  {
             /* both new entry and rbtree entry are full physical pages */
             ASSERT(pgp->pfp != NULL);
             ASSERT(pcd->pfp != NULL);
-            cmp = tmh_page_cmp(pgp->pfp,pcd->pfp);
+            cmp = tmem_page_cmp(pgp->pfp,pcd->pfp);
         }
 
         /* walk tree or match depending on cmp */
@@ -559,21 +553,21 @@ static NOINLINE int pcd_associate(pgp_t *pgp, char *cdata, pagesize_t csize)
         pcd->size = csize;
         pcd_tot_csize += csize;
     } else if ( pfp_size == 0 ) {
-        ASSERT(tmh_tze_enabled());
+        ASSERT(tmem_tze_enabled());
         pcd->size = 0;
         pcd->tze = NULL;
     } else if ( pfp_size < PAGE_SIZE &&
          ((pcd->tze = tmem_malloc_bytes(pfp_size,pgp->us.obj->pool)) != NULL) ) {
-        tmh_tze_copy_from_pfp(pcd->tze,pgp->pfp,pfp_size);
+        tmem_tze_copy_from_pfp(pcd->tze,pgp->pfp,pfp_size);
         pcd->size = pfp_size;
         pcd_tot_tze_size += pfp_size;
         tmem_page_free(pgp->us.obj->pool,pgp->pfp);
     } else {
         pcd->pfp = pgp->pfp;
         pcd->size = PAGE_SIZE;
-        if ( tmh_tze_enabled() )
+        if ( tmem_tze_enabled() )
             pcd_tot_tze_size += PAGE_SIZE;
-        if ( tmh_compression_enabled() )
+        if ( tmem_compression_enabled() )
             pcd_tot_csize += PAGE_SIZE;
     }
     rb_link_node(&pcd->pcd_rb_tree_node, parent, new);
@@ -608,7 +602,7 @@ static NOINLINE pgp_t *pgp_alloc(obj_t *obj)
     INIT_LIST_HEAD(&pgp->global_eph_pages);
     INIT_LIST_HEAD(&pgp->us.client_eph_pages);
     pgp->pfp = NULL;
-    if ( tmh_dedup_enabled() )
+    if ( tmem_dedup_enabled() )
     {
         pgp->firstbyte = NOT_SHAREABLE;
         pgp->eviction_attempted = 0;
@@ -639,7 +633,7 @@ static NOINLINE void pgp_free_data(pgp_t *pgp, pool_t *pool)
 
     if ( pgp->pfp == NULL )
         return;
-    if ( tmh_dedup_enabled() && pgp->firstbyte != NOT_SHAREABLE )
+    if ( tmem_dedup_enabled() && pgp->firstbyte != NOT_SHAREABLE )
         pcd_disassociate(pgp,pool,0); /* pgp->size lost */
     else if ( pgp_size )
         tmem_free(pgp->cdata,pgp_size,pool);
@@ -876,7 +870,7 @@ void oid_set_invalid(OID *oidp)
 
 unsigned oid_hash(OID *oidp)
 {
-    return (tmh_hash(oidp->oid[0] ^ oidp->oid[1] ^ oidp->oid[2],
+    return (tmem_hash(oidp->oid[0] ^ oidp->oid[1] ^ oidp->oid[2],
                      BITS_PER_LONG) & OBJ_HASH_BUCKETS_MASK);
 }
 
@@ -895,7 +889,7 @@ restart_find:
         switch ( oid_compare(&obj->oid, oidp) )
         {
             case 0: /* equal */
-                if ( tmh_lock_all )
+                if ( tmem_lock_all )
                     obj->no_evict = 1;
                 else
                 {
@@ -942,7 +936,7 @@ static NOINLINE void obj_free(obj_t *obj, int no_rebalance)
     obj->pool = NULL;
     old_oid = obj->oid;
     oid_set_invalid(&obj->oid);
-    obj->last_client = CLI_ID_NULL;
+    obj->last_client = TMEM_CLI_ID_NULL;
     atomic_dec_and_assert(global_obj_count);
     /* use no_rebalance only if all objects are being destroyed anyway */
     if ( !no_rebalance )
@@ -1001,7 +995,7 @@ static NOINLINE obj_t * obj_new(pool_t *pool, OID *oidp)
     obj->oid = *oidp;
     obj->objnode_count = 0;
     obj->pgp_count = 0;
-    obj->last_client = CLI_ID_NULL;
+    obj->last_client = TMEM_CLI_ID_NULL;
     SET_SENTINEL(obj,OBJ);
     tmem_spin_lock(&obj->obj_spinlock);
     obj_rb_insert(&pool->obj_rb_root[oid_hash(oidp)], obj);
@@ -1056,7 +1050,7 @@ static pool_t * pool_alloc(void)
     pool_t *pool;
     int i;
 
-    if ( (pool = tmh_alloc_infra(sizeof(pool_t),__alignof__(pool_t))) == NULL )
+    if ( (pool = tmem_alloc_infra(sizeof(pool_t),__alignof__(pool_t))) == NULL )
         return NULL;
     for (i = 0; i < OBJ_HASH_BUCKETS; i++)
         pool->obj_rb_root[i] = RB_ROOT;
@@ -1085,7 +1079,7 @@ static NOINLINE void pool_free(pool_t *pool)
     INVERT_SENTINEL(pool,POOL);
     pool->client = NULL;
     list_del(&pool->pool_list);
-    tmh_free_infra(pool);
+    tmem_free_infra(pool);
 }
 
 /* register new_client as a user of this shared pool and return new
@@ -1100,8 +1094,8 @@ static int shared_pool_join(pool_t *pool, client_t *new_client)
     sl->client = new_client;
     list_add_tail(&sl->share_list, &pool->share_list);
     if ( new_client->cli_id != pool->client->cli_id )
-        tmh_client_info("adding new %s %d to shared pool owned by %s %d\n",
-            client_str, new_client->cli_id, client_str, pool->client->cli_id);
+        tmem_client_info("adding new %s %d to shared pool owned by %s %d\n",
+            tmem_client_str, new_client->cli_id, tmem_client_str, pool->client->cli_id);
     return ++pool->shared_count;
 }
 
@@ -1130,8 +1124,8 @@ static NOINLINE void shared_pool_reassign(pool_t *pool)
     old_client->eph_count -= _atomic_read(pool->pgp_count);
     list_splice_init(&old_client->ephemeral_page_list,
                      &new_client->ephemeral_page_list);
-    tmh_client_info("reassigned shared pool from %s=%d to %s=%d pool_id=%d\n",
-        cli_id_str, old_client->cli_id, cli_id_str, new_client->cli_id, poolid);
+    tmem_client_info("reassigned shared pool from %s=%d to %s=%d pool_id=%d\n",
+        tmem_cli_id_str, old_client->cli_id, tmem_cli_id_str, new_client->cli_id, poolid);
     pool->pool_id = poolid;
 }
 
@@ -1166,8 +1160,8 @@ static NOINLINE int shared_pool_quit(pool_t *pool, cli_id_t cli_id)
             }
         return 0;
     }
-    tmh_client_warn("tmem: no match unsharing pool, %s=%d\n",
-        cli_id_str,pool->client->cli_id);
+    tmem_client_warn("tmem: no match unsharing pool, %s=%d\n",
+        tmem_cli_id_str,pool->client->cli_id);
     return -1;
 }
 
@@ -1177,22 +1171,22 @@ static void pool_flush(pool_t *pool, cli_id_t cli_id, bool_t destroy)
     ASSERT(pool != NULL);
     if ( (is_shared(pool)) && (shared_pool_quit(pool,cli_id) > 0) )
     {
-        tmh_client_warn("tmem: %s=%d no longer using shared pool %d owned by %s=%d\n",
-           cli_id_str, cli_id, pool->pool_id, cli_id_str,pool->client->cli_id);
+        tmem_client_warn("tmem: %s=%d no longer using shared pool %d owned by %s=%d\n",
+           tmem_cli_id_str, cli_id, pool->pool_id, tmem_cli_id_str,pool->client->cli_id);
         return;
     }
-    tmh_client_info("%s %s-%s tmem pool %s=%d pool_id=%d\n",
+    tmem_client_info("%s %s-%s tmem pool %s=%d pool_id=%d\n",
                     destroy ? "destroying" : "flushing",
                     is_persistent(pool) ? "persistent" : "ephemeral" ,
                     is_shared(pool) ? "shared" : "private",
-                    cli_id_str, pool->client->cli_id, pool->pool_id);
+                    tmem_cli_id_str, pool->client->cli_id, pool->pool_id);
     if ( pool->client->live_migrating )
     {
-        tmh_client_warn("can't %s pool while %s is live-migrating\n",
-               destroy?"destroy":"flush", client_str);
+        tmem_client_warn("can't %s pool while %s is live-migrating\n",
+               destroy?"destroy":"flush", tmem_client_str);
         return;
     }
-    pool_destroy_objs(pool,0,CLI_ID_NULL);
+    pool_destroy_objs(pool,0,TMEM_CLI_ID_NULL);
     if ( destroy )
     {
         pool->client->pools[pool->pool_id] = NULL;
@@ -1204,30 +1198,30 @@ static void pool_flush(pool_t *pool, cli_id_t cli_id, bool_t destroy)
 
 static client_t *client_create(cli_id_t cli_id)
 {
-    client_t *client = tmh_alloc_infra(sizeof(client_t),__alignof__(client_t));
+    client_t *client = tmem_alloc_infra(sizeof(client_t),__alignof__(client_t));
     int i;
 
-    tmh_client_info("tmem: initializing tmem capability for %s=%d...",
-                    cli_id_str, cli_id);
+    tmem_client_info("tmem: initializing tmem capability for %s=%d...",
+                    tmem_cli_id_str, cli_id);
     if ( client == NULL )
     {
-        tmh_client_err("failed... out of memory\n");
+        tmem_client_err("failed... out of memory\n");
         goto fail;
     }
     memset(client,0,sizeof(client_t));
-    if ( (client->tmh = tmh_client_init(cli_id)) == NULL )
+    if ( (client->tmem = tmem_client_init(cli_id)) == NULL )
     {
-        tmh_client_err("failed... can't allocate host-dependent part of client\n");
+        tmem_client_err("failed... can't allocate host-dependent part of client\n");
         goto fail;
     }
-    if ( !tmh_set_client_from_id(client, client->tmh, cli_id) )
+    if ( !tmem_set_client_from_id(client, client->tmem, cli_id) )
     {
-        tmh_client_err("failed... can't set client\n");
+        tmem_client_err("failed... can't set client\n");
         goto fail;
     }
     client->cli_id = cli_id;
-    client->compress = tmh_compression_enabled();
-    client->shared_auth_required = tmh_shared_auth();
+    client->compress = tmem_compression_enabled();
+    client->shared_auth_required = tmem_shared_auth();
     for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++)
         client->shared_auth_uuid[i][0] =
             client->shared_auth_uuid[i][1] = -1L;
@@ -1240,19 +1234,19 @@ static client_t *client_create(cli_id_t cli_id)
     client->eph_count = client->eph_count_max = 0;
     client->total_cycles = 0; client->succ_pers_puts = 0;
     client->succ_eph_gets = 0; client->succ_pers_gets = 0;
-    tmh_client_info("ok\n");
+    tmem_client_info("ok\n");
     return client;
 
  fail:
-    tmh_free_infra(client);
+    tmem_free_infra(client);
     return NULL;
 }
 
 static void client_free(client_t *client)
 {
     list_del(&client->client_list);
-    tmh_client_destroy(client->tmh);
-    tmh_free_infra(client);
+    tmem_client_destroy(client->tmem);
+    tmem_free_infra(client);
 }
 
 /* flush all data from a client and, optionally, free it */
@@ -1301,11 +1295,11 @@ static bool_t tmem_try_to_evict_pgp(pgp_t *pgp, bool_t *hold_pool_rwlock)
 
     if ( pool->is_dying )
         return 0;
-    if ( tmh_lock_all && !obj->no_evict )
+    if ( tmem_lock_all && !obj->no_evict )
        return 1;
     if ( tmem_spin_trylock(&obj->obj_spinlock) )
     {
-        if ( tmh_dedup_enabled() )
+        if ( tmem_dedup_enabled() )
         {
             firstbyte = pgp->firstbyte;
             if ( firstbyte ==  NOT_SHAREABLE )
@@ -1340,7 +1334,7 @@ obj_unlock:
 
 static int tmem_evict(void)
 {
-    client_t *client = tmh_client_from_current();
+    client_t *client = tmem_client_from_current();
     pgp_t *pgp = NULL, *pgp2, *pgp_del;
     obj_t *obj;
     pool_t *pool;
@@ -1379,7 +1373,7 @@ found:
     ASSERT_SPINLOCK(&obj->obj_spinlock);
     pgp_del = pgp_delete_from_obj(obj, pgp->index);
     ASSERT(pgp_del == pgp);
-    if ( tmh_dedup_enabled() && pgp->firstbyte != NOT_SHAREABLE )
+    if ( tmem_dedup_enabled() && pgp->firstbyte != NOT_SHAREABLE )
     {
         ASSERT(pgp->pcd->pgp_ref_count == 1 || pgp->eviction_attempted);
         pcd_disassociate(pgp,pool,1);
@@ -1406,13 +1400,13 @@ static unsigned long tmem_relinquish_npages(unsigned long n)
 {
     unsigned long avail_pages = 0;
 
-    while ( (avail_pages = tmh_avail_pages()) < n )
+    while ( (avail_pages = tmem_page_list_pages) < n )
     {
         if (  !tmem_evict() )
             break;
     }
     if ( avail_pages )
-        tmh_release_avail_pages_to_host();
+        tmem_release_avail_pages_to_host();
     return avail_pages;
 }
 
@@ -1425,7 +1419,7 @@ static inline void tmem_ensure_avail_pages(void)
 {
     int failed_evict = 10;
 
-    while ( !tmh_free_mb() )
+    while ( !tmem_free_mb() )
     {
         if ( tmem_evict() )
             continue;
@@ -1453,13 +1447,13 @@ static NOINLINE int do_tmem_put_compress(pgp_t *pgp, xen_pfn_t cmfn,
     if ( pgp->pfp != NULL )
         pgp_free_data(pgp, pgp->us.obj->pool);
     START_CYC_COUNTER(compress);
-    ret = tmh_compress_from_client(cmfn, &dst, &size, clibuf);
+    ret = tmem_compress_from_client(cmfn, &dst, &size, clibuf);
     if ( ret <= 0 )
         goto out;
     else if ( (size == 0) || (size >= tmem_subpage_maxsize()) ) {
         ret = 0;
         goto out;
-    } else if ( tmh_dedup_enabled() && !is_persistent(pgp->us.obj->pool) ) {
+    } else if ( tmem_dedup_enabled() && !is_persistent(pgp->us.obj->pool) ) {
         if ( (ret = pcd_associate(pgp,dst,size)) == -ENOMEM )
             goto out;
     } else if ( (p = tmem_malloc_bytes(size,pgp->us.obj->pool)) == NULL ) {
@@ -1520,12 +1514,12 @@ copy_uncompressed:
     if ( ( pgp->pfp = tmem_page_alloc(pool) ) == NULL )
         goto failed_dup;
     pgp->size = 0;
-    /* tmh_copy_from_client properly handles len==0 and offsets != 0 */
-    ret = tmh_copy_from_client(pgp->pfp, cmfn, tmem_offset, pfn_offset, len,
-                               tmh_cli_buf_null);
+    /* tmem_copy_from_client properly handles len==0 and offsets != 0 */
+    ret = tmem_copy_from_client(pgp->pfp, cmfn, tmem_offset, pfn_offset, len,
+                               tmem_cli_buf_null);
     if ( ret < 0 )
         goto bad_copy;
-    if ( tmh_dedup_enabled() && !is_persistent(pool) )
+    if ( tmem_dedup_enabled() && !is_persistent(pool) )
     {
         if ( pcd_associate(pgp,NULL,0) == -ENOMEM )
             goto failed_dup;
@@ -1645,12 +1639,12 @@ copy_uncompressed:
         ret = -ENOMEM;
         goto delete_and_free;
     }
-    /* tmh_copy_from_client properly handles len==0 (TMEM_NEW_PAGE) */
-    ret = tmh_copy_from_client(pgp->pfp, cmfn, tmem_offset, pfn_offset, len,
+    /* tmem_copy_from_client properly handles len==0 (TMEM_NEW_PAGE) */
+    ret = tmem_copy_from_client(pgp->pfp, cmfn, tmem_offset, pfn_offset, len,
                                clibuf);
     if ( ret < 0 )
         goto bad_copy;
-    if ( tmh_dedup_enabled() && !is_persistent(pool) )
+    if ( tmem_dedup_enabled() && !is_persistent(pool) )
     {
         if ( pcd_associate(pgp,NULL,0) == -ENOMEM )
             goto delete_and_free;
@@ -1743,18 +1737,18 @@ static NOINLINE int do_tmem_get(pool_t *pool, OID *oidp, uint32_t index,
         return 0;
     }
     ASSERT(pgp->size != -1);
-    if ( tmh_dedup_enabled() && !is_persistent(pool) &&
+    if ( tmem_dedup_enabled() && !is_persistent(pool) &&
               pgp->firstbyte != NOT_SHAREABLE )
         rc = pcd_copy_to_client(cmfn, pgp);
     else if ( pgp->size != 0 )
     {
         START_CYC_COUNTER(decompress);
-        rc = tmh_decompress_to_client(cmfn, pgp->cdata,
+        rc = tmem_decompress_to_client(cmfn, pgp->cdata,
                                       pgp->size, clibuf);
         END_CYC_COUNTER(decompress);
     }
     else
-        rc = tmh_copy_to_client(cmfn, pgp->pfp, tmem_offset,
+        rc = tmem_copy_to_client(cmfn, pgp->pfp, tmem_offset,
                                 pfn_offset, len, clibuf);
     if ( rc <= 0 )
         goto bad_copy;
@@ -1778,7 +1772,7 @@ static NOINLINE int do_tmem_get(pool_t *pool, OID *oidp, uint32_t index,
             list_del(&pgp->us.client_eph_pages);
             list_add_tail(&pgp->us.client_eph_pages,&client->ephemeral_page_list);
             tmem_spin_unlock(&eph_lists_spinlock);
-            obj->last_client = tmh_get_cli_id_from_current();
+            obj->last_client = tmem_get_cli_id_from_current();
         }
     }
     if ( obj != NULL )
@@ -1857,7 +1851,7 @@ out:
 
 static NOINLINE int do_tmem_destroy_pool(uint32_t pool_id)
 {
-    client_t *client = tmh_client_from_current();
+    client_t *client = tmem_client_from_current();
     pool_t *pool;
 
     if ( client->pools == NULL )
@@ -1887,57 +1881,57 @@ static NOINLINE int do_tmem_new_pool(cli_id_t this_cli_id,
     int s_poolid, first_unused_s_poolid;
     int i;
 
-    if ( this_cli_id == CLI_ID_NULL )
-        cli_id = tmh_get_cli_id_from_current();
+    if ( this_cli_id == TMEM_CLI_ID_NULL )
+        cli_id = tmem_get_cli_id_from_current();
     else
         cli_id = this_cli_id;
-    tmh_client_info("tmem: allocating %s-%s tmem pool for %s=%d...",
+    tmem_client_info("tmem: allocating %s-%s tmem pool for %s=%d...",
         persistent ? "persistent" : "ephemeral" ,
-        shared ? "shared" : "private", cli_id_str, cli_id);
+        shared ? "shared" : "private", tmem_cli_id_str, cli_id);
     if ( specversion != TMEM_SPEC_VERSION )
     {
-        tmh_client_err("failed... unsupported spec version\n");
+        tmem_client_err("failed... unsupported spec version\n");
         return -EPERM;
     }
     if ( pagebits != (PAGE_SHIFT - 12) )
     {
-        tmh_client_err("failed... unsupported pagesize %d\n",
+        tmem_client_err("failed... unsupported pagesize %d\n",
                        1 << (pagebits + 12));
         return -EPERM;
     }
     if ( flags & TMEM_POOL_PRECOMPRESSED )
     {
-        tmh_client_err("failed... precompression flag set but unsupported\n");
+        tmem_client_err("failed... precompression flag set but unsupported\n");
         return -EPERM;
     }
     if ( flags & TMEM_POOL_RESERVED_BITS )
     {
-        tmh_client_err("failed... reserved bits must be zero\n");
+        tmem_client_err("failed... reserved bits must be zero\n");
         return -EPERM;
     }
     if ( (pool = pool_alloc()) == NULL )
     {
-        tmh_client_err("failed... out of memory\n");
+        tmem_client_err("failed... out of memory\n");
         return -ENOMEM;
     }
-    if ( this_cli_id != CLI_ID_NULL )
+    if ( this_cli_id != TMEM_CLI_ID_NULL )
     {
-        if ( (client = tmh_client_from_cli_id(this_cli_id)) == NULL
+        if ( (client = tmem_client_from_cli_id(this_cli_id)) == NULL
              || d_poolid >= MAX_POOLS_PER_DOMAIN
              || client->pools[d_poolid] != NULL )
             goto fail;
     }
     else
     {
-        client = tmh_client_from_current();
+        client = tmem_client_from_current();
         ASSERT(client != NULL);
         for ( d_poolid = 0; d_poolid < MAX_POOLS_PER_DOMAIN; d_poolid++ )
             if ( client->pools[d_poolid] == NULL )
                 break;
         if ( d_poolid >= MAX_POOLS_PER_DOMAIN )
         {
-            tmh_client_err("failed... no more pool slots available for this %s\n",
-                   client_str);
+            tmem_client_err("failed... no more pool slots available for this %s\n",
+                   tmem_client_str);
             goto fail;
         }
     }
@@ -1966,7 +1960,7 @@ static NOINLINE int do_tmem_new_pool(cli_id_t this_cli_id,
             {
                 if ( shpool->uuid[0] == uuid_lo && shpool->uuid[1] == uuid_hi )
                 {
-                    tmh_client_info("(matches shared pool uuid=%"PRIx64".%"PRIx64") pool_id=%d\n",
+                    tmem_client_info("(matches shared pool uuid=%"PRIx64".%"PRIx64") pool_id=%d\n",
                         uuid_hi, uuid_lo, d_poolid);
                     client->pools[d_poolid] = global_shared_pools[s_poolid];
                     shared_pool_join(global_shared_pools[s_poolid], client);
@@ -1979,7 +1973,7 @@ static NOINLINE int do_tmem_new_pool(cli_id_t this_cli_id,
         }
         if ( first_unused_s_poolid == MAX_GLOBAL_SHARED_POOLS )
         {
-            tmh_client_warn("tmem: failed... no global shared pool slots available\n");
+            tmem_client_warn("tmem: failed... no global shared pool slots available\n");
             goto fail;
         }
         else
@@ -1995,7 +1989,7 @@ static NOINLINE int do_tmem_new_pool(cli_id_t this_cli_id,
     pool->pool_id = d_poolid;
     pool->persistent = persistent;
     pool->uuid[0] = uuid_lo; pool->uuid[1] = uuid_hi;
-    tmh_client_info("pool_id=%d\n", d_poolid);
+    tmem_client_info("pool_id=%d\n", d_poolid);
     return d_poolid;
 
 fail:
@@ -2014,19 +2008,19 @@ static int tmemc_freeze_pools(cli_id_t cli_id, int arg)
     char *s;
 
     s = destroy ? "destroyed" : ( freeze ? "frozen" : "thawed" );
-    if ( cli_id == CLI_ID_NULL )
+    if ( cli_id == TMEM_CLI_ID_NULL )
     {
         list_for_each_entry(client,&global_client_list,client_list)
             client_freeze(client,freeze);
-        tmh_client_info("tmem: all pools %s for all %ss\n", s, client_str);
+        tmem_client_info("tmem: all pools %s for all %ss\n", s, tmem_client_str);
     }
     else
     {
-        if ( (client = tmh_client_from_cli_id(cli_id)) == NULL)
+        if ( (client = tmem_client_from_cli_id(cli_id)) == NULL)
             return -1;
         client_freeze(client,freeze);
-        tmh_client_info("tmem: all pools %s for %s=%d\n",
-                         s, cli_id_str, cli_id);
+        tmem_client_info("tmem: all pools %s for %s=%d\n",
+                         s, tmem_cli_id_str, cli_id);
     }
     return 0;
 }
@@ -2035,10 +2029,10 @@ static int tmemc_flush_mem(cli_id_t cli_id, uint32_t kb)
 {
     uint32_t npages, flushed_pages, flushed_kb;
 
-    if ( cli_id != CLI_ID_NULL )
+    if ( cli_id != TMEM_CLI_ID_NULL )
     {
-        tmh_client_warn("tmem: %s-specific flush not supported yet, use --all\n",
-           client_str);
+        tmem_client_warn("tmem: %s-specific flush not supported yet, use --all\n",
+           tmem_client_str);
         return -1;
     }
     /* convert kb to pages, rounding up if necessary */
@@ -2078,7 +2072,7 @@ static int tmemc_list_client(client_t *c, tmem_cli_va_param_t buf,
              c->eph_count, c->eph_count_max,
              c->compressed_pages, c->compressed_sum_size,
              c->compress_poor, c->compress_nomem);
-    tmh_copy_to_client_buf_offset(buf,off+sum,info,n+1);
+    tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1);
     sum += n;
     for ( i = 0; i < MAX_POOLS_PER_DOMAIN; i++ )
     {
@@ -2106,7 +2100,7 @@ static int tmemc_list_client(client_t *c, tmem_cli_va_param_t buf,
              p->flushs_found, p->flushs, p->flush_objs_found, p->flush_objs);
         if ( sum + n >= len )
             return sum;
-        tmh_copy_to_client_buf_offset(buf,off+sum,info,n+1);
+        tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1);
         sum += n;
     }
     return sum;
@@ -2145,7 +2139,7 @@ static int tmemc_list_shared(tmem_cli_va_param_t buf, int off, uint32_t len,
              p->flushs_found, p->flushs, p->flush_objs_found, p->flush_objs);
         if ( sum + n >= len )
             return sum;
-        tmh_copy_to_client_buf_offset(buf,off+sum,info,n+1);
+        tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1);
         sum += n;
     }
     return sum;
@@ -2172,7 +2166,7 @@ static int tmemc_list_global_perf(tmem_cli_va_param_t buf, int off,
     n += scnprintf(info+n,BSIZE-n,"\n");
     if ( sum + n >= len )
         return sum;
-    tmh_copy_to_client_buf_offset(buf,off+sum,info,n+1);
+    tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1);
     sum += n;
     return sum;
 }
@@ -2190,7 +2184,7 @@ static int tmemc_list_global(tmem_cli_va_param_t buf, int off, uint32_t len,
       "Tt:%lu,Te:%lu,Cf:%lu,Af:%lu,Pf:%lu,Ta:%lu,"
       "Lm:%lu,Et:%lu,Ea:%lu,Rt:%lu,Ra:%lu,Rx:%lu,Fp:%lu%c",
       total_tmem_ops, errored_tmem_ops, failed_copies,
-      alloc_failed, alloc_page_failed, tmh_avail_pages(),
+      alloc_failed, alloc_page_failed, tmem_page_list_pages,
       low_on_memory, evicted_pgs,
       evict_attempts, relinq_pgs, relinq_attempts, max_evicts_per_relinq,
       total_flush_pool, use_long ? ',' : '\n');
@@ -2207,7 +2201,7 @@ static int tmemc_list_global(tmem_cli_va_param_t buf, int off, uint32_t len,
          tot_good_eph_puts,deduped_puts,pcd_tot_tze_size,pcd_tot_csize);
     if ( sum + n >= len )
         return sum;
-    tmh_copy_to_client_buf_offset(buf,off+sum,info,n+1);
+    tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1);
     sum += n;
     return sum;
 }
@@ -2218,14 +2212,14 @@ static int tmemc_list(cli_id_t cli_id, tmem_cli_va_param_t buf, uint32_t len,
     client_t *client;
     int off = 0;
 
-    if ( cli_id == CLI_ID_NULL ) {
+    if ( cli_id == TMEM_CLI_ID_NULL ) {
         off = tmemc_list_global(buf,0,len,use_long);
         off += tmemc_list_shared(buf,off,len-off,use_long);
         list_for_each_entry(client,&global_client_list,client_list)
             off += tmemc_list_client(client, buf, off, len-off, use_long);
         off += tmemc_list_global_perf(buf,off,len-off,use_long);
     }
-    else if ( (client = tmh_client_from_cli_id(cli_id)) == NULL)
+    else if ( (client = tmem_client_from_cli_id(cli_id)) == NULL)
         return -1;
     else
         off = tmemc_list_client(client, buf, 0, len, use_long);
@@ -2243,30 +2237,30 @@ static int tmemc_set_var_one(client_t *client, uint32_t subop, uint32_t arg1)
     case TMEMC_SET_WEIGHT:
         old_weight = client->weight;
         client->weight = arg1;
-        tmh_client_info("tmem: weight set to %d for %s=%d\n",
-                        arg1, cli_id_str, cli_id);
+        tmem_client_info("tmem: weight set to %d for %s=%d\n",
+                        arg1, tmem_cli_id_str, cli_id);
         atomic_sub(old_weight,&client_weight_total);
         atomic_add(client->weight,&client_weight_total);
         break;
     case TMEMC_SET_CAP:
         client->cap = arg1;
-        tmh_client_info("tmem: cap set to %d for %s=%d\n",
-                        arg1, cli_id_str, cli_id);
+        tmem_client_info("tmem: cap set to %d for %s=%d\n",
+                        arg1, tmem_cli_id_str, cli_id);
         break;
     case TMEMC_SET_COMPRESS:
-        if ( tmh_dedup_enabled() )
+        if ( tmem_dedup_enabled() )
         {
-            tmh_client_warn("tmem: compression %s for all %ss, cannot be changed when tmem_dedup is enabled\n",
-                            tmh_compression_enabled() ? "enabled" : "disabled",
-                            client_str);
+            tmem_client_warn("tmem: compression %s for all %ss, cannot be changed when tmem_dedup is enabled\n",
+                            tmem_compression_enabled() ? "enabled" : "disabled",
+                            tmem_client_str);
             return -1;
         }
         client->compress = arg1 ? 1 : 0;
-        tmh_client_info("tmem: compression %s for %s=%d\n",
-            arg1 ? "enabled" : "disabled",cli_id_str,cli_id);
+        tmem_client_info("tmem: compression %s for %s=%d\n",
+            arg1 ? "enabled" : "disabled",tmem_cli_id_str,cli_id);
         break;
     default:
-        tmh_client_warn("tmem: unknown subop %d for tmemc_set_var\n", subop);
+        tmem_client_warn("tmem: unknown subop %d for tmemc_set_var\n", subop);
         return -1;
     }
     return 0;
@@ -2276,10 +2270,10 @@ static int tmemc_set_var(cli_id_t cli_id, uint32_t subop, uint32_t arg1)
 {
     client_t *client;
 
-    if ( cli_id == CLI_ID_NULL )
+    if ( cli_id == TMEM_CLI_ID_NULL )
         list_for_each_entry(client,&global_client_list,client_list)
             tmemc_set_var_one(client, subop, arg1);
-    else if ( (client = tmh_client_from_cli_id(cli_id)) == NULL)
+    else if ( (client = tmem_client_from_cli_id(cli_id)) == NULL)
         return -1;
     else
         tmemc_set_var_one(client, subop, arg1);
@@ -2292,12 +2286,12 @@ static NOINLINE int tmemc_shared_pool_auth(cli_id_t cli_id, uint64_t uuid_lo,
     client_t *client;
     int i, free = -1;
 
-    if ( cli_id == CLI_ID_NULL )
+    if ( cli_id == TMEM_CLI_ID_NULL )
     {
         global_shared_auth = auth;
         return 1;
     }
-    client = tmh_client_from_cli_id(cli_id);
+    client = tmem_client_from_cli_id(cli_id);
     if ( client == NULL )
         return -EINVAL;
     for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++)
@@ -2326,7 +2320,7 @@ static NOINLINE int tmemc_shared_pool_auth(cli_id_t cli_id, uint64_t uuid_lo,
 static NOINLINE int tmemc_save_subop(int cli_id, uint32_t pool_id,
                         uint32_t subop, tmem_cli_va_param_t buf, uint32_t arg1)
 {
-    client_t *client = tmh_client_from_cli_id(cli_id);
+    client_t *client = tmem_client_from_cli_id(cli_id);
     pool_t *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
                    ? NULL : client->pools[pool_id];
     uint32_t p;
@@ -2394,7 +2388,7 @@ static NOINLINE int tmemc_save_subop(int cli_id, uint32_t pool_id,
     case TMEMC_SAVE_GET_POOL_UUID:
          if ( pool == NULL )
              break;
-        tmh_copy_to_client_buf(buf, pool->uuid, 2);
+        tmem_copy_to_client_buf(buf, pool->uuid, 2);
         rc = 0;
         break;
     case TMEMC_SAVE_END:
@@ -2415,7 +2409,7 @@ static NOINLINE int tmemc_save_subop(int cli_id, uint32_t pool_id,
 static NOINLINE int tmemc_save_get_next_page(int cli_id, uint32_t pool_id,
                         tmem_cli_va_param_t buf, uint32_t bufsize)
 {
-    client_t *client = tmh_client_from_cli_id(cli_id);
+    client_t *client = tmem_client_from_cli_id(cli_id);
     pool_t *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
                    ? NULL : client->pools[pool_id];
     pgp_t *pgp;
@@ -2458,8 +2452,8 @@ static NOINLINE int tmemc_save_get_next_page(int cli_id, uint32_t pool_id,
     BUILD_BUG_ON(sizeof(h.oid) != sizeof(oid));
     memcpy(h.oid, oid.oid, sizeof(h.oid));
     h.index = pgp->index;
-    tmh_copy_to_client_buf(buf, &h, 1);
-    tmh_client_buf_add(buf, sizeof(h));
+    tmem_copy_to_client_buf(buf, &h, 1);
+    tmem_client_buf_add(buf, sizeof(h));
     ret = do_tmem_get(pool, &oid, pgp->index, 0, 0, 0, pagesize, buf);
 
 out:
@@ -2470,7 +2464,7 @@ out:
 static NOINLINE int tmemc_save_get_next_inv(int cli_id, tmem_cli_va_param_t buf,
                         uint32_t bufsize)
 {
-    client_t *client = tmh_client_from_cli_id(cli_id);
+    client_t *client = tmem_client_from_cli_id(cli_id);
     pgp_t *pgp;
     struct tmem_handle h;
     int ret = 0;
@@ -2502,7 +2496,7 @@ static NOINLINE int tmemc_save_get_next_inv(int cli_id, tmem_cli_va_param_t buf,
     BUILD_BUG_ON(sizeof(h.oid) != sizeof(pgp->inv_oid));
     memcpy(h.oid, pgp->inv_oid.oid, sizeof(h.oid));
     h.index = pgp->index;
-    tmh_copy_to_client_buf(buf, &h, 1);
+    tmem_copy_to_client_buf(buf, &h, 1);
     ret = 1;
 out:
     tmem_spin_unlock(&pers_lists_spinlock);
@@ -2512,7 +2506,7 @@ out:
 static int tmemc_restore_put_page(int cli_id, uint32_t pool_id, OID *oidp,
                       uint32_t index, tmem_cli_va_param_t buf, uint32_t bufsize)
 {
-    client_t *client = tmh_client_from_cli_id(cli_id);
+    client_t *client = tmem_client_from_cli_id(cli_id);
     pool_t *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
                    ? NULL : client->pools[pool_id];
 
@@ -2524,7 +2518,7 @@ static int tmemc_restore_put_page(int cli_id, uint32_t pool_id, OID *oidp,
 static int tmemc_restore_flush_page(int cli_id, uint32_t pool_id, OID *oidp,
                         uint32_t index)
 {
-    client_t *client = tmh_client_from_cli_id(cli_id);
+    client_t *client = tmem_client_from_cli_id(cli_id);
     pool_t *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
                    ? NULL : client->pools[pool_id];
 
@@ -2540,7 +2534,7 @@ static NOINLINE int do_tmem_control(struct tmem_op *op)
     uint32_t subop = op->u.ctrl.subop;
     OID *oidp = (OID *)(&op->u.ctrl.oid[0]);
 
-    if (!tmh_current_is_privileged())
+    if (!tmem_current_is_privileged())
         return -EPERM;
 
     switch(subop)
@@ -2564,7 +2558,7 @@ static NOINLINE int do_tmem_control(struct tmem_op *op)
         ret = tmemc_set_var(op->u.ctrl.cli_id,subop,op->u.ctrl.arg1);
         break;
     case TMEMC_QUERY_FREEABLE_MB:
-        ret = tmh_freeable_pages() >> (20 - PAGE_SHIFT);
+        ret = tmem_freeable_pages() >> (20 - PAGE_SHIFT);
         break;
     case TMEMC_SAVE_BEGIN:
     case TMEMC_RESTORE_BEGIN:
@@ -2612,7 +2606,7 @@ static NOINLINE int do_tmem_control(struct tmem_op *op)
 EXPORT long do_tmem_op(tmem_cli_op_t uops)
 {
     struct tmem_op op;
-    client_t *client = tmh_client_from_current();
+    client_t *client = tmem_client_from_current();
     pool_t *pool = NULL;
     OID *oidp;
     int rc = 0;
@@ -2630,14 +2624,14 @@ EXPORT long do_tmem_op(tmem_cli_op_t uops)
     if ( !tmem_initialized )
         return -ENODEV;
 
-    if ( !tmh_current_permitted() )
+    if ( !tmem_current_permitted() )
         return -EPERM;
 
     total_tmem_ops++;
 
-    if ( tmh_lock_all )
+    if ( tmem_lock_all )
     {
-        if ( tmh_lock_all > 1 )
+        if ( tmem_lock_all > 1 )
             spin_lock_irq(&tmem_spinlock);
         else
             spin_lock(&tmem_spinlock);
@@ -2650,21 +2644,21 @@ EXPORT long do_tmem_op(tmem_cli_op_t uops)
     DUP_START_CYC_COUNTER(flush,succ_get);
     DUP_START_CYC_COUNTER(flush_obj,succ_get);
 
-    if ( client != NULL && tmh_client_is_dying(client) )
+    if ( client != NULL && tmem_client_is_dying(client) )
     {
         rc = -ENODEV;
-        if ( tmh_lock_all )
+        if ( tmem_lock_all )
             goto out;
  simple_error:
         errored_tmem_ops++;
         return rc;
     }
 
-    if ( unlikely(tmh_get_tmemop_from_client(&op, uops) != 0) )
+    if ( unlikely(tmem_get_tmemop_from_client(&op, uops) != 0) )
     {
-        tmh_client_err("tmem: can't get tmem struct from %s\n", client_str);
+        tmem_client_err("tmem: can't get tmem struct from %s\n", tmem_client_str);
         rc = -EFAULT;
-        if ( !tmh_lock_all )
+        if ( !tmem_lock_all )
             goto simple_error;
         goto out;
     }
@@ -2694,10 +2688,10 @@ EXPORT long do_tmem_op(tmem_cli_op_t uops)
     {
         tmem_write_lock(&tmem_rwlock);
         tmem_write_lock_set = 1;
-        if ( (client = client_create(tmh_get_cli_id_from_current())) == NULL )
+        if ( (client = client_create(tmem_get_cli_id_from_current())) == NULL )
         {
-            tmh_client_err("tmem: can't create tmem structure for %s\n",
-                           client_str);
+            tmem_client_err("tmem: can't create tmem structure for %s\n",
+                           tmem_client_str);
             rc = -ENOMEM;
             goto out;
         }
@@ -2721,7 +2715,7 @@ EXPORT long do_tmem_op(tmem_cli_op_t uops)
         if ( ((uint32_t)op.pool_id >= MAX_POOLS_PER_DOMAIN) ||
              ((pool = client->pools[op.pool_id]) == NULL) )
         {
-            tmh_client_err("tmem: operation requested on uncreated pool\n");
+            tmem_client_err("tmem: operation requested on uncreated pool\n");
             rc = -ENODEV;
             goto out;
         }
@@ -2732,24 +2726,24 @@ EXPORT long do_tmem_op(tmem_cli_op_t uops)
     switch ( op.cmd )
     {
     case TMEM_NEW_POOL:
-        rc = do_tmem_new_pool(CLI_ID_NULL, 0, op.u.creat.flags,
+        rc = do_tmem_new_pool(TMEM_CLI_ID_NULL, 0, op.u.creat.flags,
                               op.u.creat.uuid[0], op.u.creat.uuid[1]);
         break;
     case TMEM_NEW_PAGE:
         tmem_ensure_avail_pages();
         rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn, 0, 0, 0,
-                         tmh_cli_buf_null);
+                         tmem_cli_buf_null);
         break;
     case TMEM_PUT_PAGE:
         tmem_ensure_avail_pages();
         rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn, 0, 0,
-                         PAGE_SIZE, tmh_cli_buf_null);
+                         PAGE_SIZE, tmem_cli_buf_null);
         if (rc == 1) succ_put = 1;
         else non_succ_put = 1;
         break;
     case TMEM_GET_PAGE:
         rc = do_tmem_get(pool, oidp, op.u.gen.index, op.u.gen.cmfn,
-                         0, 0, PAGE_SIZE, tmh_cli_buf_null);
+                         0, 0, PAGE_SIZE, tmem_cli_buf_null);
         if (rc == 1) succ_get = 1;
         else non_succ_get = 1;
         break;
@@ -2768,21 +2762,21 @@ EXPORT long do_tmem_op(tmem_cli_op_t uops)
     case TMEM_READ:
         rc = do_tmem_get(pool, oidp, op.u.gen.index, op.u.gen.cmfn,
                          op.u.gen.tmem_offset, op.u.gen.pfn_offset,
-                         op.u.gen.len, tmh_cli_buf_null);
+                         op.u.gen.len, tmem_cli_buf_null);
         break;
     case TMEM_WRITE:
         rc = do_tmem_put(pool, oidp,
                          op.u.gen.index, op.u.gen.cmfn,
                          op.u.gen.tmem_offset, op.u.gen.pfn_offset,
-                         op.u.gen.len, tmh_cli_buf_null);
+                         op.u.gen.len, tmem_cli_buf_null);
         break;
     case TMEM_XCHG:
         /* need to hold global lock to ensure xchg is atomic */
-        tmh_client_warn("tmem_xchg op not implemented yet\n");
+        tmem_client_warn("tmem_xchg op not implemented yet\n");
         rc = 0;
         break;
     default:
-        tmh_client_warn("tmem: op %d not implemented\n", op.cmd);
+        tmem_client_warn("tmem: op %d not implemented\n", op.cmd);
         rc = 0;
         break;
     }
@@ -2803,9 +2797,9 @@ out:
     else if ( flush_obj )
         END_CYC_COUNTER_CLI(flush_obj,client);
 
-    if ( tmh_lock_all )
+    if ( tmem_lock_all )
     {
-        if ( tmh_lock_all > 1 )
+        if ( tmem_lock_all > 1 )
             spin_unlock_irq(&tmem_spinlock);
         else
             spin_unlock(&tmem_spinlock);
@@ -2829,22 +2823,22 @@ EXPORT void tmem_destroy(void *v)
     if ( client == NULL )
         return;
 
-    if ( !tmh_client_is_dying(client) )
+    if ( !tmem_client_is_dying(client) )
     {
         printk("tmem: tmem_destroy can only destroy dying client\n");
         return;
     }
 
-    if ( tmh_lock_all )
+    if ( tmem_lock_all )
         spin_lock(&tmem_spinlock);
     else
         write_lock(&tmem_rwlock);
 
     printk("tmem: flushing tmem pools for %s=%d\n",
-           cli_id_str, client->cli_id);
+           tmem_cli_id_str, client->cli_id);
     client_flush(client, 1);
 
-    if ( tmh_lock_all )
+    if ( tmem_lock_all )
         spin_unlock(&tmem_spinlock);
     else
         write_unlock(&tmem_rwlock);
@@ -2855,15 +2849,15 @@ EXPORT void tmem_freeze_all(unsigned char key)
 {
     static int freeze = 0;
  
-    if ( tmh_lock_all )
+    if ( tmem_lock_all )
         spin_lock(&tmem_spinlock);
     else
         write_lock(&tmem_rwlock);
 
     freeze = !freeze;
-    tmemc_freeze_pools(CLI_ID_NULL,freeze);
+    tmemc_freeze_pools(TMEM_CLI_ID_NULL,freeze);
 
-    if ( tmh_lock_all )
+    if ( tmem_lock_all )
         spin_unlock(&tmem_spinlock);
     else
         write_unlock(&tmem_rwlock);
@@ -2877,7 +2871,7 @@ EXPORT void *tmem_relinquish_pages(unsigned int order, unsigned int memflags)
     unsigned long evicts_per_relinq = 0;
     int max_evictions = 10;
 
-    if (!tmh_enabled() || !tmh_freeable_pages())
+    if (!tmem_enabled() || !tmem_freeable_pages())
         return NULL;
 
     relinq_attempts++;
@@ -2889,15 +2883,15 @@ EXPORT void *tmem_relinquish_pages(unsigned int order, unsigned int memflags)
         return NULL;
     }
 
-    if ( tmh_called_from_tmem(memflags) )
+    if ( tmem_called_from_tmem(memflags) )
     {
-        if ( tmh_lock_all )
+        if ( tmem_lock_all )
             spin_lock(&tmem_spinlock);
         else
             read_lock(&tmem_rwlock);
     }
 
-    while ( (pfp = tmh_alloc_page(NULL,1)) == NULL )
+    while ( (pfp = tmem_alloc_page(NULL,1)) == NULL )
     {
         if ( (max_evictions-- <= 0) || !tmem_evict())
             break;
@@ -2905,13 +2899,13 @@ EXPORT void *tmem_relinquish_pages(unsigned int order, unsigned int memflags)
     }
     if ( evicts_per_relinq > max_evicts_per_relinq )
         max_evicts_per_relinq = evicts_per_relinq;
-    tmh_scrub_page(pfp, memflags);
+    tmem_scrub_page(pfp, memflags);
     if ( pfp != NULL )
         relinq_pgs++;
 
-    if ( tmh_called_from_tmem(memflags) )
+    if ( tmem_called_from_tmem(memflags) )
     {
-        if ( tmh_lock_all )
+        if ( tmem_lock_all )
             spin_unlock(&tmem_spinlock);
         else
             read_unlock(&tmem_rwlock);
@@ -2920,33 +2914,33 @@ EXPORT void *tmem_relinquish_pages(unsigned int order, unsigned int memflags)
     return pfp;
 }
 
-EXPORT unsigned long tmem_freeable_pages(void)
+unsigned long tmem_freeable_pages(void)
 {
-    return tmh_freeable_pages();
+    return tmem_page_list_pages + _atomic_read(freeable_page_count);
 }
 
 /* called at hypervisor startup */
 static int __init init_tmem(void)
 {
     int i;
-    if ( !tmh_enabled() )
+    if ( !tmem_enabled() )
         return 0;
 
-    if ( tmh_dedup_enabled() )
+    if ( tmem_dedup_enabled() )
         for (i = 0; i < 256; i++ )
         {
             pcd_tree_roots[i] = RB_ROOT;
             rwlock_init(&pcd_tree_rwlocks[i]);
         }
 
-    if ( tmh_init() )
+    if ( tmem_init() )
     {
         printk("tmem: initialized comp=%d dedup=%d tze=%d global-lock=%d\n",
-            tmh_compression_enabled(), tmh_dedup_enabled(), tmh_tze_enabled(),
-            tmh_lock_all);
-        if ( tmh_dedup_enabled()&&tmh_compression_enabled()&&tmh_tze_enabled() )
+            tmem_compression_enabled(), tmem_dedup_enabled(), tmem_tze_enabled(),
+            tmem_lock_all);
+        if ( tmem_dedup_enabled()&&tmem_compression_enabled()&&tmem_tze_enabled() )
         {
-            tmh_tze_disable();
+            tmem_tze_disable();
             printk("tmem: tze and compression not compatible, disabling tze\n");
         }
         tmem_initialized = 1;
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index e1e83d2..bb2b601 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -46,7 +46,7 @@ static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, workmem);
 static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, dstmem);
 static DEFINE_PER_CPU_READ_MOSTLY(void *, scratch_page);
 
-void tmh_copy_page(char *to, char*from)
+void tmem_copy_page(char *to, char*from)
 {
     DECL_LOCAL_CYC_COUNTER(pg_copy);
     START_CYC_COUNTER(pg_copy);
@@ -109,7 +109,7 @@ static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp,
 }
 #endif
 
-EXPORT int tmh_copy_from_client(struct page_info *pfp,
+EXPORT int tmem_copy_from_client(struct page_info *pfp,
     xen_pfn_t cmfn, pagesize_t tmem_offset,
     pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t clibuf)
 {
@@ -140,7 +140,7 @@ EXPORT int tmh_copy_from_client(struct page_info *pfp,
     }
     smp_mb();
     if ( len == PAGE_SIZE && !tmem_offset && !pfn_offset && cli_va )
-        tmh_copy_page(tmem_va, cli_va);
+        tmem_copy_page(tmem_va, cli_va);
     else if ( (tmem_offset+len <= PAGE_SIZE) &&
               (pfn_offset+len <= PAGE_SIZE) )
     {
@@ -158,7 +158,7 @@ EXPORT int tmh_copy_from_client(struct page_info *pfp,
     return rc;
 }
 
-EXPORT int tmh_compress_from_client(xen_pfn_t cmfn,
+EXPORT int tmem_compress_from_client(xen_pfn_t cmfn,
     void **out_va, size_t *out_len, tmem_cli_va_param_t clibuf)
 {
     int ret = 0;
@@ -190,7 +190,7 @@ EXPORT int tmh_compress_from_client(xen_pfn_t cmfn,
     return 1;
 }
 
-EXPORT int tmh_copy_to_client(xen_pfn_t cmfn, struct page_info *pfp,
+EXPORT int tmem_copy_to_client(xen_pfn_t cmfn, struct page_info *pfp,
     pagesize_t tmem_offset, pagesize_t pfn_offset, pagesize_t len,
     tmem_cli_va_param_t clibuf)
 {
@@ -211,7 +211,7 @@ EXPORT int tmh_copy_to_client(xen_pfn_t cmfn, struct page_info *pfp,
     tmem_mfn = page_to_mfn(pfp);
     tmem_va = map_domain_page(tmem_mfn);
     if ( len == PAGE_SIZE && !tmem_offset && !pfn_offset && cli_va )
-        tmh_copy_page(cli_va, tmem_va);
+        tmem_copy_page(cli_va, tmem_va);
     else if ( (tmem_offset+len <= PAGE_SIZE) && (pfn_offset+len <= PAGE_SIZE) )
     {
         if ( cli_va )
@@ -229,7 +229,7 @@ EXPORT int tmh_copy_to_client(xen_pfn_t cmfn, struct page_info *pfp,
     return rc;
 }
 
-EXPORT int tmh_decompress_to_client(xen_pfn_t cmfn, void *tmem_va,
+EXPORT int tmem_decompress_to_client(xen_pfn_t cmfn, void *tmem_va,
                                     size_t size, tmem_cli_va_param_t clibuf)
 {
     unsigned long cli_mfn = 0;
@@ -258,7 +258,7 @@ EXPORT int tmh_decompress_to_client(xen_pfn_t cmfn, void *tmem_va,
     return 1;
 }
 
-EXPORT int tmh_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va,
+EXPORT int tmem_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va,
                                     pagesize_t len)
 {
     void *cli_va;
@@ -282,30 +282,30 @@ EXPORT int tmh_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va,
 
 /******************  XEN-SPECIFIC MEMORY ALLOCATION ********************/
 
-EXPORT struct xmem_pool *tmh_mempool = 0;
-EXPORT unsigned int tmh_mempool_maxalloc = 0;
+EXPORT struct xmem_pool *tmem_mempool = 0;
+EXPORT unsigned int tmem_mempool_maxalloc = 0;
 
-EXPORT DEFINE_SPINLOCK(tmh_page_list_lock);
-EXPORT PAGE_LIST_HEAD(tmh_page_list);
-EXPORT unsigned long tmh_page_list_pages = 0;
+EXPORT DEFINE_SPINLOCK(tmem_page_list_lock);
+EXPORT PAGE_LIST_HEAD(tmem_page_list);
+EXPORT unsigned long tmem_page_list_pages = 0;
 
-/* free anything on tmh_page_list to Xen's scrub list */
-EXPORT void tmh_release_avail_pages_to_host(void)
+/* free anything on tmem_page_list to Xen's scrub list */
+EXPORT void tmem_release_avail_pages_to_host(void)
 {
-    spin_lock(&tmh_page_list_lock);
-    while ( !page_list_empty(&tmh_page_list) )
+    spin_lock(&tmem_page_list_lock);
+    while ( !page_list_empty(&tmem_page_list) )
     {
-        struct page_info *pg = page_list_remove_head(&tmh_page_list);
+        struct page_info *pg = page_list_remove_head(&tmem_page_list);
         scrub_one_page(pg);
-        tmh_page_list_pages--;
+        tmem_page_list_pages--;
         free_domheap_page(pg);
     }
-    ASSERT(tmh_page_list_pages == 0);
-    INIT_PAGE_LIST_HEAD(&tmh_page_list);
-    spin_unlock(&tmh_page_list_lock);
+    ASSERT(tmem_page_list_pages == 0);
+    INIT_PAGE_LIST_HEAD(&tmem_page_list);
+    spin_unlock(&tmem_page_list_lock);
 }
 
-EXPORT void tmh_scrub_page(struct page_info *pi, unsigned int memflags)
+EXPORT void tmem_scrub_page(struct page_info *pi, unsigned int memflags)
 {
     if ( pi == NULL )
         return;
@@ -313,84 +313,84 @@ EXPORT void tmh_scrub_page(struct page_info *pi, unsigned int memflags)
         scrub_one_page(pi);
 }
 
-static noinline void *tmh_mempool_page_get(unsigned long size)
+static noinline void *tmem_mempool_page_get(unsigned long size)
 {
     struct page_info *pi;
 
     ASSERT(size == PAGE_SIZE);
-    if ( (pi = tmh_alloc_page(NULL,0)) == NULL )
+    if ( (pi = tmem_alloc_page(NULL,0)) == NULL )
         return NULL;
     ASSERT(IS_VALID_PAGE(pi));
     return page_to_virt(pi);
 }
 
-static void tmh_mempool_page_put(void *page_va)
+static void tmem_mempool_page_put(void *page_va)
 {
     ASSERT(IS_PAGE_ALIGNED(page_va));
-    tmh_free_page(virt_to_page(page_va));
+    tmem_free_page(virt_to_page(page_va));
 }
 
-static int __init tmh_mempool_init(void)
+static int __init tmem_mempool_init(void)
 {
-    tmh_mempool = xmem_pool_create("tmem", tmh_mempool_page_get,
-        tmh_mempool_page_put, PAGE_SIZE, 0, PAGE_SIZE);
-    if ( tmh_mempool )
-        tmh_mempool_maxalloc = xmem_pool_maxalloc(tmh_mempool);
-    return tmh_mempool != NULL;
+    tmem_mempool = xmem_pool_create("tmem", tmem_mempool_page_get,
+        tmem_mempool_page_put, PAGE_SIZE, 0, PAGE_SIZE);
+    if ( tmem_mempool )
+        tmem_mempool_maxalloc = xmem_pool_maxalloc(tmem_mempool);
+    return tmem_mempool != NULL;
 }
 
 /* persistent pools are per-domain */
 
-static void *tmh_persistent_pool_page_get(unsigned long size)
+static void *tmem_persistent_pool_page_get(unsigned long size)
 {
     struct page_info *pi;
     struct domain *d = current->domain;
 
     ASSERT(size == PAGE_SIZE);
-    if ( (pi = _tmh_alloc_page_thispool(d)) == NULL )
+    if ( (pi = _tmem_alloc_page_thispool(d)) == NULL )
         return NULL;
     ASSERT(IS_VALID_PAGE(pi));
     return page_to_virt(pi);
 }
 
-static void tmh_persistent_pool_page_put(void *page_va)
+static void tmem_persistent_pool_page_put(void *page_va)
 {
     struct page_info *pi;
 
     ASSERT(IS_PAGE_ALIGNED(page_va));
     pi = mfn_to_page(virt_to_mfn(page_va));
     ASSERT(IS_VALID_PAGE(pi));
-    _tmh_free_page_thispool(pi);
+    _tmem_free_page_thispool(pi);
 }
 
 /******************  XEN-SPECIFIC CLIENT HANDLING ********************/
 
-EXPORT tmh_client_t *tmh_client_init(cli_id_t cli_id)
+EXPORT tmem_client_t *tmem_client_init(cli_id_t cli_id)
 {
-    tmh_client_t *tmh;
+    tmem_client_t *tmem;
     char name[5];
     int i, shift;
 
-    if ( (tmh = xmalloc(tmh_client_t)) == NULL )
+    if ( (tmem = xmalloc(tmem_client_t)) == NULL )
         return NULL;
     for (i = 0, shift = 12; i < 4; shift -=4, i++)
         name[i] = (((unsigned short)cli_id >> shift) & 0xf) + '0';
     name[4] = '\0';
-    tmh->persistent_pool = xmem_pool_create(name, tmh_persistent_pool_page_get,
-        tmh_persistent_pool_page_put, PAGE_SIZE, 0, PAGE_SIZE);
-    if ( tmh->persistent_pool == NULL )
+    tmem->persistent_pool = xmem_pool_create(name, tmem_persistent_pool_page_get,
+        tmem_persistent_pool_page_put, PAGE_SIZE, 0, PAGE_SIZE);
+    if ( tmem->persistent_pool == NULL )
     {
-        xfree(tmh);
+        xfree(tmem);
         return NULL;
     }
-    return tmh;
+    return tmem;
 }
 
-EXPORT void tmh_client_destroy(tmh_client_t *tmh)
+EXPORT void tmem_client_destroy(tmem_client_t *tmem)
 {
-    ASSERT(tmh->domain->is_dying);
-    xmem_pool_destroy(tmh->persistent_pool);
-    tmh->domain = NULL;
+    ASSERT(tmem->domain->is_dying);
+    xmem_pool_destroy(tmem->persistent_pool);
+    tmem->domain = NULL;
 }
 
 /******************  XEN-SPECIFIC HOST INITIALIZATION ********************/
@@ -443,11 +443,11 @@ static struct notifier_block cpu_nfb = {
     .notifier_call = cpu_callback
 };
 
-EXPORT int __init tmh_init(void)
+EXPORT int __init tmem_init(void)
 {
     unsigned int cpu;
 
-    if ( !tmh_mempool_init() )
+    if ( !tmem_mempool_init() )
         return 0;
 
     dstmem_order = get_order_from_pages(LZO_DSTMEM_PAGES);
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index b24246c..dc37861 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -26,7 +26,7 @@ struct tmem_host_dependent_client {
     struct domain *domain;
     struct xmem_pool *persistent_pool;
 };
-typedef struct tmem_host_dependent_client tmh_client_t;
+typedef struct tmem_host_dependent_client tmem_client_t;
 
 typedef uint32_t pagesize_t;  /* like size_t, must handle largest PAGE_SIZE */
 
@@ -34,55 +34,55 @@ typedef uint32_t pagesize_t;  /* like size_t, must handle largest PAGE_SIZE */
   ((void *)((((unsigned long)addr + (PAGE_SIZE - 1)) & PAGE_MASK)) == addr)
 #define IS_VALID_PAGE(_pi)  ( mfn_valid(page_to_mfn(_pi)) )
 
-extern struct xmem_pool *tmh_mempool;
-extern unsigned int tmh_mempool_maxalloc;
-extern struct page_list_head tmh_page_list;
-extern spinlock_t tmh_page_list_lock;
-extern unsigned long tmh_page_list_pages;
+extern struct xmem_pool *tmem_mempool;
+extern unsigned int tmem_mempool_maxalloc;
+extern struct page_list_head tmem_page_list;
+extern spinlock_t tmem_page_list_lock;
+extern unsigned long tmem_page_list_pages;
 extern atomic_t freeable_page_count;
 
 extern spinlock_t tmem_lock;
 extern spinlock_t tmem_spinlock;
 extern rwlock_t tmem_rwlock;
 
-extern void tmh_copy_page(char *to, char*from);
-extern int tmh_init(void);
-#define tmh_hash hash_long
+extern void tmem_copy_page(char *to, char*from);
+extern int tmem_init(void);
+#define tmem_hash hash_long
 
-extern void tmh_release_avail_pages_to_host(void);
-extern void tmh_scrub_page(struct page_info *pi, unsigned int memflags);
+extern void tmem_release_avail_pages_to_host(void);
+extern void tmem_scrub_page(struct page_info *pi, unsigned int memflags);
 
 extern bool_t opt_tmem_compress;
-static inline bool_t tmh_compression_enabled(void)
+static inline bool_t tmem_compression_enabled(void)
 {
     return opt_tmem_compress;
 }
 
 extern bool_t opt_tmem_dedup;
-static inline bool_t tmh_dedup_enabled(void)
+static inline bool_t tmem_dedup_enabled(void)
 {
     return opt_tmem_dedup;
 }
 
 extern bool_t opt_tmem_tze;
-static inline bool_t tmh_tze_enabled(void)
+static inline bool_t tmem_tze_enabled(void)
 {
     return opt_tmem_tze;
 }
 
-static inline void tmh_tze_disable(void)
+static inline void tmem_tze_disable(void)
 {
     opt_tmem_tze = 0;
 }
 
 extern bool_t opt_tmem_shared_auth;
-static inline bool_t tmh_shared_auth(void)
+static inline bool_t tmem_shared_auth(void)
 {
     return opt_tmem_shared_auth;
 }
 
 extern bool_t opt_tmem;
-static inline bool_t tmh_enabled(void)
+static inline bool_t tmem_enabled(void)
 {
     return opt_tmem;
 }
@@ -93,30 +93,25 @@ extern int opt_tmem_lock;
  * Memory free page list management
  */
 
-static inline struct page_info *tmh_page_list_get(void)
+static inline struct page_info *tmem_page_list_get(void)
 {
     struct page_info *pi;
 
-    spin_lock(&tmh_page_list_lock);
-    if ( (pi = page_list_remove_head(&tmh_page_list)) != NULL )
-        tmh_page_list_pages--;
-    spin_unlock(&tmh_page_list_lock);
+    spin_lock(&tmem_page_list_lock);
+    if ( (pi = page_list_remove_head(&tmem_page_list)) != NULL )
+        tmem_page_list_pages--;
+    spin_unlock(&tmem_page_list_lock);
     ASSERT((pi == NULL) || IS_VALID_PAGE(pi));
     return pi;
 }
 
-static inline void tmh_page_list_put(struct page_info *pi)
+static inline void tmem_page_list_put(struct page_info *pi)
 {
     ASSERT(IS_VALID_PAGE(pi));
-    spin_lock(&tmh_page_list_lock);
-    page_list_add(pi, &tmh_page_list);
-    tmh_page_list_pages++;
-    spin_unlock(&tmh_page_list_lock);
-}
-
-static inline unsigned long tmh_avail_pages(void)
-{
-    return tmh_page_list_pages;
+    spin_lock(&tmem_page_list_lock);
+    page_list_add(pi, &tmem_page_list);
+    tmem_page_list_pages++;
+    spin_unlock(&tmem_page_list_lock);
 }
 
 /*
@@ -127,36 +122,36 @@ static inline bool_t domain_fully_allocated(struct domain *d)
 {
     return ( d->tot_pages >= d->max_pages );
 }
-#define tmh_client_memory_fully_allocated(_pool) \
- domain_fully_allocated(_pool->client->tmh->domain)
+#define tmem_client_memory_fully_allocated(_pool) \
+ domain_fully_allocated(_pool->client->tmem->domain)
 
-static inline void *_tmh_alloc_subpage_thispool(struct xmem_pool *cmem_mempool,
+static inline void *_tmem_alloc_subpage_thispool(struct xmem_pool *cmem_mempool,
                                                  size_t size, size_t align)
 {
 #if 0
     if ( d->tot_pages >= d->max_pages )
         return NULL;
 #endif
-    ASSERT( size < tmh_mempool_maxalloc );
+    ASSERT( size < tmem_mempool_maxalloc );
     if ( cmem_mempool == NULL )
         return NULL;
     return xmem_pool_alloc(size, cmem_mempool);
 }
-#define tmh_alloc_subpage_thispool(_pool, _s, _a) \
-            _tmh_alloc_subpage_thispool(pool->client->tmh->persistent_pool, \
+#define tmem_alloc_subpage_thispool(_pool, _s, _a) \
+            _tmem_alloc_subpage_thispool(pool->client->tmem->persistent_pool, \
                                          _s, _a)
 
-static inline void _tmh_free_subpage_thispool(struct xmem_pool *cmem_mempool,
+static inline void _tmem_free_subpage_thispool(struct xmem_pool *cmem_mempool,
                                                void *ptr, size_t size)
 {
-    ASSERT( size < tmh_mempool_maxalloc );
+    ASSERT( size < tmem_mempool_maxalloc );
     ASSERT( cmem_mempool != NULL );
     xmem_pool_free(ptr,cmem_mempool);
 }
-#define tmh_free_subpage_thispool(_pool, _p, _s) \
- _tmh_free_subpage_thispool(_pool->client->tmh->persistent_pool, _p, _s)
+#define tmem_free_subpage_thispool(_pool, _p, _s) \
+ _tmem_free_subpage_thispool(_pool->client->tmem->persistent_pool, _p, _s)
 
-static inline struct page_info *_tmh_alloc_page_thispool(struct domain *d)
+static inline struct page_info *_tmem_alloc_page_thispool(struct domain *d)
 {
     struct page_info *pi;
 
@@ -166,14 +161,14 @@ static inline struct page_info *_tmh_alloc_page_thispool(struct domain *d)
     if ( d->tot_pages >= d->max_pages )
         return NULL;
 
-    if ( tmh_page_list_pages )
+    if ( tmem_page_list_pages )
     {
-        if ( (pi = tmh_page_list_get()) != NULL )
+        if ( (pi = tmem_page_list_get()) != NULL )
         {
             if ( donate_page(d,pi,0) == 0 )
                 goto out;
             else
-                tmh_page_list_put(pi);
+                tmem_page_list_put(pi);
         }
     }
 
@@ -183,16 +178,16 @@ out:
     ASSERT((pi == NULL) || IS_VALID_PAGE(pi));
     return pi;
 }
-#define tmh_alloc_page_thispool(_pool) \
-    _tmh_alloc_page_thispool(_pool->client->tmh->domain)
+#define tmem_alloc_page_thispool(_pool) \
+    _tmem_alloc_page_thispool(_pool->client->tmem->domain)
 
-static inline void _tmh_free_page_thispool(struct page_info *pi)
+static inline void _tmem_free_page_thispool(struct page_info *pi)
 {
     struct domain *d = page_get_owner(pi);
 
     ASSERT(IS_VALID_PAGE(pi));
     if ( (d == NULL) || steal_page(d,pi,0) == 0 )
-        tmh_page_list_put(pi);
+        tmem_page_list_put(pi);
     else
     {
         scrub_one_page(pi);
@@ -200,30 +195,30 @@ static inline void _tmh_free_page_thispool(struct page_info *pi)
         free_domheap_pages(pi,0);
     }
 }
-#define tmh_free_page_thispool(_pool,_pg) \
-    _tmh_free_page_thispool(_pg)
+#define tmem_free_page_thispool(_pool,_pg) \
+    _tmem_free_page_thispool(_pg)
 
 /*
  * Memory allocation for ephemeral (non-persistent) data
  */
 
-static inline void *tmh_alloc_subpage(void *pool, size_t size,
+static inline void *tmem_alloc_subpage(void *pool, size_t size,
                                                  size_t align)
 {
-    ASSERT( size < tmh_mempool_maxalloc );
-    ASSERT( tmh_mempool != NULL );
-    return xmem_pool_alloc(size, tmh_mempool);
+    ASSERT( size < tmem_mempool_maxalloc );
+    ASSERT( tmem_mempool != NULL );
+    return xmem_pool_alloc(size, tmem_mempool);
 }
 
-static inline void tmh_free_subpage(void *ptr, size_t size)
+static inline void tmem_free_subpage(void *ptr, size_t size)
 {
-    ASSERT( size < tmh_mempool_maxalloc );
-    xmem_pool_free(ptr,tmh_mempool);
+    ASSERT( size < tmem_mempool_maxalloc );
+    xmem_pool_free(ptr,tmem_mempool);
 }
 
-static inline struct page_info *tmh_alloc_page(void *pool, int no_heap)
+static inline struct page_info *tmem_alloc_page(void *pool, int no_heap)
 {
-    struct page_info *pi = tmh_page_list_get();
+    struct page_info *pi = tmem_page_list_get();
 
     if ( pi == NULL && !no_heap )
         pi = alloc_domheap_pages(0,0,MEMF_tmem);
@@ -233,55 +228,50 @@ static inline struct page_info *tmh_alloc_page(void *pool, int no_heap)
     return pi;
 }
 
-static inline void tmh_free_page(struct page_info *pi)
+static inline void tmem_free_page(struct page_info *pi)
 {
     ASSERT(IS_VALID_PAGE(pi));
-    tmh_page_list_put(pi);
+    tmem_page_list_put(pi);
     atomic_dec(&freeable_page_count);
 }
 
 static inline unsigned int tmem_subpage_maxsize(void)
 {
-    return tmh_mempool_maxalloc;
-}
-
-static inline unsigned long tmh_freeable_pages(void)
-{
-    return tmh_avail_pages() + _atomic_read(freeable_page_count);
+    return tmem_mempool_maxalloc;
 }
 
-static inline unsigned long tmh_free_mb(void)
+static inline unsigned long tmem_free_mb(void)
 {
-    return (tmh_avail_pages() + total_free_pages()) >> (20 - PAGE_SHIFT);
+    return (tmem_page_list_pages + total_free_pages()) >> (20 - PAGE_SHIFT);
 }
 
 /*
  * Memory allocation for "infrastructure" data
  */
 
-static inline void *tmh_alloc_infra(size_t size, size_t align)
+static inline void *tmem_alloc_infra(size_t size, size_t align)
 {
     return _xmalloc(size,align);
 }
 
-static inline void tmh_free_infra(void *p)
+static inline void tmem_free_infra(void *p)
 {
     return xfree(p);
 }
 
-#define tmh_lock_all  opt_tmem_lock
-#define tmh_called_from_tmem(_memflags) (_memflags & MEMF_tmem)
+#define tmem_lock_all  opt_tmem_lock
+#define tmem_called_from_tmem(_memflags) (_memflags & MEMF_tmem)
 
 /*  "Client" (==domain) abstraction */
 
 struct client;
 typedef domid_t cli_id_t;
-typedef struct domain tmh_cli_ptr_t;
+typedef struct domain tmem_cli_ptr_t;
 
-extern tmh_client_t *tmh_client_init(cli_id_t);
-extern void tmh_client_destroy(tmh_client_t *);
+extern tmem_client_t *tmem_client_init(cli_id_t);
+extern void tmem_client_destroy(tmem_client_t *);
 
-static inline struct client *tmh_client_from_cli_id(cli_id_t cli_id)
+static inline struct client *tmem_client_from_cli_id(cli_id_t cli_id)
 {
     struct client *c;
     struct domain *d = rcu_lock_domain_by_id(cli_id);
@@ -292,25 +282,25 @@ static inline struct client *tmh_client_from_cli_id(cli_id_t cli_id)
     return c;
 }
 
-static inline struct client *tmh_client_from_current(void)
+static inline struct client *tmem_client_from_current(void)
 {
     return (struct client *)(current->domain->tmem);
 }
 
-#define tmh_client_is_dying(_client) (!!_client->tmh->domain->is_dying)
+#define tmem_client_is_dying(_client) (!!_client->tmem->domain->is_dying)
 
-static inline cli_id_t tmh_get_cli_id_from_current(void)
+static inline cli_id_t tmem_get_cli_id_from_current(void)
 {
     return current->domain->domain_id;
 }
 
-static inline tmh_cli_ptr_t *tmh_get_cli_ptr_from_current(void)
+static inline tmem_cli_ptr_t *tmem_get_cli_ptr_from_current(void)
 {
     return current->domain;
 }
 
-static inline bool_t tmh_set_client_from_id(
-    struct client *client, tmh_client_t *tmh, cli_id_t cli_id)
+static inline bool_t tmem_set_client_from_id(
+    struct client *client, tmem_client_t *tmem, cli_id_t cli_id)
 {
     struct domain *d = rcu_lock_domain_by_id(cli_id);
     bool_t rc = 0;
@@ -319,31 +309,31 @@ static inline bool_t tmh_set_client_from_id(
     if ( !d->is_dying )
     {
         d->tmem = client;
-        tmh->domain = d;
+        tmem->domain = d;
         rc = 1;
     }
     rcu_unlock_domain(d);
     return rc;
 }
 
-static inline bool_t tmh_current_permitted(void)
+static inline bool_t tmem_current_permitted(void)
 {
     return !xsm_tmem_op(XSM_HOOK);
 }
 
-static inline bool_t tmh_current_is_privileged(void)
+static inline bool_t tmem_current_is_privileged(void)
 {
     return !xsm_tmem_control(XSM_PRIV);
 }
 
-static inline uint8_t tmh_get_first_byte(struct page_info *pfp)
+static inline uint8_t tmem_get_first_byte(struct page_info *pfp)
 {
     void *p = __map_domain_page(pfp);
 
     return (uint8_t)(*(char *)p);
 }
 
-static inline int tmh_page_cmp(struct page_info *pfp1, struct page_info *pfp2)
+static inline int tmem_page_cmp(struct page_info *pfp1, struct page_info *pfp2)
 {
     const uint64_t *p1 = (uint64_t *)__map_domain_page(pfp1);
     const uint64_t *p2 = (uint64_t *)__map_domain_page(pfp2);
@@ -360,7 +350,7 @@ ASSERT(p2 != NULL);
     return 1;
 }
 
-static inline int tmh_pcd_cmp(void *va1, pagesize_t len1, void *va2, pagesize_t len2)
+static inline int tmem_pcd_cmp(void *va1, pagesize_t len1, void *va2, pagesize_t len2)
 {
     const char *p1 = (char *)va1;
     const char *p2 = (char *)va2;
@@ -381,7 +371,7 @@ static inline int tmh_pcd_cmp(void *va1, pagesize_t len1, void *va2, pagesize_t
     return 1;
 }
 
-static inline int tmh_tze_pfp_cmp(struct page_info *pfp1, pagesize_t pfp_len, void *tva, pagesize_t tze_len)
+static inline int tmem_tze_pfp_cmp(struct page_info *pfp1, pagesize_t pfp_len, void *tva, pagesize_t tze_len)
 {
     const uint64_t *p1 = (uint64_t *)__map_domain_page(pfp1);
     const uint64_t *p2;
@@ -410,7 +400,7 @@ static inline int tmh_tze_pfp_cmp(struct page_info *pfp1, pagesize_t pfp_len, vo
 
 /* return the size of the data in the pfp, ignoring trailing zeroes and
  * rounded up to the nearest multiple of 8 */
-static inline pagesize_t tmh_tze_pfp_scan(struct page_info *pfp)
+static inline pagesize_t tmem_tze_pfp_scan(struct page_info *pfp)
 {
     const uint64_t *p = (uint64_t *)__map_domain_page(pfp);
     pagesize_t bytecount = PAGE_SIZE;
@@ -421,7 +411,7 @@ static inline pagesize_t tmh_tze_pfp_scan(struct page_info *pfp)
     return bytecount;
 }
 
-static inline void tmh_tze_copy_from_pfp(void *tva, struct page_info *pfp, pagesize_t len)
+static inline void tmem_tze_copy_from_pfp(void *tva, struct page_info *pfp, pagesize_t len)
 {
     uint64_t *p1 = (uint64_t *)tva;
     const uint64_t *p2 = (uint64_t *)__map_domain_page(pfp);
@@ -438,7 +428,7 @@ typedef XEN_GUEST_HANDLE(char) cli_va_t;
 typedef XEN_GUEST_HANDLE_PARAM(tmem_op_t) tmem_cli_op_t;
 typedef XEN_GUEST_HANDLE_PARAM(char) tmem_cli_va_param_t;
 
-static inline int tmh_get_tmemop_from_client(tmem_op_t *op, tmem_cli_op_t uops)
+static inline int tmem_get_tmemop_from_client(tmem_op_t *op, tmem_cli_op_t uops)
 {
 #ifdef CONFIG_COMPAT
     if ( is_hvm_vcpu(current) ?
@@ -470,42 +460,42 @@ static inline int tmh_get_tmemop_from_client(tmem_op_t *op, tmem_cli_op_t uops)
     return copy_from_guest(op, uops, 1);
 }
 
-#define tmh_cli_buf_null guest_handle_from_ptr(NULL, char)
+#define tmem_cli_buf_null guest_handle_from_ptr(NULL, char)
 
-static inline void tmh_copy_to_client_buf_offset(tmem_cli_va_param_t clibuf,
+static inline void tmem_copy_to_client_buf_offset(tmem_cli_va_param_t clibuf,
 						 int off,
 						 char *tmembuf, int len)
 {
     copy_to_guest_offset(clibuf,off,tmembuf,len);
 }
 
-#define tmh_copy_to_client_buf(clibuf, tmembuf, cnt) \
+#define tmem_copy_to_client_buf(clibuf, tmembuf, cnt) \
     copy_to_guest(guest_handle_cast(clibuf, void), tmembuf, cnt)
 
-#define tmh_client_buf_add guest_handle_add_offset
+#define tmem_client_buf_add guest_handle_add_offset
 
-#define TMH_CLI_ID_NULL ((cli_id_t)((domid_t)-1L))
+#define TMEM_CLI_ID_NULL ((cli_id_t)((domid_t)-1L))
 
-#define tmh_cli_id_str "domid"
-#define tmh_client_str "domain"
+#define tmem_cli_id_str "domid"
+#define tmem_client_str "domain"
 
-int tmh_decompress_to_client(xen_pfn_t, void *, size_t,
+int tmem_decompress_to_client(xen_pfn_t, void *, size_t,
 			     tmem_cli_va_param_t);
 
-int tmh_compress_from_client(xen_pfn_t, void **, size_t *,
+int tmem_compress_from_client(xen_pfn_t, void **, size_t *,
 			     tmem_cli_va_param_t);
 
-int tmh_copy_from_client(struct page_info *, xen_pfn_t, pagesize_t tmem_offset,
+int tmem_copy_from_client(struct page_info *, xen_pfn_t, pagesize_t tmem_offset,
     pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t);
 
-int tmh_copy_to_client(xen_pfn_t, struct page_info *, pagesize_t tmem_offset,
+int tmem_copy_to_client(xen_pfn_t, struct page_info *, pagesize_t tmem_offset,
     pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t);
 
-extern int tmh_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va, pagesize_t len);
+extern int tmem_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va, pagesize_t len);
 
-#define tmh_client_err(fmt, args...)  printk(XENLOG_G_ERR fmt, ##args)
-#define tmh_client_warn(fmt, args...) printk(XENLOG_G_WARNING fmt, ##args)
-#define tmh_client_info(fmt, args...) printk(XENLOG_G_INFO fmt, ##args)
+#define tmem_client_err(fmt, args...)  printk(XENLOG_G_ERR fmt, ##args)
+#define tmem_client_warn(fmt, args...) printk(XENLOG_G_WARNING fmt, ##args)
+#define tmem_client_info(fmt, args...) printk(XENLOG_G_INFO fmt, ##args)
 
 #define TMEM_PERF
 #ifdef TMEM_PERF
-- 
1.7.10.4

^ permalink raw reply related	[flat|nested] 15+ messages in thread

* [PATCH v2 05/11] tmem: cleanup: drop most of the typedefs
  2013-11-08  1:03 [PATCH v2 00/11] tmem: some basic cleanup Bob Liu
                   ` (3 preceding siblings ...)
  2013-11-08  1:03 ` [PATCH v2 04/11] tmem: cleanup: rename 'tmh_' with 'tmem_' Bob Liu
@ 2013-11-08  1:03 ` Bob Liu
  2013-11-08  1:03 ` [PATCH v2 06/11] tmem: cleanup: drop function tmem_alloc/free_infra Bob Liu
                   ` (5 subsequent siblings)
  10 siblings, 0 replies; 15+ messages in thread
From: Bob Liu @ 2013-11-08  1:03 UTC (permalink / raw)
  To: xen-devel; +Cc: Bob Liu, keir, ian.campbell, JBeulich

There are many typedefs in tmem, most of which are unnecessary and make the
code hard to follow.
This patch tries to clean up those typedefs.

Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
 xen/common/tmem.c          |  359 +++++++++++++++++++++-----------------------
 xen/common/tmem_xen.c      |    2 +-
 xen/include/xen/tmem_xen.h |   14 +-
 3 files changed, 182 insertions(+), 193 deletions(-)

diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index 7d22e0c..3d8e67f 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -98,16 +98,16 @@ DECL_CYC_COUNTER(decompress);
 #define MAX_POOLS_PER_DOMAIN 16
 #define MAX_GLOBAL_SHARED_POOLS  16
 
-struct tm_pool;
+struct tmem_pool;
 struct tmem_page_descriptor;
 struct tmem_page_content_descriptor;
 struct client {
     struct list_head client_list;
-    struct tm_pool *pools[MAX_POOLS_PER_DOMAIN];
+    struct tmem_pool *pools[MAX_POOLS_PER_DOMAIN];
     tmem_client_t *tmem;
     struct list_head ephemeral_page_list;
     long eph_count, eph_count_max;
-    cli_id_t cli_id;
+    domid_t cli_id;
     uint32_t weight;
     uint32_t cap;
     bool_t compress;
@@ -127,24 +127,22 @@ struct client {
     /* shared pool authentication */
     uint64_t shared_auth_uuid[MAX_GLOBAL_SHARED_POOLS][2];
 };
-typedef struct client client_t;
 
 struct share_list {
     struct list_head share_list;
-    client_t *client;
+    struct client *client;
 };
-typedef struct share_list sharelist_t;
 
 #define OBJ_HASH_BUCKETS 256 /* must be power of two */
 #define OBJ_HASH_BUCKETS_MASK (OBJ_HASH_BUCKETS-1)
 
-struct tm_pool {
+struct tmem_pool {
     bool_t shared;
     bool_t persistent;
     bool_t is_dying;
     int pageshift; /* 0 == 2**12 */
     struct list_head pool_list;
-    client_t *client;
+    struct client *client;
     uint64_t uuid[2]; /* 0 for private, non-zero for shared */
     uint32_t pool_id;
     rwlock_t pool_rwlock;
@@ -169,7 +167,6 @@ struct tm_pool {
     unsigned long flush_objs, flush_objs_found;
     DECL_SENTINEL
 };
-typedef struct tm_pool pool_t;
 
 #define is_persistent(_p)  (_p->persistent)
 #define is_ephemeral(_p)   (!(_p->persistent))
@@ -179,29 +176,25 @@ typedef struct tm_pool pool_t;
 struct oid {
     uint64_t oid[3];
 };
-typedef struct oid OID;
 
 struct tmem_object_root {
     DECL_SENTINEL
-    OID oid;
+    struct oid oid;
     struct rb_node rb_tree_node; /* protected by pool->pool_rwlock */
     unsigned long objnode_count; /* atomicity depends on obj_spinlock */
     long pgp_count; /* atomicity depends on obj_spinlock */
     struct radix_tree_root tree_root; /* tree of pages within object */
-    pool_t *pool;
-    cli_id_t last_client;
+    struct tmem_pool *pool;
+    domid_t last_client;
     spinlock_t obj_spinlock;
     bool_t no_evict; /* if globally locked, pseudo-locks against eviction */
 };
-typedef struct tmem_object_root obj_t;
 
-typedef struct radix_tree_node rtn_t;
 struct tmem_object_node {
-    obj_t *obj;
+    struct tmem_object_root *obj;
     DECL_SENTINEL
-    rtn_t rtn;
+    struct radix_tree_node rtn;
 };
-typedef struct tmem_object_node objnode_t;
 
 struct tmem_page_descriptor {
     union {
@@ -214,9 +207,9 @@ struct tmem_page_descriptor {
                 struct list_head client_eph_pages;
                 struct list_head pool_pers_pages;
             };
-            obj_t *obj;
+            struct tmem_object_root *obj;
         } us;
-        OID inv_oid;  /* used for invalid list only */
+        struct oid inv_oid;  /* used for invalid list only */
     };
     pagesize_t size; /* 0 == PAGE_SIZE (pfp), -1 == data invalid,
                     else compressed data (cdata) */
@@ -236,7 +229,6 @@ struct tmem_page_descriptor {
     };
     DECL_SENTINEL
 };
-typedef struct tmem_page_descriptor pgp_t;
 
 #define PCD_TZE_MAX_SIZE (PAGE_SIZE - (PAGE_SIZE/64))
 
@@ -253,7 +245,6 @@ struct tmem_page_content_descriptor {
                      * else if tze, 0<=size<PAGE_SIZE, rounded up to mult of 8
                      * else PAGE_SIZE -> *pfp */
 };
-typedef struct tmem_page_content_descriptor pcd_t;
 struct rb_root pcd_tree_roots[256]; /* choose based on first byte of page */
 rwlock_t pcd_tree_rwlocks[256]; /* poor man's concurrency for now */
 
@@ -262,7 +253,7 @@ static LIST_HEAD(global_ephemeral_page_list); /* all pages in ephemeral pools */
 static LIST_HEAD(global_client_list);
 static LIST_HEAD(global_pool_list);
 
-static pool_t *global_shared_pools[MAX_GLOBAL_SHARED_POOLS] = { 0 };
+static struct tmem_pool *global_shared_pools[MAX_GLOBAL_SHARED_POOLS] = { 0 };
 static bool_t global_shared_auth = 0;
 static atomic_t client_weight_total = ATOMIC_INIT(0);
 static int tmem_initialized = 0;
@@ -314,7 +305,7 @@ static atomic_t global_rtree_node_count = ATOMIC_INIT(0);
 #define tmem_malloc_bytes(_size,_pool) \
        _tmem_malloc(_size, 1, _pool)
 
-static NOINLINE void *_tmem_malloc(size_t size, size_t align, pool_t *pool)
+static NOINLINE void *_tmem_malloc(size_t size, size_t align, struct tmem_pool *pool)
 {
     void *v;
 
@@ -327,7 +318,7 @@ static NOINLINE void *_tmem_malloc(size_t size, size_t align, pool_t *pool)
     return v;
 }
 
-static NOINLINE void tmem_free(void *p, size_t size, pool_t *pool)
+static NOINLINE void tmem_free(void *p, size_t size, struct tmem_pool *pool)
 {
     if ( pool == NULL || !is_persistent(pool) )
         tmem_free_subpage(p,size);
@@ -335,7 +326,7 @@ static NOINLINE void tmem_free(void *p, size_t size, pool_t *pool)
         tmem_free_subpage_thispool(pool,p,size);
 }
 
-static NOINLINE struct page_info *tmem_page_alloc(pool_t *pool)
+static NOINLINE struct page_info *tmem_page_alloc(struct tmem_pool *pool)
 {
     struct page_info *pfp = NULL;
 
@@ -350,7 +341,7 @@ static NOINLINE struct page_info *tmem_page_alloc(pool_t *pool)
     return pfp;
 }
 
-static NOINLINE void tmem_page_free(pool_t *pool, struct page_info *pfp)
+static NOINLINE void tmem_page_free(struct tmem_pool *pool, struct page_info *pfp)
 {
     ASSERT(pfp);
     if ( pool == NULL || !is_persistent(pool) )
@@ -364,10 +355,10 @@ static NOINLINE void tmem_page_free(pool_t *pool, struct page_info *pfp)
 
 #define NOT_SHAREABLE ((uint16_t)-1UL)
 
-static NOINLINE int pcd_copy_to_client(xen_pfn_t cmfn, pgp_t *pgp)
+static NOINLINE int pcd_copy_to_client(xen_pfn_t cmfn, struct tmem_page_descriptor *pgp)
 {
     uint8_t firstbyte = pgp->firstbyte;
-    pcd_t *pcd;
+    struct tmem_page_content_descriptor *pcd;
     int ret;
 
     ASSERT(tmem_dedup_enabled());
@@ -388,9 +379,9 @@ static NOINLINE int pcd_copy_to_client(xen_pfn_t cmfn, pgp_t *pgp)
 
 /* ensure pgp no longer points to pcd, nor vice-versa */
 /* take pcd rwlock unless have_pcd_rwlock is set, always unlock when done */
-static NOINLINE void pcd_disassociate(pgp_t *pgp, pool_t *pool, bool_t have_pcd_rwlock)
+static NOINLINE void pcd_disassociate(struct tmem_page_descriptor *pgp, struct tmem_pool *pool, bool_t have_pcd_rwlock)
 {
-    pcd_t *pcd = pgp->pcd;
+    struct tmem_page_content_descriptor *pcd = pgp->pcd;
     struct page_info *pfp = pgp->pcd->pfp;
     uint16_t firstbyte = pgp->firstbyte;
     char *pcd_tze = pgp->pcd->tze;
@@ -425,7 +416,7 @@ static NOINLINE void pcd_disassociate(pgp_t *pgp, pool_t *pool, bool_t have_pcd_
     /* reinit the struct for safety for now */
     RB_CLEAR_NODE(&pcd->pcd_rb_tree_node);
     /* now free up the pcd memory */
-    tmem_free(pcd,sizeof(pcd_t),NULL);
+    tmem_free(pcd,sizeof(struct tmem_page_content_descriptor),NULL);
     atomic_dec_and_assert(global_pcd_count);
     if ( pgp_size != 0 && pcd_size < PAGE_SIZE )
     {
@@ -451,11 +442,11 @@ static NOINLINE void pcd_disassociate(pgp_t *pgp, pool_t *pool, bool_t have_pcd_
 }
 
 
-static NOINLINE int pcd_associate(pgp_t *pgp, char *cdata, pagesize_t csize)
+static NOINLINE int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize_t csize)
 {
     struct rb_node **new, *parent = NULL;
     struct rb_root *root;
-    pcd_t *pcd;
+    struct tmem_page_content_descriptor *pcd;
     int cmp;
     pagesize_t pfp_size = 0;
     uint8_t firstbyte = (cdata == NULL) ? tmem_get_first_byte(pgp->pfp) : *cdata;
@@ -486,7 +477,7 @@ static NOINLINE int pcd_associate(pgp_t *pgp, char *cdata, pagesize_t csize)
     new = &(root->rb_node);
     while ( *new )
     {
-        pcd = container_of(*new, pcd_t, pcd_rb_tree_node);
+        pcd = container_of(*new, struct tmem_page_content_descriptor, pcd_rb_tree_node);
         parent = *new;
         /* compare new entry and rbtree entry, set cmp accordingly */
         if ( cdata != NULL )
@@ -531,14 +522,14 @@ static NOINLINE int pcd_associate(pgp_t *pgp, char *cdata, pagesize_t csize)
     }
 
     /* exited while loop with no match, so alloc a pcd and put it in the tree */
-    if ( (pcd = tmem_malloc(pcd_t, NULL)) == NULL )
+    if ( (pcd = tmem_malloc(struct tmem_page_content_descriptor, NULL)) == NULL )
     {
         ret = -ENOMEM;
         goto unlock;
     } else if ( cdata != NULL ) {
         if ( (pcd->cdata = tmem_malloc_bytes(csize,pgp->us.obj->pool)) == NULL )
         {
-            tmem_free(pcd,sizeof(pcd_t),NULL);
+            tmem_free(pcd,sizeof(struct tmem_page_content_descriptor),NULL);
             ret = -ENOMEM;
             goto unlock;
         }
@@ -587,16 +578,16 @@ unlock:
 
 /************ PAGE DESCRIPTOR MANIPULATION ROUTINES *******************/
 
-/* allocate a pgp_t and associate it with an object */
-static NOINLINE pgp_t *pgp_alloc(obj_t *obj)
+/* allocate a struct tmem_page_descriptor and associate it with an object */
+static NOINLINE struct tmem_page_descriptor *pgp_alloc(struct tmem_object_root *obj)
 {
-    pgp_t *pgp;
-    pool_t *pool;
+    struct tmem_page_descriptor *pgp;
+    struct tmem_pool *pool;
 
     ASSERT(obj != NULL);
     ASSERT(obj->pool != NULL);
     pool = obj->pool;
-    if ( (pgp = tmem_malloc(pgp_t, pool)) == NULL )
+    if ( (pgp = tmem_malloc(struct tmem_page_descriptor, pool)) == NULL )
         return NULL;
     pgp->us.obj = obj;
     INIT_LIST_HEAD(&pgp->global_eph_pages);
@@ -617,7 +608,7 @@ static NOINLINE pgp_t *pgp_alloc(obj_t *obj)
     return pgp;
 }
 
-static pgp_t *pgp_lookup_in_obj(obj_t *obj, uint32_t index)
+static struct tmem_page_descriptor *pgp_lookup_in_obj(struct tmem_object_root *obj, uint32_t index)
 {
     ASSERT(obj != NULL);
     ASSERT_SPINLOCK(&obj->obj_spinlock);
@@ -627,7 +618,7 @@ static pgp_t *pgp_lookup_in_obj(obj_t *obj, uint32_t index)
     return radix_tree_lookup(&obj->tree_root, index);
 }
 
-static NOINLINE void pgp_free_data(pgp_t *pgp, pool_t *pool)
+static NOINLINE void pgp_free_data(struct tmem_page_descriptor *pgp, struct tmem_pool *pool)
 {
     pagesize_t pgp_size = pgp->size;
 
@@ -648,9 +639,9 @@ static NOINLINE void pgp_free_data(pgp_t *pgp, pool_t *pool)
     pgp->size = -1;
 }
 
-static NOINLINE void pgp_free(pgp_t *pgp, int from_delete)
+static NOINLINE void pgp_free(struct tmem_page_descriptor *pgp, int from_delete)
 {
-    pool_t *pool = NULL;
+    struct tmem_pool *pool = NULL;
 
     ASSERT_SENTINEL(pgp,PGD);
     ASSERT(pgp->us.obj != NULL);
@@ -679,25 +670,25 @@ static NOINLINE void pgp_free(pgp_t *pgp, int from_delete)
     INVERT_SENTINEL(pgp,PGD);
     pgp->us.obj = NULL;
     pgp->index = -1;
-    tmem_free(pgp,sizeof(pgp_t),pool);
+    tmem_free(pgp,sizeof(struct tmem_page_descriptor),pool);
 }
 
-static NOINLINE void pgp_free_from_inv_list(client_t *client, pgp_t *pgp)
+static NOINLINE void pgp_free_from_inv_list(struct client *client, struct tmem_page_descriptor *pgp)
 {
-    pool_t *pool = client->pools[pgp->pool_id];
+    struct tmem_pool *pool = client->pools[pgp->pool_id];
 
     ASSERT_SENTINEL(pool,POOL);
     ASSERT_SENTINEL(pgp,PGD);
     INVERT_SENTINEL(pgp,PGD);
     pgp->us.obj = NULL;
     pgp->index = -1;
-    tmem_free(pgp,sizeof(pgp_t),pool);
+    tmem_free(pgp,sizeof(struct tmem_page_descriptor),pool);
 }
 
 /* remove the page from appropriate lists but not from parent object */
-static void pgp_delist(pgp_t *pgp, bool_t no_eph_lock)
+static void pgp_delist(struct tmem_page_descriptor *pgp, bool_t no_eph_lock)
 {
-    client_t *client;
+    struct client *client;
 
     ASSERT(pgp != NULL);
     ASSERT(pgp->us.obj != NULL);
@@ -736,7 +727,7 @@ static void pgp_delist(pgp_t *pgp, bool_t no_eph_lock)
 }
 
 /* remove page from lists (but not from parent object) and free it */
-static NOINLINE void pgp_delete(pgp_t *pgp, bool_t no_eph_lock)
+static NOINLINE void pgp_delete(struct tmem_page_descriptor *pgp, bool_t no_eph_lock)
 {
     uint64_t life;
 
@@ -752,7 +743,7 @@ static NOINLINE void pgp_delete(pgp_t *pgp, bool_t no_eph_lock)
 /* called only indirectly by radix_tree_destroy */
 static NOINLINE void pgp_destroy(void *v)
 {
-    pgp_t *pgp = (pgp_t *)v;
+    struct tmem_page_descriptor *pgp = (struct tmem_page_descriptor *)v;
 
     ASSERT_SPINLOCK(&pgp->us.obj->obj_spinlock);
     pgp_delist(pgp,0);
@@ -762,7 +753,7 @@ static NOINLINE void pgp_destroy(void *v)
     pgp_free(pgp,0);
 }
 
-static int pgp_add_to_obj(obj_t *obj, uint32_t index, pgp_t *pgp)
+static int pgp_add_to_obj(struct tmem_object_root *obj, uint32_t index, struct tmem_page_descriptor *pgp)
 {
     int ret;
 
@@ -773,9 +764,9 @@ static int pgp_add_to_obj(obj_t *obj, uint32_t index, pgp_t *pgp)
     return ret;
 }
 
-static NOINLINE pgp_t *pgp_delete_from_obj(obj_t *obj, uint32_t index)
+static NOINLINE struct tmem_page_descriptor *pgp_delete_from_obj(struct tmem_object_root *obj, uint32_t index)
 {
-    pgp_t *pgp;
+    struct tmem_page_descriptor *pgp;
 
     ASSERT(obj != NULL);
     ASSERT_SPINLOCK(&obj->obj_spinlock);
@@ -793,20 +784,20 @@ static NOINLINE pgp_t *pgp_delete_from_obj(obj_t *obj, uint32_t index)
 /************ RADIX TREE NODE MANIPULATION ROUTINES *******************/
 
 /* called only indirectly from radix_tree_insert */
-static NOINLINE rtn_t *rtn_alloc(void *arg)
+static NOINLINE struct radix_tree_node *rtn_alloc(void *arg)
 {
-    objnode_t *objnode;
-    obj_t *obj = (obj_t *)arg;
+    struct tmem_object_node *objnode;
+    struct tmem_object_root *obj = (struct tmem_object_root *)arg;
 
     ASSERT_SENTINEL(obj,OBJ);
     ASSERT(obj->pool != NULL);
     ASSERT_SENTINEL(obj->pool,POOL);
-    objnode = tmem_malloc(objnode_t,obj->pool);
+    objnode = tmem_malloc(struct tmem_object_node,obj->pool);
     if (objnode == NULL)
         return NULL;
     objnode->obj = obj;
     SET_SENTINEL(objnode,OBJNODE);
-    memset(&objnode->rtn, 0, sizeof(rtn_t));
+    memset(&objnode->rtn, 0, sizeof(struct radix_tree_node));
     if (++obj->pool->objnode_count > obj->pool->objnode_count_max)
         obj->pool->objnode_count_max = obj->pool->objnode_count;
     atomic_inc_and_max(global_rtree_node_count);
@@ -815,13 +806,13 @@ static NOINLINE rtn_t *rtn_alloc(void *arg)
 }
 
 /* called only indirectly from radix_tree_delete/destroy */
-static void rtn_free(rtn_t *rtn, void *arg)
+static void rtn_free(struct radix_tree_node *rtn, void *arg)
 {
-    pool_t *pool;
-    objnode_t *objnode;
+    struct tmem_pool *pool;
+    struct tmem_object_node *objnode;
 
     ASSERT(rtn != NULL);
-    objnode = container_of(rtn,objnode_t,rtn);
+    objnode = container_of(rtn,struct tmem_object_node,rtn);
     ASSERT_SENTINEL(objnode,OBJNODE);
     INVERT_SENTINEL(objnode,OBJNODE);
     ASSERT(objnode->obj != NULL);
@@ -833,13 +824,13 @@ static void rtn_free(rtn_t *rtn, void *arg)
     pool->objnode_count--;
     objnode->obj->objnode_count--;
     objnode->obj = NULL;
-    tmem_free(objnode,sizeof(objnode_t),pool);
+    tmem_free(objnode,sizeof(struct tmem_object_node),pool);
     atomic_dec_and_assert(global_rtree_node_count);
 }
 
 /************ POOL OBJECT COLLECTION MANIPULATION ROUTINES *******************/
 
-int oid_compare(OID *left, OID *right)
+int oid_compare(struct oid *left, struct oid *right)
 {
     if ( left->oid[2] == right->oid[2] )
     {
@@ -863,29 +854,29 @@ int oid_compare(OID *left, OID *right)
         return 1;
 }
 
-void oid_set_invalid(OID *oidp)
+void oid_set_invalid(struct oid *oidp)
 {
     oidp->oid[0] = oidp->oid[1] = oidp->oid[2] = -1UL;
 }
 
-unsigned oid_hash(OID *oidp)
+unsigned oid_hash(struct oid *oidp)
 {
     return (tmem_hash(oidp->oid[0] ^ oidp->oid[1] ^ oidp->oid[2],
                      BITS_PER_LONG) & OBJ_HASH_BUCKETS_MASK);
 }
 
 /* searches for object==oid in pool, returns locked object if found */
-static NOINLINE obj_t * obj_find(pool_t *pool, OID *oidp)
+static NOINLINE struct tmem_object_root * obj_find(struct tmem_pool *pool, struct oid *oidp)
 {
     struct rb_node *node;
-    obj_t *obj;
+    struct tmem_object_root *obj;
 
 restart_find:
     tmem_read_lock(&pool->pool_rwlock);
     node = pool->obj_rb_root[oid_hash(oidp)].rb_node;
     while ( node )
     {
-        obj = container_of(node, obj_t, rb_tree_node);
+        obj = container_of(node, struct tmem_object_root, rb_tree_node);
         switch ( oid_compare(&obj->oid, oidp) )
         {
             case 0: /* equal */
@@ -913,10 +904,10 @@ restart_find:
 }
 
 /* free an object that has no more pgps in it */
-static NOINLINE void obj_free(obj_t *obj, int no_rebalance)
+static NOINLINE void obj_free(struct tmem_object_root *obj, int no_rebalance)
 {
-    pool_t *pool;
-    OID old_oid;
+    struct tmem_pool *pool;
+    struct oid old_oid;
 
     ASSERT_SPINLOCK(&obj->obj_spinlock);
     ASSERT(obj != NULL);
@@ -942,18 +933,18 @@ static NOINLINE void obj_free(obj_t *obj, int no_rebalance)
     if ( !no_rebalance )
         rb_erase(&obj->rb_tree_node,&pool->obj_rb_root[oid_hash(&old_oid)]);
     tmem_spin_unlock(&obj->obj_spinlock);
-    tmem_free(obj,sizeof(obj_t),pool);
+    tmem_free(obj,sizeof(struct tmem_object_root),pool);
 }
 
-static NOINLINE int obj_rb_insert(struct rb_root *root, obj_t *obj)
+static NOINLINE int obj_rb_insert(struct rb_root *root, struct tmem_object_root *obj)
 {
     struct rb_node **new, *parent = NULL;
-    obj_t *this;
+    struct tmem_object_root *this;
 
     new = &(root->rb_node);
     while ( *new )
     {
-        this = container_of(*new, obj_t, rb_tree_node);
+        this = container_of(*new, struct tmem_object_root, rb_tree_node);
         parent = *new;
         switch ( oid_compare(&this->oid, &obj->oid) )
         {
@@ -976,13 +967,13 @@ static NOINLINE int obj_rb_insert(struct rb_root *root, obj_t *obj)
  * allocate, initialize, and insert an tmem_object_root
  * (should be called only if find failed)
  */
-static NOINLINE obj_t * obj_new(pool_t *pool, OID *oidp)
+static NOINLINE struct tmem_object_root * obj_new(struct tmem_pool *pool, struct oid *oidp)
 {
-    obj_t *obj;
+    struct tmem_object_root *obj;
 
     ASSERT(pool != NULL);
     ASSERT_WRITELOCK(&pool->pool_rwlock);
-    if ( (obj = tmem_malloc(obj_t,pool)) == NULL )
+    if ( (obj = tmem_malloc(struct tmem_object_root,pool)) == NULL )
         return NULL;
     pool->obj_count++;
     if (pool->obj_count > pool->obj_count_max)
@@ -1005,7 +996,7 @@ static NOINLINE obj_t * obj_new(pool_t *pool, OID *oidp)
 }
 
 /* free an object after destroying any pgps in it */
-static NOINLINE void obj_destroy(obj_t *obj, int no_rebalance)
+static NOINLINE void obj_destroy(struct tmem_object_root *obj, int no_rebalance)
 {
     ASSERT_WRITELOCK(&obj->pool->pool_rwlock);
     radix_tree_destroy(&obj->tree_root, pgp_destroy);
@@ -1013,10 +1004,10 @@ static NOINLINE void obj_destroy(obj_t *obj, int no_rebalance)
 }
 
 /* destroys all objs in a pool, or only if obj->last_client matches cli_id */
-static void pool_destroy_objs(pool_t *pool, bool_t selective, cli_id_t cli_id)
+static void pool_destroy_objs(struct tmem_pool *pool, bool_t selective, domid_t cli_id)
 {
     struct rb_node *node;
-    obj_t *obj;
+    struct tmem_object_root *obj;
     int i;
 
     tmem_write_lock(&pool->pool_rwlock);
@@ -1026,7 +1017,7 @@ static void pool_destroy_objs(pool_t *pool, bool_t selective, cli_id_t cli_id)
         node = rb_first(&pool->obj_rb_root[i]);
         while ( node != NULL )
         {
-            obj = container_of(node, obj_t, rb_tree_node);
+            obj = container_of(node, struct tmem_object_root, rb_tree_node);
             tmem_spin_lock(&obj->obj_spinlock);
             node = rb_next(node);
             ASSERT(obj->no_evict == 0);
@@ -1045,12 +1036,12 @@ static void pool_destroy_objs(pool_t *pool, bool_t selective, cli_id_t cli_id)
 
 /************ POOL MANIPULATION ROUTINES ******************************/
 
-static pool_t * pool_alloc(void)
+static struct tmem_pool * pool_alloc(void)
 {
-    pool_t *pool;
+    struct tmem_pool *pool;
     int i;
 
-    if ( (pool = tmem_alloc_infra(sizeof(pool_t),__alignof__(pool_t))) == NULL )
+    if ( (pool = tmem_alloc_infra(sizeof(struct tmem_pool),__alignof__(struct tmem_pool))) == NULL )
         return NULL;
     for (i = 0; i < OBJ_HASH_BUCKETS; i++)
         pool->obj_rb_root[i] = RB_ROOT;
@@ -1073,7 +1064,7 @@ static pool_t * pool_alloc(void)
     return pool;
 }
 
-static NOINLINE void pool_free(pool_t *pool)
+static NOINLINE void pool_free(struct tmem_pool *pool)
 {
     ASSERT_SENTINEL(pool,POOL);
     INVERT_SENTINEL(pool,POOL);
@@ -1084,12 +1075,12 @@ static NOINLINE void pool_free(pool_t *pool)
 
 /* register new_client as a user of this shared pool and return new
    total number of registered users */
-static int shared_pool_join(pool_t *pool, client_t *new_client)
+static int shared_pool_join(struct tmem_pool *pool, struct client *new_client)
 {
-    sharelist_t *sl;
+    struct share_list *sl;
 
     ASSERT(is_shared(pool));
-    if ( (sl = tmem_malloc(sharelist_t,NULL)) == NULL )
+    if ( (sl = tmem_malloc(struct share_list,NULL)) == NULL )
         return -1;
     sl->client = new_client;
     list_add_tail(&sl->share_list, &pool->share_list);
@@ -1100,11 +1091,11 @@ static int shared_pool_join(pool_t *pool, client_t *new_client)
 }
 
 /* reassign "ownership" of the pool to another client that shares this pool */
-static NOINLINE void shared_pool_reassign(pool_t *pool)
+static NOINLINE void shared_pool_reassign(struct tmem_pool *pool)
 {
-    sharelist_t *sl;
+    struct share_list *sl;
     int poolid;
-    client_t *old_client = pool->client, *new_client;
+    struct client *old_client = pool->client, *new_client;
 
     ASSERT(is_shared(pool));
     if ( list_empty(&pool->share_list) )
@@ -1113,7 +1104,7 @@ static NOINLINE void shared_pool_reassign(pool_t *pool)
         return;
     }
     old_client->pools[pool->pool_id] = NULL;
-    sl = list_entry(pool->share_list.next, sharelist_t, share_list);
+    sl = list_entry(pool->share_list.next, struct share_list, share_list);
     ASSERT(sl->client != old_client);
     pool->client = new_client = sl->client;
     for (poolid = 0; poolid < MAX_POOLS_PER_DOMAIN; poolid++)
@@ -1131,9 +1122,9 @@ static NOINLINE void shared_pool_reassign(pool_t *pool)
 
 /* destroy all objects with last_client same as passed cli_id,
    remove pool's cli_id from list of sharers of this pool */
-static NOINLINE int shared_pool_quit(pool_t *pool, cli_id_t cli_id)
+static NOINLINE int shared_pool_quit(struct tmem_pool *pool, domid_t cli_id)
 {
-    sharelist_t *sl;
+    struct share_list *sl;
     int s_poolid;
 
     ASSERT(is_shared(pool));
@@ -1146,7 +1137,7 @@ static NOINLINE int shared_pool_quit(pool_t *pool, cli_id_t cli_id)
         if (sl->client->cli_id != cli_id)
             continue;
         list_del(&sl->share_list);
-        tmem_free(sl,sizeof(sharelist_t),pool);
+        tmem_free(sl,sizeof(struct share_list),pool);
         --pool->shared_count;
         if (pool->client->cli_id == cli_id)
             shared_pool_reassign(pool);
@@ -1166,7 +1157,7 @@ static NOINLINE int shared_pool_quit(pool_t *pool, cli_id_t cli_id)
 }
 
 /* flush all data (owned by cli_id) from a pool and, optionally, free it */
-static void pool_flush(pool_t *pool, cli_id_t cli_id, bool_t destroy)
+static void pool_flush(struct tmem_pool *pool, domid_t cli_id, bool_t destroy)
 {
     ASSERT(pool != NULL);
     if ( (is_shared(pool)) && (shared_pool_quit(pool,cli_id) > 0) )
@@ -1196,9 +1187,9 @@ static void pool_flush(pool_t *pool, cli_id_t cli_id, bool_t destroy)
 
 /************ CLIENT MANIPULATION OPERATIONS **************************/
 
-static client_t *client_create(cli_id_t cli_id)
+static struct client *client_create(domid_t cli_id)
 {
-    client_t *client = tmem_alloc_infra(sizeof(client_t),__alignof__(client_t));
+    struct client *client = tmem_alloc_infra(sizeof(struct client),__alignof__(struct client));
     int i;
 
     tmem_client_info("tmem: initializing tmem capability for %s=%d...",
@@ -1208,7 +1199,7 @@ static client_t *client_create(cli_id_t cli_id)
         tmem_client_err("failed... out of memory\n");
         goto fail;
     }
-    memset(client,0,sizeof(client_t));
+    memset(client,0,sizeof(struct client));
     if ( (client->tmem = tmem_client_init(cli_id)) == NULL )
     {
         tmem_client_err("failed... can't allocate host-dependent part of client\n");
@@ -1242,7 +1233,7 @@ static client_t *client_create(cli_id_t cli_id)
     return NULL;
 }
 
-static void client_free(client_t *client)
+static void client_free(struct client *client)
 {
     list_del(&client->client_list);
     tmem_client_destroy(client->tmem);
@@ -1250,10 +1241,10 @@ static void client_free(client_t *client)
 }
 
 /* flush all data from a client and, optionally, free it */
-static void client_flush(client_t *client, bool_t destroy)
+static void client_flush(struct client *client, bool_t destroy)
 {
     int i;
-    pool_t *pool;
+    struct tmem_pool *pool;
 
     for  (i = 0; i < MAX_POOLS_PER_DOMAIN; i++)
     {
@@ -1267,7 +1258,7 @@ static void client_flush(client_t *client, bool_t destroy)
         client_free(client);
 }
 
-static bool_t client_over_quota(client_t *client)
+static bool_t client_over_quota(struct client *client)
 {
     int total = _atomic_read(client_weight_total);
 
@@ -1279,18 +1270,18 @@ static bool_t client_over_quota(client_t *client)
              ((total*100L) / client->weight) );
 }
 
-static void client_freeze(client_t *client, int freeze)
+static void client_freeze(struct client *client, int freeze)
 {
     client->frozen = freeze;
 }
 
 /************ MEMORY REVOCATION ROUTINES *******************************/
 
-static bool_t tmem_try_to_evict_pgp(pgp_t *pgp, bool_t *hold_pool_rwlock)
+static bool_t tmem_try_to_evict_pgp(struct tmem_page_descriptor *pgp, bool_t *hold_pool_rwlock)
 {
-    obj_t *obj = pgp->us.obj;
-    pool_t *pool = obj->pool;
-    client_t *client = pool->client;
+    struct tmem_object_root *obj = pgp->us.obj;
+    struct tmem_pool *pool = obj->pool;
+    struct client *client = pool->client;
     uint16_t firstbyte = pgp->firstbyte;
 
     if ( pool->is_dying )
@@ -1334,10 +1325,10 @@ obj_unlock:
 
 static int tmem_evict(void)
 {
-    client_t *client = tmem_client_from_current();
-    pgp_t *pgp = NULL, *pgp2, *pgp_del;
-    obj_t *obj;
-    pool_t *pool;
+    struct client *client = tmem_client_from_current();
+    struct tmem_page_descriptor *pgp = NULL, *pgp2, *pgp_del;
+    struct tmem_object_root *obj;
+    struct tmem_pool *pool;
     int ret = 0;
     bool_t hold_pool_rwlock = 0;
 
@@ -1430,7 +1421,7 @@ static inline void tmem_ensure_avail_pages(void)
 
 /************ TMEM CORE OPERATIONS ************************************/
 
-static NOINLINE int do_tmem_put_compress(pgp_t *pgp, xen_pfn_t cmfn,
+static NOINLINE int do_tmem_put_compress(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn,
                                          tmem_cli_va_param_t clibuf)
 {
     void *dst, *p;
@@ -1473,14 +1464,14 @@ out:
     return ret;
 }
 
-static NOINLINE int do_tmem_dup_put(pgp_t *pgp, xen_pfn_t cmfn,
+static NOINLINE int do_tmem_dup_put(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn,
        pagesize_t tmem_offset, pagesize_t pfn_offset, pagesize_t len,
        tmem_cli_va_param_t clibuf)
 {
-    pool_t *pool;
-    obj_t *obj;
-    client_t *client;
-    pgp_t *pgpfound = NULL;
+    struct tmem_pool *pool;
+    struct tmem_object_root *obj;
+    struct client *client;
+    struct tmem_page_descriptor *pgpfound = NULL;
     int ret;
 
     ASSERT(pgp != NULL);
@@ -1563,14 +1554,14 @@ cleanup:
 }
 
 
-static NOINLINE int do_tmem_put(pool_t *pool,
-              OID *oidp, uint32_t index,
+static NOINLINE int do_tmem_put(struct tmem_pool *pool,
+              struct oid *oidp, uint32_t index,
               xen_pfn_t cmfn, pagesize_t tmem_offset,
               pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t clibuf)
 {
-    obj_t *obj = NULL, *objfound = NULL, *objnew = NULL;
-    pgp_t *pgp = NULL, *pgpdel = NULL;
-    client_t *client = pool->client;
+    struct tmem_object_root *obj = NULL, *objfound = NULL, *objnew = NULL;
+    struct tmem_page_descriptor *pgp = NULL, *pgpdel = NULL;
+    struct client *client = pool->client;
     int ret = client->frozen ? -EFROZEN : -ENOMEM;
 
     ASSERT(pool != NULL);
@@ -1707,13 +1698,13 @@ free:
     return ret;
 }
 
-static NOINLINE int do_tmem_get(pool_t *pool, OID *oidp, uint32_t index,
+static NOINLINE int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index,
               xen_pfn_t cmfn, pagesize_t tmem_offset,
               pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t clibuf)
 {
-    obj_t *obj;
-    pgp_t *pgp;
-    client_t *client = pool->client;
+    struct tmem_object_root *obj;
+    struct tmem_page_descriptor *pgp;
+    struct client *client = pool->client;
     DECL_LOCAL_CYC_COUNTER(decompress);
     int rc;
 
@@ -1794,10 +1785,10 @@ bad_copy:
     return rc;
 }
 
-static NOINLINE int do_tmem_flush_page(pool_t *pool, OID *oidp, uint32_t index)
+static NOINLINE int do_tmem_flush_page(struct tmem_pool *pool, struct oid *oidp, uint32_t index)
 {
-    obj_t *obj;
-    pgp_t *pgp;
+    struct tmem_object_root *obj;
+    struct tmem_page_descriptor *pgp;
 
     pool->flushs++;
     obj = obj_find(pool,oidp);
@@ -1829,9 +1820,9 @@ out:
         return 1;
 }
 
-static NOINLINE int do_tmem_flush_object(pool_t *pool, OID *oidp)
+static NOINLINE int do_tmem_flush_object(struct tmem_pool *pool, struct oid *oidp)
 {
-    obj_t *obj;
+    struct tmem_object_root *obj;
 
     pool->flush_objs++;
     obj = obj_find(pool,oidp);
@@ -1851,8 +1842,8 @@ out:
 
 static NOINLINE int do_tmem_destroy_pool(uint32_t pool_id)
 {
-    client_t *client = tmem_client_from_current();
-    pool_t *pool;
+    struct client *client = tmem_client_from_current();
+    struct tmem_pool *pool;
 
     if ( client->pools == NULL )
         return 0;
@@ -1865,19 +1856,19 @@ static NOINLINE int do_tmem_destroy_pool(uint32_t pool_id)
     return 1;
 }
 
-static NOINLINE int do_tmem_new_pool(cli_id_t this_cli_id,
+static NOINLINE int do_tmem_new_pool(domid_t this_cli_id,
                                      uint32_t d_poolid, uint32_t flags,
                                      uint64_t uuid_lo, uint64_t uuid_hi)
 {
-    client_t *client;
-    cli_id_t cli_id;
+    struct client *client;
+    domid_t cli_id;
     int persistent = flags & TMEM_POOL_PERSIST;
     int shared = flags & TMEM_POOL_SHARED;
     int pagebits = (flags >> TMEM_POOL_PAGESIZE_SHIFT)
          & TMEM_POOL_PAGESIZE_MASK;
     int specversion = (flags >> TMEM_POOL_VERSION_SHIFT)
          & TMEM_POOL_VERSION_MASK;
-    pool_t *pool, *shpool;
+    struct tmem_pool *pool, *shpool;
     int s_poolid, first_unused_s_poolid;
     int i;
 
@@ -2000,9 +1991,9 @@ fail:
 /************ TMEM CONTROL OPERATIONS ************************************/
 
 /* freeze/thaw all pools belonging to client cli_id (all domains if -1) */
-static int tmemc_freeze_pools(cli_id_t cli_id, int arg)
+static int tmemc_freeze_pools(domid_t cli_id, int arg)
 {
-    client_t *client;
+    struct client *client;
     bool_t freeze = (arg == TMEMC_FREEZE) ? 1 : 0;
     bool_t destroy = (arg == TMEMC_DESTROY) ? 1 : 0;
     char *s;
@@ -2025,7 +2016,7 @@ static int tmemc_freeze_pools(cli_id_t cli_id, int arg)
     return 0;
 }
 
-static int tmemc_flush_mem(cli_id_t cli_id, uint32_t kb)
+static int tmemc_flush_mem(domid_t cli_id, uint32_t kb)
 {
     uint32_t npages, flushed_pages, flushed_kb;
 
@@ -2053,12 +2044,12 @@ static int tmemc_flush_mem(cli_id_t cli_id, uint32_t kb)
  */
 #define BSIZE 1024
 
-static int tmemc_list_client(client_t *c, tmem_cli_va_param_t buf,
+static int tmemc_list_client(struct client *c, tmem_cli_va_param_t buf,
                              int off, uint32_t len, bool_t use_long)
 {
     char info[BSIZE];
     int i, n = 0, sum = 0;
-    pool_t *p;
+    struct tmem_pool *p;
     bool_t s;
 
     n = scnprintf(info,BSIZE,"C=CI:%d,ww:%d,ca:%d,co:%d,fr:%d,"
@@ -2111,8 +2102,8 @@ static int tmemc_list_shared(tmem_cli_va_param_t buf, int off, uint32_t len,
 {
     char info[BSIZE];
     int i, n = 0, sum = 0;
-    pool_t *p;
-    sharelist_t *sl;
+    struct tmem_pool *p;
+    struct share_list *sl;
 
     for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++ )
     {
@@ -2206,10 +2197,10 @@ static int tmemc_list_global(tmem_cli_va_param_t buf, int off, uint32_t len,
     return sum;
 }
 
-static int tmemc_list(cli_id_t cli_id, tmem_cli_va_param_t buf, uint32_t len,
+static int tmemc_list(domid_t cli_id, tmem_cli_va_param_t buf, uint32_t len,
                                bool_t use_long)
 {
-    client_t *client;
+    struct client *client;
     int off = 0;
 
     if ( cli_id == TMEM_CLI_ID_NULL ) {
@@ -2227,9 +2218,9 @@ static int tmemc_list(cli_id_t cli_id, tmem_cli_va_param_t buf, uint32_t len,
     return 0;
 }
 
-static int tmemc_set_var_one(client_t *client, uint32_t subop, uint32_t arg1)
+static int tmemc_set_var_one(struct client *client, uint32_t subop, uint32_t arg1)
 {
-    cli_id_t cli_id = client->cli_id;
+    domid_t cli_id = client->cli_id;
     uint32_t old_weight;
 
     switch (subop)
@@ -2266,9 +2257,9 @@ static int tmemc_set_var_one(client_t *client, uint32_t subop, uint32_t arg1)
     return 0;
 }
 
-static int tmemc_set_var(cli_id_t cli_id, uint32_t subop, uint32_t arg1)
+static int tmemc_set_var(domid_t cli_id, uint32_t subop, uint32_t arg1)
 {
-    client_t *client;
+    struct client *client;
 
     if ( cli_id == TMEM_CLI_ID_NULL )
         list_for_each_entry(client,&global_client_list,client_list)
@@ -2280,10 +2271,10 @@ static int tmemc_set_var(cli_id_t cli_id, uint32_t subop, uint32_t arg1)
     return 0;
 }
 
-static NOINLINE int tmemc_shared_pool_auth(cli_id_t cli_id, uint64_t uuid_lo,
+static NOINLINE int tmemc_shared_pool_auth(domid_t cli_id, uint64_t uuid_lo,
                                   uint64_t uuid_hi, bool_t auth)
 {
-    client_t *client;
+    struct client *client;
     int i, free = -1;
 
     if ( cli_id == TMEM_CLI_ID_NULL )
@@ -2320,11 +2311,11 @@ static NOINLINE int tmemc_shared_pool_auth(cli_id_t cli_id, uint64_t uuid_lo,
 static NOINLINE int tmemc_save_subop(int cli_id, uint32_t pool_id,
                         uint32_t subop, tmem_cli_va_param_t buf, uint32_t arg1)
 {
-    client_t *client = tmem_client_from_cli_id(cli_id);
-    pool_t *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
+    struct client *client = tmem_client_from_cli_id(cli_id);
+    struct tmem_pool *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
                    ? NULL : client->pools[pool_id];
     uint32_t p;
-    pgp_t *pgp, *pgp2;
+    struct tmem_page_descriptor *pgp, *pgp2;
     int rc = -1;
 
     switch(subop)
@@ -2409,11 +2400,11 @@ static NOINLINE int tmemc_save_subop(int cli_id, uint32_t pool_id,
 static NOINLINE int tmemc_save_get_next_page(int cli_id, uint32_t pool_id,
                         tmem_cli_va_param_t buf, uint32_t bufsize)
 {
-    client_t *client = tmem_client_from_cli_id(cli_id);
-    pool_t *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
+    struct client *client = tmem_client_from_cli_id(cli_id);
+    struct tmem_pool *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
                    ? NULL : client->pools[pool_id];
-    pgp_t *pgp;
-    OID oid;
+    struct tmem_page_descriptor *pgp;
+    struct oid oid;
     int ret = 0;
     struct tmem_handle h;
     unsigned int pagesize;
@@ -2436,7 +2427,7 @@ static NOINLINE int tmemc_save_get_next_page(int cli_id, uint32_t pool_id,
     {
         /* process the first one */
         pool->cur_pgp = pgp = list_entry((&pool->persistent_page_list)->next,
-                         pgp_t,us.pool_pers_pages);
+                         struct tmem_page_descriptor,us.pool_pers_pages);
     } else if ( list_is_last(&pool->cur_pgp->us.pool_pers_pages, 
                              &pool->persistent_page_list) )
     {
@@ -2445,7 +2436,7 @@ static NOINLINE int tmemc_save_get_next_page(int cli_id, uint32_t pool_id,
         goto out;
     }
     pgp = list_entry((&pool->cur_pgp->us.pool_pers_pages)->next,
-                         pgp_t,us.pool_pers_pages);
+                         struct tmem_page_descriptor,us.pool_pers_pages);
     pool->cur_pgp = pgp;
     oid = pgp->us.obj->oid;
     h.pool_id = pool_id;
@@ -2464,8 +2455,8 @@ out:
 static NOINLINE int tmemc_save_get_next_inv(int cli_id, tmem_cli_va_param_t buf,
                         uint32_t bufsize)
 {
-    client_t *client = tmem_client_from_cli_id(cli_id);
-    pgp_t *pgp;
+    struct client *client = tmem_client_from_cli_id(cli_id);
+    struct tmem_page_descriptor *pgp;
     struct tmem_handle h;
     int ret = 0;
 
@@ -2479,7 +2470,7 @@ static NOINLINE int tmemc_save_get_next_inv(int cli_id, tmem_cli_va_param_t buf,
     if ( client->cur_pgp == NULL )
     {
         pgp = list_entry((&client->persistent_invalidated_list)->next,
-                         pgp_t,client_inv_pages);
+                         struct tmem_page_descriptor,client_inv_pages);
         client->cur_pgp = pgp;
     } else if ( list_is_last(&client->cur_pgp->client_inv_pages, 
                              &client->persistent_invalidated_list) )
@@ -2489,7 +2480,7 @@ static NOINLINE int tmemc_save_get_next_inv(int cli_id, tmem_cli_va_param_t buf,
         goto out;
     } else {
         pgp = list_entry((&client->cur_pgp->client_inv_pages)->next,
-                         pgp_t,client_inv_pages);
+                         struct tmem_page_descriptor,client_inv_pages);
         client->cur_pgp = pgp;
     }
     h.pool_id = pgp->pool_id;
@@ -2503,11 +2494,11 @@ out:
     return ret;
 }
 
-static int tmemc_restore_put_page(int cli_id, uint32_t pool_id, OID *oidp,
+static int tmemc_restore_put_page(int cli_id, uint32_t pool_id, struct oid *oidp,
                       uint32_t index, tmem_cli_va_param_t buf, uint32_t bufsize)
 {
-    client_t *client = tmem_client_from_cli_id(cli_id);
-    pool_t *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
+    struct client *client = tmem_client_from_cli_id(cli_id);
+    struct tmem_pool *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
                    ? NULL : client->pools[pool_id];
 
     if ( pool == NULL )
@@ -2515,11 +2506,11 @@ static int tmemc_restore_put_page(int cli_id, uint32_t pool_id, OID *oidp,
     return do_tmem_put(pool, oidp, index, 0, 0, 0, bufsize, buf);
 }
 
-static int tmemc_restore_flush_page(int cli_id, uint32_t pool_id, OID *oidp,
+static int tmemc_restore_flush_page(int cli_id, uint32_t pool_id, struct oid *oidp,
                         uint32_t index)
 {
-    client_t *client = tmem_client_from_cli_id(cli_id);
-    pool_t *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
+    struct client *client = tmem_client_from_cli_id(cli_id);
+    struct tmem_pool *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
                    ? NULL : client->pools[pool_id];
 
     if ( pool == NULL )
@@ -2532,7 +2523,7 @@ static NOINLINE int do_tmem_control(struct tmem_op *op)
     int ret;
     uint32_t pool_id = op->pool_id;
     uint32_t subop = op->u.ctrl.subop;
-    OID *oidp = (OID *)(&op->u.ctrl.oid[0]);
+    struct oid *oidp = (struct oid *)(&op->u.ctrl.oid[0]);
 
     if (!tmem_current_is_privileged())
         return -EPERM;
@@ -2606,9 +2597,9 @@ static NOINLINE int do_tmem_control(struct tmem_op *op)
 EXPORT long do_tmem_op(tmem_cli_op_t uops)
 {
     struct tmem_op op;
-    client_t *client = tmem_client_from_current();
-    pool_t *pool = NULL;
-    OID *oidp;
+    struct client *client = tmem_client_from_current();
+    struct tmem_pool *pool = NULL;
+    struct oid *oidp;
     int rc = 0;
     bool_t succ_get = 0, succ_put = 0;
     bool_t non_succ_get = 0, non_succ_put = 0;
@@ -2722,7 +2713,7 @@ EXPORT long do_tmem_op(tmem_cli_op_t uops)
         ASSERT_SENTINEL(pool,POOL);
     }
 
-    oidp = (OID *)&op.u.gen.oid[0];
+    oidp = (struct oid *)&op.u.gen.oid[0];
     switch ( op.cmd )
     {
     case TMEM_NEW_POOL:
@@ -2818,7 +2809,7 @@ out:
 /* this should be called when the host is destroying a client */
 EXPORT void tmem_destroy(void *v)
 {
-    client_t *client = (client_t *)v;
+    struct client *client = (struct client *)v;
 
     if ( client == NULL )
         return;
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index bb2b601..e92eab6 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -365,7 +365,7 @@ static void tmem_persistent_pool_page_put(void *page_va)
 
 /******************  XEN-SPECIFIC CLIENT HANDLING ********************/
 
-EXPORT tmem_client_t *tmem_client_init(cli_id_t cli_id)
+EXPORT tmem_client_t *tmem_client_init(domid_t cli_id)
 {
     tmem_client_t *tmem;
     char name[5];
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index dc37861..bbe1eb6 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -265,13 +265,11 @@ static inline void tmem_free_infra(void *p)
 /*  "Client" (==domain) abstraction */
 
 struct client;
-typedef domid_t cli_id_t;
-typedef struct domain tmem_cli_ptr_t;
 
-extern tmem_client_t *tmem_client_init(cli_id_t);
+extern tmem_client_t *tmem_client_init(domid_t);
 extern void tmem_client_destroy(tmem_client_t *);
 
-static inline struct client *tmem_client_from_cli_id(cli_id_t cli_id)
+static inline struct client *tmem_client_from_cli_id(domid_t cli_id)
 {
     struct client *c;
     struct domain *d = rcu_lock_domain_by_id(cli_id);
@@ -289,18 +287,18 @@ static inline struct client *tmem_client_from_current(void)
 
 #define tmem_client_is_dying(_client) (!!_client->tmem->domain->is_dying)
 
-static inline cli_id_t tmem_get_cli_id_from_current(void)
+static inline domid_t tmem_get_cli_id_from_current(void)
 {
     return current->domain->domain_id;
 }
 
-static inline tmem_cli_ptr_t *tmem_get_cli_ptr_from_current(void)
+static inline struct domain *tmem_get_cli_ptr_from_current(void)
 {
     return current->domain;
 }
 
 static inline bool_t tmem_set_client_from_id(
-    struct client *client, tmem_client_t *tmem, cli_id_t cli_id)
+    struct client *client, tmem_client_t *tmem, domid_t cli_id)
 {
     struct domain *d = rcu_lock_domain_by_id(cli_id);
     bool_t rc = 0;
@@ -474,7 +472,7 @@ static inline void tmem_copy_to_client_buf_offset(tmem_cli_va_param_t clibuf,
 
 #define tmem_client_buf_add guest_handle_add_offset
 
-#define TMEM_CLI_ID_NULL ((cli_id_t)((domid_t)-1L))
+#define TMEM_CLI_ID_NULL ((domid_t)((domid_t)-1L))
 
 #define tmem_cli_id_str "domid"
 #define tmem_client_str "domain"
-- 
1.7.10.4

^ permalink raw reply related	[flat|nested] 15+ messages in thread

* [PATCH v2 06/11] tmem: cleanup: drop function tmem_alloc/free_infra
  2013-11-08  1:03 [PATCH v2 00/11] tmem: some basic cleanup Bob Liu
                   ` (4 preceding siblings ...)
  2013-11-08  1:03 ` [PATCH v2 05/11] tmem: cleanup: drop most of the typedefs Bob Liu
@ 2013-11-08  1:03 ` Bob Liu
  2013-11-08  1:03 ` [PATCH v2 07/11] tmem: cleanup: drop typedef tmem_client_t Bob Liu
                   ` (4 subsequent siblings)
  10 siblings, 0 replies; 15+ messages in thread
From: Bob Liu @ 2013-11-08  1:03 UTC (permalink / raw)
  To: xen-devel; +Cc: Bob Liu, keir, ian.campbell, JBeulich

These wrapper functions are useless and can be replaced by xmalloc/xfree directly.

Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
 xen/common/tmem.c          |   11 +++++------
 xen/include/xen/tmem_xen.h |   14 --------------
 2 files changed, 5 insertions(+), 20 deletions(-)

diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index 3d8e67f..589a515 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -1041,7 +1041,7 @@ static struct tmem_pool * pool_alloc(void)
     struct tmem_pool *pool;
     int i;
 
-    if ( (pool = tmem_alloc_infra(sizeof(struct tmem_pool),__alignof__(struct tmem_pool))) == NULL )
+    if ( (pool = xmalloc(struct tmem_pool)) == NULL )
         return NULL;
     for (i = 0; i < OBJ_HASH_BUCKETS; i++)
         pool->obj_rb_root[i] = RB_ROOT;
@@ -1070,7 +1070,7 @@ static NOINLINE void pool_free(struct tmem_pool *pool)
     INVERT_SENTINEL(pool,POOL);
     pool->client = NULL;
     list_del(&pool->pool_list);
-    tmem_free_infra(pool);
+    xfree(pool);
 }
 
 /* register new_client as a user of this shared pool and return new
@@ -1189,7 +1189,7 @@ static void pool_flush(struct tmem_pool *pool, domid_t cli_id, bool_t destroy)
 
 static struct client *client_create(domid_t cli_id)
 {
-    struct client *client = tmem_alloc_infra(sizeof(struct client),__alignof__(struct client));
+    struct client *client = xzalloc(struct client);
     int i;
 
     tmem_client_info("tmem: initializing tmem capability for %s=%d...",
@@ -1199,7 +1199,6 @@ static struct client *client_create(domid_t cli_id)
         tmem_client_err("failed... out of memory\n");
         goto fail;
     }
-    memset(client,0,sizeof(struct client));
     if ( (client->tmem = tmem_client_init(cli_id)) == NULL )
     {
         tmem_client_err("failed... can't allocate host-dependent part of client\n");
@@ -1229,7 +1228,7 @@ static struct client *client_create(domid_t cli_id)
     return client;
 
  fail:
-    tmem_free_infra(client);
+    xfree(client);
     return NULL;
 }
 
@@ -1237,7 +1236,7 @@ static void client_free(struct client *client)
 {
     list_del(&client->client_list);
     tmem_client_destroy(client->tmem);
-    tmem_free_infra(client);
+    xfree(client);
 }
 
 /* flush all data from a client and, optionally, free it */
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index bbe1eb6..d4eafaf 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -245,20 +245,6 @@ static inline unsigned long tmem_free_mb(void)
     return (tmem_page_list_pages + total_free_pages()) >> (20 - PAGE_SHIFT);
 }
 
-/*
- * Memory allocation for "infrastructure" data
- */
-
-static inline void *tmem_alloc_infra(size_t size, size_t align)
-{
-    return _xmalloc(size,align);
-}
-
-static inline void tmem_free_infra(void *p)
-{
-    return xfree(p);
-}
-
 #define tmem_lock_all  opt_tmem_lock
 #define tmem_called_from_tmem(_memflags) (_memflags & MEMF_tmem)
 
-- 
1.7.10.4

^ permalink raw reply related	[flat|nested] 15+ messages in thread

* [PATCH v2 07/11] tmem: cleanup: drop typedef tmem_client_t
  2013-11-08  1:03 [PATCH v2 00/11] tmem: some basic cleanup Bob Liu
                   ` (5 preceding siblings ...)
  2013-11-08  1:03 ` [PATCH v2 06/11] tmem: cleanup: drop function tmem_alloc/free_infra Bob Liu
@ 2013-11-08  1:03 ` Bob Liu
  2013-11-08  1:03 ` [PATCH v2 08/11] tmem: cleanup: drop useless wrap functions Bob Liu
                   ` (3 subsequent siblings)
  10 siblings, 0 replies; 15+ messages in thread
From: Bob Liu @ 2013-11-08  1:03 UTC (permalink / raw)
  To: xen-devel; +Cc: Bob Liu, keir, ian.campbell, JBeulich

Information in typedef 'tmem_client_t' can be integrated into
'struct client' directly, so there is no need for a separate struct.

Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
 xen/common/tmem.c          |   31 ++++++++++++++++++++++++-------
 xen/common/tmem_xen.c      |   34 ++--------------------------------
 xen/include/xen/tmem_xen.h |   40 +++++++---------------------------------
 3 files changed, 33 insertions(+), 72 deletions(-)

diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index 589a515..db18b65 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -104,7 +104,8 @@ struct tmem_page_content_descriptor;
 struct client {
     struct list_head client_list;
     struct tmem_pool *pools[MAX_POOLS_PER_DOMAIN];
-    tmem_client_t *tmem;
+    struct domain *domain;
+    struct xmem_pool *persistent_pool;
     struct list_head ephemeral_page_list;
     long eph_count, eph_count_max;
     domid_t cli_id;
@@ -1190,7 +1191,9 @@ static void pool_flush(struct tmem_pool *pool, domid_t cli_id, bool_t destroy)
 static struct client *client_create(domid_t cli_id)
 {
     struct client *client = xzalloc(struct client);
-    int i;
+    int i, shift;
+    char name[5];
+    struct domain *d;
 
     tmem_client_info("tmem: initializing tmem capability for %s=%d...",
                     tmem_cli_id_str, cli_id);
@@ -1199,16 +1202,30 @@ static struct client *client_create(domid_t cli_id)
         tmem_client_err("failed... out of memory\n");
         goto fail;
     }
-    if ( (client->tmem = tmem_client_init(cli_id)) == NULL )
+
+    for (i = 0, shift = 12; i < 4; shift -=4, i++)
+        name[i] = (((unsigned short)cli_id >> shift) & 0xf) + '0';
+    name[4] = '\0';
+    client->persistent_pool = xmem_pool_create(name, tmem_persistent_pool_page_get,
+        tmem_persistent_pool_page_put, PAGE_SIZE, 0, PAGE_SIZE);
+    if ( client->persistent_pool == NULL )
     {
-        tmem_client_err("failed... can't allocate host-dependent part of client\n");
+        tmem_client_err("failed... can't alloc persistent pool\n");
         goto fail;
     }
-    if ( !tmem_set_client_from_id(client, client->tmem, cli_id) )
-    {
+
+    d = rcu_lock_domain_by_id(cli_id);
+    if ( d == NULL ) {
         tmem_client_err("failed... can't set client\n");
+        xmem_pool_destroy(client->persistent_pool);
         goto fail;
     }
+    if ( !d->is_dying ) {
+        d->tmem = client;
+	client->domain = d;
+    }
+    rcu_unlock_domain(d);
+
     client->cli_id = cli_id;
     client->compress = tmem_compression_enabled();
     client->shared_auth_required = tmem_shared_auth();
@@ -1235,7 +1252,7 @@ static struct client *client_create(domid_t cli_id)
 static void client_free(struct client *client)
 {
     list_del(&client->client_list);
-    tmem_client_destroy(client->tmem);
+    xmem_pool_destroy(client->persistent_pool);
     xfree(client);
 }
 
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index e92eab6..1309932 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -341,7 +341,7 @@ static int __init tmem_mempool_init(void)
 
 /* persistent pools are per-domain */
 
-static void *tmem_persistent_pool_page_get(unsigned long size)
+void *tmem_persistent_pool_page_get(unsigned long size)
 {
     struct page_info *pi;
     struct domain *d = current->domain;
@@ -353,7 +353,7 @@ static void *tmem_persistent_pool_page_get(unsigned long size)
     return page_to_virt(pi);
 }
 
-static void tmem_persistent_pool_page_put(void *page_va)
+void tmem_persistent_pool_page_put(void *page_va)
 {
     struct page_info *pi;
 
@@ -363,36 +363,6 @@ static void tmem_persistent_pool_page_put(void *page_va)
     _tmem_free_page_thispool(pi);
 }
 
-/******************  XEN-SPECIFIC CLIENT HANDLING ********************/
-
-EXPORT tmem_client_t *tmem_client_init(domid_t cli_id)
-{
-    tmem_client_t *tmem;
-    char name[5];
-    int i, shift;
-
-    if ( (tmem = xmalloc(tmem_client_t)) == NULL )
-        return NULL;
-    for (i = 0, shift = 12; i < 4; shift -=4, i++)
-        name[i] = (((unsigned short)cli_id >> shift) & 0xf) + '0';
-    name[4] = '\0';
-    tmem->persistent_pool = xmem_pool_create(name, tmem_persistent_pool_page_get,
-        tmem_persistent_pool_page_put, PAGE_SIZE, 0, PAGE_SIZE);
-    if ( tmem->persistent_pool == NULL )
-    {
-        xfree(tmem);
-        return NULL;
-    }
-    return tmem;
-}
-
-EXPORT void tmem_client_destroy(tmem_client_t *tmem)
-{
-    ASSERT(tmem->domain->is_dying);
-    xmem_pool_destroy(tmem->persistent_pool);
-    tmem->domain = NULL;
-}
-
 /******************  XEN-SPECIFIC HOST INITIALIZATION ********************/
 
 static int dstmem_order, workmem_order;
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index d4eafaf..3c99bee 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -21,13 +21,6 @@
 #ifdef CONFIG_COMPAT
 #include <compat/tmem.h>
 #endif
-
-struct tmem_host_dependent_client {
-    struct domain *domain;
-    struct xmem_pool *persistent_pool;
-};
-typedef struct tmem_host_dependent_client tmem_client_t;
-
 typedef uint32_t pagesize_t;  /* like size_t, must handle largest PAGE_SIZE */
 
 #define IS_PAGE_ALIGNED(addr) \
@@ -123,7 +116,7 @@ static inline bool_t domain_fully_allocated(struct domain *d)
     return ( d->tot_pages >= d->max_pages );
 }
 #define tmem_client_memory_fully_allocated(_pool) \
- domain_fully_allocated(_pool->client->tmem->domain)
+ domain_fully_allocated(_pool->client->domain)
 
 static inline void *_tmem_alloc_subpage_thispool(struct xmem_pool *cmem_mempool,
                                                  size_t size, size_t align)
@@ -138,7 +131,7 @@ static inline void *_tmem_alloc_subpage_thispool(struct xmem_pool *cmem_mempool,
     return xmem_pool_alloc(size, cmem_mempool);
 }
 #define tmem_alloc_subpage_thispool(_pool, _s, _a) \
-            _tmem_alloc_subpage_thispool(pool->client->tmem->persistent_pool, \
+            _tmem_alloc_subpage_thispool(pool->client->persistent_pool, \
                                          _s, _a)
 
 static inline void _tmem_free_subpage_thispool(struct xmem_pool *cmem_mempool,
@@ -149,7 +142,7 @@ static inline void _tmem_free_subpage_thispool(struct xmem_pool *cmem_mempool,
     xmem_pool_free(ptr,cmem_mempool);
 }
 #define tmem_free_subpage_thispool(_pool, _p, _s) \
- _tmem_free_subpage_thispool(_pool->client->tmem->persistent_pool, _p, _s)
+ _tmem_free_subpage_thispool(_pool->client->persistent_pool, _p, _s)
 
 static inline struct page_info *_tmem_alloc_page_thispool(struct domain *d)
 {
@@ -179,7 +172,7 @@ out:
     return pi;
 }
 #define tmem_alloc_page_thispool(_pool) \
-    _tmem_alloc_page_thispool(_pool->client->tmem->domain)
+    _tmem_alloc_page_thispool(_pool->client->domain)
 
 static inline void _tmem_free_page_thispool(struct page_info *pi)
 {
@@ -251,10 +244,6 @@ static inline unsigned long tmem_free_mb(void)
 /*  "Client" (==domain) abstraction */
 
 struct client;
-
-extern tmem_client_t *tmem_client_init(domid_t);
-extern void tmem_client_destroy(tmem_client_t *);
-
 static inline struct client *tmem_client_from_cli_id(domid_t cli_id)
 {
     struct client *c;
@@ -271,7 +260,7 @@ static inline struct client *tmem_client_from_current(void)
     return (struct client *)(current->domain->tmem);
 }
 
-#define tmem_client_is_dying(_client) (!!_client->tmem->domain->is_dying)
+#define tmem_client_is_dying(_client) (!!_client->domain->is_dying)
 
 static inline domid_t tmem_get_cli_id_from_current(void)
 {
@@ -283,23 +272,6 @@ static inline struct domain *tmem_get_cli_ptr_from_current(void)
     return current->domain;
 }
 
-static inline bool_t tmem_set_client_from_id(
-    struct client *client, tmem_client_t *tmem, domid_t cli_id)
-{
-    struct domain *d = rcu_lock_domain_by_id(cli_id);
-    bool_t rc = 0;
-    if ( d == NULL )
-        return 0;
-    if ( !d->is_dying )
-    {
-        d->tmem = client;
-        tmem->domain = d;
-        rc = 1;
-    }
-    rcu_unlock_domain(d);
-    return rc;
-}
-
 static inline bool_t tmem_current_permitted(void)
 {
     return !xsm_tmem_op(XSM_HOOK);
@@ -476,6 +448,8 @@ int tmem_copy_to_client(xen_pfn_t, struct page_info *, pagesize_t tmem_offset,
     pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t);
 
 extern int tmem_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va, pagesize_t len);
+extern void *tmem_persistent_pool_page_get(unsigned long size);
+extern void tmem_persistent_pool_page_put(void *page_va);
 
 #define tmem_client_err(fmt, args...)  printk(XENLOG_G_ERR fmt, ##args)
 #define tmem_client_warn(fmt, args...) printk(XENLOG_G_WARNING fmt, ##args)
-- 
1.7.10.4

^ permalink raw reply related	[flat|nested] 15+ messages in thread

* [PATCH v2 08/11] tmem: cleanup: drop useless wrap functions
  2013-11-08  1:03 [PATCH v2 00/11] tmem: some basic cleanup Bob Liu
                   ` (6 preceding siblings ...)
  2013-11-08  1:03 ` [PATCH v2 07/11] tmem: cleanup: drop typedef tmem_client_t Bob Liu
@ 2013-11-08  1:03 ` Bob Liu
  2013-11-08  1:03 ` [PATCH v2 09/11] tmem: cleanup: drop unused function 'domain_fully_allocated' Bob Liu
                   ` (2 subsequent siblings)
  10 siblings, 0 replies; 15+ messages in thread
From: Bob Liu @ 2013-11-08  1:03 UTC (permalink / raw)
  To: xen-devel; +Cc: Bob Liu, keir, ian.campbell, JBeulich

_tmem_alloc/free_subpage_thispool() and _tmem_alloc/free_page_thispool() are
useless wrappers; replace them with tmem_alloc/free_subpage_thispool() and
tmem_alloc/free_page_thispool() directly.

Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
 xen/common/tmem.c          |    8 ++++----
 xen/common/tmem_xen.c      |    4 ++--
 xen/include/xen/tmem_xen.h |   17 ++++-------------
 3 files changed, 10 insertions(+), 19 deletions(-)

diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index db18b65..d3318d4 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -311,7 +311,7 @@ static NOINLINE void *_tmem_malloc(size_t size, size_t align, struct tmem_pool *
     void *v;
 
     if ( (pool != NULL) && is_persistent(pool) )
-        v = tmem_alloc_subpage_thispool(pool,size,align);
+        v = tmem_alloc_subpage_thispool(pool->client->persistent_pool,size,align);
     else
         v = tmem_alloc_subpage(pool, size, align);
     if ( v == NULL )
@@ -324,7 +324,7 @@ static NOINLINE void tmem_free(void *p, size_t size, struct tmem_pool *pool)
     if ( pool == NULL || !is_persistent(pool) )
         tmem_free_subpage(p,size);
     else
-        tmem_free_subpage_thispool(pool,p,size);
+        tmem_free_subpage_thispool(pool->client->persistent_pool,p,size);
 }
 
 static NOINLINE struct page_info *tmem_page_alloc(struct tmem_pool *pool)
@@ -332,7 +332,7 @@ static NOINLINE struct page_info *tmem_page_alloc(struct tmem_pool *pool)
     struct page_info *pfp = NULL;
 
     if ( pool != NULL && is_persistent(pool) )
-        pfp = tmem_alloc_page_thispool(pool);
+        pfp = tmem_alloc_page_thispool(pool->client->domain);
     else
         pfp = tmem_alloc_page(pool,0);
     if ( pfp == NULL )
@@ -348,7 +348,7 @@ static NOINLINE void tmem_page_free(struct tmem_pool *pool, struct page_info *pf
     if ( pool == NULL || !is_persistent(pool) )
         tmem_free_page(pfp);
     else
-        tmem_free_page_thispool(pool,pfp);
+        tmem_free_page_thispool(pfp);
     atomic_dec_and_assert(global_page_count);
 }
 
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index 1309932..0f5955d 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -347,7 +347,7 @@ void *tmem_persistent_pool_page_get(unsigned long size)
     struct domain *d = current->domain;
 
     ASSERT(size == PAGE_SIZE);
-    if ( (pi = _tmem_alloc_page_thispool(d)) == NULL )
+    if ( (pi = tmem_alloc_page_thispool(d)) == NULL )
         return NULL;
     ASSERT(IS_VALID_PAGE(pi));
     return page_to_virt(pi);
@@ -360,7 +360,7 @@ void tmem_persistent_pool_page_put(void *page_va)
     ASSERT(IS_PAGE_ALIGNED(page_va));
     pi = mfn_to_page(virt_to_mfn(page_va));
     ASSERT(IS_VALID_PAGE(pi));
-    _tmem_free_page_thispool(pi);
+    tmem_free_page_thispool(pi);
 }
 
 /******************  XEN-SPECIFIC HOST INITIALIZATION ********************/
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index 3c99bee..0b64309 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -118,7 +118,7 @@ static inline bool_t domain_fully_allocated(struct domain *d)
 #define tmem_client_memory_fully_allocated(_pool) \
  domain_fully_allocated(_pool->client->domain)
 
-static inline void *_tmem_alloc_subpage_thispool(struct xmem_pool *cmem_mempool,
+static inline void *tmem_alloc_subpage_thispool(struct xmem_pool *cmem_mempool,
                                                  size_t size, size_t align)
 {
 #if 0
@@ -130,21 +130,16 @@ static inline void *_tmem_alloc_subpage_thispool(struct xmem_pool *cmem_mempool,
         return NULL;
     return xmem_pool_alloc(size, cmem_mempool);
 }
-#define tmem_alloc_subpage_thispool(_pool, _s, _a) \
-            _tmem_alloc_subpage_thispool(pool->client->persistent_pool, \
-                                         _s, _a)
 
-static inline void _tmem_free_subpage_thispool(struct xmem_pool *cmem_mempool,
+static inline void tmem_free_subpage_thispool(struct xmem_pool *cmem_mempool,
                                                void *ptr, size_t size)
 {
     ASSERT( size < tmem_mempool_maxalloc );
     ASSERT( cmem_mempool != NULL );
     xmem_pool_free(ptr,cmem_mempool);
 }
-#define tmem_free_subpage_thispool(_pool, _p, _s) \
- _tmem_free_subpage_thispool(_pool->client->persistent_pool, _p, _s)
 
-static inline struct page_info *_tmem_alloc_page_thispool(struct domain *d)
+static inline struct page_info *tmem_alloc_page_thispool(struct domain *d)
 {
     struct page_info *pi;
 
@@ -171,10 +166,8 @@ out:
     ASSERT((pi == NULL) || IS_VALID_PAGE(pi));
     return pi;
 }
-#define tmem_alloc_page_thispool(_pool) \
-    _tmem_alloc_page_thispool(_pool->client->domain)
 
-static inline void _tmem_free_page_thispool(struct page_info *pi)
+static inline void tmem_free_page_thispool(struct page_info *pi)
 {
     struct domain *d = page_get_owner(pi);
 
@@ -188,8 +181,6 @@ static inline void _tmem_free_page_thispool(struct page_info *pi)
         free_domheap_pages(pi,0);
     }
 }
-#define tmem_free_page_thispool(_pool,_pg) \
-    _tmem_free_page_thispool(_pg)
 
 /*
  * Memory allocation for ephemeral (non-persistent) data
-- 
1.7.10.4

^ permalink raw reply related	[flat|nested] 15+ messages in thread

* [PATCH v2 09/11] tmem: cleanup: drop unused function 'domain_fully_allocated'
  2013-11-08  1:03 [PATCH v2 00/11] tmem: some basic cleanup Bob Liu
                   ` (7 preceding siblings ...)
  2013-11-08  1:03 ` [PATCH v2 08/11] tmem: cleanup: drop useless wrap functions Bob Liu
@ 2013-11-08  1:03 ` Bob Liu
  2013-11-08  1:03 ` [PATCH v2 10/11] tmem: cleanup: drop useless '_subpage' wrap functions Bob Liu
  2013-11-08  1:03 ` [PATCH v2 11/11] tmem: cleanup: drop useless functions Bob Liu
  10 siblings, 0 replies; 15+ messages in thread
From: Bob Liu @ 2013-11-08  1:03 UTC (permalink / raw)
  To: xen-devel; +Cc: Bob Liu, keir, ian.campbell, JBeulich

Nobody uses this function.

Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
 xen/include/xen/tmem_xen.h |    8 --------
 1 file changed, 8 deletions(-)

diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index 0b64309..9941bf2 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -110,14 +110,6 @@ static inline void tmem_page_list_put(struct page_info *pi)
 /*
  * Memory allocation for persistent data 
  */
-
-static inline bool_t domain_fully_allocated(struct domain *d)
-{
-    return ( d->tot_pages >= d->max_pages );
-}
-#define tmem_client_memory_fully_allocated(_pool) \
- domain_fully_allocated(_pool->client->domain)
-
 static inline void *tmem_alloc_subpage_thispool(struct xmem_pool *cmem_mempool,
                                                  size_t size, size_t align)
 {
-- 
1.7.10.4

^ permalink raw reply related	[flat|nested] 15+ messages in thread

* [PATCH v2 10/11] tmem: cleanup: drop useless '_subpage' wrap functions
  2013-11-08  1:03 [PATCH v2 00/11] tmem: some basic cleanup Bob Liu
                   ` (8 preceding siblings ...)
  2013-11-08  1:03 ` [PATCH v2 09/11] tmem: cleanup: drop unused function 'domain_fully_allocated' Bob Liu
@ 2013-11-08  1:03 ` Bob Liu
  2013-11-08  1:03 ` [PATCH v2 11/11] tmem: cleanup: drop useless functions Bob Liu
  10 siblings, 0 replies; 15+ messages in thread
From: Bob Liu @ 2013-11-08  1:03 UTC (permalink / raw)
  To: xen-devel; +Cc: Bob Liu, keir, ian.campbell, JBeulich

There are too many allocation paths in tmem, which makes the source code hard
to read.
Most of the allocation wrappers are useless and referenced only once; this patch
drops 'tmem_alloc_subpage/_thispool' and '_tmem_alloc'.

Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
 xen/common/tmem.c          |   71 ++++++++++++++++++++++++--------------------
 xen/include/xen/tmem_xen.h |   36 ----------------------
 2 files changed, 38 insertions(+), 69 deletions(-)

diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index d3318d4..f009fd8 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -299,32 +299,37 @@ static atomic_t global_rtree_node_count = ATOMIC_INIT(0);
 
 
 /************ MEMORY ALLOCATION INTERFACE *****************************/
-
-#define tmem_malloc(_type,_pool) \
-       _tmem_malloc(sizeof(_type), __alignof__(_type), _pool)
-
-#define tmem_malloc_bytes(_size,_pool) \
-       _tmem_malloc(_size, 1, _pool)
-
-static NOINLINE void *_tmem_malloc(size_t size, size_t align, struct tmem_pool *pool)
+static NOINLINE void *tmem_malloc(size_t size, struct tmem_pool *pool)
 {
-    void *v;
+    void *v = NULL;
 
-    if ( (pool != NULL) && is_persistent(pool) )
-        v = tmem_alloc_subpage_thispool(pool->client->persistent_pool,size,align);
+    if ( (pool != NULL) && is_persistent(pool) ) {
+        if ( pool->client->persistent_pool )
+            v = xmem_pool_alloc(size, pool->client->persistent_pool);
+    }
     else
-        v = tmem_alloc_subpage(pool, size, align);
+    {
+        ASSERT( size < tmem_mempool_maxalloc );
+        ASSERT( tmem_mempool != NULL );
+        v = xmem_pool_alloc(size, tmem_mempool);
+    }
     if ( v == NULL )
         alloc_failed++;
     return v;
 }
 
-static NOINLINE void tmem_free(void *p, size_t size, struct tmem_pool *pool)
+static NOINLINE void tmem_free(void *p, struct tmem_pool *pool)
 {
     if ( pool == NULL || !is_persistent(pool) )
-        tmem_free_subpage(p,size);
+    {
+        ASSERT( tmem_mempool != NULL );
+        xmem_pool_free(p, tmem_mempool);
+    }
     else
-        tmem_free_subpage_thispool(pool->client->persistent_pool,p,size);
+    {
+        ASSERT( pool->client->persistent_pool != NULL );
+        xmem_pool_free(p, pool->client->persistent_pool);
+    }
 }
 
 static NOINLINE struct page_info *tmem_page_alloc(struct tmem_pool *pool)
@@ -417,12 +422,12 @@ static NOINLINE void pcd_disassociate(struct tmem_page_descriptor *pgp, struct t
     /* reinit the struct for safety for now */
     RB_CLEAR_NODE(&pcd->pcd_rb_tree_node);
     /* now free up the pcd memory */
-    tmem_free(pcd,sizeof(struct tmem_page_content_descriptor),NULL);
+    tmem_free(pcd, NULL);
     atomic_dec_and_assert(global_pcd_count);
     if ( pgp_size != 0 && pcd_size < PAGE_SIZE )
     {
         /* compressed data */
-        tmem_free(pcd_cdata,pcd_csize,pool);
+        tmem_free(pcd_cdata, pool);
         pcd_tot_csize -= pcd_csize;
     }
     else if ( pcd_size != PAGE_SIZE )
@@ -430,7 +435,7 @@ static NOINLINE void pcd_disassociate(struct tmem_page_descriptor *pgp, struct t
         /* trailing zero data */
         pcd_tot_tze_size -= pcd_size;
         if ( pcd_size )
-            tmem_free(pcd_tze,pcd_size,pool);
+            tmem_free(pcd_tze, pool);
     } else {
         /* real physical page */
         if ( tmem_tze_enabled() )
@@ -523,14 +528,14 @@ static NOINLINE int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata,
     }
 
     /* exited while loop with no match, so alloc a pcd and put it in the tree */
-    if ( (pcd = tmem_malloc(struct tmem_page_content_descriptor, NULL)) == NULL )
+    if ( (pcd = tmem_malloc(sizeof(struct tmem_page_content_descriptor), NULL)) == NULL )
     {
         ret = -ENOMEM;
         goto unlock;
     } else if ( cdata != NULL ) {
-        if ( (pcd->cdata = tmem_malloc_bytes(csize,pgp->us.obj->pool)) == NULL )
+        if ( (pcd->cdata = tmem_malloc(csize,pgp->us.obj->pool)) == NULL )
         {
-            tmem_free(pcd,sizeof(struct tmem_page_content_descriptor),NULL);
+            tmem_free(pcd, NULL);
             ret = -ENOMEM;
             goto unlock;
         }
@@ -549,7 +554,7 @@ static NOINLINE int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata,
         pcd->size = 0;
         pcd->tze = NULL;
     } else if ( pfp_size < PAGE_SIZE &&
-         ((pcd->tze = tmem_malloc_bytes(pfp_size,pgp->us.obj->pool)) != NULL) ) {
+         ((pcd->tze = tmem_malloc(pfp_size,pgp->us.obj->pool)) != NULL) ) {
         tmem_tze_copy_from_pfp(pcd->tze,pgp->pfp,pfp_size);
         pcd->size = pfp_size;
         pcd_tot_tze_size += pfp_size;
@@ -588,7 +593,7 @@ static NOINLINE struct tmem_page_descriptor *pgp_alloc(struct tmem_object_root *
     ASSERT(obj != NULL);
     ASSERT(obj->pool != NULL);
     pool = obj->pool;
-    if ( (pgp = tmem_malloc(struct tmem_page_descriptor, pool)) == NULL )
+    if ( (pgp = tmem_malloc(sizeof(struct tmem_page_descriptor), pool)) == NULL )
         return NULL;
     pgp->us.obj = obj;
     INIT_LIST_HEAD(&pgp->global_eph_pages);
@@ -628,7 +633,7 @@ static NOINLINE void pgp_free_data(struct tmem_page_descriptor *pgp, struct tmem
     if ( tmem_dedup_enabled() && pgp->firstbyte != NOT_SHAREABLE )
         pcd_disassociate(pgp,pool,0); /* pgp->size lost */
     else if ( pgp_size )
-        tmem_free(pgp->cdata,pgp_size,pool);
+        tmem_free(pgp->cdata, pool);
     else
         tmem_page_free(pgp->us.obj->pool,pgp->pfp);
     if ( pool != NULL && pgp_size )
@@ -671,7 +676,7 @@ static NOINLINE void pgp_free(struct tmem_page_descriptor *pgp, int from_delete)
     INVERT_SENTINEL(pgp,PGD);
     pgp->us.obj = NULL;
     pgp->index = -1;
-    tmem_free(pgp,sizeof(struct tmem_page_descriptor),pool);
+    tmem_free(pgp, pool);
 }
 
 static NOINLINE void pgp_free_from_inv_list(struct client *client, struct tmem_page_descriptor *pgp)
@@ -683,7 +688,7 @@ static NOINLINE void pgp_free_from_inv_list(struct client *client, struct tmem_p
     INVERT_SENTINEL(pgp,PGD);
     pgp->us.obj = NULL;
     pgp->index = -1;
-    tmem_free(pgp,sizeof(struct tmem_page_descriptor),pool);
+    tmem_free(pgp, pool);
 }
 
 /* remove the page from appropriate lists but not from parent object */
@@ -793,7 +798,7 @@ static NOINLINE struct radix_tree_node *rtn_alloc(void *arg)
     ASSERT_SENTINEL(obj,OBJ);
     ASSERT(obj->pool != NULL);
     ASSERT_SENTINEL(obj->pool,POOL);
-    objnode = tmem_malloc(struct tmem_object_node,obj->pool);
+    objnode = tmem_malloc(sizeof(struct tmem_object_node),obj->pool);
     if (objnode == NULL)
         return NULL;
     objnode->obj = obj;
@@ -825,7 +830,7 @@ static void rtn_free(struct radix_tree_node *rtn, void *arg)
     pool->objnode_count--;
     objnode->obj->objnode_count--;
     objnode->obj = NULL;
-    tmem_free(objnode,sizeof(struct tmem_object_node),pool);
+    tmem_free(objnode, pool);
     atomic_dec_and_assert(global_rtree_node_count);
 }
 
@@ -934,7 +939,7 @@ static NOINLINE void obj_free(struct tmem_object_root *obj, int no_rebalance)
     if ( !no_rebalance )
         rb_erase(&obj->rb_tree_node,&pool->obj_rb_root[oid_hash(&old_oid)]);
     tmem_spin_unlock(&obj->obj_spinlock);
-    tmem_free(obj,sizeof(struct tmem_object_root),pool);
+    tmem_free(obj, pool);
 }
 
 static NOINLINE int obj_rb_insert(struct rb_root *root, struct tmem_object_root *obj)
@@ -974,7 +979,7 @@ static NOINLINE struct tmem_object_root * obj_new(struct tmem_pool *pool, struct
 
     ASSERT(pool != NULL);
     ASSERT_WRITELOCK(&pool->pool_rwlock);
-    if ( (obj = tmem_malloc(struct tmem_object_root,pool)) == NULL )
+    if ( (obj = tmem_malloc(sizeof(struct tmem_object_root), pool)) == NULL )
         return NULL;
     pool->obj_count++;
     if (pool->obj_count > pool->obj_count_max)
@@ -1081,7 +1086,7 @@ static int shared_pool_join(struct tmem_pool *pool, struct client *new_client)
     struct share_list *sl;
 
     ASSERT(is_shared(pool));
-    if ( (sl = tmem_malloc(struct share_list,NULL)) == NULL )
+    if ( (sl = tmem_malloc(sizeof(struct share_list), NULL)) == NULL )
         return -1;
     sl->client = new_client;
     list_add_tail(&sl->share_list, &pool->share_list);
@@ -1138,7 +1143,7 @@ static NOINLINE int shared_pool_quit(struct tmem_pool *pool, domid_t cli_id)
         if (sl->client->cli_id != cli_id)
             continue;
         list_del(&sl->share_list);
-        tmem_free(sl,sizeof(struct share_list),pool);
+        tmem_free(sl, pool);
         --pool->shared_count;
         if (pool->client->cli_id == cli_id)
             shared_pool_reassign(pool);
@@ -1463,7 +1468,7 @@ static NOINLINE int do_tmem_put_compress(struct tmem_page_descriptor *pgp, xen_p
     } else if ( tmem_dedup_enabled() && !is_persistent(pgp->us.obj->pool) ) {
         if ( (ret = pcd_associate(pgp,dst,size)) == -ENOMEM )
             goto out;
-    } else if ( (p = tmem_malloc_bytes(size,pgp->us.obj->pool)) == NULL ) {
+    } else if ( (p = tmem_malloc(size,pgp->us.obj->pool)) == NULL ) {
         ret = -ENOMEM;
         goto out;
     } else {
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index 9941bf2..f9639a5 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -110,27 +110,6 @@ static inline void tmem_page_list_put(struct page_info *pi)
 /*
  * Memory allocation for persistent data 
  */
-static inline void *tmem_alloc_subpage_thispool(struct xmem_pool *cmem_mempool,
-                                                 size_t size, size_t align)
-{
-#if 0
-    if ( d->tot_pages >= d->max_pages )
-        return NULL;
-#endif
-    ASSERT( size < tmem_mempool_maxalloc );
-    if ( cmem_mempool == NULL )
-        return NULL;
-    return xmem_pool_alloc(size, cmem_mempool);
-}
-
-static inline void tmem_free_subpage_thispool(struct xmem_pool *cmem_mempool,
-                                               void *ptr, size_t size)
-{
-    ASSERT( size < tmem_mempool_maxalloc );
-    ASSERT( cmem_mempool != NULL );
-    xmem_pool_free(ptr,cmem_mempool);
-}
-
 static inline struct page_info *tmem_alloc_page_thispool(struct domain *d)
 {
     struct page_info *pi;
@@ -177,21 +156,6 @@ static inline void tmem_free_page_thispool(struct page_info *pi)
 /*
  * Memory allocation for ephemeral (non-persistent) data
  */
-
-static inline void *tmem_alloc_subpage(void *pool, size_t size,
-                                                 size_t align)
-{
-    ASSERT( size < tmem_mempool_maxalloc );
-    ASSERT( tmem_mempool != NULL );
-    return xmem_pool_alloc(size, tmem_mempool);
-}
-
-static inline void tmem_free_subpage(void *ptr, size_t size)
-{
-    ASSERT( size < tmem_mempool_maxalloc );
-    xmem_pool_free(ptr,tmem_mempool);
-}
-
 static inline struct page_info *tmem_alloc_page(void *pool, int no_heap)
 {
     struct page_info *pi = tmem_page_list_get();
-- 
1.7.10.4

^ permalink raw reply related	[flat|nested] 15+ messages in thread

* [PATCH v2 11/11] tmem: cleanup: drop useless functions
  2013-11-08  1:03 [PATCH v2 00/11] tmem: some basic cleanup Bob Liu
                   ` (9 preceding siblings ...)
  2013-11-08  1:03 ` [PATCH v2 10/11] tmem: cleanup: drop useless '_subpage' wrap functions Bob Liu
@ 2013-11-08  1:03 ` Bob Liu
  2014-02-07 15:48   ` Konrad Rzeszutek Wilk
  10 siblings, 1 reply; 15+ messages in thread
From: Bob Liu @ 2013-11-08  1:03 UTC (permalink / raw)
  To: xen-devel; +Cc: Bob Liu, keir, ian.campbell, JBeulich

The functions tmem_release_avail_pages_to_host() and tmem_scrub_page() are only
used once, so there is no need to keep them separate.

Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
 xen/common/tmem.c          |   19 +++++++++++++++++--
 xen/common/tmem_xen.c      |   24 ------------------------
 xen/include/xen/tmem_xen.h |    3 ---
 3 files changed, 17 insertions(+), 29 deletions(-)

diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index f009fd8..3d15ead 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -1418,7 +1418,19 @@ static unsigned long tmem_relinquish_npages(unsigned long n)
             break;
     }
     if ( avail_pages )
-        tmem_release_avail_pages_to_host();
+    {
+        spin_lock(&tmem_page_list_lock);
+        while ( !page_list_empty(&tmem_page_list) )
+        {
+            struct page_info *pg = page_list_remove_head(&tmem_page_list);
+            scrub_one_page(pg);
+            tmem_page_list_pages--;
+            free_domheap_page(pg);
+        }
+        ASSERT(tmem_page_list_pages == 0);
+        INIT_PAGE_LIST_HEAD(&tmem_page_list);
+        spin_unlock(&tmem_page_list_lock);
+    }
     return avail_pages;
 }
 
@@ -2911,9 +2923,12 @@ EXPORT void *tmem_relinquish_pages(unsigned int order, unsigned int memflags)
     }
     if ( evicts_per_relinq > max_evicts_per_relinq )
         max_evicts_per_relinq = evicts_per_relinq;
-    tmem_scrub_page(pfp, memflags);
     if ( pfp != NULL )
+    {
+        if ( !(memflags & MEMF_tmem) )
+            scrub_one_page(pfp);
         relinq_pgs++;
+    }
 
     if ( tmem_called_from_tmem(memflags) )
     {
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index 0f5955d..d6e2e0d 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -289,30 +289,6 @@ EXPORT DEFINE_SPINLOCK(tmem_page_list_lock);
 EXPORT PAGE_LIST_HEAD(tmem_page_list);
 EXPORT unsigned long tmem_page_list_pages = 0;
 
-/* free anything on tmem_page_list to Xen's scrub list */
-EXPORT void tmem_release_avail_pages_to_host(void)
-{
-    spin_lock(&tmem_page_list_lock);
-    while ( !page_list_empty(&tmem_page_list) )
-    {
-        struct page_info *pg = page_list_remove_head(&tmem_page_list);
-        scrub_one_page(pg);
-        tmem_page_list_pages--;
-        free_domheap_page(pg);
-    }
-    ASSERT(tmem_page_list_pages == 0);
-    INIT_PAGE_LIST_HEAD(&tmem_page_list);
-    spin_unlock(&tmem_page_list_lock);
-}
-
-EXPORT void tmem_scrub_page(struct page_info *pi, unsigned int memflags)
-{
-    if ( pi == NULL )
-        return;
-    if ( !(memflags & MEMF_tmem) )
-        scrub_one_page(pi);
-}
-
 static noinline void *tmem_mempool_page_get(unsigned long size)
 {
     struct page_info *pi;
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index f9639a5..034fd5c 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -42,9 +42,6 @@ extern void tmem_copy_page(char *to, char*from);
 extern int tmem_init(void);
 #define tmem_hash hash_long
 
-extern void tmem_release_avail_pages_to_host(void);
-extern void tmem_scrub_page(struct page_info *pi, unsigned int memflags);
-
 extern bool_t opt_tmem_compress;
 static inline bool_t tmem_compression_enabled(void)
 {
-- 
1.7.10.4

^ permalink raw reply related	[flat|nested] 15+ messages in thread

* Re: [PATCH v2 11/11] tmem: cleanup: drop useless functions
  2013-11-08  1:03 ` [PATCH v2 11/11] tmem: cleanup: drop useless functions Bob Liu
@ 2014-02-07 15:48   ` Konrad Rzeszutek Wilk
  2014-02-07 16:16     ` Jan Beulich
  2014-02-11  7:45     ` Bob Liu
  0 siblings, 2 replies; 15+ messages in thread
From: Konrad Rzeszutek Wilk @ 2014-02-07 15:48 UTC (permalink / raw)
  To: Bob Liu; +Cc: xen-devel, keir, ian.campbell, JBeulich

On Fri, Nov 08, 2013 at 09:03:57AM +0800, Bob Liu wrote:
> Function tmem_release_avail_pages_to_host() and tmem_scrub_page() only used
> once, no need to separate them out.

All of the patches look good to me. Let me put them in my tree
and do a sanity check tonight and then send a git pull to Jan
on Monday.

Thank you for making the code much easier to read!
> 
> Signed-off-by: Bob Liu <bob.liu@oracle.com>
> ---
>  xen/common/tmem.c          |   19 +++++++++++++++++--
>  xen/common/tmem_xen.c      |   24 ------------------------
>  xen/include/xen/tmem_xen.h |    3 ---
>  3 files changed, 17 insertions(+), 29 deletions(-)
> 
> diff --git a/xen/common/tmem.c b/xen/common/tmem.c
> index f009fd8..3d15ead 100644
> --- a/xen/common/tmem.c
> +++ b/xen/common/tmem.c
> @@ -1418,7 +1418,19 @@ static unsigned long tmem_relinquish_npages(unsigned long n)
>              break;
>      }
>      if ( avail_pages )
> -        tmem_release_avail_pages_to_host();
> +    {
> +        spin_lock(&tmem_page_list_lock);
> +        while ( !page_list_empty(&tmem_page_list) )
> +        {
> +            struct page_info *pg = page_list_remove_head(&tmem_page_list);
> +            scrub_one_page(pg);
> +            tmem_page_list_pages--;
> +            free_domheap_page(pg);
> +        }
> +        ASSERT(tmem_page_list_pages == 0);
> +        INIT_PAGE_LIST_HEAD(&tmem_page_list);
> +        spin_unlock(&tmem_page_list_lock);
> +    }
>      return avail_pages;
>  }
>  
> @@ -2911,9 +2923,12 @@ EXPORT void *tmem_relinquish_pages(unsigned int order, unsigned int memflags)
>      }
>      if ( evicts_per_relinq > max_evicts_per_relinq )
>          max_evicts_per_relinq = evicts_per_relinq;
> -    tmem_scrub_page(pfp, memflags);
>      if ( pfp != NULL )
> +    {
> +        if ( !(memflags & MEMF_tmem) )
> +            scrub_one_page(pfp);
>          relinq_pgs++;
> +    }
>  
>      if ( tmem_called_from_tmem(memflags) )
>      {
> diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
> index 0f5955d..d6e2e0d 100644
> --- a/xen/common/tmem_xen.c
> +++ b/xen/common/tmem_xen.c
> @@ -289,30 +289,6 @@ EXPORT DEFINE_SPINLOCK(tmem_page_list_lock);
>  EXPORT PAGE_LIST_HEAD(tmem_page_list);
>  EXPORT unsigned long tmem_page_list_pages = 0;
>  
> -/* free anything on tmem_page_list to Xen's scrub list */
> -EXPORT void tmem_release_avail_pages_to_host(void)
> -{
> -    spin_lock(&tmem_page_list_lock);
> -    while ( !page_list_empty(&tmem_page_list) )
> -    {
> -        struct page_info *pg = page_list_remove_head(&tmem_page_list);
> -        scrub_one_page(pg);
> -        tmem_page_list_pages--;
> -        free_domheap_page(pg);
> -    }
> -    ASSERT(tmem_page_list_pages == 0);
> -    INIT_PAGE_LIST_HEAD(&tmem_page_list);
> -    spin_unlock(&tmem_page_list_lock);
> -}
> -
> -EXPORT void tmem_scrub_page(struct page_info *pi, unsigned int memflags)
> -{
> -    if ( pi == NULL )
> -        return;
> -    if ( !(memflags & MEMF_tmem) )
> -        scrub_one_page(pi);
> -}
> -
>  static noinline void *tmem_mempool_page_get(unsigned long size)
>  {
>      struct page_info *pi;
> diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
> index f9639a5..034fd5c 100644
> --- a/xen/include/xen/tmem_xen.h
> +++ b/xen/include/xen/tmem_xen.h
> @@ -42,9 +42,6 @@ extern void tmem_copy_page(char *to, char*from);
>  extern int tmem_init(void);
>  #define tmem_hash hash_long
>  
> -extern void tmem_release_avail_pages_to_host(void);
> -extern void tmem_scrub_page(struct page_info *pi, unsigned int memflags);
> -
>  extern bool_t opt_tmem_compress;
>  static inline bool_t tmem_compression_enabled(void)
>  {
> -- 
> 1.7.10.4
> 

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v2 11/11] tmem: cleanup: drop useless functions
  2014-02-07 15:48   ` Konrad Rzeszutek Wilk
@ 2014-02-07 16:16     ` Jan Beulich
  2014-02-11  7:45     ` Bob Liu
  1 sibling, 0 replies; 15+ messages in thread
From: Jan Beulich @ 2014-02-07 16:16 UTC (permalink / raw)
  To: Bob Liu, Konrad Rzeszutek Wilk; +Cc: xen-devel, keir, ian.campbell

>>> On 07.02.14 at 16:48, Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> wrote:
> On Fri, Nov 08, 2013 at 09:03:57AM +0800, Bob Liu wrote:
>> Function tmem_release_avail_pages_to_host() and tmem_scrub_page() only used
>> once, no need to separate them out.
> 
> All of the patches look good to me. Let me put them in my tree
> and do a sanity check tonight and then send a git pull to Jan
> on Monday.

I don't think we should be pulling in cleanup like this anymore,
until we branch.

Jan

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v2 11/11] tmem: cleanup: drop useless functions
  2014-02-07 15:48   ` Konrad Rzeszutek Wilk
  2014-02-07 16:16     ` Jan Beulich
@ 2014-02-11  7:45     ` Bob Liu
  1 sibling, 0 replies; 15+ messages in thread
From: Bob Liu @ 2014-02-11  7:45 UTC (permalink / raw)
  To: Konrad Rzeszutek Wilk; +Cc: xen-devel, Bob Liu, keir, ian.campbell, JBeulich

Hi Konrad,

On 02/07/2014 11:48 PM, Konrad Rzeszutek Wilk wrote:
> On Fri, Nov 08, 2013 at 09:03:57AM +0800, Bob Liu wrote:
>> Function tmem_release_avail_pages_to_host() and tmem_scrub_page() only used
>> once, no need to separate them out.
> 
> All of the patches look good to me. Let me put them in my tree
> and do a sanity check tonight and then send a git pull to Jan
> on Monday.
> 

This series of patches has already been merged.
I have three series of patches on tmem; the previous two have been merged.

The third one which you haven't review is:
[PATCH RESEND 00/14] xen: new patches for tmem

Thanks,
-Bob

> Thank you for making the code much easier to read!
>>
>> Signed-off-by: Bob Liu <bob.liu@oracle.com>
>> ---
>>  xen/common/tmem.c          |   19 +++++++++++++++++--
>>  xen/common/tmem_xen.c      |   24 ------------------------
>>  xen/include/xen/tmem_xen.h |    3 ---
>>  3 files changed, 17 insertions(+), 29 deletions(-)
>>
>> diff --git a/xen/common/tmem.c b/xen/common/tmem.c
>> index f009fd8..3d15ead 100644
>> --- a/xen/common/tmem.c
>> +++ b/xen/common/tmem.c
>> @@ -1418,7 +1418,19 @@ static unsigned long tmem_relinquish_npages(unsigned long n)
>>              break;
>>      }
>>      if ( avail_pages )
>> -        tmem_release_avail_pages_to_host();
>> +    {
>> +        spin_lock(&tmem_page_list_lock);
>> +        while ( !page_list_empty(&tmem_page_list) )
>> +        {
>> +            struct page_info *pg = page_list_remove_head(&tmem_page_list);
>> +            scrub_one_page(pg);
>> +            tmem_page_list_pages--;
>> +            free_domheap_page(pg);
>> +        }
>> +        ASSERT(tmem_page_list_pages == 0);
>> +        INIT_PAGE_LIST_HEAD(&tmem_page_list);
>> +        spin_unlock(&tmem_page_list_lock);
>> +    }
>>      return avail_pages;
>>  }
>>  
>> @@ -2911,9 +2923,12 @@ EXPORT void *tmem_relinquish_pages(unsigned int order, unsigned int memflags)
>>      }
>>      if ( evicts_per_relinq > max_evicts_per_relinq )
>>          max_evicts_per_relinq = evicts_per_relinq;
>> -    tmem_scrub_page(pfp, memflags);
>>      if ( pfp != NULL )
>> +    {
>> +        if ( !(memflags & MEMF_tmem) )
>> +            scrub_one_page(pfp);
>>          relinq_pgs++;
>> +    }
>>  
>>      if ( tmem_called_from_tmem(memflags) )
>>      {
>> diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
>> index 0f5955d..d6e2e0d 100644
>> --- a/xen/common/tmem_xen.c
>> +++ b/xen/common/tmem_xen.c
>> @@ -289,30 +289,6 @@ EXPORT DEFINE_SPINLOCK(tmem_page_list_lock);
>>  EXPORT PAGE_LIST_HEAD(tmem_page_list);
>>  EXPORT unsigned long tmem_page_list_pages = 0;
>>  
>> -/* free anything on tmem_page_list to Xen's scrub list */
>> -EXPORT void tmem_release_avail_pages_to_host(void)
>> -{
>> -    spin_lock(&tmem_page_list_lock);
>> -    while ( !page_list_empty(&tmem_page_list) )
>> -    {
>> -        struct page_info *pg = page_list_remove_head(&tmem_page_list);
>> -        scrub_one_page(pg);
>> -        tmem_page_list_pages--;
>> -        free_domheap_page(pg);
>> -    }
>> -    ASSERT(tmem_page_list_pages == 0);
>> -    INIT_PAGE_LIST_HEAD(&tmem_page_list);
>> -    spin_unlock(&tmem_page_list_lock);
>> -}
>> -
>> -EXPORT void tmem_scrub_page(struct page_info *pi, unsigned int memflags)
>> -{
>> -    if ( pi == NULL )
>> -        return;
>> -    if ( !(memflags & MEMF_tmem) )
>> -        scrub_one_page(pi);
>> -}
>> -
>>  static noinline void *tmem_mempool_page_get(unsigned long size)
>>  {
>>      struct page_info *pi;
>> diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
>> index f9639a5..034fd5c 100644
>> --- a/xen/include/xen/tmem_xen.h
>> +++ b/xen/include/xen/tmem_xen.h
>> @@ -42,9 +42,6 @@ extern void tmem_copy_page(char *to, char*from);
>>  extern int tmem_init(void);
>>  #define tmem_hash hash_long
>>  
>> -extern void tmem_release_avail_pages_to_host(void);
>> -extern void tmem_scrub_page(struct page_info *pi, unsigned int memflags);
>> -
>>  extern bool_t opt_tmem_compress;
>>  static inline bool_t tmem_compression_enabled(void)
>>  {
>> -- 
>> 1.7.10.4
>>

^ permalink raw reply	[flat|nested] 15+ messages in thread

end of thread, other threads:[~2014-02-11  7:45 UTC | newest]

Thread overview: 15+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2013-11-08  1:03 [PATCH v2 00/11] tmem: some basic cleanup Bob Liu
2013-11-08  1:03 ` [PATCH v2 01/11] tmem: cleanup: drop COMPARE_COPY_PAGE_SSE2 Bob Liu
2013-11-08  1:03 ` [PATCH v2 02/11] tmem: cleanup: drop typedef pfp_t Bob Liu
2013-11-08  1:03 ` [PATCH v2 03/11] tmem: cleanup: drop typedef tmem_cli_mfn_t Bob Liu
2013-11-08  1:03 ` [PATCH v2 04/11] tmem: cleanup: rename 'tmh_' with 'tmem_' Bob Liu
2013-11-08  1:03 ` [PATCH v2 05/11] tmem: cleanup: drop most of the typedefs Bob Liu
2013-11-08  1:03 ` [PATCH v2 06/11] tmem: cleanup: drop function tmem_alloc/free_infra Bob Liu
2013-11-08  1:03 ` [PATCH v2 07/11] tmem: cleanup: drop typedef tmem_client_t Bob Liu
2013-11-08  1:03 ` [PATCH v2 08/11] tmem: cleanup: drop useless wrap functions Bob Liu
2013-11-08  1:03 ` [PATCH v2 09/11] tmem: cleanup: drop unused function 'domain_fully_allocated' Bob Liu
2013-11-08  1:03 ` [PATCH v2 10/11] tmem: cleanup: drop useless '_subpage' wrap functions Bob Liu
2013-11-08  1:03 ` [PATCH v2 11/11] tmem: cleanup: drop useless functions Bob Liu
2014-02-07 15:48   ` Konrad Rzeszutek Wilk
2014-02-07 16:16     ` Jan Beulich
2014-02-11  7:45     ` Bob Liu

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.