* [RFC PATCH 3/3] zsmalloc: charge the zspage's object to obj_cgroup
@ 2023-06-15 3:49 Zhongkun He
2023-06-15 7:46 ` kernel test robot
` (2 more replies)
0 siblings, 3 replies; 4+ messages in thread
From: Zhongkun He @ 2023-06-15 3:49 UTC (permalink / raw)
To: minchan, senozhatsky, mhocko; +Cc: linux-mm, linux-kernel, Zhongkun He
The zspage's objects are not charged to any memory cgroup. The
memory of a task will have no limit if the zram swap device
is large enough, even though the task is limited by a memory
cgroup.
So, it is necessary to charge the zspage's objects to
obj_cgroup, just like slub.
Signed-off-by: Zhongkun He <hezhongkun.hzk@bytedance.com>
---
mm/zsmalloc.c | 196 ++++++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 191 insertions(+), 5 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 02f7f414aade..a5d1c484dd8f 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -62,6 +62,7 @@
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/local_lock.h>
+#include <linux/memcontrol.h>
#define ZSPAGE_MAGIC 0x58
@@ -311,6 +312,7 @@ static bool ZsHugePage(struct zspage *zspage)
static void migrate_lock_init(struct zspage *zspage);
static void migrate_read_lock(struct zspage *zspage);
static void migrate_read_unlock(struct zspage *zspage);
+static void zs_free_page_obj_cgroups(struct zspage *zspage);
#ifdef CONFIG_COMPACTION
static void migrate_write_lock(struct zspage *zspage);
@@ -973,6 +975,7 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
free_handles(pool, class, zspage);
next = page = get_first_page(zspage);
+ zs_free_page_obj_cgroups(zspage);
do {
VM_BUG_ON_PAGE(!PageLocked(page), page);
next = get_next_page(page);
@@ -1420,6 +1423,170 @@ size_t zs_huge_class_size(struct zs_pool *pool)
}
EXPORT_SYMBOL_GPL(zs_huge_class_size);
+
+#ifdef CONFIG_MEMCG_KMEM
+
+static inline struct obj_cgroup **page_objcgs_check(struct page *page)
+{
+ unsigned long memcg_data = READ_ONCE(page->memcg_data);
+
+ if (!memcg_data || !(memcg_data & MEMCG_DATA_OBJCGS))
+ return NULL;
+
+ VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);
+
+ return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+}
+
+
+static void zs_free_page_obj_cgroups(struct zspage *zspage)
+{
+ struct obj_cgroup **objcgs;
+ struct page *page = zspage->first_page;
+
+ objcgs = page_objcgs_check(page);
+ if (!objcgs)
+ return;
+
+ kfree(objcgs);
+ page->memcg_data = 0;
+}
+
+static inline size_t zs_obj_full_size(struct size_class *class)
+{
+ return class->size + sizeof(struct obj_cgroup *);
+}
+
+static inline void zs_objcg_exchange(struct page *s_page, unsigned int s_index,
+ struct page *d_page, unsigned int d_index)
+{
+ struct page *sf_page, *df_page;
+ struct obj_cgroup **s_objcgs, **d_objcgs;
+
+ sf_page = get_first_page(get_zspage(s_page));
+ df_page = get_first_page(get_zspage(d_page));
+
+ s_objcgs = page_objcgs_check(sf_page);
+ d_objcgs = page_objcgs_check(df_page);
+
+ if (!s_objcgs || !d_objcgs)
+ return;
+
+ d_objcgs[d_index] = s_objcgs[s_index];
+ s_objcgs[s_index] = NULL;
+}
+
+void zs_alloc_obj_cgroups(struct zspage *zspage, struct size_class *class)
+{
+ struct page *page = zspage->first_page;
+ int objects = class->objs_per_zspage;
+ unsigned long memcg_data = 0;
+ void *vec;
+
+ vec = kcalloc_node(objects, sizeof(struct obj_cgroup *),
+ GFP_NOWAIT|__GFP_NOWARN|
+ __GFP_ZERO|__GFP_MOVABLE,
+ page_to_nid(page));
+
+ if (vec)
+ memcg_data = (unsigned long)vec | MEMCG_DATA_OBJCGS;
+ page->memcg_data = memcg_data;
+}
+
+static inline bool zs_obj_pre_alloc_hook(struct size_class *class,
+ struct obj_cgroup **objcgp, gfp_t flags)
+{
+ struct obj_cgroup *objcg;
+
+ *objcgp = NULL;
+
+ objcg = get_obj_cgroup_from_current();
+ if (!objcg)
+ return true;
+
+ if (obj_cgroup_charge(objcg, flags, zs_obj_full_size(class))) {
+ obj_cgroup_put(objcg);
+ return false;
+ }
+
+ *objcgp = objcg;
+ return true;
+}
+
+static inline void zs_obj_post_alloc_hook(struct size_class *class,
+ struct zspage *zspage,
+ unsigned int idx,
+ struct obj_cgroup *objcg)
+{
+ struct obj_cgroup **objcgs = NULL;
+
+ if (!objcg)
+ return;
+
+ if (zspage)
+ objcgs = page_objcgs_check(zspage->first_page);
+
+ if (!objcgs) {
+ obj_cgroup_uncharge(objcg, zs_obj_full_size(class));
+ obj_cgroup_put(objcg);
+ return;
+ }
+
+ objcgs[idx] = objcg;
+}
+
+static inline void zs_obj_free_hook(int class_size, struct zspage *zspage,
+ unsigned int idx)
+{
+ struct obj_cgroup **objcgs, *objcg;
+
+ objcgs = page_objcgs_check(zspage->first_page);
+
+ if (WARN_ON(!objcgs))
+ return;
+
+ objcg = objcgs[idx];
+ if (!objcg)
+ return;
+
+ objcgs[idx] = NULL;
+ obj_cgroup_uncharge(objcg, class_size + sizeof(struct obj_cgroup *));
+ obj_cgroup_put(objcg);
+}
+#else
+
+static void zs_free_page_obj_cgroups(struct zspage *zspage)
+{
+}
+
+static inline void zs_objcg_exchange(struct page *s_page, unsigned int s_index,
+ struct page *d_page, unsigned int d_index)
+{
+}
+
+int zs_alloc_obj_cgroups(struct zspage *zspage, struct size_class *class)
+{
+ return 0;
+}
+
+static inline bool zs_obj_pre_alloc_hook(struct size_class *class,
+ struct obj_cgroup **objcgp, gfp_t flags)
+{
+ return true;
+}
+
+static inline void zs_obj_post_alloc_hook(struct size_class *class,
+ struct page *page,
+ unsigned int idx,
+ struct obj_cgroup *objcg)
+{
+}
+static inline void zs_obj_free_hook(int class_size, struct zspage *zspage,
+ unsigned int idx)
+{
+}
+#endif
+
static unsigned long obj_malloc(struct zs_pool *pool,
struct zspage *zspage, unsigned long handle)
{
@@ -1475,7 +1642,8 @@ static unsigned long obj_malloc(struct zs_pool *pool,
*/
unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
{
- unsigned long handle, obj;
+ unsigned long handle, obj, index;
+ struct obj_cgroup *objcg;
struct size_class *class;
int newfg;
struct zspage *zspage;
@@ -1491,17 +1659,23 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
size += ZS_HANDLE_SIZE;
class = pool->size_class[get_size_class_index(size)];
+ if (!zs_obj_pre_alloc_hook(class, &objcg, gfp)) {
+ cache_free_handle(pool, handle);
+ return (unsigned long)ERR_PTR(-ENOMEM);
+ }
+
/* pool->lock effectively protects the zpage migration */
spin_lock(&pool->lock);
zspage = find_get_zspage(class);
if (likely(zspage)) {
+ index = get_freeobj(zspage);
obj = obj_malloc(pool, zspage, handle);
/* Now move the zspage to another fullness group, if required */
fix_fullness_group(class, zspage);
record_obj(handle, obj);
class_stat_inc(class, ZS_OBJS_INUSE, 1);
- goto out;
+ goto out_unlock;
}
spin_unlock(&pool->lock);
@@ -1509,9 +1683,12 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
zspage = alloc_zspage(pool, class, gfp);
if (!zspage) {
cache_free_handle(pool, handle);
- return (unsigned long)ERR_PTR(-ENOMEM);
+ handle = (unsigned long)ERR_PTR(-ENOMEM);
+ goto out;
}
+ zs_alloc_obj_cgroups(zspage, class);
+ index = get_freeobj(zspage);
spin_lock(&pool->lock);
obj = obj_malloc(pool, zspage, handle);
newfg = get_fullness_group(class, zspage);
@@ -1524,7 +1701,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
/* We completely set up zspage so mark them as movable */
SetZsPageMovable(pool, zspage);
-out:
+out_unlock:
#ifdef CONFIG_ZPOOL
/* Add/move zspage to beginning of LRU */
if (!list_empty(&zspage->lru))
@@ -1533,6 +1710,8 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
#endif
spin_unlock(&pool->lock);
+out:
+ zs_obj_post_alloc_hook(class, zspage, index, objcg);
return handle;
}
@@ -1573,7 +1752,7 @@ static void obj_free(int class_size, unsigned long obj, unsigned long *handle)
f_page->index = 0;
set_freeobj(zspage, f_objidx);
}
-
+ zs_obj_free_hook(class_size, zspage, f_objidx);
kunmap_atomic(vaddr);
mod_zspage_inuse(zspage, -1);
}
@@ -1640,6 +1819,7 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
obj_to_location(src, &s_page, &s_objidx);
obj_to_location(dst, &d_page, &d_objidx);
+ zs_objcg_exchange(s_page, s_objidx, d_page, d_objidx);
s_off = (class->size * s_objidx) & ~PAGE_MASK;
d_off = (class->size * d_objidx) & ~PAGE_MASK;
@@ -1960,6 +2140,12 @@ static void replace_sub_page(struct size_class *class, struct zspage *zspage,
int idx = 0;
page = get_first_page(zspage);
+#ifdef CONFIG_MEMCG_KMEM
+ if (page == oldpage) {
+ newpage->memcg_data = page->memcg_data;
+ page->memcg_data = 0;
+ }
+#endif
do {
if (page == oldpage)
pages[idx] = newpage;
--
2.25.1
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [RFC PATCH 3/3] zsmalloc: charge the zspage's object to obj_cgroup
2023-06-15 3:49 [RFC PATCH 3/3] zsmalloc: charge the zspage's object to obj_cgroup Zhongkun He
@ 2023-06-15 7:46 ` kernel test robot
2023-06-15 9:00 ` kernel test robot
2023-06-15 9:42 ` kernel test robot
2 siblings, 0 replies; 4+ messages in thread
From: kernel test robot @ 2023-06-15 7:46 UTC (permalink / raw)
To: Zhongkun He; +Cc: oe-kbuild-all
Hi Zhongkun,
[This is a private test report for your RFC patch.]
kernel test robot noticed the following build errors:
[auto build test ERROR on axboe-block/for-next]
[also build test ERROR on linus/master v6.4-rc6]
[cannot apply to akpm-mm/mm-everything next-20230615]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Zhongkun-He/memcg-export-obj_cgroup-symbol-to-charge-compressed-RAM/20230615-115048
base: https://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git for-next
patch link: https://lore.kernel.org/r/20230615034905.1362034-1-hezhongkun.hzk%40bytedance.com
patch subject: [RFC PATCH 3/3] zsmalloc: charge the zspage's object to obj_cgroup
config: mips-randconfig-r022-20230615 (https://download.01.org/0day-ci/archive/20230615/202306151517.tjkWZm6G-lkp@intel.com/config)
compiler: mipsel-linux-gcc (GCC) 12.3.0
reproduce (this is a W=1 build):
mkdir -p ~/bin
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
git remote add axboe-block https://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
git fetch axboe-block for-next
git checkout axboe-block/for-next
b4 shazam https://lore.kernel.org/r/20230615034905.1362034-1-hezhongkun.hzk@bytedance.com
# save the config file
mkdir build_dir && cp config build_dir/.config
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.3.0 ~/bin/make.cross W=1 O=build_dir ARCH=mips olddefconfig
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.3.0 ~/bin/make.cross W=1 O=build_dir ARCH=mips SHELL=/bin/bash
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202306151517.tjkWZm6G-lkp@intel.com/
All errors (new ones prefixed by >>):
mm/zsmalloc.c:1567:5: warning: no previous prototype for 'zs_alloc_obj_cgroups' [-Wmissing-prototypes]
1567 | int zs_alloc_obj_cgroups(struct zspage *zspage, struct size_class *class)
| ^~~~~~~~~~~~~~~~~~~~
mm/zsmalloc.c: In function 'zs_malloc':
>> mm/zsmalloc.c:1714:39: error: passing argument 2 of 'zs_obj_post_alloc_hook' from incompatible pointer type [-Werror=incompatible-pointer-types]
1714 | zs_obj_post_alloc_hook(class, zspage, index, objcg);
| ^~~~~~
| |
| struct zspage *
mm/zsmalloc.c:1579:46: note: expected 'struct page *' but argument is of type 'struct zspage *'
1579 | struct page *page,
| ~~~~~~~~~~~~~^~~~
cc1: some warnings being treated as errors
vim +/zs_obj_post_alloc_hook +1714 mm/zsmalloc.c
1711
1712 spin_unlock(&pool->lock);
1713 out:
> 1714 zs_obj_post_alloc_hook(class, zspage, index, objcg);
1715
1716 return handle;
1717 }
1718 EXPORT_SYMBOL_GPL(zs_malloc);
1719
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [RFC PATCH 3/3] zsmalloc: charge the zspage's object to obj_cgroup
2023-06-15 3:49 [RFC PATCH 3/3] zsmalloc: charge the zspage's object to obj_cgroup Zhongkun He
2023-06-15 7:46 ` kernel test robot
@ 2023-06-15 9:00 ` kernel test robot
2023-06-15 9:42 ` kernel test robot
2 siblings, 0 replies; 4+ messages in thread
From: kernel test robot @ 2023-06-15 9:00 UTC (permalink / raw)
To: Zhongkun He; +Cc: llvm, oe-kbuild-all
Hi Zhongkun,
[This is a private test report for your RFC patch.]
kernel test robot noticed the following build errors:
[auto build test ERROR on axboe-block/for-next]
[also build test ERROR on linus/master v6.4-rc6]
[cannot apply to akpm-mm/mm-everything next-20230615]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Zhongkun-He/memcg-export-obj_cgroup-symbol-to-charge-compressed-RAM/20230615-115048
base: https://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git for-next
patch link: https://lore.kernel.org/r/20230615034905.1362034-1-hezhongkun.hzk%40bytedance.com
patch subject: [RFC PATCH 3/3] zsmalloc: charge the zspage's object to obj_cgroup
config: s390-randconfig-r044-20230615 (https://download.01.org/0day-ci/archive/20230615/202306151659.Jo2P7J8r-lkp@intel.com/config)
compiler: clang version 16.0.4 (https://github.com/llvm/llvm-project.git ae42196bc493ffe877a7e3dff8be32035dea4d07)
reproduce (this is a W=1 build):
mkdir -p ~/bin
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# install s390 cross compiling tool for clang build
# apt-get install binutils-s390x-linux-gnu
git remote add axboe-block https://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
git fetch axboe-block for-next
git checkout axboe-block/for-next
b4 shazam https://lore.kernel.org/r/20230615034905.1362034-1-hezhongkun.hzk@bytedance.com
# save the config file
mkdir build_dir && cp config build_dir/.config
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang ~/bin/make.cross W=1 O=build_dir ARCH=s390 olddefconfig
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang ~/bin/make.cross W=1 O=build_dir ARCH=s390 SHELL=/bin/bash
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202306151659.Jo2P7J8r-lkp@intel.com/
All error/warnings (new ones prefixed by >>):
>> mm/zsmalloc.c:1567:5: warning: no previous prototype for function 'zs_alloc_obj_cgroups' [-Wmissing-prototypes]
int zs_alloc_obj_cgroups(struct zspage *zspage, struct size_class *class)
^
mm/zsmalloc.c:1567:1: note: declare 'static' if the function is not intended to be used outside of this translation unit
int zs_alloc_obj_cgroups(struct zspage *zspage, struct size_class *class)
^
static
>> mm/zsmalloc.c:1714:32: error: incompatible pointer types passing 'struct zspage *' to parameter of type 'struct page *' [-Werror,-Wincompatible-pointer-types]
zs_obj_post_alloc_hook(class, zspage, index, objcg);
^~~~~~
mm/zsmalloc.c:1579:18: note: passing argument to parameter 'page' here
struct page *page,
^
>> mm/zsmalloc.c:1684:6: warning: variable 'index' is used uninitialized whenever 'if' condition is true [-Wsometimes-uninitialized]
if (!zspage) {
^~~~~~~
mm/zsmalloc.c:1714:40: note: uninitialized use occurs here
zs_obj_post_alloc_hook(class, zspage, index, objcg);
^~~~~
mm/zsmalloc.c:1684:2: note: remove the 'if' if its condition is always false
if (!zspage) {
^~~~~~~~~~~~~~
mm/zsmalloc.c:1645:34: note: initialize the variable 'index' to silence this warning
unsigned long handle, obj, index;
^
= 0
2 warnings and 1 error generated.
vim +1714 mm/zsmalloc.c
1566
> 1567 int zs_alloc_obj_cgroups(struct zspage *zspage, struct size_class *class)
1568 {
1569 return 0;
1570 }
1571
1572 static inline bool zs_obj_pre_alloc_hook(struct size_class *class,
1573 struct obj_cgroup **objcgp, gfp_t flags)
1574 {
1575 return true;
1576 }
1577
1578 static inline void zs_obj_post_alloc_hook(struct size_class *class,
1579 struct page *page,
1580 unsigned int idx,
1581 struct obj_cgroup *objcg)
1582 {
1583 }
1584 static inline void zs_obj_free_hook(int class_size, struct zspage *zspage,
1585 unsigned int idx)
1586 {
1587 }
1588 #endif
1589
1590 static unsigned long obj_malloc(struct zs_pool *pool,
1591 struct zspage *zspage, unsigned long handle)
1592 {
1593 int i, nr_page, offset;
1594 unsigned long obj;
1595 struct link_free *link;
1596 struct size_class *class;
1597
1598 struct page *m_page;
1599 unsigned long m_offset;
1600 void *vaddr;
1601
1602 class = pool->size_class[zspage->class];
1603 handle |= OBJ_ALLOCATED_TAG;
1604 obj = get_freeobj(zspage);
1605
1606 offset = obj * class->size;
1607 nr_page = offset >> PAGE_SHIFT;
1608 m_offset = offset & ~PAGE_MASK;
1609 m_page = get_first_page(zspage);
1610
1611 for (i = 0; i < nr_page; i++)
1612 m_page = get_next_page(m_page);
1613
1614 vaddr = kmap_atomic(m_page);
1615 link = (struct link_free *)vaddr + m_offset / sizeof(*link);
1616 set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
1617 if (likely(!ZsHugePage(zspage)))
1618 /* record handle in the header of allocated chunk */
1619 link->handle = handle;
1620 else
1621 /* record handle to page->index */
1622 zspage->first_page->index = handle;
1623
1624 kunmap_atomic(vaddr);
1625 mod_zspage_inuse(zspage, 1);
1626
1627 obj = location_to_obj(m_page, obj);
1628
1629 return obj;
1630 }
1631
1632
1633 /**
1634 * zs_malloc - Allocate block of given size from pool.
1635 * @pool: pool to allocate from
1636 * @size: size of block to allocate
1637 * @gfp: gfp flags when allocating object
1638 *
1639 * On success, handle to the allocated object is returned,
1640 * otherwise an ERR_PTR().
1641 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
1642 */
1643 unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
1644 {
1645 unsigned long handle, obj, index;
1646 struct obj_cgroup *objcg;
1647 struct size_class *class;
1648 int newfg;
1649 struct zspage *zspage;
1650
1651 if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
1652 return (unsigned long)ERR_PTR(-EINVAL);
1653
1654 handle = cache_alloc_handle(pool, gfp);
1655 if (!handle)
1656 return (unsigned long)ERR_PTR(-ENOMEM);
1657
1658 /* extra space in chunk to keep the handle */
1659 size += ZS_HANDLE_SIZE;
1660 class = pool->size_class[get_size_class_index(size)];
1661
1662 if (!zs_obj_pre_alloc_hook(class, &objcg, gfp)) {
1663 cache_free_handle(pool, handle);
1664 return (unsigned long)ERR_PTR(-ENOMEM);
1665 }
1666
1667 /* pool->lock effectively protects the zpage migration */
1668 spin_lock(&pool->lock);
1669 zspage = find_get_zspage(class);
1670 if (likely(zspage)) {
1671 index = get_freeobj(zspage);
1672 obj = obj_malloc(pool, zspage, handle);
1673 /* Now move the zspage to another fullness group, if required */
1674 fix_fullness_group(class, zspage);
1675 record_obj(handle, obj);
1676 class_stat_inc(class, ZS_OBJS_INUSE, 1);
1677
1678 goto out_unlock;
1679 }
1680
1681 spin_unlock(&pool->lock);
1682
1683 zspage = alloc_zspage(pool, class, gfp);
> 1684 if (!zspage) {
1685 cache_free_handle(pool, handle);
1686 handle = (unsigned long)ERR_PTR(-ENOMEM);
1687 goto out;
1688 }
1689
1690 zs_alloc_obj_cgroups(zspage, class);
1691 index = get_freeobj(zspage);
1692 spin_lock(&pool->lock);
1693 obj = obj_malloc(pool, zspage, handle);
1694 newfg = get_fullness_group(class, zspage);
1695 insert_zspage(class, zspage, newfg);
1696 set_zspage_mapping(zspage, class->index, newfg);
1697 record_obj(handle, obj);
1698 atomic_long_add(class->pages_per_zspage, &pool->pages_allocated);
1699 class_stat_inc(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
1700 class_stat_inc(class, ZS_OBJS_INUSE, 1);
1701
1702 /* We completely set up zspage so mark them as movable */
1703 SetZsPageMovable(pool, zspage);
1704 out_unlock:
1705 #ifdef CONFIG_ZPOOL
1706 /* Add/move zspage to beginning of LRU */
1707 if (!list_empty(&zspage->lru))
1708 list_del(&zspage->lru);
1709 list_add(&zspage->lru, &pool->lru);
1710 #endif
1711
1712 spin_unlock(&pool->lock);
1713 out:
> 1714 zs_obj_post_alloc_hook(class, zspage, index, objcg);
1715
1716 return handle;
1717 }
1718 EXPORT_SYMBOL_GPL(zs_malloc);
1719
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [RFC PATCH 3/3] zsmalloc: charge the zspage's object to obj_cgroup
2023-06-15 3:49 [RFC PATCH 3/3] zsmalloc: charge the zspage's object to obj_cgroup Zhongkun He
2023-06-15 7:46 ` kernel test robot
2023-06-15 9:00 ` kernel test robot
@ 2023-06-15 9:42 ` kernel test robot
2 siblings, 0 replies; 4+ messages in thread
From: kernel test robot @ 2023-06-15 9:42 UTC (permalink / raw)
To: Zhongkun He; +Cc: llvm, oe-kbuild-all
Hi Zhongkun,
[This is a private test report for your RFC patch.]
kernel test robot noticed the following build warnings:
[auto build test WARNING on axboe-block/for-next]
[also build test WARNING on linus/master v6.4-rc6]
[cannot apply to akpm-mm/mm-everything next-20230615]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Zhongkun-He/memcg-export-obj_cgroup-symbol-to-charge-compressed-RAM/20230615-115048
base: https://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git for-next
patch link: https://lore.kernel.org/r/20230615034905.1362034-1-hezhongkun.hzk%40bytedance.com
patch subject: [RFC PATCH 3/3] zsmalloc: charge the zspage's object to obj_cgroup
config: hexagon-randconfig-r035-20230615 (https://download.01.org/0day-ci/archive/20230615/202306151727.Nw4ilZPb-lkp@intel.com/config)
compiler: clang version 17.0.0 (https://github.com/llvm/llvm-project.git 4a5ac14ee968ff0ad5d2cc1ffa0299048db4c88a)
reproduce (this is a W=1 build):
mkdir -p ~/bin
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
git remote add axboe-block https://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
git fetch axboe-block for-next
git checkout axboe-block/for-next
b4 shazam https://lore.kernel.org/r/20230615034905.1362034-1-hezhongkun.hzk@bytedance.com
# save the config file
mkdir build_dir && cp config build_dir/.config
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang ~/bin/make.cross W=1 O=build_dir ARCH=hexagon olddefconfig
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang ~/bin/make.cross W=1 O=build_dir ARCH=hexagon SHELL=/bin/bash
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202306151727.Nw4ilZPb-lkp@intel.com/
All warnings (new ones prefixed by >>):
In file included from mm/zsmalloc.c:45:
In file included from include/linux/highmem.h:12:
In file included from include/linux/hardirq.h:11:
In file included from ./arch/hexagon/include/generated/asm/hardirq.h:1:
In file included from include/asm-generic/hardirq.h:17:
In file included from include/linux/irq.h:20:
In file included from include/linux/io.h:13:
In file included from arch/hexagon/include/asm/io.h:334:
include/asm-generic/io.h:547:31: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
547 | val = __raw_readb(PCI_IOBASE + addr);
| ~~~~~~~~~~ ^
include/asm-generic/io.h:560:61: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
560 | val = __le16_to_cpu((__le16 __force)__raw_readw(PCI_IOBASE + addr));
| ~~~~~~~~~~ ^
include/uapi/linux/byteorder/little_endian.h:37:51: note: expanded from macro '__le16_to_cpu'
37 | #define __le16_to_cpu(x) ((__force __u16)(__le16)(x))
| ^
In file included from mm/zsmalloc.c:45:
In file included from include/linux/highmem.h:12:
In file included from include/linux/hardirq.h:11:
In file included from ./arch/hexagon/include/generated/asm/hardirq.h:1:
In file included from include/asm-generic/hardirq.h:17:
In file included from include/linux/irq.h:20:
In file included from include/linux/io.h:13:
In file included from arch/hexagon/include/asm/io.h:334:
include/asm-generic/io.h:573:61: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
573 | val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
| ~~~~~~~~~~ ^
include/uapi/linux/byteorder/little_endian.h:35:51: note: expanded from macro '__le32_to_cpu'
35 | #define __le32_to_cpu(x) ((__force __u32)(__le32)(x))
| ^
In file included from mm/zsmalloc.c:45:
In file included from include/linux/highmem.h:12:
In file included from include/linux/hardirq.h:11:
In file included from ./arch/hexagon/include/generated/asm/hardirq.h:1:
In file included from include/asm-generic/hardirq.h:17:
In file included from include/linux/irq.h:20:
In file included from include/linux/io.h:13:
In file included from arch/hexagon/include/asm/io.h:334:
include/asm-generic/io.h:584:33: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
584 | __raw_writeb(value, PCI_IOBASE + addr);
| ~~~~~~~~~~ ^
include/asm-generic/io.h:594:59: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
594 | __raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr);
| ~~~~~~~~~~ ^
include/asm-generic/io.h:604:59: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
604 | __raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr);
| ~~~~~~~~~~ ^
>> mm/zsmalloc.c:1479:6: warning: no previous prototype for function 'zs_alloc_obj_cgroups' [-Wmissing-prototypes]
1479 | void zs_alloc_obj_cgroups(struct zspage *zspage, struct size_class *class)
| ^
mm/zsmalloc.c:1479:1: note: declare 'static' if the function is not intended to be used outside of this translation unit
1479 | void zs_alloc_obj_cgroups(struct zspage *zspage, struct size_class *class)
| ^
| static
>> mm/zsmalloc.c:1684:6: warning: variable 'index' is used uninitialized whenever 'if' condition is true [-Wsometimes-uninitialized]
1684 | if (!zspage) {
| ^~~~~~~
mm/zsmalloc.c:1714:40: note: uninitialized use occurs here
1714 | zs_obj_post_alloc_hook(class, zspage, index, objcg);
| ^~~~~
mm/zsmalloc.c:1684:2: note: remove the 'if' if its condition is always false
1684 | if (!zspage) {
| ^~~~~~~~~~~~~~
1685 | cache_free_handle(pool, handle);
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1686 | handle = (unsigned long)ERR_PTR(-ENOMEM);
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1687 | goto out;
| ~~~~~~~~~~~~~~~~~~~~~~~~~
1688 | }
| ~~~~~~~~~
mm/zsmalloc.c:1645:34: note: initialize the variable 'index' to silence this warning
1645 | unsigned long handle, obj, index;
| ^
| = 0
8 warnings generated.
vim +/zs_alloc_obj_cgroups +1479 mm/zsmalloc.c
1478
> 1479 void zs_alloc_obj_cgroups(struct zspage *zspage, struct size_class *class)
1480 {
1481 struct page *page = zspage->first_page;
1482 int objects = class->objs_per_zspage;
1483 unsigned long memcg_data = 0;
1484 void *vec;
1485
1486 vec = kcalloc_node(objects, sizeof(struct obj_cgroup *),
1487 GFP_NOWAIT|__GFP_NOWARN|
1488 __GFP_ZERO|__GFP_MOVABLE,
1489 page_to_nid(page));
1490
1491 if (vec)
1492 memcg_data = (unsigned long)vec | MEMCG_DATA_OBJCGS;
1493 page->memcg_data = memcg_data;
1494 }
1495
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2023-06-15 9:42 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-06-15 3:49 [RFC PATCH 3/3] zsmalloc: charge the zspage's object to obj_cgroup Zhongkun He
2023-06-15 7:46 ` kernel test robot
2023-06-15 9:00 ` kernel test robot
2023-06-15 9:42 ` kernel test robot
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.