linux-kernel.vger.kernel.org archive mirror
* [PATCH] percpu: improve percpu_alloc_percpu event trace
@ 2022-05-06  4:46 Vasily Averin
  2022-05-06  7:52 ` Vasily Averin
  2022-05-06 20:38 ` [PATCH] " kernel test robot
  0 siblings, 2 replies; 22+ messages in thread
From: Vasily Averin @ 2022-05-06  4:46 UTC (permalink / raw)
  To: Shakeel Butt, Steven Rostedt, Ingo Molnar
  Cc: kernel, linux-kernel, Roman Gushchin, Vlastimil Babka,
	Michal Hocko, cgroups, Andrew Morton, Dennis Zhou, Tejun Heo,
	Christoph Lameter, linux-mm

Added bytes_alloc and gfp_flags fields to the output of the
percpu_alloc_percpu ftrace event. This is required to track
memcg-accounted percpu allocations.

Signed-off-by: Vasily Averin <vvs@openvz.org>
---
 include/trace/events/percpu.h | 17 ++++++++++++-----
 mm/percpu-internal.h          |  8 ++++----
 mm/percpu.c                   |  3 ++-
 3 files changed, 18 insertions(+), 10 deletions(-)

diff --git a/include/trace/events/percpu.h b/include/trace/events/percpu.h
index df112a64f6c9..a6d640d2cb8b 100644
--- a/include/trace/events/percpu.h
+++ b/include/trace/events/percpu.h
@@ -6,13 +6,16 @@
 #define _TRACE_PERCPU_H
 
 #include <linux/tracepoint.h>
+#include <trace/events/mmflags.h>
 
 TRACE_EVENT(percpu_alloc_percpu,
 
 	TP_PROTO(bool reserved, bool is_atomic, size_t size,
-		 size_t align, void *base_addr, int off, void __percpu *ptr),
+		 size_t align, void *base_addr, int off,
+		 void __percpu *ptr, size_t bytes_alloc, gfp_t gfp_flags),
 
-	TP_ARGS(reserved, is_atomic, size, align, base_addr, off, ptr),
+	TP_ARGS(reserved, is_atomic, size, align, base_addr, off, ptr,
+		bytes_alloc, gfp_flags),
 
 	TP_STRUCT__entry(
 		__field(	bool,			reserved	)
@@ -22,8 +25,9 @@ TRACE_EVENT(percpu_alloc_percpu,
 		__field(	void *,			base_addr	)
 		__field(	int,			off		)
 		__field(	void __percpu *,	ptr		)
+		__field(	size_t,			bytes_alloc	)
+		__field(	gfp_t,			gfp_flags	)
 	),
-
 	TP_fast_assign(
 		__entry->reserved	= reserved;
 		__entry->is_atomic	= is_atomic;
@@ -32,12 +36,15 @@ TRACE_EVENT(percpu_alloc_percpu,
 		__entry->base_addr	= base_addr;
 		__entry->off		= off;
 		__entry->ptr		= ptr;
+		__entry->bytes_alloc	= bytes_alloc;
+		__entry->gfp_flags	= gfp_flags;
 	),
 
-	TP_printk("reserved=%d is_atomic=%d size=%zu align=%zu base_addr=%p off=%d ptr=%p",
+	TP_printk("reserved=%d is_atomic=%d size=%zu align=%zu base_addr=%p off=%d ptr=%p bytes_alloc=%zu gfp_flags=%s",
 		  __entry->reserved, __entry->is_atomic,
 		  __entry->size, __entry->align,
-		  __entry->base_addr, __entry->off, __entry->ptr)
+		  __entry->base_addr, __entry->off, __entry->ptr,
+		  __entry->bytes_alloc, show_gfp_flags(__entry->gfp_flags))
 );
 
 TRACE_EVENT(percpu_free_percpu,
diff --git a/mm/percpu-internal.h b/mm/percpu-internal.h
index 411d1593ef23..70b1ea23f4d2 100644
--- a/mm/percpu-internal.h
+++ b/mm/percpu-internal.h
@@ -113,7 +113,6 @@ static inline int pcpu_chunk_map_bits(struct pcpu_chunk *chunk)
 	return pcpu_nr_pages_to_map_bits(chunk->nr_pages);
 }
 
-#ifdef CONFIG_MEMCG_KMEM
 /**
  * pcpu_obj_full_size - helper to calculate size of each accounted object
  * @size: size of area to allocate in bytes
@@ -123,13 +122,14 @@ static inline int pcpu_chunk_map_bits(struct pcpu_chunk *chunk)
  */
 static inline size_t pcpu_obj_full_size(size_t size)
 {
-	size_t extra_size;
+	size_t extra_size = 0;
 
-	extra_size = size / PCPU_MIN_ALLOC_SIZE * sizeof(struct obj_cgroup *);
+#ifdef CONFIG_MEMCG_KMEM
+	extra_size += size / PCPU_MIN_ALLOC_SIZE * sizeof(struct obj_cgroup *);
+#endif
 
 	return size * num_possible_cpus() + extra_size;
 }
-#endif /* CONFIG_MEMCG_KMEM */
 
 #ifdef CONFIG_PERCPU_STATS
 
diff --git a/mm/percpu.c b/mm/percpu.c
index ea28db283044..cbeb380c359d 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1885,7 +1885,8 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 	kmemleak_alloc_percpu(ptr, size, gfp);
 
 	trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
-			chunk->base_addr, off, ptr);
+			chunk->base_addr, off, ptr,
+			pcpu_obj_full_size(size), gfp);
 
 	pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);
 
-- 
2.31.1



* Re: [PATCH] percpu: improve percpu_alloc_percpu event trace
  2022-05-06  4:46 [PATCH] percpu: improve percpu_alloc_percpu event trace Vasily Averin
@ 2022-05-06  7:52 ` Vasily Averin
  2022-05-06 19:29   ` [PATCH v2] " Vasily Averin
  2022-05-06 20:38 ` [PATCH] " kernel test robot
  1 sibling, 1 reply; 22+ messages in thread
From: Vasily Averin @ 2022-05-06  7:52 UTC (permalink / raw)
  To: Shakeel Butt, Steven Rostedt, Ingo Molnar
  Cc: kernel, linux-kernel, Roman Gushchin, Vlastimil Babka,
	Michal Hocko, cgroups, Andrew Morton, Dennis Zhou, Tejun Heo,
	Christoph Lameter, linux-mm

On 5/6/22 07:46, Vasily Averin wrote:
> Added bytes_alloc and gfp_flags fields to the output of the
> percpu_alloc_percpu ftrace event. This is required to track
> memcg-accounted percpu allocations.

Perhaps it makes sense to add call_site too...


* [PATCH v2] percpu: improve percpu_alloc_percpu event trace
  2022-05-06  7:52 ` Vasily Averin
@ 2022-05-06 19:29   ` Vasily Averin
  2022-05-11  2:33     ` Roman Gushchin
  0 siblings, 1 reply; 22+ messages in thread
From: Vasily Averin @ 2022-05-06 19:29 UTC (permalink / raw)
  To: Shakeel Butt, Steven Rostedt, Ingo Molnar
  Cc: kernel, linux-kernel, Roman Gushchin, Vlastimil Babka,
	Michal Hocko, cgroups, Andrew Morton, Dennis Zhou, Tejun Heo,
	Christoph Lameter, linux-mm

Added call_site, bytes_alloc and gfp_flags fields to the output
of the percpu_alloc_percpu ftrace event:

mkdir-4393  [001]   169.334788: percpu_alloc_percpu:
 call_site=mem_cgroup_css_alloc+0xa6 reserved=0 is_atomic=0 size=2408 align=8
  base_addr=0xffffc7117fc00000 off=402176 ptr=0x3dc867a62300 bytes_alloc=14448
   gfp_flags=GFP_KERNEL_ACCOUNT

This is required to track memcg-accounted percpu allocations.
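
For reference, bytes_alloc above is pcpu_obj_full_size(size). A worked
sketch of the numbers in the example line, assuming CONFIG_MEMCG_KMEM,
a 64-bit kernel (sizeof(struct obj_cgroup *) == 8), PCPU_MIN_ALLOC_SIZE
== 4 and 4 possible CPUs:

	extra_size  = 2408 / 4 * 8    =  4816	/* obj_cgroup pointers */
	bytes_alloc = 2408 * 4 + 4816 = 14448	/* matches the line above */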

Signed-off-by: Vasily Averin <vvs@openvz.org>
---
v2: added call_site, improved patch description
---
 include/trace/events/percpu.h | 23 +++++++++++++++++------
 mm/percpu-internal.h          |  8 ++++----
 mm/percpu.c                   |  5 +++--
 3 files changed, 24 insertions(+), 12 deletions(-)

diff --git a/include/trace/events/percpu.h b/include/trace/events/percpu.h
index df112a64f6c9..e989cefc0def 100644
--- a/include/trace/events/percpu.h
+++ b/include/trace/events/percpu.h
@@ -6,15 +6,20 @@
 #define _TRACE_PERCPU_H
 
 #include <linux/tracepoint.h>
+#include <trace/events/mmflags.h>
 
 TRACE_EVENT(percpu_alloc_percpu,
 
-	TP_PROTO(bool reserved, bool is_atomic, size_t size,
-		 size_t align, void *base_addr, int off, void __percpu *ptr),
+	TP_PROTO(unsigned long call_site,
+		 bool reserved, bool is_atomic, size_t size,
+		 size_t align, void *base_addr, int off,
+		 void __percpu *ptr, size_t bytes_alloc, gfp_t gfp_flags),
 
-	TP_ARGS(reserved, is_atomic, size, align, base_addr, off, ptr),
+	TP_ARGS(call_site, reserved, is_atomic, size, align, base_addr, off,
+		ptr, bytes_alloc, gfp_flags),
 
 	TP_STRUCT__entry(
+		__field(	unsigned long,		call_site	)
 		__field(	bool,			reserved	)
 		__field(	bool,			is_atomic	)
 		__field(	size_t,			size		)
@@ -22,9 +27,11 @@ TRACE_EVENT(percpu_alloc_percpu,
 		__field(	void *,			base_addr	)
 		__field(	int,			off		)
 		__field(	void __percpu *,	ptr		)
+		__field(	size_t,			bytes_alloc	)
+		__field(	gfp_t,			gfp_flags	)
 	),
-
 	TP_fast_assign(
+		__entry->call_site	= call_site;
 		__entry->reserved	= reserved;
 		__entry->is_atomic	= is_atomic;
 		__entry->size		= size;
@@ -32,12 +39,16 @@ TRACE_EVENT(percpu_alloc_percpu,
 		__entry->base_addr	= base_addr;
 		__entry->off		= off;
 		__entry->ptr		= ptr;
+		__entry->bytes_alloc	= bytes_alloc;
+		__entry->gfp_flags	= gfp_flags;
 	),
 
-	TP_printk("reserved=%d is_atomic=%d size=%zu align=%zu base_addr=%p off=%d ptr=%p",
+	TP_printk("call_site=%pS reserved=%d is_atomic=%d size=%zu align=%zu base_addr=%p off=%d ptr=%p bytes_alloc=%zu gfp_flags=%s",
+		  (void *)__entry->call_site,
 		  __entry->reserved, __entry->is_atomic,
 		  __entry->size, __entry->align,
-		  __entry->base_addr, __entry->off, __entry->ptr)
+		  __entry->base_addr, __entry->off, __entry->ptr,
+		  __entry->bytes_alloc, show_gfp_flags(__entry->gfp_flags))
 );
 
 TRACE_EVENT(percpu_free_percpu,
diff --git a/mm/percpu-internal.h b/mm/percpu-internal.h
index 411d1593ef23..70b1ea23f4d2 100644
--- a/mm/percpu-internal.h
+++ b/mm/percpu-internal.h
@@ -113,7 +113,6 @@ static inline int pcpu_chunk_map_bits(struct pcpu_chunk *chunk)
 	return pcpu_nr_pages_to_map_bits(chunk->nr_pages);
 }
 
-#ifdef CONFIG_MEMCG_KMEM
 /**
  * pcpu_obj_full_size - helper to calculate size of each accounted object
  * @size: size of area to allocate in bytes
@@ -123,13 +122,14 @@ static inline int pcpu_chunk_map_bits(struct pcpu_chunk *chunk)
  */
 static inline size_t pcpu_obj_full_size(size_t size)
 {
-	size_t extra_size;
+	size_t extra_size = 0;
 
-	extra_size = size / PCPU_MIN_ALLOC_SIZE * sizeof(struct obj_cgroup *);
+#ifdef CONFIG_MEMCG_KMEM
+	extra_size += size / PCPU_MIN_ALLOC_SIZE * sizeof(struct obj_cgroup *);
+#endif
 
 	return size * num_possible_cpus() + extra_size;
 }
-#endif /* CONFIG_MEMCG_KMEM */
 
 #ifdef CONFIG_PERCPU_STATS
 
diff --git a/mm/percpu.c b/mm/percpu.c
index ea28db283044..3633eeefaa0d 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1884,8 +1884,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
 	kmemleak_alloc_percpu(ptr, size, gfp);
 
-	trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
-			chunk->base_addr, off, ptr);
+	trace_percpu_alloc_percpu(_RET_IP_, reserved, is_atomic, size, align,
+				  chunk->base_addr, off, ptr,
+				  pcpu_obj_full_size(size), gfp);
 
 	pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);
 
-- 
2.31.1



* Re: [PATCH] percpu: improve percpu_alloc_percpu event trace
  2022-05-06  4:46 [PATCH] percpu: improve percpu_alloc_percpu event trace Vasily Averin
  2022-05-06  7:52 ` Vasily Averin
@ 2022-05-06 20:38 ` kernel test robot
  2022-05-07 14:51   ` Vasily Averin
  1 sibling, 1 reply; 22+ messages in thread
From: kernel test robot @ 2022-05-06 20:38 UTC (permalink / raw)
  To: Vasily Averin, Shakeel Butt, Steven Rostedt, Ingo Molnar
  Cc: kbuild-all, kernel, linux-kernel, Roman Gushchin,
	Vlastimil Babka, Michal Hocko, cgroups, Andrew Morton,
	Linux Memory Management List, Dennis Zhou, Tejun Heo,
	Christoph Lameter

Hi Vasily,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on rostedt-trace/for-next]
[also build test WARNING on hnaz-mm/master v5.18-rc5]
[cannot apply to dennis-percpu/for-next]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/intel-lab-lkp/linux/commits/Vasily-Averin/percpu-improve-percpu_alloc_percpu-event-trace/20220506-124742
base:   https://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace.git for-next
config: i386-randconfig-s001 (https://download.01.org/0day-ci/archive/20220507/202205070420.aAhuqpYk-lkp@intel.com/config)
compiler: gcc-11 (Debian 11.2.0-20) 11.2.0
reproduce:
        # apt-get install sparse
        # sparse version: v0.6.4-dirty
        # https://github.com/intel-lab-lkp/linux/commit/dee6876db0a7a4715516e673f9edaca2ba40677c
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Vasily-Averin/percpu-improve-percpu_alloc_percpu-event-trace/20220506-124742
        git checkout dee6876db0a7a4715516e673f9edaca2ba40677c
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        make W=1 C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' O=build_dir ARCH=i386 SHELL=/bin/bash

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@intel.com>


sparse warnings: (new ones prefixed by >>)
   mm/percpu.c: note: in included file (through include/trace/trace_events.h, include/trace/define_trace.h, include/trace/events/percpu.h):
>> include/trace/events/percpu.h:11:1: sparse: sparse: cast from restricted gfp_t
>> include/trace/events/percpu.h:11:1: sparse: sparse: incorrect type in argument 3 (different base types) @@     expected unsigned long flags @@     got restricted gfp_t [usertype] gfp_flags @@
   include/trace/events/percpu.h:11:1: sparse:     expected unsigned long flags
   include/trace/events/percpu.h:11:1: sparse:     got restricted gfp_t [usertype] gfp_flags
   mm/percpu.c: note: in included file (through include/trace/trace_events.h, include/trace/define_trace.h, include/trace/events/percpu.h):
>> include/trace/events/percpu.h:11:1: sparse: sparse: cast to restricted gfp_t
>> include/trace/events/percpu.h:11:1: sparse: sparse: cast to restricted gfp_t
>> include/trace/events/percpu.h:11:1: sparse: sparse: restricted gfp_t degrades to integer
>> include/trace/events/percpu.h:11:1: sparse: sparse: restricted gfp_t degrades to integer
   mm/percpu.c:2012:24: sparse: sparse: context imbalance in 'pcpu_balance_free' - unexpected unlock

vim +11 include/trace/events/percpu.h

df95e795a72289 Dennis Zhou   2017-06-19  10  
df95e795a72289 Dennis Zhou   2017-06-19 @11  TRACE_EVENT(percpu_alloc_percpu,
df95e795a72289 Dennis Zhou   2017-06-19  12  
df95e795a72289 Dennis Zhou   2017-06-19  13  	TP_PROTO(bool reserved, bool is_atomic, size_t size,
dee6876db0a7a4 Vasily Averin 2022-05-06  14  		 size_t align, void *base_addr, int off,
dee6876db0a7a4 Vasily Averin 2022-05-06  15  		 void __percpu *ptr, size_t bytes_alloc, gfp_t gfp_flags),
df95e795a72289 Dennis Zhou   2017-06-19  16  
dee6876db0a7a4 Vasily Averin 2022-05-06  17  	TP_ARGS(reserved, is_atomic, size, align, base_addr, off, ptr,
dee6876db0a7a4 Vasily Averin 2022-05-06  18  		bytes_alloc, gfp_flags),
df95e795a72289 Dennis Zhou   2017-06-19  19  
df95e795a72289 Dennis Zhou   2017-06-19  20  	TP_STRUCT__entry(
df95e795a72289 Dennis Zhou   2017-06-19  21  		__field(	bool,			reserved	)
df95e795a72289 Dennis Zhou   2017-06-19  22  		__field(	bool,			is_atomic	)
df95e795a72289 Dennis Zhou   2017-06-19  23  		__field(	size_t,			size		)
df95e795a72289 Dennis Zhou   2017-06-19  24  		__field(	size_t,			align		)
df95e795a72289 Dennis Zhou   2017-06-19  25  		__field(	void *,			base_addr	)
df95e795a72289 Dennis Zhou   2017-06-19  26  		__field(	int,			off		)
df95e795a72289 Dennis Zhou   2017-06-19  27  		__field(	void __percpu *,	ptr		)
dee6876db0a7a4 Vasily Averin 2022-05-06  28  		__field(	size_t,			bytes_alloc	)
dee6876db0a7a4 Vasily Averin 2022-05-06  29  		__field(	gfp_t,			gfp_flags	)
df95e795a72289 Dennis Zhou   2017-06-19  30  	),
df95e795a72289 Dennis Zhou   2017-06-19  31  	TP_fast_assign(
df95e795a72289 Dennis Zhou   2017-06-19  32  		__entry->reserved	= reserved;
df95e795a72289 Dennis Zhou   2017-06-19  33  		__entry->is_atomic	= is_atomic;
df95e795a72289 Dennis Zhou   2017-06-19  34  		__entry->size		= size;
df95e795a72289 Dennis Zhou   2017-06-19  35  		__entry->align		= align;
df95e795a72289 Dennis Zhou   2017-06-19  36  		__entry->base_addr	= base_addr;
df95e795a72289 Dennis Zhou   2017-06-19  37  		__entry->off		= off;
df95e795a72289 Dennis Zhou   2017-06-19  38  		__entry->ptr		= ptr;
dee6876db0a7a4 Vasily Averin 2022-05-06  39  		__entry->bytes_alloc	= bytes_alloc;
dee6876db0a7a4 Vasily Averin 2022-05-06  40  		__entry->gfp_flags	= gfp_flags;
df95e795a72289 Dennis Zhou   2017-06-19  41  	),
df95e795a72289 Dennis Zhou   2017-06-19  42  
dee6876db0a7a4 Vasily Averin 2022-05-06  43  	TP_printk("reserved=%d is_atomic=%d size=%zu align=%zu base_addr=%p off=%d ptr=%p bytes_alloc=%zu gfp_flags=%s",
df95e795a72289 Dennis Zhou   2017-06-19  44  		  __entry->reserved, __entry->is_atomic,
df95e795a72289 Dennis Zhou   2017-06-19  45  		  __entry->size, __entry->align,
dee6876db0a7a4 Vasily Averin 2022-05-06  46  		  __entry->base_addr, __entry->off, __entry->ptr,
dee6876db0a7a4 Vasily Averin 2022-05-06  47  		  __entry->bytes_alloc, show_gfp_flags(__entry->gfp_flags))
df95e795a72289 Dennis Zhou   2017-06-19  48  );
df95e795a72289 Dennis Zhou   2017-06-19  49  

-- 
0-DAY CI Kernel Test Service
https://01.org/lkp


* Re: [PATCH] percpu: improve percpu_alloc_percpu event trace
  2022-05-06 20:38 ` [PATCH] " kernel test robot
@ 2022-05-07 14:51   ` Vasily Averin
  2022-05-07 19:02     ` [PATCH mm] tracing: incorrect gfp_t conversion Vasily Averin
  2022-05-09 21:06     ` [PATCH] percpu: improve percpu_alloc_percpu event trace Steven Rostedt
  0 siblings, 2 replies; 22+ messages in thread
From: Vasily Averin @ 2022-05-07 14:51 UTC (permalink / raw)
  To: kernel test robot, Steven Rostedt, Ingo Molnar
  Cc: kbuild-all, Shakeel Butt, kernel, linux-kernel, Roman Gushchin,
	Vlastimil Babka, Michal Hocko, cgroups, Andrew Morton,
	Linux Memory Management List, Dennis Zhou, Tejun Heo,
	Christoph Lameter

On 5/6/22 23:38, kernel test robot wrote:
>>> include/trace/events/percpu.h:11:1: sparse: sparse: cast from restricted gfp_t
>>> include/trace/events/percpu.h:11:1: sparse: sparse: incorrect type in argument 3 (different base types) @@     expected unsigned long flags @@     got restricted gfp_t [usertype] gfp_flags @@
>    include/trace/events/percpu.h:11:1: sparse:     expected unsigned long flags
>    include/trace/events/percpu.h:11:1: sparse:     got restricted gfp_t [usertype] gfp_flags
>    mm/percpu.c: note: in included file (through include/trace/trace_events.h, include/trace/define_trace.h, include/trace/events/percpu.h):
>>> include/trace/events/percpu.h:11:1: sparse: sparse: cast to restricted gfp_t
>>> include/trace/events/percpu.h:11:1: sparse: sparse: cast to restricted gfp_t
>>> include/trace/events/percpu.h:11:1: sparse: sparse: restricted gfp_t degrades to integer
>>> include/trace/events/percpu.h:11:1: sparse: sparse: restricted gfp_t degrades to integer
>    mm/percpu.c:2012:24: sparse: sparse: context imbalance in 'pcpu_balance_free' - unexpected unlock

The same messages are generated for any other gfp_t argument in trace events.
As far as I understand, this is not a bug per se, but the trace macros lack
a __force attribute in the 'gfp_t' -> 'unsigned long' casts. The same thing
happens with mode_t and in some other places that use __print_flags() for
__bitwise-marked types.

I can make sparse happy, here and elsewhere, but it requires a lot of
__force attributes. Is anyone interested in such patches, or can we
silently ignore these messages?

We would need to add a __force attribute to all entries in the
__def_gfpflag_names array and make a few changes to the trace event
definition; below is an example.
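
In isolation, the event-side pattern would look something like this
(a sketch trimmed to just the gfp_flags field, assuming
trace/events/mmflags.h is included for show_gfp_flags()); the annotated
quote below marks the same two spots in the real event:

TRACE_EVENT(percpu_alloc_percpu,

	TP_PROTO(gfp_t gfp_flags),

	TP_ARGS(gfp_flags),

	TP_STRUCT__entry(
		__field(	unsigned long,	gfp_flags	)	/* was: gfp_t */
	),

	TP_fast_assign(
		/* __force: gfp_t is __bitwise, a plain cast makes sparse warn */
		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
	),

	TP_printk("gfp_flags=%s", show_gfp_flags(__entry->gfp_flags))
);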

> vim +11 include/trace/events/percpu.h
> 
> df95e795a72289 Dennis Zhou   2017-06-19  10  
> df95e795a72289 Dennis Zhou   2017-06-19 @11  TRACE_EVENT(percpu_alloc_percpu,
> df95e795a72289 Dennis Zhou   2017-06-19  12  
> df95e795a72289 Dennis Zhou   2017-06-19  13  	TP_PROTO(bool reserved, bool is_atomic, size_t size,
> dee6876db0a7a4 Vasily Averin 2022-05-06  14  		 size_t align, void *base_addr, int off,
> dee6876db0a7a4 Vasily Averin 2022-05-06  15  		 void __percpu *ptr, size_t bytes_alloc, gfp_t gfp_flags),
> df95e795a72289 Dennis Zhou   2017-06-19  16  
> dee6876db0a7a4 Vasily Averin 2022-05-06  17  	TP_ARGS(reserved, is_atomic, size, align, base_addr, off, ptr,
> dee6876db0a7a4 Vasily Averin 2022-05-06  18  		bytes_alloc, gfp_flags),
> df95e795a72289 Dennis Zhou   2017-06-19  19  
> df95e795a72289 Dennis Zhou   2017-06-19  20  	TP_STRUCT__entry(
> df95e795a72289 Dennis Zhou   2017-06-19  21  		__field(	bool,			reserved	)
> df95e795a72289 Dennis Zhou   2017-06-19  22  		__field(	bool,			is_atomic	)
> df95e795a72289 Dennis Zhou   2017-06-19  23  		__field(	size_t,			size		)
> df95e795a72289 Dennis Zhou   2017-06-19  24  		__field(	size_t,			align		)
> df95e795a72289 Dennis Zhou   2017-06-19  25  		__field(	void *,			base_addr	)
> df95e795a72289 Dennis Zhou   2017-06-19  26  		__field(	int,			off		)
> df95e795a72289 Dennis Zhou   2017-06-19  27  		__field(	void __percpu *,	ptr		)
> dee6876db0a7a4 Vasily Averin 2022-05-06  28  		__field(	size_t,			bytes_alloc	)
> dee6876db0a7a4 Vasily Averin 2022-05-06  29  		__field(	gfp_t,			gfp_flags	)
VvS: need to replace gfp_t with unsigned long here ...

> df95e795a72289 Dennis Zhou   2017-06-19  30  	),
> df95e795a72289 Dennis Zhou   2017-06-19  31  	TP_fast_assign(
> df95e795a72289 Dennis Zhou   2017-06-19  32  		__entry->reserved	= reserved;
> df95e795a72289 Dennis Zhou   2017-06-19  33  		__entry->is_atomic	= is_atomic;
> df95e795a72289 Dennis Zhou   2017-06-19  34  		__entry->size		= size;
> df95e795a72289 Dennis Zhou   2017-06-19  35  		__entry->align		= align;
> df95e795a72289 Dennis Zhou   2017-06-19  36  		__entry->base_addr	= base_addr;
> df95e795a72289 Dennis Zhou   2017-06-19  37  		__entry->off		= off;
> df95e795a72289 Dennis Zhou   2017-06-19  38  		__entry->ptr		= ptr;
> dee6876db0a7a4 Vasily Averin 2022-05-06  39  		__entry->bytes_alloc	= bytes_alloc;
> dee6876db0a7a4 Vasily Averin 2022-05-06  40  		__entry->gfp_flags	= gfp_flags;
VvS: ... and use a (__force unsigned long) cast here

> df95e795a72289 Dennis Zhou   2017-06-19  41  	),
> df95e795a72289 Dennis Zhou   2017-06-19  42  
> dee6876db0a7a4 Vasily Averin 2022-05-06  43  	TP_printk("reserved=%d is_atomic=%d size=%zu align=%zu base_addr=%p off=%d ptr=%p bytes_alloc=%zu gfp_flags=%s",
> df95e795a72289 Dennis Zhou   2017-06-19  44  		  __entry->reserved, __entry->is_atomic,
> df95e795a72289 Dennis Zhou   2017-06-19  45  		  __entry->size, __entry->align,
> dee6876db0a7a4 Vasily Averin 2022-05-06  46  		  __entry->base_addr, __entry->off, __entry->ptr,
> dee6876db0a7a4 Vasily Averin 2022-05-06  47  		  __entry->bytes_alloc, show_gfp_flags(__entry->gfp_flags))
> df95e795a72289 Dennis Zhou   2017-06-19  48  );
> df95e795a72289 Dennis Zhou   2017-06-19  49  
 Thank you,
	Vasily Averin


* [PATCH mm] tracing: incorrect gfp_t conversion
  2022-05-07 14:51   ` Vasily Averin
@ 2022-05-07 19:02     ` Vasily Averin
  2022-05-07 19:37       ` Andrew Morton
  2022-05-09 21:06     ` [PATCH] percpu: improve percpu_alloc_percpu event trace Steven Rostedt
  1 sibling, 1 reply; 22+ messages in thread
From: Vasily Averin @ 2022-05-07 19:02 UTC (permalink / raw)
  To: Steven Rostedt, Ingo Molnar, Andrew Morton; +Cc: kernel, linux-kernel, linux-mm

Fixes the following sparse warnings:

include/trace/events/*: sparse: cast to restricted gfp_t
include/trace/events/*: sparse: restricted gfp_t degrades to integer

The gfp_t type is bitwise and requires a __force attribute for any cast.
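
For context, gfp_t is a sparse 'bitwise' type (include/linux/types.h),
so any plain cast trips the checker; a minimal illustration:

	typedef unsigned __bitwise gfp_t;

	unsigned long bad = (unsigned long)GFP_KERNEL;
				/* sparse: cast from restricted gfp_t */
	unsigned long ok = (__force unsigned long)GFP_KERNEL;
				/* __force marks the cast as intentional */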

Signed-off-by: Vasily Averin <vvs@openvz.org>
---
 include/linux/gfp.h               |  2 +-
 include/trace/events/btrfs.h      |  4 +-
 include/trace/events/compaction.h |  4 +-
 include/trace/events/kmem.h       | 12 ++---
 include/trace/events/mmflags.h    | 74 +++++++++++++++----------------
 include/trace/events/vmscan.h     | 16 +++----
 mm/compaction.c                   |  2 +-
 7 files changed, 57 insertions(+), 57 deletions(-)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 80f63c862be5..5f44c49abab7 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -352,7 +352,7 @@ static inline int gfp_migratetype(const gfp_t gfp_flags)
 		return MIGRATE_UNMOVABLE;
 
 	/* Group based on mobility */
-	return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+	return (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
 }
 #undef GFP_MOVABLE_MASK
 #undef GFP_MOVABLE_SHIFT
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 0d729664b4b4..ea7a05ac49c3 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -1343,13 +1343,13 @@ TRACE_EVENT(alloc_extent_state,
 
 	TP_STRUCT__entry(
 		__field(const struct extent_state *, state)
-		__field(gfp_t, mask)
+		__field(unsigned long, mask)
 		__field(const void*, ip)
 	),
 
 	TP_fast_assign(
 		__entry->state	= state,
-		__entry->mask	= mask,
+		__entry->mask	= (__force unsigned long)mask,
 		__entry->ip	= (const void *)IP
 	),
 
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
index 7d48e7079e48..915c11ee28e4 100644
--- a/include/trace/events/compaction.h
+++ b/include/trace/events/compaction.h
@@ -162,13 +162,13 @@ TRACE_EVENT(mm_compaction_try_to_compact_pages,
 
 	TP_STRUCT__entry(
 		__field(int, order)
-		__field(gfp_t, gfp_mask)
+		__field(unsigned long, gfp_mask)
 		__field(int, prio)
 	),
 
 	TP_fast_assign(
 		__entry->order = order;
-		__entry->gfp_mask = gfp_mask;
+		__entry->gfp_mask = (__force unsigned long)gfp_mask;
 		__entry->prio = prio;
 	),
 
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index ddc8c944f417..71c141804222 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -24,7 +24,7 @@ DECLARE_EVENT_CLASS(kmem_alloc,
 		__field(	const void *,	ptr		)
 		__field(	size_t,		bytes_req	)
 		__field(	size_t,		bytes_alloc	)
-		__field(	gfp_t,		gfp_flags	)
+		__field(	unsigned long,	gfp_flags	)
 	),
 
 	TP_fast_assign(
@@ -32,7 +32,7 @@ DECLARE_EVENT_CLASS(kmem_alloc,
 		__entry->ptr		= ptr;
 		__entry->bytes_req	= bytes_req;
 		__entry->bytes_alloc	= bytes_alloc;
-		__entry->gfp_flags	= gfp_flags;
+		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
 	),
 
 	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
@@ -75,7 +75,7 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
 		__field(	const void *,	ptr		)
 		__field(	size_t,		bytes_req	)
 		__field(	size_t,		bytes_alloc	)
-		__field(	gfp_t,		gfp_flags	)
+		__field(	unsigned long,	gfp_flags	)
 		__field(	int,		node		)
 	),
 
@@ -84,7 +84,7 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
 		__entry->ptr		= ptr;
 		__entry->bytes_req	= bytes_req;
 		__entry->bytes_alloc	= bytes_alloc;
-		__entry->gfp_flags	= gfp_flags;
+		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
 		__entry->node		= node;
 	),
 
@@ -208,14 +208,14 @@ TRACE_EVENT(mm_page_alloc,
 	TP_STRUCT__entry(
 		__field(	unsigned long,	pfn		)
 		__field(	unsigned int,	order		)
-		__field(	gfp_t,		gfp_flags	)
+		__field(	unsigned long,	gfp_flags	)
 		__field(	int,		migratetype	)
 	),
 
 	TP_fast_assign(
 		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
 		__entry->order		= order;
-		__entry->gfp_flags	= gfp_flags;
+		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
 		__entry->migratetype	= migratetype;
 	),
 
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 116ed4d5d0f8..51d4fa6f3f39 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -14,43 +14,43 @@
  */
 
 #define __def_gfpflag_names						\
-	{(unsigned long)GFP_TRANSHUGE,		"GFP_TRANSHUGE"},	\
-	{(unsigned long)GFP_TRANSHUGE_LIGHT,	"GFP_TRANSHUGE_LIGHT"}, \
-	{(unsigned long)GFP_HIGHUSER_MOVABLE,	"GFP_HIGHUSER_MOVABLE"},\
-	{(unsigned long)GFP_HIGHUSER,		"GFP_HIGHUSER"},	\
-	{(unsigned long)GFP_USER,		"GFP_USER"},		\
-	{(unsigned long)GFP_KERNEL_ACCOUNT,	"GFP_KERNEL_ACCOUNT"},	\
-	{(unsigned long)GFP_KERNEL,		"GFP_KERNEL"},		\
-	{(unsigned long)GFP_NOFS,		"GFP_NOFS"},		\
-	{(unsigned long)GFP_ATOMIC,		"GFP_ATOMIC"},		\
-	{(unsigned long)GFP_NOIO,		"GFP_NOIO"},		\
-	{(unsigned long)GFP_NOWAIT,		"GFP_NOWAIT"},		\
-	{(unsigned long)GFP_DMA,		"GFP_DMA"},		\
-	{(unsigned long)__GFP_HIGHMEM,		"__GFP_HIGHMEM"},	\
-	{(unsigned long)GFP_DMA32,		"GFP_DMA32"},		\
-	{(unsigned long)__GFP_HIGH,		"__GFP_HIGH"},		\
-	{(unsigned long)__GFP_ATOMIC,		"__GFP_ATOMIC"},	\
-	{(unsigned long)__GFP_IO,		"__GFP_IO"},		\
-	{(unsigned long)__GFP_FS,		"__GFP_FS"},		\
-	{(unsigned long)__GFP_NOWARN,		"__GFP_NOWARN"},	\
-	{(unsigned long)__GFP_RETRY_MAYFAIL,	"__GFP_RETRY_MAYFAIL"},	\
-	{(unsigned long)__GFP_NOFAIL,		"__GFP_NOFAIL"},	\
-	{(unsigned long)__GFP_NORETRY,		"__GFP_NORETRY"},	\
-	{(unsigned long)__GFP_COMP,		"__GFP_COMP"},		\
-	{(unsigned long)__GFP_ZERO,		"__GFP_ZERO"},		\
-	{(unsigned long)__GFP_NOMEMALLOC,	"__GFP_NOMEMALLOC"},	\
-	{(unsigned long)__GFP_MEMALLOC,		"__GFP_MEMALLOC"},	\
-	{(unsigned long)__GFP_HARDWALL,		"__GFP_HARDWALL"},	\
-	{(unsigned long)__GFP_THISNODE,		"__GFP_THISNODE"},	\
-	{(unsigned long)__GFP_RECLAIMABLE,	"__GFP_RECLAIMABLE"},	\
-	{(unsigned long)__GFP_MOVABLE,		"__GFP_MOVABLE"},	\
-	{(unsigned long)__GFP_ACCOUNT,		"__GFP_ACCOUNT"},	\
-	{(unsigned long)__GFP_WRITE,		"__GFP_WRITE"},		\
-	{(unsigned long)__GFP_RECLAIM,		"__GFP_RECLAIM"},	\
-	{(unsigned long)__GFP_DIRECT_RECLAIM,	"__GFP_DIRECT_RECLAIM"},\
-	{(unsigned long)__GFP_KSWAPD_RECLAIM,	"__GFP_KSWAPD_RECLAIM"},\
-	{(unsigned long)__GFP_ZEROTAGS,		"__GFP_ZEROTAGS"},	\
-	{(unsigned long)__GFP_SKIP_KASAN_POISON,"__GFP_SKIP_KASAN_POISON"}\
+	{(__force unsigned long)GFP_TRANSHUGE,		"GFP_TRANSHUGE"},	\
+	{(__force unsigned long)GFP_TRANSHUGE_LIGHT,	"GFP_TRANSHUGE_LIGHT"}, \
+	{(__force unsigned long)GFP_HIGHUSER_MOVABLE,	"GFP_HIGHUSER_MOVABLE"},\
+	{(__force unsigned long)GFP_HIGHUSER,		"GFP_HIGHUSER"},	\
+	{(__force unsigned long)GFP_USER,		"GFP_USER"},		\
+	{(__force unsigned long)GFP_KERNEL_ACCOUNT,	"GFP_KERNEL_ACCOUNT"},	\
+	{(__force unsigned long)GFP_KERNEL,		"GFP_KERNEL"},		\
+	{(__force unsigned long)GFP_NOFS,		"GFP_NOFS"},		\
+	{(__force unsigned long)GFP_ATOMIC,		"GFP_ATOMIC"},		\
+	{(__force unsigned long)GFP_NOIO,		"GFP_NOIO"},		\
+	{(__force unsigned long)GFP_NOWAIT,		"GFP_NOWAIT"},		\
+	{(__force unsigned long)GFP_DMA,		"GFP_DMA"},		\
+	{(__force unsigned long)__GFP_HIGHMEM,		"__GFP_HIGHMEM"},	\
+	{(__force unsigned long)GFP_DMA32,		"GFP_DMA32"},		\
+	{(__force unsigned long)__GFP_HIGH,		"__GFP_HIGH"},		\
+	{(__force unsigned long)__GFP_ATOMIC,		"__GFP_ATOMIC"},	\
+	{(__force unsigned long)__GFP_IO,		"__GFP_IO"},		\
+	{(__force unsigned long)__GFP_FS,		"__GFP_FS"},		\
+	{(__force unsigned long)__GFP_NOWARN,		"__GFP_NOWARN"},	\
+	{(__force unsigned long)__GFP_RETRY_MAYFAIL,	"__GFP_RETRY_MAYFAIL"},	\
+	{(__force unsigned long)__GFP_NOFAIL,		"__GFP_NOFAIL"},	\
+	{(__force unsigned long)__GFP_NORETRY,		"__GFP_NORETRY"},	\
+	{(__force unsigned long)__GFP_COMP,		"__GFP_COMP"},		\
+	{(__force unsigned long)__GFP_ZERO,		"__GFP_ZERO"},		\
+	{(__force unsigned long)__GFP_NOMEMALLOC,	"__GFP_NOMEMALLOC"},	\
+	{(__force unsigned long)__GFP_MEMALLOC,		"__GFP_MEMALLOC"},	\
+	{(__force unsigned long)__GFP_HARDWALL,		"__GFP_HARDWALL"},	\
+	{(__force unsigned long)__GFP_THISNODE,		"__GFP_THISNODE"},	\
+	{(__force unsigned long)__GFP_RECLAIMABLE,	"__GFP_RECLAIMABLE"},	\
+	{(__force unsigned long)__GFP_MOVABLE,		"__GFP_MOVABLE"},	\
+	{(__force unsigned long)__GFP_ACCOUNT,		"__GFP_ACCOUNT"},	\
+	{(__force unsigned long)__GFP_WRITE,		"__GFP_WRITE"},		\
+	{(__force unsigned long)__GFP_RECLAIM,		"__GFP_RECLAIM"},	\
+	{(__force unsigned long)__GFP_DIRECT_RECLAIM,	"__GFP_DIRECT_RECLAIM"},\
+	{(__force unsigned long)__GFP_KSWAPD_RECLAIM,	"__GFP_KSWAPD_RECLAIM"},\
+	{(__force unsigned long)__GFP_ZEROTAGS,		"__GFP_ZEROTAGS"},	\
+	{(__force unsigned long)__GFP_SKIP_KASAN_POISON, "__GFP_SKIP_KASAN_POISON"} \
 
 #define show_gfp_flags(flags)						\
 	(flags) ? __print_flags(flags, "|",				\
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index ca2e9009a651..b1db02c1aa6f 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -96,14 +96,14 @@ TRACE_EVENT(mm_vmscan_wakeup_kswapd,
 		__field(	int,	nid		)
 		__field(	int,	zid		)
 		__field(	int,	order		)
-		__field(	gfp_t,	gfp_flags	)
+		__field(	unsigned long,	gfp_flags	)
 	),
 
 	TP_fast_assign(
 		__entry->nid		= nid;
 		__entry->zid		= zid;
 		__entry->order		= order;
-		__entry->gfp_flags	= gfp_flags;
+		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
 	),
 
 	TP_printk("nid=%d order=%d gfp_flags=%s",
@@ -120,12 +120,12 @@ DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_begin_template,
 
 	TP_STRUCT__entry(
 		__field(	int,	order		)
-		__field(	gfp_t,	gfp_flags	)
+		__field(	unsigned long,	gfp_flags	)
 	),
 
 	TP_fast_assign(
 		__entry->order		= order;
-		__entry->gfp_flags	= gfp_flags;
+		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
 	),
 
 	TP_printk("order=%d gfp_flags=%s",
@@ -210,7 +210,7 @@ TRACE_EVENT(mm_shrink_slab_start,
 		__field(void *, shrink)
 		__field(int, nid)
 		__field(long, nr_objects_to_shrink)
-		__field(gfp_t, gfp_flags)
+		__field(unsigned long, gfp_flags)
 		__field(unsigned long, cache_items)
 		__field(unsigned long long, delta)
 		__field(unsigned long, total_scan)
@@ -222,7 +222,7 @@ TRACE_EVENT(mm_shrink_slab_start,
 		__entry->shrink = shr->scan_objects;
 		__entry->nid = sc->nid;
 		__entry->nr_objects_to_shrink = nr_objects_to_shrink;
-		__entry->gfp_flags = sc->gfp_mask;
+		__entry->gfp_flags = (__force unsigned long)sc->gfp_mask;
 		__entry->cache_items = cache_items;
 		__entry->delta = delta;
 		__entry->total_scan = total_scan;
@@ -446,13 +446,13 @@ TRACE_EVENT(mm_vmscan_node_reclaim_begin,
 	TP_STRUCT__entry(
 		__field(int, nid)
 		__field(int, order)
-		__field(gfp_t, gfp_flags)
+		__field(unsigned long, gfp_flags)
 	),
 
 	TP_fast_assign(
 		__entry->nid = nid;
 		__entry->order = order;
-		__entry->gfp_flags = gfp_flags;
+		__entry->gfp_flags = (__force unsigned long)gfp_flags;
 	),
 
 	TP_printk("nid=%d order=%d gfp_flags=%s",
diff --git a/mm/compaction.c b/mm/compaction.c
index b4e94cda3019..746dafa4e6eb 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2558,7 +2558,7 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
 		unsigned int alloc_flags, const struct alloc_context *ac,
 		enum compact_priority prio, struct page **capture)
 {
-	int may_perform_io = gfp_mask & __GFP_IO;
+	int may_perform_io = (__force int)(gfp_mask & __GFP_IO);
 	struct zoneref *z;
 	struct zone *zone;
 	enum compact_result rc = COMPACT_SKIPPED;
-- 
2.31.1



* Re: [PATCH mm] tracing: incorrect gfp_t conversion
  2022-05-07 19:02     ` [PATCH mm] tracing: incorrect gfp_t conversion Vasily Averin
@ 2022-05-07 19:37       ` Andrew Morton
  2022-05-07 22:28         ` Vasily Averin
  0 siblings, 1 reply; 22+ messages in thread
From: Andrew Morton @ 2022-05-07 19:37 UTC (permalink / raw)
  To: Vasily Averin; +Cc: Steven Rostedt, Ingo Molnar, kernel, linux-kernel, linux-mm

On Sat, 7 May 2022 22:02:05 +0300 Vasily Averin <vvs@openvz.org> wrote:

> Fixes the following sparse warnings:
> 
> include/trace/events/*: sparse: cast to restricted gfp_t
> include/trace/events/*: sparse: restricted gfp_t degrades to integer
> 
> gfp_t type is bitwise and requires __force attributes for any casts.
> 
> --- a/include/trace/events/mmflags.h
> +++ b/include/trace/events/mmflags.h
> @@ -14,43 +14,43 @@
>   */
>  
>  #define __def_gfpflag_names						\
> -	{(unsigned long)GFP_TRANSHUGE,		"GFP_TRANSHUGE"},	\
> -	{(unsigned long)GFP_TRANSHUGE_LIGHT,	"GFP_TRANSHUGE_LIGHT"}, \
> -	{(unsigned long)GFP_HIGHUSER_MOVABLE,	"GFP_HIGHUSER_MOVABLE"},\
> -	{(unsigned long)GFP_HIGHUSER,		"GFP_HIGHUSER"},	\
> -	{(unsigned long)GFP_USER,		"GFP_USER"},		\
> -	{(unsigned long)GFP_KERNEL_ACCOUNT,	"GFP_KERNEL_ACCOUNT"},	\
> -	{(unsigned long)__GFP_SKIP_KASAN_POISON,"__GFP_SKIP_KASAN_POISON"}\
>
> ...
>
> +	{(__force unsigned long)GFP_TRANSHUGE,		"GFP_TRANSHUGE"},	\
> +	{(__force unsigned long)GFP_TRANSHUGE_LIGHT,	"GFP_TRANSHUGE_LIGHT"}, \
> +	{(__force unsigned long)GFP_HIGHUSER_MOVABLE,	"GFP_HIGHUSER_MOVABLE"},\
> +	{(__force unsigned long)GFP_HIGHUSER,		"GFP_HIGHUSER"},	\
> +	{(__force unsigned long)GFP_USER,		"GFP_USER"},		\
> +	{(__force unsigned long)GFP_KERNEL_ACCOUNT,	"GFP_KERNEL_ACCOUNT"},	\
> +	{(__force unsigned long)GFP_KERNEL,		"GFP_KERNEL"},		\
> +	{(__force unsigned long)GFP_NOFS,		"GFP_NOFS"},		\

This got all repetitive, line-wrappy and ugly :(

What do we think of something silly like this?



--- a/include/trace/events/mmflags.h~tracing-incorrect-gfp_t-conversion-fix
+++ a/include/trace/events/mmflags.h
@@ -13,53 +13,57 @@
  * Thus most bits set go first.
  */
 
+#define FUL __force unsigned long
+
 #define __def_gfpflag_names						\
-	{(__force unsigned long)GFP_TRANSHUGE,		"GFP_TRANSHUGE"},	\
-	{(__force unsigned long)GFP_TRANSHUGE_LIGHT,	"GFP_TRANSHUGE_LIGHT"}, \
-	{(__force unsigned long)GFP_HIGHUSER_MOVABLE,	"GFP_HIGHUSER_MOVABLE"},\
-	{(__force unsigned long)GFP_HIGHUSER,		"GFP_HIGHUSER"},	\
-	{(__force unsigned long)GFP_USER,		"GFP_USER"},		\
-	{(__force unsigned long)GFP_KERNEL_ACCOUNT,	"GFP_KERNEL_ACCOUNT"},	\
-	{(__force unsigned long)GFP_KERNEL,		"GFP_KERNEL"},		\
-	{(__force unsigned long)GFP_NOFS,		"GFP_NOFS"},		\
-	{(__force unsigned long)GFP_ATOMIC,		"GFP_ATOMIC"},		\
-	{(__force unsigned long)GFP_NOIO,		"GFP_NOIO"},		\
-	{(__force unsigned long)GFP_NOWAIT,		"GFP_NOWAIT"},		\
-	{(__force unsigned long)GFP_DMA,		"GFP_DMA"},		\
-	{(__force unsigned long)__GFP_HIGHMEM,		"__GFP_HIGHMEM"},	\
-	{(__force unsigned long)GFP_DMA32,		"GFP_DMA32"},		\
-	{(__force unsigned long)__GFP_HIGH,		"__GFP_HIGH"},		\
-	{(__force unsigned long)__GFP_ATOMIC,		"__GFP_ATOMIC"},	\
-	{(__force unsigned long)__GFP_IO,		"__GFP_IO"},		\
-	{(__force unsigned long)__GFP_FS,		"__GFP_FS"},		\
-	{(__force unsigned long)__GFP_NOWARN,		"__GFP_NOWARN"},	\
-	{(__force unsigned long)__GFP_RETRY_MAYFAIL,	"__GFP_RETRY_MAYFAIL"},	\
-	{(__force unsigned long)__GFP_NOFAIL,		"__GFP_NOFAIL"},	\
-	{(__force unsigned long)__GFP_NORETRY,		"__GFP_NORETRY"},	\
-	{(__force unsigned long)__GFP_COMP,		"__GFP_COMP"},		\
-	{(__force unsigned long)__GFP_ZERO,		"__GFP_ZERO"},		\
-	{(__force unsigned long)__GFP_NOMEMALLOC,	"__GFP_NOMEMALLOC"},	\
-	{(__force unsigned long)__GFP_MEMALLOC,		"__GFP_MEMALLOC"},	\
-	{(__force unsigned long)__GFP_HARDWALL,		"__GFP_HARDWALL"},	\
-	{(__force unsigned long)__GFP_THISNODE,		"__GFP_THISNODE"},	\
-	{(__force unsigned long)__GFP_RECLAIMABLE,	"__GFP_RECLAIMABLE"},	\
-	{(__force unsigned long)__GFP_MOVABLE,		"__GFP_MOVABLE"},	\
-	{(__force unsigned long)__GFP_ACCOUNT,		"__GFP_ACCOUNT"},	\
-	{(__force unsigned long)__GFP_WRITE,		"__GFP_WRITE"},		\
-	{(__force unsigned long)__GFP_RECLAIM,		"__GFP_RECLAIM"},	\
-	{(__force unsigned long)__GFP_DIRECT_RECLAIM,	"__GFP_DIRECT_RECLAIM"},\
-	{(__force unsigned long)__GFP_KSWAPD_RECLAIM,	"__GFP_KSWAPD_RECLAIM"},\
-	{(__force unsigned long)__GFP_ZEROTAGS,		"__GFP_ZEROTAGS"}	\
+	{(FUL)GFP_TRANSHUGE,		"GFP_TRANSHUGE"},	\
+	{(FUL)GFP_TRANSHUGE_LIGHT,	"GFP_TRANSHUGE_LIGHT"}, \
+	{(FUL)GFP_HIGHUSER_MOVABLE,	"GFP_HIGHUSER_MOVABLE"},\
+	{(FUL)GFP_HIGHUSER,		"GFP_HIGHUSER"},	\
+	{(FUL)GFP_USER,		"GFP_USER"},		\
+	{(FUL)GFP_KERNEL_ACCOUNT,	"GFP_KERNEL_ACCOUNT"},	\
+	{(FUL)GFP_KERNEL,		"GFP_KERNEL"},		\
+	{(FUL)GFP_NOFS,		"GFP_NOFS"},		\
+	{(FUL)GFP_ATOMIC,		"GFP_ATOMIC"},		\
+	{(FUL)GFP_NOIO,		"GFP_NOIO"},		\
+	{(FUL)GFP_NOWAIT,		"GFP_NOWAIT"},		\
+	{(FUL)GFP_DMA,		"GFP_DMA"},		\
+	{(FUL)__GFP_HIGHMEM,		"__GFP_HIGHMEM"},	\
+	{(FUL)GFP_DMA32,		"GFP_DMA32"},		\
+	{(FUL)__GFP_HIGH,		"__GFP_HIGH"},		\
+	{(FUL)__GFP_ATOMIC,		"__GFP_ATOMIC"},	\
+	{(FUL)__GFP_IO,		"__GFP_IO"},		\
+	{(FUL)__GFP_FS,		"__GFP_FS"},		\
+	{(FUL)__GFP_NOWARN,		"__GFP_NOWARN"},	\
+	{(FUL)__GFP_RETRY_MAYFAIL,	"__GFP_RETRY_MAYFAIL"},	\
+	{(FUL)__GFP_NOFAIL,		"__GFP_NOFAIL"},	\
+	{(FUL)__GFP_NORETRY,		"__GFP_NORETRY"},	\
+	{(FUL)__GFP_COMP,		"__GFP_COMP"},		\
+	{(FUL)__GFP_ZERO,		"__GFP_ZERO"},		\
+	{(FUL)__GFP_NOMEMALLOC,	"__GFP_NOMEMALLOC"},	\
+	{(FUL)__GFP_MEMALLOC,		"__GFP_MEMALLOC"},	\
+	{(FUL)__GFP_HARDWALL,		"__GFP_HARDWALL"},	\
+	{(FUL)__GFP_THISNODE,		"__GFP_THISNODE"},	\
+	{(FUL)__GFP_RECLAIMABLE,	"__GFP_RECLAIMABLE"},	\
+	{(FUL)__GFP_MOVABLE,		"__GFP_MOVABLE"},	\
+	{(FUL)__GFP_ACCOUNT,		"__GFP_ACCOUNT"},	\
+	{(FUL)__GFP_WRITE,		"__GFP_WRITE"},		\
+	{(FUL)__GFP_RECLAIM,		"__GFP_RECLAIM"},	\
+	{(FUL)__GFP_DIRECT_RECLAIM,	"__GFP_DIRECT_RECLAIM"},\
+	{(FUL)__GFP_KSWAPD_RECLAIM,	"__GFP_KSWAPD_RECLAIM"},\
+	{(FUL)__GFP_ZEROTAGS,		"__GFP_ZEROTAGS"}	\
 
 #ifdef CONFIG_KASAN_HW_TAGS
 #define __def_gfpflag_names_kasan ,					       \
-	{(__force unsigned long)__GFP_SKIP_ZERO,	   "__GFP_SKIP_ZERO"},	       \
-	{(__force unsigned long)__GFP_SKIP_KASAN_POISON,   "__GFP_SKIP_KASAN_POISON"}, \
-	{(__force unsigned long)__GFP_SKIP_KASAN_UNPOISON, "__GFP_SKIP_KASAN_UNPOISON"}
+	{(FUL)__GFP_SKIP_ZERO,	   "__GFP_SKIP_ZERO"},	       \
+	{(FUL)__GFP_SKIP_KASAN_POISON,   "__GFP_SKIP_KASAN_POISON"}, \
+	{(FUL)__GFP_SKIP_KASAN_UNPOISON, "__GFP_SKIP_KASAN_UNPOISON"}
 #else
 #define __def_gfpflag_names_kasan
 #endif
 
+#undef FUL
+
 #define show_gfp_flags(flags)						\
 	(flags) ? __print_flags(flags, "|",				\
 	__def_gfpflag_names __def_gfpflag_names_kasan			\
_



* Re: [PATCH mm] tracing: incorrect gfp_t conversion
  2022-05-07 19:37       ` Andrew Morton
@ 2022-05-07 22:28         ` Vasily Averin
  2022-05-07 22:48           ` Andrew Morton
  0 siblings, 1 reply; 22+ messages in thread
From: Vasily Averin @ 2022-05-07 22:28 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Steven Rostedt, Ingo Molnar, kernel, linux-kernel, linux-mm

On 5/7/22 22:37, Andrew Morton wrote:
> On Sat, 7 May 2022 22:02:05 +0300 Vasily Averin <vvs@openvz.org> wrote:
>> +	{(__force unsigned long)GFP_KERNEL,		"GFP_KERNEL"},		\
>> +	{(__force unsigned long)GFP_NOFS,		"GFP_NOFS"},		\
> 
> This got all repetitive, line-wrappy and ugly :(
> 
> What do we think of something silly like this?

> --- a/include/trace/events/mmflags.h~tracing-incorrect-gfp_t-conversion-fix
> +++ a/include/trace/events/mmflags.h
> @@ -13,53 +13,57 @@
>   * Thus most bits set go first.
>   */
>  
> +#define FUL __force unsigned long
> +
>  #define __def_gfpflag_names						\
> -	{(__force unsigned long)GFP_TRANSHUGE,		"GFP_TRANSHUGE"},	\
> -	{(__force unsigned long)GFP_TRANSHUGE_LIGHT,	"GFP_TRANSHUGE_LIGHT"}, \
...
> +	{(FUL)GFP_TRANSHUGE,		"GFP_TRANSHUGE"},	\
> +	{(FUL)GFP_TRANSHUGE_LIGHT,	"GFP_TRANSHUGE_LIGHT"}, \


I think it's a good idea, and I regret it was your idea and not mine.

Should I resend my patch with these changes or would you prefer 
to keep your patch as a separate one?

Thank you,
	Vasily Averin


* Re: [PATCH mm] tracing: incorrect gfp_t conversion
  2022-05-07 22:28         ` Vasily Averin
@ 2022-05-07 22:48           ` Andrew Morton
  2022-05-07 23:00             ` Andrew Morton
  2022-05-08 20:51             ` Joe Perches
  0 siblings, 2 replies; 22+ messages in thread
From: Andrew Morton @ 2022-05-07 22:48 UTC (permalink / raw)
  To: Vasily Averin; +Cc: Steven Rostedt, Ingo Molnar, kernel, linux-kernel, linux-mm

On Sun, 8 May 2022 01:28:58 +0300 Vasily Averin <vvs@openvz.org> wrote:

> On 5/7/22 22:37, Andrew Morton wrote:
> > On Sat, 7 May 2022 22:02:05 +0300 Vasily Averin <vvs@openvz.org> wrote:
> >> +	{(__force unsigned long)GFP_KERNEL,		"GFP_KERNEL"},		\
> >> +	{(__force unsigned long)GFP_NOFS,		"GFP_NOFS"},		\
> > 
> > This got all repetitive, line-wrappy and ugly :(
> > 
> > What do we think of something silly like this?
> 
> > --- a/include/trace/events/mmflags.h~tracing-incorrect-gfp_t-conversion-fix
> > +++ a/include/trace/events/mmflags.h
> > @@ -13,53 +13,57 @@
> >   * Thus most bits set go first.
> >   */
> >  
> > +#define FUL __force unsigned long
> > +
> >  #define __def_gfpflag_names						\
> > -	{(__force unsigned long)GFP_TRANSHUGE,		"GFP_TRANSHUGE"},	\
> > -	{(__force unsigned long)GFP_TRANSHUGE_LIGHT,	"GFP_TRANSHUGE_LIGHT"}, \
> ...
> > +	{(FUL)GFP_TRANSHUGE,		"GFP_TRANSHUGE"},	\
> > +	{(FUL)GFP_TRANSHUGE_LIGHT,	"GFP_TRANSHUGE_LIGHT"}, \
> 
> 
> I think it's a good idea, and I regret it was your idea and not mine.

heh

> Should I resend my patch with these changes or would you prefer 
> to keep your patch as a separate one?

I did the below.  I'll squash them together later.


--- a/include/trace/events/mmflags.h~tracing-incorrect-gfp_t-conversion-fix
+++ a/include/trace/events/mmflags.h
@@ -13,53 +13,57 @@
  * Thus most bits set go first.
  */
 
+#define FUL __force unsigned long
+
 #define __def_gfpflag_names						\
-	{(__force unsigned long)GFP_TRANSHUGE,		"GFP_TRANSHUGE"},	\
-	{(__force unsigned long)GFP_TRANSHUGE_LIGHT,	"GFP_TRANSHUGE_LIGHT"}, \
-	{(__force unsigned long)GFP_HIGHUSER_MOVABLE,	"GFP_HIGHUSER_MOVABLE"},\
-	{(__force unsigned long)GFP_HIGHUSER,		"GFP_HIGHUSER"},	\
-	{(__force unsigned long)GFP_USER,		"GFP_USER"},		\
-	{(__force unsigned long)GFP_KERNEL_ACCOUNT,	"GFP_KERNEL_ACCOUNT"},	\
-	{(__force unsigned long)GFP_KERNEL,		"GFP_KERNEL"},		\
-	{(__force unsigned long)GFP_NOFS,		"GFP_NOFS"},		\
-	{(__force unsigned long)GFP_ATOMIC,		"GFP_ATOMIC"},		\
-	{(__force unsigned long)GFP_NOIO,		"GFP_NOIO"},		\
-	{(__force unsigned long)GFP_NOWAIT,		"GFP_NOWAIT"},		\
-	{(__force unsigned long)GFP_DMA,		"GFP_DMA"},		\
-	{(__force unsigned long)__GFP_HIGHMEM,		"__GFP_HIGHMEM"},	\
-	{(__force unsigned long)GFP_DMA32,		"GFP_DMA32"},		\
-	{(__force unsigned long)__GFP_HIGH,		"__GFP_HIGH"},		\
-	{(__force unsigned long)__GFP_ATOMIC,		"__GFP_ATOMIC"},	\
-	{(__force unsigned long)__GFP_IO,		"__GFP_IO"},		\
-	{(__force unsigned long)__GFP_FS,		"__GFP_FS"},		\
-	{(__force unsigned long)__GFP_NOWARN,		"__GFP_NOWARN"},	\
-	{(__force unsigned long)__GFP_RETRY_MAYFAIL,	"__GFP_RETRY_MAYFAIL"},	\
-	{(__force unsigned long)__GFP_NOFAIL,		"__GFP_NOFAIL"},	\
-	{(__force unsigned long)__GFP_NORETRY,		"__GFP_NORETRY"},	\
-	{(__force unsigned long)__GFP_COMP,		"__GFP_COMP"},		\
-	{(__force unsigned long)__GFP_ZERO,		"__GFP_ZERO"},		\
-	{(__force unsigned long)__GFP_NOMEMALLOC,	"__GFP_NOMEMALLOC"},	\
-	{(__force unsigned long)__GFP_MEMALLOC,		"__GFP_MEMALLOC"},	\
-	{(__force unsigned long)__GFP_HARDWALL,		"__GFP_HARDWALL"},	\
-	{(__force unsigned long)__GFP_THISNODE,		"__GFP_THISNODE"},	\
-	{(__force unsigned long)__GFP_RECLAIMABLE,	"__GFP_RECLAIMABLE"},	\
-	{(__force unsigned long)__GFP_MOVABLE,		"__GFP_MOVABLE"},	\
-	{(__force unsigned long)__GFP_ACCOUNT,		"__GFP_ACCOUNT"},	\
-	{(__force unsigned long)__GFP_WRITE,		"__GFP_WRITE"},		\
-	{(__force unsigned long)__GFP_RECLAIM,		"__GFP_RECLAIM"},	\
-	{(__force unsigned long)__GFP_DIRECT_RECLAIM,	"__GFP_DIRECT_RECLAIM"},\
-	{(__force unsigned long)__GFP_KSWAPD_RECLAIM,	"__GFP_KSWAPD_RECLAIM"},\
-	{(__force unsigned long)__GFP_ZEROTAGS,		"__GFP_ZEROTAGS"}	\
+	{(FUL)GFP_TRANSHUGE,		"GFP_TRANSHUGE"},		\
+	{(FUL)GFP_TRANSHUGE_LIGHT,	"GFP_TRANSHUGE_LIGHT"}, 	\
+	{(FUL)GFP_HIGHUSER_MOVABLE,	"GFP_HIGHUSER_MOVABLE"},	\
+	{(FUL)GFP_HIGHUSER,		"GFP_HIGHUSER"},		\
+	{(FUL)GFP_USER,			"GFP_USER"},			\
+	{(FUL)GFP_KERNEL_ACCOUNT,	"GFP_KERNEL_ACCOUNT"},		\
+	{(FUL)GFP_KERNEL,		"GFP_KERNEL"},			\
+	{(FUL)GFP_NOFS,			"GFP_NOFS"},			\
+	{(FUL)GFP_ATOMIC,		"GFP_ATOMIC"},			\
+	{(FUL)GFP_NOIO,			"GFP_NOIO"},			\
+	{(FUL)GFP_NOWAIT,		"GFP_NOWAIT"},			\
+	{(FUL)GFP_DMA,			"GFP_DMA"},			\
+	{(FUL)__GFP_HIGHMEM,		"__GFP_HIGHMEM"},		\
+	{(FUL)GFP_DMA32,		"GFP_DMA32"},			\
+	{(FUL)__GFP_HIGH,		"__GFP_HIGH"},			\
+	{(FUL)__GFP_ATOMIC,		"__GFP_ATOMIC"},		\
+	{(FUL)__GFP_IO,			"__GFP_IO"},			\
+	{(FUL)__GFP_FS,			"__GFP_FS"},			\
+	{(FUL)__GFP_NOWARN,		"__GFP_NOWARN"},		\
+	{(FUL)__GFP_RETRY_MAYFAIL,	"__GFP_RETRY_MAYFAIL"},		\
+	{(FUL)__GFP_NOFAIL,		"__GFP_NOFAIL"},		\
+	{(FUL)__GFP_NORETRY,		"__GFP_NORETRY"},		\
+	{(FUL)__GFP_COMP,		"__GFP_COMP"},			\
+	{(FUL)__GFP_ZERO,		"__GFP_ZERO"},			\
+	{(FUL)__GFP_NOMEMALLOC,		"__GFP_NOMEMALLOC"},		\
+	{(FUL)__GFP_MEMALLOC,		"__GFP_MEMALLOC"},		\
+	{(FUL)__GFP_HARDWALL,		"__GFP_HARDWALL"},		\
+	{(FUL)__GFP_THISNODE,		"__GFP_THISNODE"},		\
+	{(FUL)__GFP_RECLAIMABLE,	"__GFP_RECLAIMABLE"},		\
+	{(FUL)__GFP_MOVABLE,		"__GFP_MOVABLE"},		\
+	{(FUL)__GFP_ACCOUNT,		"__GFP_ACCOUNT"},		\
+	{(FUL)__GFP_WRITE,		"__GFP_WRITE"},			\
+	{(FUL)__GFP_RECLAIM,		"__GFP_RECLAIM"},		\
+	{(FUL)__GFP_DIRECT_RECLAIM,	"__GFP_DIRECT_RECLAIM"},	\
+	{(FUL)__GFP_KSWAPD_RECLAIM,	"__GFP_KSWAPD_RECLAIM"},	\
+	{(FUL)__GFP_ZEROTAGS,		"__GFP_ZEROTAGS"}		\
 
 #ifdef CONFIG_KASAN_HW_TAGS
-#define __def_gfpflag_names_kasan ,					       \
-	{(__force unsigned long)__GFP_SKIP_ZERO,	   "__GFP_SKIP_ZERO"},	       \
-	{(__force unsigned long)__GFP_SKIP_KASAN_POISON,   "__GFP_SKIP_KASAN_POISON"}, \
-	{(__force unsigned long)__GFP_SKIP_KASAN_UNPOISON, "__GFP_SKIP_KASAN_UNPOISON"}
+#define __def_gfpflag_names_kasan ,					\
+	{(FUL)__GFP_SKIP_ZERO,		"__GFP_SKIP_ZERO"},		\
+	{(FUL)__GFP_SKIP_KASAN_POISON,	"__GFP_SKIP_KASAN_POISON"},	\
+	{(FUL)__GFP_SKIP_KASAN_UNPOISON,"__GFP_SKIP_KASAN_UNPOISON"}
 #else
 #define __def_gfpflag_names_kasan
 #endif
 
+#undef FUL
+
 #define show_gfp_flags(flags)						\
 	(flags) ? __print_flags(flags, "|",				\
 	__def_gfpflag_names __def_gfpflag_names_kasan			\
_



* Re: [PATCH mm] tracing: incorrect gfp_t conversion
  2022-05-07 22:48           ` Andrew Morton
@ 2022-05-07 23:00             ` Andrew Morton
  2022-05-08 20:37               ` Matthew Wilcox
  2022-05-08 20:51             ` Joe Perches
  1 sibling, 1 reply; 22+ messages in thread
From: Andrew Morton @ 2022-05-07 23:00 UTC (permalink / raw)
  To: Vasily Averin, Steven Rostedt, Ingo Molnar, kernel, linux-kernel,
	linux-mm

On Sat, 7 May 2022 15:48:35 -0700 Andrew Morton <akpm@linux-foundation.org> wrote:

> I did the below.
> 

Silly me, doesn't work.

> 
> --- a/include/trace/events/mmflags.h~tracing-incorrect-gfp_t-conversion-fix
> +++ a/include/trace/events/mmflags.h
> @@ -13,53 +13,57 @@
>   * Thus most bits set go first.
>   */
>  
> +#define FUL __force unsigned long
> +
>  #define __def_gfpflag_names						\
> -	{(__force unsigned long)GFP_TRANSHUGE,		"GFP_TRANSHUGE"},	\

Can't expand FUL here within the macro definition.
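
Presumably because __def_gfpflag_names is itself only expanded at the
show_gfp_flags() use sites, by which point the "#undef FUL" at the end
of the header has already removed the helper.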


* Re: [PATCH mm] tracing: incorrect gfp_t conversion
  2022-05-07 23:00             ` Andrew Morton
@ 2022-05-08 20:37               ` Matthew Wilcox
  0 siblings, 0 replies; 22+ messages in thread
From: Matthew Wilcox @ 2022-05-08 20:37 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Vasily Averin, Steven Rostedt, Ingo Molnar, kernel, linux-kernel,
	linux-mm

On Sat, May 07, 2022 at 04:00:10PM -0700, Andrew Morton wrote:
> On Sat, 7 May 2022 15:48:35 -0700 Andrew Morton <akpm@linux-foundation.org> wrote:
> 
> > I did the below.
> > 
> 
> Silly me, doesn't work.
> 
> > 
> > --- a/include/trace/events/mmflags.h~tracing-incorrect-gfp_t-conversion-fix
> > +++ a/include/trace/events/mmflags.h
> > @@ -13,53 +13,57 @@
> >   * Thus most bits set go first.
> >   */
> >  
> > +#define FUL __force unsigned long
> > +
> >  #define __def_gfpflag_names						\
> > -	{(__force unsigned long)GFP_TRANSHUGE,		"GFP_TRANSHUGE"},	\
> 
> Can't expand FUL here within the macro definition.

Can we do something even better?

#define GFP_NAME(flag) { (__force unsigned long)flag, #flag },

... with one or more layers of indirection to satisfy the arcane
rules of C macros?


* Re: [PATCH mm] tracing: incorrect gfp_t conversion
  2022-05-07 22:48           ` Andrew Morton
  2022-05-07 23:00             ` Andrew Morton
@ 2022-05-08 20:51             ` Joe Perches
  2022-05-11  7:20               ` [PATCH mm v2] " Vasily Averin
  1 sibling, 1 reply; 22+ messages in thread
From: Joe Perches @ 2022-05-08 20:51 UTC (permalink / raw)
  To: Andrew Morton, Vasily Averin
  Cc: Steven Rostedt, Ingo Molnar, kernel, linux-kernel, linux-mm

On Sat, 2022-05-07 at 15:48 -0700, Andrew Morton wrote:
> On Sun, 8 May 2022 01:28:58 +0300 Vasily Averin <vvs@openvz.org> wrote:
> 
> > On 5/7/22 22:37, Andrew Morton wrote:
> > > On Sat, 7 May 2022 22:02:05 +0300 Vasily Averin <vvs@openvz.org> wrote:
> > > > +	{(__force unsigned long)GFP_KERNEL,		"GFP_KERNEL"},		\
> > > > +	{(__force unsigned long)GFP_NOFS,		"GFP_NOFS"},		\
> > > 
> > > This got all repetitive, line-wrappy and ugly :(
> > > 
> > > What do we think of something silly like this?
> > 
> > > --- a/include/trace/events/mmflags.h~tracing-incorrect-gfp_t-conversion-fix
> > > +++ a/include/trace/events/mmflags.h
> > > @@ -13,53 +13,57 @@
> > >   * Thus most bits set go first.
> > >   */
> > >  
> > > +#define FUL __force unsigned long
> > > +
> > >  #define __def_gfpflag_names						\
> > > -	{(__force unsigned long)GFP_TRANSHUGE,		"GFP_TRANSHUGE"},	\
> > > -	{(__force unsigned long)GFP_TRANSHUGE_LIGHT,	"GFP_TRANSHUGE_LIGHT"}, \
> > ...
> > > +	{(FUL)GFP_TRANSHUGE,		"GFP_TRANSHUGE"},	\
> > > +	{(FUL)GFP_TRANSHUGE_LIGHT,	"GFP_TRANSHUGE_LIGHT"}, \
> > 
> > 
> > I think it's a good idea, and I regret it was your idea and not mine.
> 
> heh
> 
> > Should I resend my patch with these changes or would you prefer 
> > to keep your patch as a separate one?
> 
> I did the below.  I'll squash them together later.

Very repetitive indeed.

Why not use another stringifying macro?

Maybe something like:

#define gfpflag_string(GFP)	\
	{(__force unsigned long)GFP, #GFP}

#define __def_gfpflag_names			\
	gfpflag_string(GFP_TRANSHUGE),		\
	etc...
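
No extra indirection layer appears necessary here, since '#' stringifies
the argument tokens as written instead of macro-expanding them, which is
exactly what we want for the flag names. For example:

	gfpflag_string(GFP_KERNEL)
	/* expands to: */
	{(__force unsigned long)GFP_KERNEL, "GFP_KERNEL"}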




* Re: [PATCH] percpu: improve percpu_alloc_percpu event trace
  2022-05-07 14:51   ` Vasily Averin
  2022-05-07 19:02     ` [PATCH mm] tracing: incorrect gfp_t conversion Vasily Averin
@ 2022-05-09 21:06     ` Steven Rostedt
  2022-05-10  4:22       ` Vasily Averin
  1 sibling, 1 reply; 22+ messages in thread
From: Steven Rostedt @ 2022-05-09 21:06 UTC (permalink / raw)
  To: Vasily Averin
  Cc: kernel test robot, Ingo Molnar, kbuild-all, Shakeel Butt, kernel,
	linux-kernel, Roman Gushchin, Vlastimil Babka, Michal Hocko,
	cgroups, Andrew Morton, Linux Memory Management List,
	Dennis Zhou, Tejun Heo, Christoph Lameter

On Sat, 7 May 2022 17:51:16 +0300
Vasily Averin <vvs@openvz.org> wrote:

> The same messages are generated for any other gfp_t argument in trace events.
> As far as I understand it is not a bug per se,
> but the trace macros lack the __force attribute in 'gfp_t' -> 'unsigned long' casts.
> The same thing happens with mode_t and in some other places that use __print_flags()
> for __bitwise-marked types.

I'm curious as to where the gfp_t to unsigned long is happening in the
macros?

-- Steve

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] percpu: improve percpu_alloc_percpu event trace
  2022-05-09 21:06     ` [PATCH] percpu: improve percpu_alloc_percpu event trace Steven Rostedt
@ 2022-05-10  4:22       ` Vasily Averin
  2022-05-10 14:16         ` Steven Rostedt
  0 siblings, 1 reply; 22+ messages in thread
From: Vasily Averin @ 2022-05-10  4:22 UTC (permalink / raw)
  To: Steven Rostedt
  Cc: kernel test robot, Ingo Molnar, kbuild-all, Shakeel Butt, kernel,
	linux-kernel, Roman Gushchin, Vlastimil Babka, Michal Hocko,
	cgroups, Andrew Morton, Linux Memory Management List,
	Dennis Zhou, Tejun Heo, Christoph Lameter

On 5/10/22 00:06, Steven Rostedt wrote:
> I'm curious as to where the gfp_t to unsigned long is happening in the
> macros?

The original ___GFP_* flags are plain defines:

/* Plain integer GFP bitmasks. Do not use this directly. */
#define ___GFP_DMA              0x01u
#define ___GFP_HIGHMEM          0x02u
#define ___GFP_DMA32            0x04u

... but the __GFP_* flags used elsewhere are declared with a '__force gfp_t' cast:

#define __GFP_DMA       ((__force gfp_t)___GFP_DMA)
#define __GFP_HIGHMEM   ((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32     ((__force gfp_t)___GFP_DMA32)
...
#define GFP_DMA         __GFP_DMA
#define GFP_DMA32       __GFP_DMA32

... and when __def_gfpflag_names() translates them to unsigned long

       {(unsigned long)GFP_DMA,                "GFP_DMA"},             \
       {(unsigned long)__GFP_HIGHMEM,          "__GFP_HIGHMEM"},       \
       {(unsigned long)GFP_DMA32,              "GFP_DMA32"},           \

... it leads to sparse warnings because the gfp_t type was declared as 'bitwise'.
From the sparse man page:

       -Wbitwise
              Warn about unsupported operations or type mismatches with
              restricted integer types.

               Sparse supports an extended attribute,
              __attribute__((bitwise)), which creates a new restricted
              integer type from a base integer type, distinct from the
              base integer type and from any other restricted integer
              type not declared in the same declaration or typedef.

             __bitwise is for *unique types* that cannot be mixed with
              other types, and that you'd never want to just use as a
              random integer (the integer 0 is special, though, and gets
              silently accepted iirc - it's kind of like "NULL" for
              pointers). So "gfp_t" or the "safe endianness" types would
              be __bitwise: you can only operate on them by doing
              specific operations that know about *that* particular
              type.

              Sparse issues these warnings by default.
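
To see the mechanics in isolation, here is a rough userspace reduction
(sparse defines __CHECKER__ when it runs; the kernel's definitions in
compiler_types.h are close to this, and the flag value below is
illustrative):

	#ifdef __CHECKER__
	#define __bitwise	__attribute__((bitwise))
	#define __force		__attribute__((force))
	#else
	#define __bitwise
	#define __force
	#endif

	typedef unsigned int __bitwise gfp_t;

	#define __GFP_IO	((__force gfp_t)0x40u)

	unsigned long ok  = (__force unsigned long)__GFP_IO;	/* clean */
	unsigned long bad = (unsigned long)__GFP_IO;		/* cast from restricted gfp_t */

Running 'sparse test.c' warns only about the second cast.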

Thank you,
	Vasily Averin

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] percpu: improve percpu_alloc_percpu event trace
  2022-05-10  4:22       ` Vasily Averin
@ 2022-05-10 14:16         ` Steven Rostedt
  0 siblings, 0 replies; 22+ messages in thread
From: Steven Rostedt @ 2022-05-10 14:16 UTC (permalink / raw)
  To: Vasily Averin
  Cc: kernel test robot, Ingo Molnar, kbuild-all, Shakeel Butt, kernel,
	linux-kernel, Roman Gushchin, Vlastimil Babka, Michal Hocko,
	cgroups, Andrew Morton, Linux Memory Management List,
	Dennis Zhou, Tejun Heo, Christoph Lameter

On Tue, 10 May 2022 07:22:02 +0300
Vasily Averin <vvs@openvz.org> wrote:

> ... and when __def_gfpflag_names() translates them to unsigned long
> 
>        {(unsigned long)GFP_DMA,                "GFP_DMA"},             \
>        {(unsigned long)__GFP_HIGHMEM,          "__GFP_HIGHMEM"},       \
>        {(unsigned long)GFP_DMA32,              "GFP_DMA32"},           \
> 
> ... it leads to sparse warnings because the gfp_t type was declared as 'bitwise'.

Ah, it's the printing of the flag bits. Got it.

-- Steve

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH v2] percpu: improve percpu_alloc_percpu event trace
  2022-05-06 19:29   ` [PATCH v2] " Vasily Averin
@ 2022-05-11  2:33     ` Roman Gushchin
  2022-05-11  5:11       ` Vasily Averin
  2022-05-15 22:06       ` Steven Rostedt
  0 siblings, 2 replies; 22+ messages in thread
From: Roman Gushchin @ 2022-05-11  2:33 UTC (permalink / raw)
  To: Vasily Averin
  Cc: Shakeel Butt, Steven Rostedt, Ingo Molnar, kernel, linux-kernel,
	Vlastimil Babka, Michal Hocko, cgroups, Andrew Morton,
	Dennis Zhou, Tejun Heo, Christoph Lameter, linux-mm

On Fri, May 06, 2022 at 10:29:25PM +0300, Vasily Averin wrote:
> Added call_site, bytes_alloc and gfp_flags fields to the output
> of the percpu_alloc_percpu ftrace event:
> 
> mkdir-4393  [001]   169.334788: percpu_alloc_percpu:
>  call_site=mem_cgroup_css_alloc+0xa6 reserved=0 is_atomic=0 size=2408 align=8
>   base_addr=0xffffc7117fc00000 off=402176 ptr=0x3dc867a62300 bytes_alloc=14448
>    gfp_flags=GFP_KERNEL_ACCOUNT
> 
> This is required to track memcg-accounted percpu allocations.
> 
> Signed-off-by: Vasily Averin <vvs@openvz.org>

Acked-by: Roman Gushchin <roman.gushchin@linux.dev>

LGTM, thanks Vasily!

One minor thing below.

> ---
> v2: added call_site, improved patch description
> ---
>  include/trace/events/percpu.h | 23 +++++++++++++++++------
>  mm/percpu-internal.h          |  8 ++++----
>  mm/percpu.c                   |  5 +++--
>  3 files changed, 24 insertions(+), 12 deletions(-)
> 
> diff --git a/include/trace/events/percpu.h b/include/trace/events/percpu.h
> index df112a64f6c9..e989cefc0def 100644
> --- a/include/trace/events/percpu.h
> +++ b/include/trace/events/percpu.h
> @@ -6,15 +6,20 @@
>  #define _TRACE_PERCPU_H
>  
>  #include <linux/tracepoint.h>
> +#include <trace/events/mmflags.h>
>  
>  TRACE_EVENT(percpu_alloc_percpu,
>  
> -	TP_PROTO(bool reserved, bool is_atomic, size_t size,
> -		 size_t align, void *base_addr, int off, void __percpu *ptr),
> +	TP_PROTO(unsigned long call_site,
> +		 bool reserved, bool is_atomic, size_t size,
> +		 size_t align, void *base_addr, int off,
> +		 void __percpu *ptr, size_t bytes_alloc, gfp_t gfp_flags),

Don't we want to preserve the order and add the call_site at the end?
Trace events are not ABI, but if we don't have a strong reason to break it,
I'd preserve the old order.

Thanks!

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH v2] percpu: improve percpu_alloc_percpu event trace
  2022-05-11  2:33     ` Roman Gushchin
@ 2022-05-11  5:11       ` Vasily Averin
  2022-05-11 17:30         ` Roman Gushchin
  2022-05-15 22:06       ` Steven Rostedt
  1 sibling, 1 reply; 22+ messages in thread
From: Vasily Averin @ 2022-05-11  5:11 UTC (permalink / raw)
  To: Roman Gushchin
  Cc: Shakeel Butt, Steven Rostedt, Ingo Molnar, kernel, linux-kernel,
	Vlastimil Babka, Michal Hocko, cgroups, Andrew Morton,
	Dennis Zhou, Tejun Heo, Christoph Lameter, linux-mm

On 5/11/22 05:33, Roman Gushchin wrote:
> On Fri, May 06, 2022 at 10:29:25PM +0300, Vasily Averin wrote:
>>  TRACE_EVENT(percpu_alloc_percpu,
>>  
>> -	TP_PROTO(bool reserved, bool is_atomic, size_t size,
>> -		 size_t align, void *base_addr, int off, void __percpu *ptr),
>> +	TP_PROTO(unsigned long call_site,
>> +		 bool reserved, bool is_atomic, size_t size,
>> +		 size_t align, void *base_addr, int off,
>> +		 void __percpu *ptr, size_t bytes_alloc, gfp_t gfp_flags),
> 
> Don't we want to preserve the order and add the call_site at the end?
> Trace events are not ABI, but if we don't have a strong reason to break it,
> I'd preserve the old order.

I checked recent trace patches and found that changing the order is acceptable.

commit 8c39b8bc82aafcc8dd378bd79c76fac8e8a89c8d
Author: David Howells <dhowells@redhat.com>
Date:   Fri Jan 14 11:44:54 2022 +0000

    cachefiles: Make some tracepoint adjustments

-           TP_printk("o=%08x i=%lx e=%d",
-                     __entry->obj, __entry->ino, __entry->error)
+           TP_printk("o=%08x dB=%lx B=%lx e=%d",
+                     __entry->obj, __entry->dino, __entry->ino, __entry->error)

On the other hand, I agree to keep the old order by default;
that's why I added bytes_alloc and gfp_flags at the end of the output.
However, I think call_site is an exception: in all the cases I found,
call_site is output first.
For me personally it simplifies output parsing.

So I would like to know Steven's position on this question.

Thank you,
	Vasily Averin

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH mm v2] tracing: incorrect gfp_t conversion
  2022-05-08 20:51             ` Joe Perches
@ 2022-05-11  7:20               ` Vasily Averin
  2022-05-15 22:09                 ` Steven Rostedt
  0 siblings, 1 reply; 22+ messages in thread
From: Vasily Averin @ 2022-05-11  7:20 UTC (permalink / raw)
  To: Steven Rostedt, Ingo Molnar, Andrew Morton, Matthew Wilcox, Joe Perches
  Cc: kernel, linux-kernel, linux-mm

Fixes the following sparse warnings:

include/trace/events/*: sparse: cast to restricted gfp_t
include/trace/events/*: sparse: restricted gfp_t degrades to integer

gfp_t type is bitwise and requires __force attributes for any casts.

Signed-off-by:	Vasily Averin <vvs@openvz.org>
---
v2: 1) re-based to 5.18-rc6
    2) re-defined __def_gfpflag_names array according to
	akpm@, willy@ and Joe Perches recommendations
---
 include/linux/gfp.h               |  2 +-
 include/trace/events/btrfs.h      |  4 +-
 include/trace/events/compaction.h |  4 +-
 include/trace/events/kmem.h       | 12 ++---
 include/trace/events/mmflags.h    | 84 ++++++++++++++++---------------
 include/trace/events/vmscan.h     | 16 +++---
 mm/compaction.c                   |  2 +-
 7 files changed, 63 insertions(+), 61 deletions(-)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 3e3d36fc2109..ac6d750d512c 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -367,7 +367,7 @@ static inline int gfp_migratetype(const gfp_t gfp_flags)
 		return MIGRATE_UNMOVABLE;
 
 	/* Group based on mobility */
-	return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+	return (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
 }
 #undef GFP_MOVABLE_MASK
 #undef GFP_MOVABLE_SHIFT
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index f068ff30d654..ed92c80331ea 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -1344,13 +1344,13 @@ TRACE_EVENT(alloc_extent_state,
 
 	TP_STRUCT__entry(
 		__field(const struct extent_state *, state)
-		__field(gfp_t, mask)
+		__field(unsigned long, mask)
 		__field(const void*, ip)
 	),
 
 	TP_fast_assign(
 		__entry->state	= state,
-		__entry->mask	= mask,
+		__entry->mask	= (__force unsigned long)mask,
 		__entry->ip	= (const void *)IP
 	),
 
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
index c6d5d70dc7a5..3313eb83c117 100644
--- a/include/trace/events/compaction.h
+++ b/include/trace/events/compaction.h
@@ -162,13 +162,13 @@ TRACE_EVENT(mm_compaction_try_to_compact_pages,
 
 	TP_STRUCT__entry(
 		__field(int, order)
-		__field(gfp_t, gfp_mask)
+		__field(unsigned long, gfp_mask)
 		__field(int, prio)
 	),
 
 	TP_fast_assign(
 		__entry->order = order;
-		__entry->gfp_mask = gfp_mask;
+		__entry->gfp_mask = (__force unsigned long)gfp_mask;
 		__entry->prio = prio;
 	),
 
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index ddc8c944f417..71c141804222 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -24,7 +24,7 @@ DECLARE_EVENT_CLASS(kmem_alloc,
 		__field(	const void *,	ptr		)
 		__field(	size_t,		bytes_req	)
 		__field(	size_t,		bytes_alloc	)
-		__field(	gfp_t,		gfp_flags	)
+		__field(	unsigned long,	gfp_flags	)
 	),
 
 	TP_fast_assign(
@@ -32,7 +32,7 @@ DECLARE_EVENT_CLASS(kmem_alloc,
 		__entry->ptr		= ptr;
 		__entry->bytes_req	= bytes_req;
 		__entry->bytes_alloc	= bytes_alloc;
-		__entry->gfp_flags	= gfp_flags;
+		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
 	),
 
 	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
@@ -75,7 +75,7 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
 		__field(	const void *,	ptr		)
 		__field(	size_t,		bytes_req	)
 		__field(	size_t,		bytes_alloc	)
-		__field(	gfp_t,		gfp_flags	)
+		__field(	unsigned long,	gfp_flags	)
 		__field(	int,		node		)
 	),
 
@@ -84,7 +84,7 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
 		__entry->ptr		= ptr;
 		__entry->bytes_req	= bytes_req;
 		__entry->bytes_alloc	= bytes_alloc;
-		__entry->gfp_flags	= gfp_flags;
+		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
 		__entry->node		= node;
 	),
 
@@ -208,14 +208,14 @@ TRACE_EVENT(mm_page_alloc,
 	TP_STRUCT__entry(
 		__field(	unsigned long,	pfn		)
 		__field(	unsigned int,	order		)
-		__field(	gfp_t,		gfp_flags	)
+		__field(	unsigned long,	gfp_flags	)
 		__field(	int,		migratetype	)
 	),
 
 	TP_fast_assign(
 		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
 		__entry->order		= order;
-		__entry->gfp_flags	= gfp_flags;
+		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
 		__entry->migratetype	= migratetype;
 	),
 
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 6532119a6bf1..bbef42058a7e 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -13,49 +13,51 @@
  * Thus most bits set go first.
  */
 
-#define __def_gfpflag_names						\
-	{(unsigned long)GFP_TRANSHUGE,		"GFP_TRANSHUGE"},	\
-	{(unsigned long)GFP_TRANSHUGE_LIGHT,	"GFP_TRANSHUGE_LIGHT"}, \
-	{(unsigned long)GFP_HIGHUSER_MOVABLE,	"GFP_HIGHUSER_MOVABLE"},\
-	{(unsigned long)GFP_HIGHUSER,		"GFP_HIGHUSER"},	\
-	{(unsigned long)GFP_USER,		"GFP_USER"},		\
-	{(unsigned long)GFP_KERNEL_ACCOUNT,	"GFP_KERNEL_ACCOUNT"},	\
-	{(unsigned long)GFP_KERNEL,		"GFP_KERNEL"},		\
-	{(unsigned long)GFP_NOFS,		"GFP_NOFS"},		\
-	{(unsigned long)GFP_ATOMIC,		"GFP_ATOMIC"},		\
-	{(unsigned long)GFP_NOIO,		"GFP_NOIO"},		\
-	{(unsigned long)GFP_NOWAIT,		"GFP_NOWAIT"},		\
-	{(unsigned long)GFP_DMA,		"GFP_DMA"},		\
-	{(unsigned long)__GFP_HIGHMEM,		"__GFP_HIGHMEM"},	\
-	{(unsigned long)GFP_DMA32,		"GFP_DMA32"},		\
-	{(unsigned long)__GFP_HIGH,		"__GFP_HIGH"},		\
-	{(unsigned long)__GFP_ATOMIC,		"__GFP_ATOMIC"},	\
-	{(unsigned long)__GFP_IO,		"__GFP_IO"},		\
-	{(unsigned long)__GFP_FS,		"__GFP_FS"},		\
-	{(unsigned long)__GFP_NOWARN,		"__GFP_NOWARN"},	\
-	{(unsigned long)__GFP_RETRY_MAYFAIL,	"__GFP_RETRY_MAYFAIL"},	\
-	{(unsigned long)__GFP_NOFAIL,		"__GFP_NOFAIL"},	\
-	{(unsigned long)__GFP_NORETRY,		"__GFP_NORETRY"},	\
-	{(unsigned long)__GFP_COMP,		"__GFP_COMP"},		\
-	{(unsigned long)__GFP_ZERO,		"__GFP_ZERO"},		\
-	{(unsigned long)__GFP_NOMEMALLOC,	"__GFP_NOMEMALLOC"},	\
-	{(unsigned long)__GFP_MEMALLOC,		"__GFP_MEMALLOC"},	\
-	{(unsigned long)__GFP_HARDWALL,		"__GFP_HARDWALL"},	\
-	{(unsigned long)__GFP_THISNODE,		"__GFP_THISNODE"},	\
-	{(unsigned long)__GFP_RECLAIMABLE,	"__GFP_RECLAIMABLE"},	\
-	{(unsigned long)__GFP_MOVABLE,		"__GFP_MOVABLE"},	\
-	{(unsigned long)__GFP_ACCOUNT,		"__GFP_ACCOUNT"},	\
-	{(unsigned long)__GFP_WRITE,		"__GFP_WRITE"},		\
-	{(unsigned long)__GFP_RECLAIM,		"__GFP_RECLAIM"},	\
-	{(unsigned long)__GFP_DIRECT_RECLAIM,	"__GFP_DIRECT_RECLAIM"},\
-	{(unsigned long)__GFP_KSWAPD_RECLAIM,	"__GFP_KSWAPD_RECLAIM"},\
-	{(unsigned long)__GFP_ZEROTAGS,		"__GFP_ZEROTAGS"}	\
+#define gfpflag_string(flag) {(__force unsigned long)flag, #flag}
+
+#define __def_gfpflag_names				\
+	gfpflag_string(GFP_TRANSHUGE),			\
+	gfpflag_string(GFP_TRANSHUGE_LIGHT),		\
+	gfpflag_string(GFP_HIGHUSER_MOVABLE),		\
+	gfpflag_string(GFP_HIGHUSER),			\
+	gfpflag_string(GFP_USER),			\
+	gfpflag_string(GFP_KERNEL_ACCOUNT),		\
+	gfpflag_string(GFP_KERNEL),			\
+	gfpflag_string(GFP_NOFS),			\
+	gfpflag_string(GFP_ATOMIC),			\
+	gfpflag_string(GFP_NOIO),			\
+	gfpflag_string(GFP_NOWAIT),			\
+	gfpflag_string(GFP_DMA),			\
+	gfpflag_string(__GFP_HIGHMEM),			\
+	gfpflag_string(GFP_DMA32),			\
+	gfpflag_string(__GFP_HIGH),			\
+	gfpflag_string(__GFP_ATOMIC),			\
+	gfpflag_string(__GFP_IO),			\
+	gfpflag_string(__GFP_FS),			\
+	gfpflag_string(__GFP_NOWARN),			\
+	gfpflag_string(__GFP_RETRY_MAYFAIL),		\
+	gfpflag_string(__GFP_NOFAIL),			\
+	gfpflag_string(__GFP_NORETRY),			\
+	gfpflag_string(__GFP_COMP),			\
+	gfpflag_string(__GFP_ZERO),			\
+	gfpflag_string(__GFP_NOMEMALLOC),		\
+	gfpflag_string(__GFP_MEMALLOC),			\
+	gfpflag_string(__GFP_HARDWALL),			\
+	gfpflag_string(__GFP_THISNODE),			\
+	gfpflag_string(__GFP_RECLAIMABLE),		\
+	gfpflag_string(__GFP_MOVABLE),			\
+	gfpflag_string(__GFP_ACCOUNT),			\
+	gfpflag_string(__GFP_WRITE),			\
+	gfpflag_string(__GFP_RECLAIM),			\
+	gfpflag_string(__GFP_DIRECT_RECLAIM),		\
+	gfpflag_string(__GFP_KSWAPD_RECLAIM),		\
+	gfpflag_string(__GFP_ZEROTAGS)
 
 #ifdef CONFIG_KASAN_HW_TAGS
-#define __def_gfpflag_names_kasan ,					       \
-	{(unsigned long)__GFP_SKIP_ZERO,	   "__GFP_SKIP_ZERO"},	       \
-	{(unsigned long)__GFP_SKIP_KASAN_POISON,   "__GFP_SKIP_KASAN_POISON"}, \
-	{(unsigned long)__GFP_SKIP_KASAN_UNPOISON, "__GFP_SKIP_KASAN_UNPOISON"}
+#define __def_gfpflag_names_kasan ,			\
+	gfpflag_string(__GFP_SKIP_ZERO),		\
+	gfpflag_string(__GFP_SKIP_KASAN_POISON),	\
+	gfpflag_string(__GFP_SKIP_KASAN_UNPOISON)
 #else
 #define __def_gfpflag_names_kasan
 #endif
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index de136dbd623a..408c86244d64 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -96,14 +96,14 @@ TRACE_EVENT(mm_vmscan_wakeup_kswapd,
 		__field(	int,	nid		)
 		__field(	int,	zid		)
 		__field(	int,	order		)
-		__field(	gfp_t,	gfp_flags	)
+		__field(	unsigned long,	gfp_flags	)
 	),
 
 	TP_fast_assign(
 		__entry->nid		= nid;
 		__entry->zid		= zid;
 		__entry->order		= order;
-		__entry->gfp_flags	= gfp_flags;
+		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
 	),
 
 	TP_printk("nid=%d order=%d gfp_flags=%s",
@@ -120,12 +120,12 @@ DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_begin_template,
 
 	TP_STRUCT__entry(
 		__field(	int,	order		)
-		__field(	gfp_t,	gfp_flags	)
+		__field(	unsigned long,	gfp_flags	)
 	),
 
 	TP_fast_assign(
 		__entry->order		= order;
-		__entry->gfp_flags	= gfp_flags;
+		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
 	),
 
 	TP_printk("order=%d gfp_flags=%s",
@@ -210,7 +210,7 @@ TRACE_EVENT(mm_shrink_slab_start,
 		__field(void *, shrink)
 		__field(int, nid)
 		__field(long, nr_objects_to_shrink)
-		__field(gfp_t, gfp_flags)
+		__field(unsigned long, gfp_flags)
 		__field(unsigned long, cache_items)
 		__field(unsigned long long, delta)
 		__field(unsigned long, total_scan)
@@ -222,7 +222,7 @@ TRACE_EVENT(mm_shrink_slab_start,
 		__entry->shrink = shr->scan_objects;
 		__entry->nid = sc->nid;
 		__entry->nr_objects_to_shrink = nr_objects_to_shrink;
-		__entry->gfp_flags = sc->gfp_mask;
+		__entry->gfp_flags = (__force unsigned long)sc->gfp_mask;
 		__entry->cache_items = cache_items;
 		__entry->delta = delta;
 		__entry->total_scan = total_scan;
@@ -446,13 +446,13 @@ TRACE_EVENT(mm_vmscan_node_reclaim_begin,
 	TP_STRUCT__entry(
 		__field(int, nid)
 		__field(int, order)
-		__field(gfp_t, gfp_flags)
+		__field(unsigned long, gfp_flags)
 	),
 
 	TP_fast_assign(
 		__entry->nid = nid;
 		__entry->order = order;
-		__entry->gfp_flags = gfp_flags;
+		__entry->gfp_flags = (__force unsigned long)gfp_flags;
 	),
 
 	TP_printk("nid=%d order=%d gfp_flags=%s",
diff --git a/mm/compaction.c b/mm/compaction.c
index fe915db6149b..bcdf167ab286 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2592,7 +2592,7 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
 		unsigned int alloc_flags, const struct alloc_context *ac,
 		enum compact_priority prio, struct page **capture)
 {
-	int may_perform_io = gfp_mask & __GFP_IO;
+	int may_perform_io = (__force int)(gfp_mask & __GFP_IO);
 	struct zoneref *z;
 	struct zone *zone;
 	enum compact_result rc = COMPACT_SKIPPED;
-- 
2.31.1


^ permalink raw reply related	[flat|nested] 22+ messages in thread

* Re: [PATCH v2] percpu: improve percpu_alloc_percpu event trace
  2022-05-11  5:11       ` Vasily Averin
@ 2022-05-11 17:30         ` Roman Gushchin
  0 siblings, 0 replies; 22+ messages in thread
From: Roman Gushchin @ 2022-05-11 17:30 UTC (permalink / raw)
  To: Vasily Averin
  Cc: Shakeel Butt, Steven Rostedt, Ingo Molnar, kernel, linux-kernel,
	Vlastimil Babka, Michal Hocko, cgroups, Andrew Morton,
	Dennis Zhou, Tejun Heo, Christoph Lameter, linux-mm

On Wed, May 11, 2022 at 08:11:54AM +0300, Vasily Averin wrote:
> On 5/11/22 05:33, Roman Gushchin wrote:
> > On Fri, May 06, 2022 at 10:29:25PM +0300, Vasily Averin wrote:
> >>  TRACE_EVENT(percpu_alloc_percpu,
> >>  
> >> -	TP_PROTO(bool reserved, bool is_atomic, size_t size,
> >> -		 size_t align, void *base_addr, int off, void __percpu *ptr),
> >> +	TP_PROTO(unsigned long call_site,
> >> +		 bool reserved, bool is_atomic, size_t size,
> >> +		 size_t align, void *base_addr, int off,
> >> +		 void __percpu *ptr, size_t bytes_alloc, gfp_t gfp_flags),
> > 
> > Don't we want to preserve the order and add the call_site at the end?
> > Trace events are not ABI, but if we don't have a strong reason to break it,
> > I'd preserve the old order.
> 
> I checked recent trace patches and found that changing the order is acceptable.
> 
> commit 8c39b8bc82aafcc8dd378bd79c76fac8e8a89c8d
> Author: David Howells <dhowells@redhat.com>
> Date:   Fri Jan 14 11:44:54 2022 +0000
> 
>     cachefiles: Make some tracepoint adjustments
> 
> -           TP_printk("o=%08x i=%lx e=%d",
> -                     __entry->obj, __entry->ino, __entry->error)
> +           TP_printk("o=%08x dB=%lx B=%lx e=%d",
> +                     __entry->obj, __entry->dino, __entry->ino, __entry->error)
> 
> On the other hand, I agree to keep the old order by default;
> that's why I added bytes_alloc and gfp_flags at the end of the output.
> However, I think call_site is an exception: in all the cases I found,
> call_site is output first.
> For me personally it simplifies output parsing.
> 
> So I would like to know Steven's position on this question.

Ok, not a strong opinion, I think both options are acceptable.

Thanks!

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH v2] percpu: improve percpu_alloc_percpu event trace
  2022-05-11  2:33     ` Roman Gushchin
  2022-05-11  5:11       ` Vasily Averin
@ 2022-05-15 22:06       ` Steven Rostedt
  1 sibling, 0 replies; 22+ messages in thread
From: Steven Rostedt @ 2022-05-15 22:06 UTC (permalink / raw)
  To: Roman Gushchin
  Cc: Vasily Averin, Shakeel Butt, Ingo Molnar, kernel, linux-kernel,
	Vlastimil Babka, Michal Hocko, cgroups, Andrew Morton,
	Dennis Zhou, Tejun Heo, Christoph Lameter, linux-mm,
	linux-trace-users

On Tue, 10 May 2022 19:33:17 -0700
Roman Gushchin <roman.gushchin@linux.dev> wrote:

>  --- a/include/trace/events/percpu.h
> > +++ b/include/trace/events/percpu.h
> > @@ -6,15 +6,20 @@
> >  #define _TRACE_PERCPU_H
> >  
> >  #include <linux/tracepoint.h>
> > +#include <trace/events/mmflags.h>
> >  
> >  TRACE_EVENT(percpu_alloc_percpu,
> >  
> > -	TP_PROTO(bool reserved, bool is_atomic, size_t size,
> > -		 size_t align, void *base_addr, int off, void __percpu *ptr),
> > +	TP_PROTO(unsigned long call_site,
> > +		 bool reserved, bool is_atomic, size_t size,
> > +		 size_t align, void *base_addr, int off,
> > +		 void __percpu *ptr, size_t bytes_alloc, gfp_t gfp_flags),  
> 
> Don't we want to preserve the order and add the call_site at the end?
> Trace events are not ABI, but if we don't have a strong reason to break it,
> I'd preserve the old order.

Ideally everyone should be using libtraceevent, which will parse the format
file for the needed entries.

Nothing (important) should be parsing the raw ASCII from the trace files.
It's slow and unreliable. The raw format (trace_pipe_raw) files, along with
libtraceevent, will handle finding the fields you are looking for, even if
the fields move around (internally or externally).

Then there's trace-cruncher (a Python tool that uses libtracefs and
libtraceevent), which will work too.

  https://github.com/vmware/trace-cruncher
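
For a sense of what that looks like, a rough sketch against libtracefs and
libtraceevent (the API names and signatures here are from memory and should
be checked against tracefs.h and event-parse.h; this is a sketch, not a
reference):

	#include <stdio.h>
	#include <string.h>
	#include <tracefs.h>	/* libtracefs; pulls in libtraceevent */

	/* Called once per raw record; field lookup is by name, so the
	 * callback keeps working even if TP_STRUCT__entry fields move. */
	static int show_alloc(struct tep_event *event, struct tep_record *record,
			      int cpu, void *context)
	{
		unsigned long long bytes;

		if (strcmp(event->name, "percpu_alloc_percpu") != 0)
			return 0;
		if (tep_get_field_val(NULL, event, "bytes_alloc", record,
				      &bytes, 0) == 0)
			printf("cpu%d: bytes_alloc=%llu\n", cpu, bytes);
		return 0;
	}

	int main(void)
	{
		struct tep_handle *tep = tracefs_local_events(NULL);

		if (!tep)
			return 1;
		tracefs_iterate_raw_events(tep, NULL, NULL, 0, show_alloc, NULL);
		tep_free(tep);
		return 0;
	}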

-- Steve

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH mm v2] tracing: incorrect gfp_t conversion
  2022-05-11  7:20               ` [PATCH mm v2] " Vasily Averin
@ 2022-05-15 22:09                 ` Steven Rostedt
  2022-05-16 20:55                   ` Andrew Morton
  0 siblings, 1 reply; 22+ messages in thread
From: Steven Rostedt @ 2022-05-15 22:09 UTC (permalink / raw)
  To: Vasily Averin
  Cc: Ingo Molnar, Andrew Morton, Matthew Wilcox, Joe Perches, kernel,
	linux-kernel, linux-mm

On Wed, 11 May 2022 10:20:39 +0300
Vasily Averin <vvs@openvz.org> wrote:

> Fixes the following sparse warnings:
> 
> include/trace/events/*: sparse: cast to restricted gfp_t
> include/trace/events/*: sparse: restricted gfp_t degrades to integer
> 
> gfp_t type is bitwise and requires __force attributes for any casts.
> 
> Signed-off-by:	Vasily Averin <vvs@openvz.org>
> ---
> v2: 1) re-based to 5.18-rc6
>     2) re-defined __def_gfpflag_names array according to
> 	akpm@, willy@ and Joe Perches recommendations
> ---
>  include/linux/gfp.h               |  2 +-
>  include/trace/events/btrfs.h      |  4 +-
>  include/trace/events/compaction.h |  4 +-
>  include/trace/events/kmem.h       | 12 ++---
>  include/trace/events/mmflags.h    | 84 ++++++++++++++++---------------
>  include/trace/events/vmscan.h     | 16 +++---
>  mm/compaction.c                   |  2 +-
>  7 files changed, 63 insertions(+), 61 deletions(-)

From the tracing POV:

Acked-by: Steven Rostedt (Google) <rostedt@goodmis.org>

-- Steve

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH mm v2] tracing: incorrect gfp_t conversion
  2022-05-15 22:09                 ` Steven Rostedt
@ 2022-05-16 20:55                   ` Andrew Morton
  0 siblings, 0 replies; 22+ messages in thread
From: Andrew Morton @ 2022-05-16 20:55 UTC (permalink / raw)
  To: Vasily Averin; +Cc: kernel, linux-kernel, linux-mm, Steven Rostedt

On Wed, 11 May 2022 10:20:39 +0300 Vasily Averin <vvs@openvz.org> wrote:

> Fixes the following sparse warnings:
> 
> include/trace/events/*: sparse: cast to restricted gfp_t
> include/trace/events/*: sparse: restricted gfp_t degrades to integer
> 
> gfp_t type is bitwise and requires __force attributes for any casts.

I already moved the previous version into mm-stable.  Would prefer not
to have to rebase it.

> v2: 1) re-based to 5.18-rc6
>     2) re-defined __def_gfpflag_names array according to
> 	akpm@, willy@ and Joe Perches recommendations

Please redo this against the mm-stable or mm-unstable branches against
git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm, or against your
own previous version.

The new patch will simply switch to the gfpflag_string() trick, so it will
not be a "fix"; please let's position it as a "cleanup".


^ permalink raw reply	[flat|nested] 22+ messages in thread

Thread overview: 22+ messages
2022-05-06  4:46 [PATCH] percpu: improve percpu_alloc_percpu event trace Vasily Averin
2022-05-06  7:52 ` Vasily Averin
2022-05-06 19:29   ` [PATCH v2] " Vasily Averin
2022-05-11  2:33     ` Roman Gushchin
2022-05-11  5:11       ` Vasily Averin
2022-05-11 17:30         ` Roman Gushchin
2022-05-15 22:06       ` Steven Rostedt
2022-05-06 20:38 ` [PATCH] " kernel test robot
2022-05-07 14:51   ` Vasily Averin
2022-05-07 19:02     ` [PATCH mm] tracing: incorrect gfp_t conversion Vasily Averin
2022-05-07 19:37       ` Andrew Morton
2022-05-07 22:28         ` Vasily Averin
2022-05-07 22:48           ` Andrew Morton
2022-05-07 23:00             ` Andrew Morton
2022-05-08 20:37               ` Matthew Wilcox
2022-05-08 20:51             ` Joe Perches
2022-05-11  7:20               ` [PATCH mm v2] " Vasily Averin
2022-05-15 22:09                 ` Steven Rostedt
2022-05-16 20:55                   ` Andrew Morton
2022-05-09 21:06     ` [PATCH] percpu: improve percpu_alloc_percpu event trace Steven Rostedt
2022-05-10  4:22       ` Vasily Averin
2022-05-10 14:16         ` Steven Rostedt
