* [PATCH] perf core: Get rid of 'uses dynamic stack allocation' warning
@ 2016-01-22  6:52 Wang Nan
  2016-01-22  7:06 ` kbuild test robot
  2016-01-22  8:00 ` Peter Zijlstra
  0 siblings, 2 replies; 4+ messages in thread
From: Wang Nan @ 2016-01-22  6:52 UTC (permalink / raw)
  To: peterz; +Cc: linux-kernel, Wang Nan, pi3orama

On s390 with CONFIG_WARN_DYNAMIC_STACK set, a 'uses dynamic stack
allocation' warning is issued when a 'struct perf_sample_data' local
variable is defined.

This patch suppresses the warning by allocating an extra 255 bytes on
the stack and computing the cacheline-aligned pointer manually.

Reported-by: kbuild test robot <fengguang.wu@intel.com>
Signed-off-by: Wang Nan <wangnan0@huawei.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: pi3orama@163.com
---

I'm not confident in this patch because I know nothing about s390,
and the extra 255 bytes seems too large. Please simply ignore this
patch if it is inappropriate.
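
For reference, a minimal user-space sketch of the trick the new macro
uses (illustrative only: ALIGN_BYTES and 'struct sample' are stand-ins
for SMP_CACHE_BYTES and 'struct perf_sample_data', and the open-coded
rounding stands in for PTR_ALIGN):

#include <stdint.h>
#include <stdio.h>

#define ALIGN_BYTES	64		/* stand-in for SMP_CACHE_BYTES */

struct sample {				/* stand-in for perf_sample_data */
	uint64_t a, b;
};

int main(void)
{
	/* over-allocate, leaving ALIGN_BYTES - 1 bytes of slack */
	unsigned char buf[sizeof(struct sample) + ALIGN_BYTES - 1];

	/* round the address up to the next ALIGN_BYTES boundary */
	uintptr_t addr = ((uintptr_t)buf + ALIGN_BYTES - 1) &
			 ~(uintptr_t)(ALIGN_BYTES - 1);
	struct sample *s = (struct sample *)addr;

	printf("raw=%p aligned=%p\n", (void *)buf, (void *)s);
	return 0;
}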

The kbuild test robot says:

 kernel/events/ring_buffer.c: In function 'perf_output_begin':
 kernel/events/ring_buffer.c:251:1: warning: 'perf_output_begin' uses dynamic stack allocation
  }
  ^

---
 include/linux/perf_event.h  | 11 ++++++
 kernel/events/core.c        | 86 ++++++++++++++++++++++-----------------------
 kernel/events/ring_buffer.c |  6 ++--
 3 files changed, 57 insertions(+), 46 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f9828a4..263b6ef 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -797,6 +797,17 @@ struct perf_sample_data {
 	u64				stack_user_size;
 } ____cacheline_aligned;
 
+#ifdef CONFIG_WARN_DYNAMIC_STACK
+#define DEFINE_PERF_SAMPLE_DATA_ALIGNED(pn, an) \
+	u8 an[SMP_CACHE_BYTES - 1 + sizeof(struct perf_sample_data)]; \
+	struct perf_sample_data *pn = \
+		(struct perf_sample_data *)PTR_ALIGN(&an, SMP_CACHE_BYTES)
+#else
+#define DEFINE_PERF_SAMPLE_DATA_ALIGNED(pn, an) \
+	struct perf_sample_data an; \
+	struct perf_sample_data *pn = &an;
+#endif
+
 /* default value for data source */
 #define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
 		    PERF_MEM_S(LVL, NA)   |\
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 9e9c84da..36abe60 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5580,7 +5580,7 @@ perf_event_read_event(struct perf_event *event,
 			struct task_struct *task)
 {
 	struct perf_output_handle handle;
-	struct perf_sample_data sample;
+	DEFINE_PERF_SAMPLE_DATA_ALIGNED(psample, temp);
 	struct perf_read_event read_event = {
 		.header = {
 			.type = PERF_RECORD_READ,
@@ -5592,14 +5592,14 @@ perf_event_read_event(struct perf_event *event,
 	};
 	int ret;
 
-	perf_event_header__init_id(&read_event.header, &sample, event);
+	perf_event_header__init_id(&read_event.header, psample, event);
 	ret = perf_output_begin(&handle, event, read_event.header.size);
 	if (ret)
 		return;
 
 	perf_output_put(&handle, read_event);
 	perf_output_read(&handle, event);
-	perf_event__output_id_sample(event, &handle, &sample);
+	perf_event__output_id_sample(event, &handle, psample);
 
 	perf_output_end(&handle);
 }
@@ -5704,14 +5704,14 @@ static void perf_event_task_output(struct perf_event *event,
 {
 	struct perf_task_event *task_event = data;
 	struct perf_output_handle handle;
-	struct perf_sample_data	sample;
+	DEFINE_PERF_SAMPLE_DATA_ALIGNED(psample, temp);
 	struct task_struct *task = task_event->task;
 	int ret, size = task_event->event_id.header.size;
 
 	if (!perf_event_task_match(event))
 		return;
 
-	perf_event_header__init_id(&task_event->event_id.header, &sample, event);
+	perf_event_header__init_id(&task_event->event_id.header, psample, event);
 
 	ret = perf_output_begin(&handle, event,
 				task_event->event_id.header.size);
@@ -5728,7 +5728,7 @@ static void perf_event_task_output(struct perf_event *event,
 
 	perf_output_put(&handle, task_event->event_id);
 
-	perf_event__output_id_sample(event, &handle, &sample);
+	perf_event__output_id_sample(event, &handle, psample);
 
 	perf_output_end(&handle);
 out:
@@ -5800,14 +5800,14 @@ static void perf_event_comm_output(struct perf_event *event,
 {
 	struct perf_comm_event *comm_event = data;
 	struct perf_output_handle handle;
-	struct perf_sample_data sample;
+	DEFINE_PERF_SAMPLE_DATA_ALIGNED(psample, temp);
 	int size = comm_event->event_id.header.size;
 	int ret;
 
 	if (!perf_event_comm_match(event))
 		return;
 
-	perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
+	perf_event_header__init_id(&comm_event->event_id.header, psample, event);
 	ret = perf_output_begin(&handle, event,
 				comm_event->event_id.header.size);
 
@@ -5821,7 +5821,7 @@ static void perf_event_comm_output(struct perf_event *event,
 	__output_copy(&handle, comm_event->comm,
 				   comm_event->comm_size);
 
-	perf_event__output_id_sample(event, &handle, &sample);
+	perf_event__output_id_sample(event, &handle, psample);
 
 	perf_output_end(&handle);
 out:
@@ -5913,7 +5913,7 @@ static void perf_event_mmap_output(struct perf_event *event,
 {
 	struct perf_mmap_event *mmap_event = data;
 	struct perf_output_handle handle;
-	struct perf_sample_data sample;
+	DEFINE_PERF_SAMPLE_DATA_ALIGNED(psample, temp);
 	int size = mmap_event->event_id.header.size;
 	int ret;
 
@@ -5930,7 +5930,7 @@ static void perf_event_mmap_output(struct perf_event *event,
 		mmap_event->event_id.header.size += sizeof(mmap_event->flags);
 	}
 
-	perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
+	perf_event_header__init_id(&mmap_event->event_id.header, psample, event);
 	ret = perf_output_begin(&handle, event,
 				mmap_event->event_id.header.size);
 	if (ret)
@@ -5953,7 +5953,7 @@ static void perf_event_mmap_output(struct perf_event *event,
 	__output_copy(&handle, mmap_event->file_name,
 				   mmap_event->file_size);
 
-	perf_event__output_id_sample(event, &handle, &sample);
+	perf_event__output_id_sample(event, &handle, psample);
 
 	perf_output_end(&handle);
 out:
@@ -6118,7 +6118,7 @@ void perf_event_aux_event(struct perf_event *event, unsigned long head,
 			  unsigned long size, u64 flags)
 {
 	struct perf_output_handle handle;
-	struct perf_sample_data sample;
+	DEFINE_PERF_SAMPLE_DATA_ALIGNED(psample, temp);
 	struct perf_aux_event {
 		struct perf_event_header	header;
 		u64				offset;
@@ -6136,14 +6136,14 @@ void perf_event_aux_event(struct perf_event *event, unsigned long head,
 	};
 	int ret;
 
-	perf_event_header__init_id(&rec.header, &sample, event);
+	perf_event_header__init_id(&rec.header, psample, event);
 	ret = perf_output_begin(&handle, event, rec.header.size);
 
 	if (ret)
 		return;
 
 	perf_output_put(&handle, rec);
-	perf_event__output_id_sample(event, &handle, &sample);
+	perf_event__output_id_sample(event, &handle, psample);
 
 	perf_output_end(&handle);
 }
@@ -6154,7 +6154,7 @@ void perf_event_aux_event(struct perf_event *event, unsigned long head,
 void perf_log_lost_samples(struct perf_event *event, u64 lost)
 {
 	struct perf_output_handle handle;
-	struct perf_sample_data sample;
+	DEFINE_PERF_SAMPLE_DATA_ALIGNED(psample, temp);
 	int ret;
 
 	struct {
@@ -6169,7 +6169,7 @@ void perf_log_lost_samples(struct perf_event *event, u64 lost)
 		.lost		= lost,
 	};
 
-	perf_event_header__init_id(&lost_samples_event.header, &sample, event);
+	perf_event_header__init_id(&lost_samples_event.header, psample, event);
 
 	ret = perf_output_begin(&handle, event,
 				lost_samples_event.header.size);
@@ -6177,7 +6177,7 @@ void perf_log_lost_samples(struct perf_event *event, u64 lost)
 		return;
 
 	perf_output_put(&handle, lost_samples_event);
-	perf_event__output_id_sample(event, &handle, &sample);
+	perf_event__output_id_sample(event, &handle, psample);
 	perf_output_end(&handle);
 }
 
@@ -6205,7 +6205,7 @@ static void perf_event_switch_output(struct perf_event *event, void *data)
 {
 	struct perf_switch_event *se = data;
 	struct perf_output_handle handle;
-	struct perf_sample_data sample;
+	DEFINE_PERF_SAMPLE_DATA_ALIGNED(psample, temp);
 	int ret;
 
 	if (!perf_event_switch_match(event))
@@ -6224,7 +6224,7 @@ static void perf_event_switch_output(struct perf_event *event, void *data)
 					perf_event_tid(event, se->next_prev);
 	}
 
-	perf_event_header__init_id(&se->event_id.header, &sample, event);
+	perf_event_header__init_id(&se->event_id.header, psample, event);
 
 	ret = perf_output_begin(&handle, event, se->event_id.header.size);
 	if (ret)
@@ -6235,7 +6235,7 @@ static void perf_event_switch_output(struct perf_event *event, void *data)
 	else
 		perf_output_put(&handle, se->event_id);
 
-	perf_event__output_id_sample(event, &handle, &sample);
+	perf_event__output_id_sample(event, &handle, psample);
 
 	perf_output_end(&handle);
 }
@@ -6273,7 +6273,7 @@ static void perf_event_switch(struct task_struct *task,
 static void perf_log_throttle(struct perf_event *event, int enable)
 {
 	struct perf_output_handle handle;
-	struct perf_sample_data sample;
+	DEFINE_PERF_SAMPLE_DATA_ALIGNED(psample, temp);
 	int ret;
 
 	struct {
@@ -6295,7 +6295,7 @@ static void perf_log_throttle(struct perf_event *event, int enable)
 	if (enable)
 		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
 
-	perf_event_header__init_id(&throttle_event.header, &sample, event);
+	perf_event_header__init_id(&throttle_event.header, psample, event);
 
 	ret = perf_output_begin(&handle, event,
 				throttle_event.header.size);
@@ -6303,14 +6303,14 @@ static void perf_log_throttle(struct perf_event *event, int enable)
 		return;
 
 	perf_output_put(&handle, throttle_event);
-	perf_event__output_id_sample(event, &handle, &sample);
+	perf_event__output_id_sample(event, &handle, psample);
 	perf_output_end(&handle);
 }
 
 static void perf_log_itrace_start(struct perf_event *event)
 {
 	struct perf_output_handle handle;
-	struct perf_sample_data sample;
+	DEFINE_PERF_SAMPLE_DATA_ALIGNED(psample, temp);
 	struct perf_aux_event {
 		struct perf_event_header        header;
 		u32				pid;
@@ -6331,14 +6331,14 @@ static void perf_log_itrace_start(struct perf_event *event)
 	rec.pid	= perf_event_pid(event, current);
 	rec.tid	= perf_event_tid(event, current);
 
-	perf_event_header__init_id(&rec.header, &sample, event);
+	perf_event_header__init_id(&rec.header, psample, event);
 	ret = perf_output_begin(&handle, event, rec.header.size);
 
 	if (ret)
 		return;
 
 	perf_output_put(&handle, rec);
-	perf_event__output_id_sample(event, &handle, &sample);
+	perf_event__output_id_sample(event, &handle, psample);
 
 	perf_output_end(&handle);
 }
@@ -6647,13 +6647,13 @@ inline void perf_swevent_put_recursion_context(int rctx)
 
 void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 {
-	struct perf_sample_data data;
+	DEFINE_PERF_SAMPLE_DATA_ALIGNED(pdata, temp);
 
 	if (WARN_ON_ONCE(!regs))
 		return;
 
-	perf_sample_data_init(&data, addr, 0);
-	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
+	perf_sample_data_init(pdata, addr, 0);
+	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, pdata, regs);
 }
 
 void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
@@ -6905,7 +6905,7 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
 		   struct pt_regs *regs, struct hlist_head *head, int rctx,
 		   struct task_struct *task)
 {
-	struct perf_sample_data data;
+	DEFINE_PERF_SAMPLE_DATA_ALIGNED(pdata, temp);
 	struct perf_event *event;
 
 	struct perf_raw_record raw = {
@@ -6913,12 +6913,12 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
 		.data = record,
 	};
 
-	perf_sample_data_init(&data, addr, 0);
-	data.raw = &raw;
+	perf_sample_data_init(pdata, addr, 0);
+	pdata->raw = &raw;
 
 	hlist_for_each_entry_rcu(event, head, hlist_entry) {
-		if (perf_tp_event_match(event, &data, regs))
-			perf_swevent_event(event, count, &data, regs);
+		if (perf_tp_event_match(event, pdata, regs))
+			perf_swevent_event(event, count, pdata, regs);
 	}
 
 	/*
@@ -6939,8 +6939,8 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
 				continue;
 			if (event->attr.config != entry->type)
 				continue;
-			if (perf_tp_event_match(event, &data, regs))
-				perf_swevent_event(event, count, &data, regs);
+			if (perf_tp_event_match(event, pdata, regs))
+				perf_swevent_event(event, count, pdata, regs);
 		}
 unlock:
 		rcu_read_unlock();
@@ -7087,13 +7087,13 @@ static void perf_event_free_bpf_prog(struct perf_event *event)
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 void perf_bp_event(struct perf_event *bp, void *data)
 {
-	struct perf_sample_data sample;
+	DEFINE_PERF_SAMPLE_DATA_ALIGNED(psample, temp);
 	struct pt_regs *regs = data;
 
-	perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
+	perf_sample_data_init(psample, bp->attr.bp_addr, 0);
 
 	if (!bp->hw.state && !perf_exclude_event(bp, regs))
-		perf_swevent_event(bp, 1, &sample, regs);
+		perf_swevent_event(bp, 1, psample, regs);
 }
 #endif
 
@@ -7104,7 +7104,7 @@ void perf_bp_event(struct perf_event *bp, void *data)
 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 {
 	enum hrtimer_restart ret = HRTIMER_RESTART;
-	struct perf_sample_data data;
+	DEFINE_PERF_SAMPLE_DATA_ALIGNED(pdata, temp);
 	struct pt_regs *regs;
 	struct perf_event *event;
 	u64 period;
@@ -7116,12 +7116,12 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 
 	event->pmu->read(event);
 
-	perf_sample_data_init(&data, 0, event->hw.last_period);
+	perf_sample_data_init(pdata, 0, event->hw.last_period);
 	regs = get_irq_regs();
 
 	if (regs && !perf_exclude_event(event, regs)) {
 		if (!(event->attr.exclude_idle && is_idle_task(current)))
-			if (__perf_event_overflow(event, 1, &data, regs))
+			if (__perf_event_overflow(event, 1, pdata, regs))
 				ret = HRTIMER_NORESTART;
 	}
 
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 9f1a93f..2827bea 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -181,7 +181,7 @@ int perf_output_begin(struct perf_output_handle *handle,
 	handle->size = (1UL << page_shift) - offset;
 
 	if (unlikely(have_lost)) {
-		struct perf_sample_data sample_data;
+		DEFINE_PERF_SAMPLE_DATA_ALIGNED(psample_data, temp);
 
 		lost_event.header.size = sizeof(lost_event);
 		lost_event.header.type = PERF_RECORD_LOST;
@@ -190,9 +190,9 @@ int perf_output_begin(struct perf_output_handle *handle,
 		lost_event.lost        = local_xchg(&rb->lost, 0);
 
 		perf_event_header__init_id(&lost_event.header,
-					   &sample_data, event);
+					   psample_data, event);
 		perf_output_put(handle, lost_event);
-		perf_event__output_id_sample(event, handle, &sample_data);
+		perf_event__output_id_sample(event, handle, psample_data);
 	}
 
 	return 0;
-- 
1.8.3.4


* Re: [PATCH] perf core: Get rid of 'uses dynamic stack allocation' warning
  2016-01-22  6:52 [PATCH] perf core: Get rid of 'uses dynamic stack allocation' warning Wang Nan
@ 2016-01-22  7:06 ` kbuild test robot
  2016-01-22  8:00 ` Peter Zijlstra
  1 sibling, 0 replies; 4+ messages in thread
From: kbuild test robot @ 2016-01-22  7:06 UTC (permalink / raw)
  To: Wang Nan; +Cc: kbuild-all, peterz, linux-kernel, Wang Nan, pi3orama


Hi Wang,

[auto build test WARNING on tip/perf/core]
[also build test WARNING on v4.4 next-20160121]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Wang-Nan/perf-core-Get-rid-of-uses-dynamic-stack-allocation-warning/20160122-145515
config: i386-tinyconfig (attached as .config)
reproduce:
        # save the attached .config to linux build tree
        make ARCH=i386 

All warnings (new ones prefixed by >>):

   kernel/events/core.c: In function 'perf_event_read_event':
>> kernel/events/core.c:5571:2: warning: ISO C90 forbids mixed declarations and code [-Wdeclaration-after-statement]
     struct perf_read_event read_event = {
     ^
   kernel/events/core.c: In function 'perf_event_task_output':
   kernel/events/core.c:5695:2: warning: ISO C90 forbids mixed declarations and code [-Wdeclaration-after-statement]
     struct task_struct *task = task_event->task;
     ^
   kernel/events/core.c: In function 'perf_event_comm_output':
   kernel/events/core.c:5791:2: warning: ISO C90 forbids mixed declarations and code [-Wdeclaration-after-statement]
     int size = comm_event->event_id.header.size;
     ^
   kernel/events/core.c: In function 'perf_event_mmap_output':
   kernel/events/core.c:5904:2: warning: ISO C90 forbids mixed declarations and code [-Wdeclaration-after-statement]
     int size = mmap_event->event_id.header.size;
     ^
   kernel/events/core.c: In function 'perf_event_aux_event':
   kernel/events/core.c:6109:2: warning: ISO C90 forbids mixed declarations and code [-Wdeclaration-after-statement]
     struct perf_aux_event {
     ^
   kernel/events/core.c: In function 'perf_log_lost_samples':
   kernel/events/core.c:6145:2: warning: ISO C90 forbids mixed declarations and code [-Wdeclaration-after-statement]
     int ret;
     ^
   kernel/events/core.c: In function 'perf_event_switch_output':
   kernel/events/core.c:6196:2: warning: ISO C90 forbids mixed declarations and code [-Wdeclaration-after-statement]
     int ret;
     ^
   kernel/events/core.c: In function 'perf_log_throttle':
   kernel/events/core.c:6264:2: warning: ISO C90 forbids mixed declarations and code [-Wdeclaration-after-statement]
     int ret;
     ^
   kernel/events/core.c: In function 'perf_log_itrace_start':
   kernel/events/core.c:6301:2: warning: ISO C90 forbids mixed declarations and code [-Wdeclaration-after-statement]
     struct perf_aux_event {
     ^
   kernel/events/core.c: In function 'perf_bp_event':
   kernel/events/core.c:7078:2: warning: ISO C90 forbids mixed declarations and code [-Wdeclaration-after-statement]
     struct pt_regs *regs = data;
     ^
   kernel/events/core.c: In function 'perf_swevent_hrtimer':
   kernel/events/core.c:7095:2: warning: ISO C90 forbids mixed declarations and code [-Wdeclaration-after-statement]
     struct pt_regs *regs;
     ^
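
The pattern behind all of these new warnings appears to be the trailing
semicolon in the #else branch of DEFINE_PERF_SAMPLE_DATA_ALIGNED: each
'DEFINE_PERF_SAMPLE_DATA_ALIGNED(...);' use then expands with an extra
empty statement ahead of the remaining declarations. A stripped-down
illustration (not the kernel code itself):

#define DEFINE_FOO(pn, an) \
	int an; \
	int *pn = &an;		/* note the trailing ';' */

void example(void)
{
	DEFINE_FOO(p, v);	/* expands to: int v; int *p = &v;; */
	int later = 0;		/* under C90 rules the extra ';' above is
				 * an (empty) statement, so this declaration
				 * triggers -Wdeclaration-after-statement */
	*p = later;
}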

vim +5571 kernel/events/core.c

cdd6c482 kernel/perf_event.c   Ingo Molnar              2009-09-21  5555   * read event_id
38b200d6 kernel/perf_counter.c Peter Zijlstra           2009-06-23  5556   */
38b200d6 kernel/perf_counter.c Peter Zijlstra           2009-06-23  5557  
38b200d6 kernel/perf_counter.c Peter Zijlstra           2009-06-23  5558  struct perf_read_event {
38b200d6 kernel/perf_counter.c Peter Zijlstra           2009-06-23  5559  	struct perf_event_header	header;
38b200d6 kernel/perf_counter.c Peter Zijlstra           2009-06-23  5560  
38b200d6 kernel/perf_counter.c Peter Zijlstra           2009-06-23  5561  	u32				pid;
38b200d6 kernel/perf_counter.c Peter Zijlstra           2009-06-23  5562  	u32				tid;
38b200d6 kernel/perf_counter.c Peter Zijlstra           2009-06-23  5563  };
38b200d6 kernel/perf_counter.c Peter Zijlstra           2009-06-23  5564  
38b200d6 kernel/perf_counter.c Peter Zijlstra           2009-06-23  5565  static void
cdd6c482 kernel/perf_event.c   Ingo Molnar              2009-09-21  5566  perf_event_read_event(struct perf_event *event,
38b200d6 kernel/perf_counter.c Peter Zijlstra           2009-06-23  5567  			struct task_struct *task)
38b200d6 kernel/perf_counter.c Peter Zijlstra           2009-06-23  5568  {
38b200d6 kernel/perf_counter.c Peter Zijlstra           2009-06-23  5569  	struct perf_output_handle handle;
3ff786eb kernel/events/core.c  Wang Nan                 2016-01-22  5570  	DEFINE_PERF_SAMPLE_DATA_ALIGNED(psample, temp);
dfc65094 kernel/perf_counter.c Ingo Molnar              2009-09-21 @5571  	struct perf_read_event read_event = {
38b200d6 kernel/perf_counter.c Peter Zijlstra           2009-06-23  5572  		.header = {
cdd6c482 kernel/perf_event.c   Ingo Molnar              2009-09-21  5573  			.type = PERF_RECORD_READ,
38b200d6 kernel/perf_counter.c Peter Zijlstra           2009-06-23  5574  			.misc = 0,
c320c7b7 kernel/perf_event.c   Arnaldo Carvalho de Melo 2010-10-20  5575  			.size = sizeof(read_event) + event->read_size,
38b200d6 kernel/perf_counter.c Peter Zijlstra           2009-06-23  5576  		},
cdd6c482 kernel/perf_event.c   Ingo Molnar              2009-09-21  5577  		.pid = perf_event_pid(event, task),
cdd6c482 kernel/perf_event.c   Ingo Molnar              2009-09-21  5578  		.tid = perf_event_tid(event, task),
38b200d6 kernel/perf_counter.c Peter Zijlstra           2009-06-23  5579  	};

:::::: The code at line 5571 was first introduced by commit
:::::: dfc65094d0313cc48969fa60bcf33d693aeb05a7 perf_counter: Rename 'event' to event_id/hw_event

:::::: TO: Ingo Molnar <mingo@elte.hu>
:::::: CC: Ingo Molnar <mingo@elte.hu>

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation



* Re: [PATCH] perf core: Get rid of 'uses dynamic stack allocation' warning
  2016-01-22  6:52 [PATCH] perf core: Get rid of 'uses dynamic stack allocation' warning Wang Nan
  2016-01-22  7:06 ` kbuild test robot
@ 2016-01-22  8:00 ` Peter Zijlstra
  2016-01-22  8:06   ` Wangnan (F)
  1 sibling, 1 reply; 4+ messages in thread
From: Peter Zijlstra @ 2016-01-22  8:00 UTC (permalink / raw)
  To: Wang Nan; +Cc: linux-kernel, pi3orama

On Fri, Jan 22, 2016 at 06:52:01AM +0000, Wang Nan wrote:
> On s390 with CONFIG_WARN_DYNAMIC_STACK set, a 'uses dynamic stack
> allocation' warning is issued when a 'struct perf_sample_data' local
> variable is defined.
> 
> This patch suppresses the warning by allocating an extra 255 bytes on
> the stack and computing the cacheline-aligned pointer manually.

I've seen the warning and always wondered wtf it was about. This
Changelog doesn't actually explain what the problem is, so I cannot
judge the solution.

That said, 255 extra bytes does sound excessive just to avoid a warning.


* Re: [PATCH] perf core: Get rid of 'uses dynamic stack allocation' warning
  2016-01-22  8:00 ` Peter Zijlstra
@ 2016-01-22  8:06   ` Wangnan (F)
  0 siblings, 0 replies; 4+ messages in thread
From: Wangnan (F) @ 2016-01-22  8:06 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: linux-kernel, pi3orama



On 2016/1/22 16:00, Peter Zijlstra wrote:
> On Fri, Jan 22, 2016 at 06:52:01AM +0000, Wang Nan wrote:
>> On s390 with CONFIG_WARN_DYNAMIC_STACK set, a 'uses dynamic stack
>> allocation' warning is issued when a 'struct perf_sample_data' local
>> variable is defined.
>>
>> This patch suppresses the warning by allocating an extra 255 bytes on
>> the stack and computing the cacheline-aligned pointer manually.
> I've seen the warning and always wondered wtf it was about. This
> Changelog doesn't actually explain what the problem is, so I cannot
> judge the solution.
>
> That said, 255 extra bytes does sound excessive just to avoid a warning.

OK. Then I'll happily ignore it.

I also don't know the exact reason. I guess it is related
to the ____cacheline_aligned annotation on 'struct
perf_sample_data'. When allocating that structure on the
stack, the compiler has to dynamically realign the stack
pointer to satisfy the alignment constraint. My patch does
this work manually.
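
If my guess is right, a minimal example of the difference would look
like the code below (illustrative only: the 256 assumes s390's cache
line size, and I'm assuming the warning comes from the
-mwarn-dynamicstack compiler option that CONFIG_WARN_DYNAMIC_STACK
adds):

struct over_aligned {
	unsigned long x;
} __attribute__((aligned(256)));	/* like ____cacheline_aligned on s390 */

void needs_realignment(void)
{
	/* The compiler cannot assume the incoming stack pointer is
	 * 256-byte aligned, so it must realign it at run time. */
	struct over_aligned d = { 0 };

	asm volatile("" : : "r" (&d) : "memory");	/* keep d alive */
}

void no_realignment(void)
{
	/* Manual version: fixed-size buffer plus pointer rounding,
	 * so the frame size is a compile-time constant. */
	unsigned char buf[sizeof(struct over_aligned) + 255];
	struct over_aligned *p = (struct over_aligned *)
		(((unsigned long)buf + 255) & ~255UL);

	p->x = 0;
	asm volatile("" : : "r" (p) : "memory");
}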

Thank you.
