linux-kernel.vger.kernel.org archive mirror
* [PATCH] ring-buffer: Add rb_check_bpage in __rb_allocate_pages
@ 2020-10-14 15:16 Qiujun Huang
  2020-10-14 15:38 ` Steven Rostedt
  0 siblings, 1 reply; 5+ messages in thread
From: Qiujun Huang @ 2020-10-14 15:16 UTC (permalink / raw)
  To: rostedt, mingo, linux-kernel; +Cc: Qiujun Huang

It may be better to check that each page is 4-byte aligned, since the
2 least significant bits of the address will be used as flags.

Signed-off-by: Qiujun Huang <hqjagain@gmail.com>
---
 kernel/trace/ring_buffer.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 93ef0ab6ea20..9dec7d58b177 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1420,7 +1420,8 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
 	return 0;
 }
 
-static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
+static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
+		long nr_pages, struct list_head *pages, int cpu)
 {
 	struct buffer_page *bpage, *tmp;
 	bool user_thread = current->mm != NULL;
@@ -1464,6 +1465,8 @@ static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
 		if (!bpage)
 			goto free_pages;
 
+		rb_check_bpage(cpu_buffer, bpage);
+
 		list_add(&bpage->list, pages);
 
 		page = alloc_pages_node(cpu_to_node(cpu), mflags, 0);
@@ -1498,7 +1501,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 
 	WARN_ON(!nr_pages);
 
-	if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
+	if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages, cpu_buffer->cpu))
 		return -ENOMEM;
 
 	/*
@@ -2007,7 +2010,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
 			 * allocated without receiving ENOMEM
 			 */
 			INIT_LIST_HEAD(&cpu_buffer->new_pages);
-			if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
+			if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
 						&cpu_buffer->new_pages, cpu)) {
 				/* not enough memory for new pages */
 				err = -ENOMEM;
@@ -2073,7 +2076,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
 
 		INIT_LIST_HEAD(&cpu_buffer->new_pages);
 		if (cpu_buffer->nr_pages_to_update > 0 &&
-			__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
+			__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
 					    &cpu_buffer->new_pages, cpu_id)) {
 			err = -ENOMEM;
 			goto out_err;
-- 
2.17.1
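
For readers following along: the 2 low bits of a buffer page's list pointer
are reserved for flag bits, which is why the struct address must be at least
4-byte aligned. Below is a minimal sketch of the kind of check
rb_check_bpage() performs, based on mainline ring_buffer.c of roughly this
era; the exact flag names and body in a given tree may differ.

#define RB_PAGE_HEAD	1UL	/* marks the pointer to the head page */
#define RB_PAGE_UPDATE	2UL	/* head pointer is being updated */
#define RB_FLAG_MASK	3UL	/* the 2 LSBs used as flag bits */

static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
			  struct buffer_page *bpage)
{
	unsigned long val = (unsigned long)&bpage->list;

	/* If either low bit is set, the address would collide with the flags. */
	RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK);

	return 0;
}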



* Re: [PATCH] ring-buffer: Add rb_check_bpage in __rb_allocate_pages
  2020-10-14 15:16 [PATCH] ring-buffer: Add rb_check_bpage in __rb_allocate_pages Qiujun Huang
@ 2020-10-14 15:38 ` Steven Rostedt
  2020-10-14 15:48   ` Qiujun Huang
  0 siblings, 1 reply; 5+ messages in thread
From: Steven Rostedt @ 2020-10-14 15:38 UTC (permalink / raw)
  To: Qiujun Huang; +Cc: mingo, linux-kernel

On Wed, 14 Oct 2020 23:16:14 +0800
Qiujun Huang <hqjagain@gmail.com> wrote:

> It may be better to check that each page is 4-byte aligned, since the
> 2 least significant bits of the address will be used as flags.
> 
> Signed-off-by: Qiujun Huang <hqjagain@gmail.com>
> ---
>  kernel/trace/ring_buffer.c | 11 +++++++----
>  1 file changed, 7 insertions(+), 4 deletions(-)
> 
> diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
> index 93ef0ab6ea20..9dec7d58b177 100644
> --- a/kernel/trace/ring_buffer.c
> +++ b/kernel/trace/ring_buffer.c
> @@ -1420,7 +1420,8 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
>  	return 0;
>  }
>  
> -static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
> +static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
> +		long nr_pages, struct list_head *pages, int cpu)
>  {
>  	struct buffer_page *bpage, *tmp;
>  	bool user_thread = current->mm != NULL;
> @@ -1464,6 +1465,8 @@ static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
>  		if (!bpage)
>  			goto free_pages;
>  
> +		rb_check_bpage(cpu_buffer, bpage);
> +
>  

Why add it here, and not just add this check to the scan in
rb_check_pages()?

-- Steve


* Re: [PATCH] ring-buffer: Add rb_check_bpage in __rb_allocate_pages
  2020-10-14 15:38 ` Steven Rostedt
@ 2020-10-14 15:48   ` Qiujun Huang
  2020-10-14 16:11     ` Steven Rostedt
  0 siblings, 1 reply; 5+ messages in thread
From: Qiujun Huang @ 2020-10-14 15:48 UTC (permalink / raw)
  To: Steven Rostedt; +Cc: mingo, LKML

On Wed, Oct 14, 2020 at 11:38 PM Steven Rostedt <rostedt@goodmis.org> wrote:
>
> On Wed, 14 Oct 2020 23:16:14 +0800
> Qiujun Huang <hqjagain@gmail.com> wrote:
>
> > It may be better to check that each page is 4-byte aligned, since the
> > 2 least significant bits of the address will be used as flags.
> >
> > Signed-off-by: Qiujun Huang <hqjagain@gmail.com>
> > ---
> >  kernel/trace/ring_buffer.c | 11 +++++++----
> >  1 file changed, 7 insertions(+), 4 deletions(-)
> >
> > diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
> > index 93ef0ab6ea20..9dec7d58b177 100644
> > --- a/kernel/trace/ring_buffer.c
> > +++ b/kernel/trace/ring_buffer.c
> > @@ -1420,7 +1420,8 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
> >       return 0;
> >  }
> >
> > -static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
> > +static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
> > +             long nr_pages, struct list_head *pages, int cpu)
> >  {
> >       struct buffer_page *bpage, *tmp;
> >       bool user_thread = current->mm != NULL;
> > @@ -1464,6 +1465,8 @@ static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
> >               if (!bpage)
> >                       goto free_pages;
> >
> > +             rb_check_bpage(cpu_buffer, bpage);
> > +
> >
>
> Why add it here, and not just add this check to the scan in
> rb_check_pages()?

rb_head_page_deactivate() in rb_check_pages() will clear the 2 LSBs first,
so a misaligned address could no longer be detected there.

>
> -- Steve
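
For reference, that deactivate step strips the flag bits from every list
pointer before rb_check_pages() walks the list. Roughly, as a sketch based
on the mainline helpers (names and bodies may differ slightly in a given
tree):

static void rb_list_head_clear(struct list_head *list)
{
	unsigned long *ptr = (unsigned long *)&list->next;

	/* Drop the HEAD/UPDATE flag bits from the pointer value. */
	*ptr &= ~RB_FLAG_MASK;
}

static void rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Clear the flag bits on every page, so by the time rb_check_pages()
	 * scans the list the 2 LSBs are already gone, and a misaligned
	 * address could not be caught there. */
	rb_list_head_clear(cpu_buffer->pages);

	list_for_each(hd, cpu_buffer->pages)
		rb_list_head_clear(hd);
}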


* Re: [PATCH] ring-buffer: Add rb_check_bpage in __rb_allocate_pages
  2020-10-14 15:48   ` Qiujun Huang
@ 2020-10-14 16:11     ` Steven Rostedt
  2020-10-14 16:32       ` Qiujun Huang
  0 siblings, 1 reply; 5+ messages in thread
From: Steven Rostedt @ 2020-10-14 16:11 UTC (permalink / raw)
  To: Qiujun Huang; +Cc: mingo, LKML

On Wed, 14 Oct 2020 23:48:05 +0800
Qiujun Huang <hqjagain@gmail.com> wrote:

> On Wed, Oct 14, 2020 at 11:38 PM Steven Rostedt <rostedt@goodmis.org> wrote:
> >
> > On Wed, 14 Oct 2020 23:16:14 +0800
> > Qiujun Huang <hqjagain@gmail.com> wrote:
> >  
> > > It may be better to check that each page is 4-byte aligned, since the
> > > 2 least significant bits of the address will be used as flags.
> > >
> > > Signed-off-by: Qiujun Huang <hqjagain@gmail.com>
> > > ---
> > >  kernel/trace/ring_buffer.c | 11 +++++++----
> > >  1 file changed, 7 insertions(+), 4 deletions(-)
> > >
> > > diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
> > > index 93ef0ab6ea20..9dec7d58b177 100644
> > > --- a/kernel/trace/ring_buffer.c
> > > +++ b/kernel/trace/ring_buffer.c
> > > @@ -1420,7 +1420,8 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
> > >       return 0;
> > >  }
> > >
> > > -static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
> > > +static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
> > > +             long nr_pages, struct list_head *pages, int cpu)
> > >  {
> > >       struct buffer_page *bpage, *tmp;
> > >       bool user_thread = current->mm != NULL;
> > > @@ -1464,6 +1465,8 @@ static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
> > >               if (!bpage)
> > >                       goto free_pages;
> > >
> > > +             rb_check_bpage(cpu_buffer, bpage);
> > > +
> > >  
> >
> > Why add it here, and not just add this check to the scan in
> > rb_check_pages()?  
> 
> rb_head_page_deactivate() in rb_check_pages() will clear the 2 LSBs first.
> 

Well, you could just add another scan there, but if you want to do it this
way, then remove passing the int cpu to these functions, and use the
cpu_buffer->cpu, as keeping the cpu is just redundant.

Also, did you see an issue? This check is more of me being paranoid to
make sure we don't crash later. I've honestly never seen it trigger.

-- Steve
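
Concretely, Steve's suggestion amounts to something like the sketch below
(a hypothetical follow-up, not the patch that was merged): drop the now
redundant cpu argument and derive the node from cpu_buffer->cpu inside the
helper.

-static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
-		long nr_pages, struct list_head *pages, int cpu)
+static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
+		long nr_pages, struct list_head *pages)
 {
 ...
-		page = alloc_pages_node(cpu_to_node(cpu), mflags, 0);
+		page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0);
 ...
 }

with the callers then simplifying to, for example:

-	if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages, cpu_buffer->cpu))
+	if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages))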


* Re: [PATCH] ring-buffer: Add rb_check_bpage in __rb_allocate_pages
  2020-10-14 16:11     ` Steven Rostedt
@ 2020-10-14 16:32       ` Qiujun Huang
  0 siblings, 0 replies; 5+ messages in thread
From: Qiujun Huang @ 2020-10-14 16:32 UTC (permalink / raw)
  To: Steven Rostedt; +Cc: mingo, LKML

On Thu, Oct 15, 2020 at 12:11 AM Steven Rostedt <rostedt@goodmis.org> wrote:
>
> On Wed, 14 Oct 2020 23:48:05 +0800
> Qiujun Huang <hqjagain@gmail.com> wrote:
>
> > On Wed, Oct 14, 2020 at 11:38 PM Steven Rostedt <rostedt@goodmis.org> wrote:
> > >
> > > On Wed, 14 Oct 2020 23:16:14 +0800
> > > Qiujun Huang <hqjagain@gmail.com> wrote:
> > >
> > > > It may be better to check that each page is 4-byte aligned, since the
> > > > 2 least significant bits of the address will be used as flags.
> > > >
> > > > Signed-off-by: Qiujun Huang <hqjagain@gmail.com>
> > > > ---
> > > >  kernel/trace/ring_buffer.c | 11 +++++++----
> > > >  1 file changed, 7 insertions(+), 4 deletions(-)
> > > >
> > > > diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
> > > > index 93ef0ab6ea20..9dec7d58b177 100644
> > > > --- a/kernel/trace/ring_buffer.c
> > > > +++ b/kernel/trace/ring_buffer.c
> > > > @@ -1420,7 +1420,8 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
> > > >       return 0;
> > > >  }
> > > >
> > > > -static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
> > > > +static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
> > > > +             long nr_pages, struct list_head *pages, int cpu)
> > > >  {
> > > >       struct buffer_page *bpage, *tmp;
> > > >       bool user_thread = current->mm != NULL;
> > > > @@ -1464,6 +1465,8 @@ static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
> > > >               if (!bpage)
> > > >                       goto free_pages;
> > > >
> > > > +             rb_check_bpage(cpu_buffer, bpage);
> > > > +
> > > >
> > >
> > > Why add it here, and not just add this check to the scan in
> > > rb_check_pages()?
> >
> > rb_head_page_deactivate() in rb_check_pages() will clear the 2 LSBs first.
> >
>
> Well, you could just add another scan there, but if you want to do it this
> way, then remove passing the int cpu to these functions, and use the
> cpu_buffer->cpu, as keeping the cpu is just redundant.
Got it.
>
> Also, did you see an issue? This check is more of me being paranoid to
No, I'm a little paranoid too when following the code :-)
> make sure we don't crash later. I've honestly never seen it trigger.
>
> -- Steve

