From: Masami Hiramatsu <mhiramat@kernel.org>
To: Jeff Xie <xiehuan09@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>,
	mingo@redhat.com, Tom Zanussi <zanussi@kernel.org>,
	linux-kernel@vger.kernel.org
Subject: Re: [RFC][PATCH v6 1/5] trace: Add trace any kernel object
Date: Tue, 21 Dec 2021 16:36:06 +0900
Message-ID: <20211221163606.be53698d89ce08a272810ae1@kernel.org>
In-Reply-To: <CAEr6+EAn3+vWvp46mheO=MTetLyHXy-GDENtN8O6y+5T+Y-N7w@mail.gmail.com>

Hi Jeff,

On Sat, 18 Dec 2021 00:32:57 +0800
Jeff Xie <xiehuan09@gmail.com> wrote:

> > > +struct object_instance {
> > > +     void *object;
> > > +     struct freelist_node free_list;
> > > +     struct list_head active_list;
> > > +};
> > > +
> > > +struct obj_pool {
> > > +     struct freelist_head free_list;
> > > +     struct list_head active_list;
> > > +};
> > > +static struct obj_pool *obj_pool;
> > > +
> > > +static bool object_exist(void *obj)
> > > +{
> > > +     struct object_instance *inst;
> > > +     bool ret = false;
> > > +
> > > +     list_for_each_entry_rcu(inst, &obj_pool->active_list, active_list) {
> > > +             if (inst->object == obj) {
> > > +                     ret = true;
> > > +                     goto out;
> > > +             }
> > > +     }
> > > +out:
> > > +     return ret;
> > > +}
> > > +
> > > +static bool object_empty(void)
> > > +{
> > > +     return list_empty(&obj_pool->active_list);
> > > +}
> > > +
> > > +static void set_trace_object(void *obj)
> > > +{
> > > +     struct freelist_node *fn;
> > > +     struct object_instance *ins;
> > > +     unsigned long flags;
> > > +
> > > +     if (in_nmi())
> > > +             return;
> > > +
> > > +     if (!obj)
> > > +             return;
> > > +
> > > +     if (object_exist(obj))
> > > +             return;
> > > +
> > > +     fn = freelist_try_get(&obj_pool->free_list);
> > > +     if (!fn) {
> > > +             trace_printk("object_pool is full, can't trace object:0x%px\n", obj);
> > > +             return;
> > > +     }
> > > +
> > > +     ins = container_of(fn, struct object_instance, free_list);
> > > +     ins->object = obj;
> > > +
> > > +     raw_spin_lock_irqsave(&object_spin_lock, flags);
> > > +     list_add_rcu(&ins->active_list, &obj_pool->active_list);
> > > +     raw_spin_unlock_irqrestore(&object_spin_lock, flags);
> >
> > Please add a comment explaining why this spinlock is needed here and
> > why the other operations don't need it.
> 
> (This is the only place with write operations, and they must not run concurrently.)
> I agree, I will add it.
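
For example, here is just a sketch of the kind of comment I mean
(wording is up to you); the point is that object_exist() walks the
list locklessly via list_for_each_entry_rcu(), so only writers need
the lock:

	/*
	 * Only this path modifies the active list. Readers walk it
	 * locklessly under RCU and need no lock; concurrent writers,
	 * however, must be serialized by object_spin_lock.
	 */
	raw_spin_lock_irqsave(&object_spin_lock, flags);
	list_add_rcu(&ins->active_list, &obj_pool->active_list);
	raw_spin_unlock_irqrestore(&object_spin_lock, flags);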

BTW, I have a better solution for the object pool. If the pool size is
fixed (and it should be small anyway, to keep the lookup overhead low)
and a spinlock cannot be avoided, it is better to use a fixed-size
array. That makes the implementation much simpler.

static struct object_instance {
	void *obj;	/* trace object */
	/* an offset field will be added in the next patch */
} traced_obj[MAX_TRACE_OBJECT];

static atomic_t num_traced_obj;
static DEFINE_RAW_SPINLOCK(trace_obj_lock);

static void set_trace_object(void *obj)
{
	...

	raw_spin_lock_irqsave(&trace_obj_lock, flags);
	if (atomic_read(&num_traced_obj) == MAX_TRACE_OBJECT)
		goto out;

	traced_obj[atomic_read(&num_traced_obj)].obj = obj;
	/* Make sure the entry is visible before the counter is bumped. */
	smp_wmb();
	atomic_inc(&num_traced_obj);
out:
	raw_spin_unlock_irqrestore(&trace_obj_lock, flags);
}

static bool object_exist(void *obj)
{
	...
	max = atomic_read(&num_traced_obj);
	/* Pairs with the smp_wmb() in set_trace_object(). */
	smp_rmb();
	for (i = 0; i < max; i++) {
		if (traced_obj[i].obj == obj)
			return true;
	}
	return false;
}

static inline void free_object_pool(void)
{
	atomic_set(&num_traced_obj, 0);
	memset(traced_obj, 0, sizeof(traced_obj));
}
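
Note that object_exist() then needs no locking at all: a reader that
sees the incremented num_traced_obj is guaranteed, by the smp_rmb()/
smp_wmb() pairing, to also see the corresponding traced_obj[] entry,
and entries are only ever added, never removed one by one.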

Sorry if I confused you, but I don't think you should spend time on
that unneeded complexity. :)


> > > +static void submit_trace_object(unsigned long ip, unsigned long parent_ip,
> > > +                              unsigned long object)
> > > +{
> > > +
> > > +     struct trace_buffer *buffer;
> > > +     struct ring_buffer_event *event;
> > > +     struct trace_object_entry *entry;
> > > +     int pc;
> > > +
> > > +     pc = preempt_count();
> > > +     event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
> > > +                     TRACE_OBJECT, sizeof(*entry), pc);
> > > +     if (!event)
> > > +             return;
> > > +     entry   = ring_buffer_event_data(event);
> > > +     entry->ip                       = ip;
> > > +     entry->parent_ip                = parent_ip;
> > > +     entry->object                   = object;
> > > +
> > > +     event_trigger_unlock_commit(&event_trace_file, buffer, event,
> > > +             entry, pc);
> > > +}
> > > +
> > > +static void
> > > +trace_object_events_call(unsigned long ip, unsigned long parent_ip,
> > > +             struct ftrace_ops *op, struct ftrace_regs *fregs)
> > > +{
> > > +     struct pt_regs *pt_regs = ftrace_get_regs(fregs);
> > > +     unsigned long obj;
> > > +     long disabled;
> > > +     int cpu, n;
> > > +
> > > +     preempt_disable_notrace();
> > > +
> > > +     cpu = raw_smp_processor_id();
> > > +     disabled = atomic_inc_return(&per_cpu(trace_object_event_disable, cpu));
> >
> > So what is the purpose of this check? (is there any issue if the same
> > cpu reenters here?)
> >
> 
> There may be an interrupt context that preempts it. I am not yet sure
> whether CPU reentry will affect it; I will debug and test it.
> (Referred from function_test_events_call().)

Maybe you can use ftrace_test_recursion_trylock(), as kprobe_ftrace_handler()
does.
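
Roughly like this (untested sketch; the object lookup and event
submission in the middle are elided):

static void
trace_object_events_call(unsigned long ip, unsigned long parent_ip,
		struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	int bit;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;	/* recursion on this context/CPU, bail out */

	/* ... look up the object and call submit_trace_object() ... */

	ftrace_test_recursion_unlock(bit);
}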

Thank you,

-- 
Masami Hiramatsu <mhiramat@kernel.org>
