From: Peter Zijlstra <peterz@infradead.org>
To: linux-kernel@vger.kernel.org
Cc: walken@google.com, dave@stgolabs.net, mingo@kernel.org,
tglx@linutronix.de, oleg@redhat.com, irogers@google.com,
juri.lelli@redhat.com, vincent.guittot@linaro.org,
peterz@infradead.org
Subject: [RFC][PATCH 4/7] rbtree, perf: Use new rbtree helpers
Date: Wed, 29 Apr 2020 17:33:02 +0200 [thread overview]
Message-ID: <20200429153549.191480567@infradead.org> (raw)
In-Reply-To: <20200429153258.563269446@infradead.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
kernel/events/core.c | 178 +++++++++++++++++++++++----------------------------
1 file changed, 81 insertions(+), 97 deletions(-)
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1581,44 +1581,84 @@ static void perf_event_groups_init(struc
* Implements complex key that first sorts by CPU and then by virtual index
* which provides ordering when rotating groups for the same CPU.
*/
-static bool
-perf_event_groups_less(struct perf_event *left, struct perf_event *right)
-{
- if (left->cpu < right->cpu)
- return true;
- if (left->cpu > right->cpu)
- return false;
+static __always_inline int
+perf_event_groups_cmp(const int left_cpu, const struct cgroup *left_cgroup,
+ const u64 left_group_index, const struct perf_event *right)
+{
+ if (left_cpu < right->cpu)
+ return -1;
+ if (left_cpu > right->cpu)
+ return 1;
#ifdef CONFIG_CGROUP_PERF
- if (left->cgrp != right->cgrp) {
- if (!left->cgrp || !left->cgrp->css.cgroup) {
+ {
+ struct cgroup *right_cgroup = right->cgrp ? right->cgrp->css.cgroup : NULL;
+
+ if (left_cgroup != right_cgroup) {
+ if (!left_cgroup) {
/*
* Left has no cgroup but right does, no cgroups come
* first.
*/
- return true;
+ return -1;
}
- if (!right->cgrp || !right->cgrp->css.cgroup) {
+ if (!right_cgroup) {
/*
* Right has no cgroup but left does, no cgroups come
* first.
*/
- return false;
+ return 1;
}
/* Two dissimilar cgroups, order by id. */
- if (left->cgrp->css.cgroup->kn->id < right->cgrp->css.cgroup->kn->id)
- return true;
+ if (cgroup_id(left_cgroup) < cgroup_id(right_cgroup))
+ return -1;
- return false;
+ return 1;
+ }
}
#endif
- if (left->group_index < right->group_index)
- return true;
- if (left->group_index > right->group_index)
- return false;
+ if (left_group_index < right->group_index)
+ return -1;
+ if (left_group_index > right->group_index)
+ return 1;
- return false;
+ return 0;
+}
+
+static inline struct cgroup *event_cgroup(struct perf_event *event)
+{
+ struct cgroup *cgroup = NULL;
+
+#ifdef CONFIG_CGROUP_PERF
+ if (event->cgrp)
+ cgroup = event->cgrp->css.cgroup;
+#endif
+
+ return cgroup;
+}
+
+#define __node_2_pe(node) \
+ rb_entry((node), struct perf_event, group_node)
+
+static inline bool __group_less(struct rb_node *a, const struct rb_node *b)
+{
+ struct perf_event *e = __node_2_pe(a);
+ return perf_event_groups_cmp(e->cpu, event_cgroup(e), e->group_index,
+ __node_2_pe(b)) < 0;
+}
+
+struct __group_key {
+ int cpu;
+ struct cgroup *cgroup;
+};
+
+static inline int __group_cmp(const void *key, const struct rb_node *node)
+{
+ const struct __group_key *a = key;
+ const struct perf_event *b = __node_2_pe(node);
+
+ return perf_event_groups_cmp(a->cpu, a->cgroup, b->group_index, b);
}
/*
@@ -1630,27 +1670,9 @@ static void
perf_event_groups_insert(struct perf_event_groups *groups,
struct perf_event *event)
{
- struct perf_event *node_event;
- struct rb_node *parent;
- struct rb_node **node;
-
event->group_index = ++groups->index;
- node = &groups->tree.rb_node;
- parent = *node;
-
- while (*node) {
- parent = *node;
- node_event = container_of(*node, struct perf_event, group_node);
-
- if (perf_event_groups_less(event, node_event))
- node = &parent->rb_left;
- else
- node = &parent->rb_right;
- }
-
- rb_link_node(&event->group_node, parent, node);
- rb_insert_color(&event->group_node, &groups->tree);
+ rb_add(&groups->tree, &event->group_node, __group_less);
}
/*
@@ -1698,45 +1720,17 @@ static struct perf_event *
perf_event_groups_first(struct perf_event_groups *groups, int cpu,
struct cgroup *cgrp)
{
- struct perf_event *node_event = NULL, *match = NULL;
- struct rb_node *node = groups->tree.rb_node;
-#ifdef CONFIG_CGROUP_PERF
- u64 node_cgrp_id, cgrp_id = 0;
-
- if (cgrp)
- cgrp_id = cgrp->kn->id;
-#endif
-
- while (node) {
- node_event = container_of(node, struct perf_event, group_node);
+ struct __group_key key = {
+ .cpu = cpu,
+ .cgroup = cgrp,
+ };
+ struct rb_node *node;
+
+ node = rb_find_first(&groups->tree, &key, __group_cmp);
+ if (node)
+ return __node_2_pe(node);
- if (cpu < node_event->cpu) {
- node = node->rb_left;
- continue;
- }
- if (cpu > node_event->cpu) {
- node = node->rb_right;
- continue;
- }
-#ifdef CONFIG_CGROUP_PERF
- node_cgrp_id = 0;
- if (node_event->cgrp && node_event->cgrp->css.cgroup)
- node_cgrp_id = node_event->cgrp->css.cgroup->kn->id;
-
- if (cgrp_id < node_cgrp_id) {
- node = node->rb_left;
- continue;
- }
- if (cgrp_id > node_cgrp_id) {
- node = node->rb_right;
- continue;
- }
-#endif
- match = node_event;
- node = node->rb_left;
- }
-
- return match;
+ return NULL;
}
/*
@@ -1745,27 +1739,17 @@ perf_event_groups_first(struct perf_even
static struct perf_event *
perf_event_groups_next(struct perf_event *event)
{
- struct perf_event *next;
-#ifdef CONFIG_CGROUP_PERF
- u64 curr_cgrp_id = 0;
- u64 next_cgrp_id = 0;
-#endif
+ struct __group_key key = {
+ .cpu = event->cpu,
+ .cgroup = event_cgroup(event),
+ };
+ struct rb_node *next;
+
+ next = rb_next_match(&event->group_node, &key, __group_cmp);
+ if (next)
+ return __node_2_pe(next);
- next = rb_entry_safe(rb_next(&event->group_node), typeof(*event), group_node);
- if (next == NULL || next->cpu != event->cpu)
- return NULL;
-
-#ifdef CONFIG_CGROUP_PERF
- if (event->cgrp && event->cgrp->css.cgroup)
- curr_cgrp_id = event->cgrp->css.cgroup->kn->id;
-
- if (next->cgrp && next->cgrp->css.cgroup)
- next_cgrp_id = next->cgrp->css.cgroup->kn->id;
-
- if (curr_cgrp_id != next_cgrp_id)
- return NULL;
-#endif
- return next;
+ return NULL;
}
/*
next prev parent reply other threads:[~2020-04-29 15:37 UTC|newest]
Thread overview: 15+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-04-29 15:32 [RFC][PATCH 0/7] Generic RB-tree helpers Peter Zijlstra
2020-04-29 15:32 ` [RFC][PATCH 1/7] rbtree: Add generic add and find helpers Peter Zijlstra
2020-04-30 1:04 ` Michel Lespinasse
2020-04-30 8:46 ` Peter Zijlstra
2020-04-30 9:26 ` Peter Zijlstra
2020-04-30 7:28 ` Juri Lelli
2020-04-30 7:51 ` Michel Lespinasse
2020-04-30 8:07 ` Juri Lelli
2020-04-30 8:27 ` Peter Zijlstra
2020-04-29 15:33 ` [RFC][PATCH 2/7] rbtree, sched/fair: Use rb_add_cached() Peter Zijlstra
2020-04-29 15:33 ` [RFC][PATCH 3/7] rbtree, sched/deadline: " Peter Zijlstra
2020-04-29 15:33 ` Peter Zijlstra [this message]
2020-04-29 15:33 ` [RFC][PATCH 5/7] rbtree, uprobes: Use rbtree helpers Peter Zijlstra
2020-04-29 15:33 ` [RFC][PATCH 6/7] rbtree, rtmutex: Use rb_add_cached() Peter Zijlstra
2020-04-29 15:33 ` [RFC][PATCH 7/7] rbtree, timerqueue: " Peter Zijlstra
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200429153549.191480567@infradead.org \
--to=peterz@infradead.org \
--cc=dave@stgolabs.net \
--cc=irogers@google.com \
--cc=juri.lelli@redhat.com \
--cc=linux-kernel@vger.kernel.org \
--cc=mingo@kernel.org \
--cc=oleg@redhat.com \
--cc=tglx@linutronix.de \
--cc=vincent.guittot@linaro.org \
--cc=walken@google.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).