From: Ingo Molnar <mingo@elte.hu>
To: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org,
Peter Zijlstra <a.p.zijlstra@chello.nl>,
Thomas Gleixner <tglx@linutronix.de>,
Andrew Morton <akpm@linux-foundation.org>
Subject: [GIT PULL] scheduler fixes
Date: Wed, 20 Jul 2011 23:06:44 +0200
Message-ID: <20110720210644.GA31617@elte.hu>
Linus,
Please pull the latest sched-urgent-for-linus git tree from:
git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git sched-urgent-for-linus
Thanks,
Ingo
------------------>
Peter Zijlstra (3):
sched: Break out cpu_power from the sched_group structure
sched: Allow for overlapping sched_domain spans
sched: Avoid creating superfluous NUMA domains on non-NUMA systems
 include/linux/sched.h   |   14 +++-
 kernel/sched.c          |  189 ++++++++++++++++++++++++++++++++++++++---------
 kernel/sched_fair.c     |   46 ++++++------
 kernel/sched_features.h |    2 +
4 files changed, 190 insertions(+), 61 deletions(-)
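
The main structural change in this series is splitting the per-group cpu_power
fields out of struct sched_group into a separately allocated, refcounted
struct sched_group_power, so the private group lists built for overlapping
(NUMA) domains can share a single power object per CPU span. As a rough
userspace model of that ownership scheme (a sketch only; the
alloc_power/attach_power/detach_power helpers are hypothetical and not part
of the patch):

	#include <stdatomic.h>
	#include <stdlib.h>

	struct sched_group_power {
		atomic_int   ref;
		unsigned int power, power_orig;	/* was cpu_power, cpu_power_orig */
	};

	struct sched_group {
		struct sched_group       *next;	/* circular list */
		atomic_int                ref;
		unsigned int              group_weight;
		struct sched_group_power *sgp;	/* shared, refcounted */
	};

	/* Hypothetical helpers mirroring the patch's refcounting pattern. */
	static struct sched_group_power *alloc_power(void)
	{
		struct sched_group_power *sgp = calloc(1, sizeof(*sgp));

		if (sgp)
			atomic_init(&sgp->ref, 1);	/* initial ref, like atomic_set(..., 1) in get_group() */
		return sgp;
	}

	static void attach_power(struct sched_group *sg, struct sched_group_power *sgp)
	{
		sg->sgp = sgp;
		atomic_fetch_add(&sgp->ref, 1);		/* like atomic_inc(&sg->sgp->ref) */
	}

	static void detach_power(struct sched_group *sg)
	{
		/* fetch_sub returning 1 means we dropped the last reference */
		if (atomic_fetch_sub(&sg->sgp->ref, 1) == 1)
			free(sg->sgp);
		sg->sgp = NULL;
	}

The kernel side does the same inc/dec-and-free dance in
build_overlap_sched_groups() and free_sched_groups() in the patch below.
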
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 496770a..bde99d5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -844,6 +844,7 @@ enum cpu_idle_type {
#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
+#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */
enum powersavings_balance_level {
POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */
@@ -893,16 +894,21 @@ static inline int sd_power_saving_flags(void)
return 0;
}
-struct sched_group {
- struct sched_group *next; /* Must be a circular list */
+struct sched_group_power {
atomic_t ref;
-
/*
* CPU power of this group, SCHED_LOAD_SCALE being max power for a
* single CPU.
*/
- unsigned int cpu_power, cpu_power_orig;
+ unsigned int power, power_orig;
+};
+
+struct sched_group {
+ struct sched_group *next; /* Must be a circular list */
+ atomic_t ref;
+
unsigned int group_weight;
+ struct sched_group_power *sgp;
/*
* The CPUs this group covers.
diff --git a/kernel/sched.c b/kernel/sched.c
index 3dc716f..14168c4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6557,7 +6557,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
break;
}
- if (!group->cpu_power) {
+ if (!group->sgp->power) {
printk(KERN_CONT "\n");
printk(KERN_ERR "ERROR: domain->cpu_power not "
"set\n");
@@ -6581,9 +6581,9 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
printk(KERN_CONT " %s", str);
- if (group->cpu_power != SCHED_POWER_SCALE) {
+ if (group->sgp->power != SCHED_POWER_SCALE) {
printk(KERN_CONT " (cpu_power = %d)",
- group->cpu_power);
+ group->sgp->power);
}
group = group->next;
@@ -6774,11 +6774,39 @@ static struct root_domain *alloc_rootdomain(void)
return rd;
}
+static void free_sched_groups(struct sched_group *sg, int free_sgp)
+{
+ struct sched_group *tmp, *first;
+
+ if (!sg)
+ return;
+
+ first = sg;
+ do {
+ tmp = sg->next;
+
+ if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
+ kfree(sg->sgp);
+
+ kfree(sg);
+ sg = tmp;
+ } while (sg != first);
+}
+
static void free_sched_domain(struct rcu_head *rcu)
{
struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
- if (atomic_dec_and_test(&sd->groups->ref))
+
+ /*
+ * If its an overlapping domain it has private groups, iterate and
+ * nuke them all.
+ */
+ if (sd->flags & SD_OVERLAP) {
+ free_sched_groups(sd->groups, 1);
+ } else if (atomic_dec_and_test(&sd->groups->ref)) {
+ kfree(sd->groups->sgp);
kfree(sd->groups);
+ }
kfree(sd);
}
@@ -6945,6 +6973,7 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
struct sd_data {
struct sched_domain **__percpu sd;
struct sched_group **__percpu sg;
+ struct sched_group_power **__percpu sgp;
};
struct s_data {
@@ -6964,15 +6993,73 @@ struct sched_domain_topology_level;
typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
+#define SDTL_OVERLAP 0x01
+
struct sched_domain_topology_level {
sched_domain_init_f init;
sched_domain_mask_f mask;
+ int flags;
struct sd_data data;
};
-/*
- * Assumes the sched_domain tree is fully constructed
- */
+static int
+build_overlap_sched_groups(struct sched_domain *sd, int cpu)
+{
+ struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
+ const struct cpumask *span = sched_domain_span(sd);
+ struct cpumask *covered = sched_domains_tmpmask;
+ struct sd_data *sdd = sd->private;
+ struct sched_domain *child;
+ int i;
+
+ cpumask_clear(covered);
+
+ for_each_cpu(i, span) {
+ struct cpumask *sg_span;
+
+ if (cpumask_test_cpu(i, covered))
+ continue;
+
+ sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
+ GFP_KERNEL, cpu_to_node(i));
+
+ if (!sg)
+ goto fail;
+
+ sg_span = sched_group_cpus(sg);
+
+ child = *per_cpu_ptr(sdd->sd, i);
+ if (child->child) {
+ child = child->child;
+ cpumask_copy(sg_span, sched_domain_span(child));
+ } else
+ cpumask_set_cpu(i, sg_span);
+
+ cpumask_or(covered, covered, sg_span);
+
+ sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
+ atomic_inc(&sg->sgp->ref);
+
+ if (cpumask_test_cpu(cpu, sg_span))
+ groups = sg;
+
+ if (!first)
+ first = sg;
+ if (last)
+ last->next = sg;
+ last = sg;
+ last->next = first;
+ }
+ sd->groups = groups;
+
+ return 0;
+
+fail:
+ free_sched_groups(first, 0);
+
+ return -ENOMEM;
+}
+
static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
{
struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
@@ -6981,24 +7068,24 @@ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
if (child)
cpu = cpumask_first(sched_domain_span(child));
- if (sg)
+ if (sg) {
*sg = *per_cpu_ptr(sdd->sg, cpu);
+ (*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
+ atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
+ }
return cpu;
}
/*
- * build_sched_groups takes the cpumask we wish to span, and a pointer
- * to a function which identifies what group(along with sched group) a CPU
- * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids
- * (due to the fact that we keep track of groups covered with a struct cpumask).
- *
* build_sched_groups will build a circular linked list of the groups
* covered by the given span, and will set each group's ->cpumask correctly,
* and ->cpu_power to 0.
+ *
+ * Assumes the sched_domain tree is fully constructed
*/
-static void
-build_sched_groups(struct sched_domain *sd)
+static int
+build_sched_groups(struct sched_domain *sd, int cpu)
{
struct sched_group *first = NULL, *last = NULL;
struct sd_data *sdd = sd->private;
@@ -7006,6 +7093,12 @@ build_sched_groups(struct sched_domain *sd)
struct cpumask *covered;
int i;
+ get_group(cpu, sdd, &sd->groups);
+ atomic_inc(&sd->groups->ref);
+
+ if (cpu != cpumask_first(sched_domain_span(sd)))
+ return 0;
+
lockdep_assert_held(&sched_domains_mutex);
covered = sched_domains_tmpmask;
@@ -7020,7 +7113,7 @@ build_sched_groups(struct sched_domain *sd)
continue;
cpumask_clear(sched_group_cpus(sg));
- sg->cpu_power = 0;
+ sg->sgp->power = 0;
for_each_cpu(j, span) {
if (get_group(j, sdd, NULL) != group)
@@ -7037,6 +7130,8 @@ build_sched_groups(struct sched_domain *sd)
last = sg;
}
last->next = first;
+
+ return 0;
}
/*
@@ -7051,12 +7146,17 @@ build_sched_groups(struct sched_domain *sd)
*/
static void init_sched_groups_power(int cpu, struct sched_domain *sd)
{
- WARN_ON(!sd || !sd->groups);
+ struct sched_group *sg = sd->groups;
- if (cpu != group_first_cpu(sd->groups))
- return;
+ WARN_ON(!sd || !sg);
- sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
+ do {
+ sg->group_weight = cpumask_weight(sched_group_cpus(sg));
+ sg = sg->next;
+ } while (sg != sd->groups);
+
+ if (cpu != group_first_cpu(sg))
+ return;
update_group_power(sd, cpu);
}
@@ -7177,15 +7277,15 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
static void claim_allocations(int cpu, struct sched_domain *sd)
{
struct sd_data *sdd = sd->private;
- struct sched_group *sg = sd->groups;
WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
*per_cpu_ptr(sdd->sd, cpu) = NULL;
- if (cpu == cpumask_first(sched_group_cpus(sg))) {
- WARN_ON_ONCE(*per_cpu_ptr(sdd->sg, cpu) != sg);
+ if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
*per_cpu_ptr(sdd->sg, cpu) = NULL;
- }
+
+ if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
+ *per_cpu_ptr(sdd->sgp, cpu) = NULL;
}
#ifdef CONFIG_SCHED_SMT
@@ -7210,7 +7310,7 @@ static struct sched_domain_topology_level default_topology[] = {
#endif
{ sd_init_CPU, cpu_cpu_mask, },
#ifdef CONFIG_NUMA
- { sd_init_NODE, cpu_node_mask, },
+ { sd_init_NODE, cpu_node_mask, SDTL_OVERLAP, },
{ sd_init_ALLNODES, cpu_allnodes_mask, },
#endif
{ NULL, },
@@ -7234,9 +7334,14 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
if (!sdd->sg)
return -ENOMEM;
+ sdd->sgp = alloc_percpu(struct sched_group_power *);
+ if (!sdd->sgp)
+ return -ENOMEM;
+
for_each_cpu(j, cpu_map) {
struct sched_domain *sd;
struct sched_group *sg;
+ struct sched_group_power *sgp;
sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
GFP_KERNEL, cpu_to_node(j));
@@ -7251,6 +7356,13 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
return -ENOMEM;
*per_cpu_ptr(sdd->sg, j) = sg;
+
+ sgp = kzalloc_node(sizeof(struct sched_group_power),
+ GFP_KERNEL, cpu_to_node(j));
+ if (!sgp)
+ return -ENOMEM;
+
+ *per_cpu_ptr(sdd->sgp, j) = sgp;
}
}
@@ -7266,11 +7378,15 @@ static void __sdt_free(const struct cpumask *cpu_map)
struct sd_data *sdd = &tl->data;
for_each_cpu(j, cpu_map) {
- kfree(*per_cpu_ptr(sdd->sd, j));
+ struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
+ if (sd && (sd->flags & SD_OVERLAP))
+ free_sched_groups(sd->groups, 0);
kfree(*per_cpu_ptr(sdd->sg, j));
+ kfree(*per_cpu_ptr(sdd->sgp, j));
}
free_percpu(sdd->sd);
free_percpu(sdd->sg);
+ free_percpu(sdd->sgp);
}
}
@@ -7316,8 +7432,13 @@ static int build_sched_domains(const struct cpumask *cpu_map,
struct sched_domain_topology_level *tl;
sd = NULL;
- for (tl = sched_domain_topology; tl->init; tl++)
+ for (tl = sched_domain_topology; tl->init; tl++) {
sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
+ if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
+ sd->flags |= SD_OVERLAP;
+ if (cpumask_equal(cpu_map, sched_domain_span(sd)))
+ break;
+ }
while (sd->child)
sd = sd->child;
@@ -7329,13 +7450,13 @@ static int build_sched_domains(const struct cpumask *cpu_map,
for_each_cpu(i, cpu_map) {
for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
sd->span_weight = cpumask_weight(sched_domain_span(sd));
- get_group(i, sd->private, &sd->groups);
- atomic_inc(&sd->groups->ref);
-
- if (i != cpumask_first(sched_domain_span(sd)))
- continue;
-
- build_sched_groups(sd);
+ if (sd->flags & SD_OVERLAP) {
+ if (build_overlap_sched_groups(sd, i))
+ goto error;
+ } else {
+ if (build_sched_groups(sd, i))
+ goto error;
+ }
}
}
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 433491c..c768588 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1585,7 +1585,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
}
/* Adjust by relative CPU power of the group */
- avg_load = (avg_load * SCHED_POWER_SCALE) / group->cpu_power;
+ avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
if (local_group) {
this_load = avg_load;
@@ -2631,7 +2631,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
power >>= SCHED_POWER_SHIFT;
}
- sdg->cpu_power_orig = power;
+ sdg->sgp->power_orig = power;
if (sched_feat(ARCH_POWER))
power *= arch_scale_freq_power(sd, cpu);
@@ -2647,7 +2647,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
power = 1;
cpu_rq(cpu)->cpu_power = power;
- sdg->cpu_power = power;
+ sdg->sgp->power = power;
}
static void update_group_power(struct sched_domain *sd, int cpu)
@@ -2665,11 +2665,11 @@ static void update_group_power(struct sched_domain *sd, int cpu)
group = child->groups;
do {
- power += group->cpu_power;
+ power += group->sgp->power;
group = group->next;
} while (group != child->groups);
- sdg->cpu_power = power;
+ sdg->sgp->power = power;
}
/*
@@ -2691,7 +2691,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
/*
* If ~90% of the cpu_power is still there, we're good.
*/
- if (group->cpu_power * 32 > group->cpu_power_orig * 29)
+ if (group->sgp->power * 32 > group->sgp->power_orig * 29)
return 1;
return 0;
@@ -2771,7 +2771,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
}
/* Adjust by relative CPU power of the group */
- sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->cpu_power;
+ sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
/*
* Consider the group unbalanced when the imbalance is larger
@@ -2788,7 +2788,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && max_nr_running > 1)
sgs->group_imb = 1;
- sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power,
+ sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
SCHED_POWER_SCALE);
if (!sgs->group_capacity)
sgs->group_capacity = fix_small_capacity(sd, group);
@@ -2877,7 +2877,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
return;
sds->total_load += sgs.group_load;
- sds->total_pwr += sg->cpu_power;
+ sds->total_pwr += sg->sgp->power;
/*
* In case the child domain prefers tasks go to siblings
@@ -2962,7 +2962,7 @@ static int check_asym_packing(struct sched_domain *sd,
if (this_cpu > busiest_cpu)
return 0;
- *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->cpu_power,
+ *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->sgp->power,
SCHED_POWER_SCALE);
return 1;
}
@@ -2993,7 +2993,7 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
scaled_busy_load_per_task = sds->busiest_load_per_task
* SCHED_POWER_SCALE;
- scaled_busy_load_per_task /= sds->busiest->cpu_power;
+ scaled_busy_load_per_task /= sds->busiest->sgp->power;
if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
(scaled_busy_load_per_task * imbn)) {
@@ -3007,28 +3007,28 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
* moving them.
*/
- pwr_now += sds->busiest->cpu_power *
+ pwr_now += sds->busiest->sgp->power *
min(sds->busiest_load_per_task, sds->max_load);
- pwr_now += sds->this->cpu_power *
+ pwr_now += sds->this->sgp->power *
min(sds->this_load_per_task, sds->this_load);
pwr_now /= SCHED_POWER_SCALE;
/* Amount of load we'd subtract */
tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
- sds->busiest->cpu_power;
+ sds->busiest->sgp->power;
if (sds->max_load > tmp)
- pwr_move += sds->busiest->cpu_power *
+ pwr_move += sds->busiest->sgp->power *
min(sds->busiest_load_per_task, sds->max_load - tmp);
/* Amount of load we'd add */
- if (sds->max_load * sds->busiest->cpu_power <
+ if (sds->max_load * sds->busiest->sgp->power <
sds->busiest_load_per_task * SCHED_POWER_SCALE)
- tmp = (sds->max_load * sds->busiest->cpu_power) /
- sds->this->cpu_power;
+ tmp = (sds->max_load * sds->busiest->sgp->power) /
+ sds->this->sgp->power;
else
tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
- sds->this->cpu_power;
- pwr_move += sds->this->cpu_power *
+ sds->this->sgp->power;
+ pwr_move += sds->this->sgp->power *
min(sds->this_load_per_task, sds->this_load + tmp);
pwr_move /= SCHED_POWER_SCALE;
@@ -3074,7 +3074,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
- load_above_capacity /= sds->busiest->cpu_power;
+ load_above_capacity /= sds->busiest->sgp->power;
}
/*
@@ -3090,8 +3090,8 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
/* How much load to actually move to equalise the imbalance */
- *imbalance = min(max_pull * sds->busiest->cpu_power,
- (sds->avg_load - sds->this_load) * sds->this->cpu_power)
+ *imbalance = min(max_pull * sds->busiest->sgp->power,
+ (sds->avg_load - sds->this_load) * sds->this->sgp->power)
/ SCHED_POWER_SCALE;
/*
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index be40f73..1e7066d 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -70,3 +70,5 @@ SCHED_FEAT(NONIRQ_POWER, 1)
* using the scheduler IPI. Reduces rq->lock contention/bounces.
*/
SCHED_FEAT(TTWU_QUEUE, 1)
+
+SCHED_FEAT(FORCE_SD_OVERLAP, 0)