From: Will Deacon <will@kernel.org>
To: linux-arm-kernel@lists.infradead.org
Cc: linux-arch@vger.kernel.org, linux-kernel@vger.kernel.org,
	Will Deacon <will@kernel.org>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Marc Zyngier <maz@kernel.org>,
	Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
	Peter Zijlstra <peterz@infradead.org>,
	Morten Rasmussen <morten.rasmussen@arm.com>,
	Qais Yousef <qais.yousef@arm.com>,
	Suren Baghdasaryan <surenb@google.com>,
	Quentin Perret <qperret@google.com>, Tejun Heo <tj@kernel.org>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Ingo Molnar <mingo@redhat.com>,
	Juri Lelli <juri.lelli@redhat.com>,
	Vincent Guittot <vincent.guittot@linaro.org>,
	"Rafael J. Wysocki" <rjw@rjwysocki.net>,
	Dietmar Eggemann <dietmar.eggemann@arm.com>,
	Daniel Bristot de Oliveira <bristot@redhat.com>,
	Valentin Schneider <valentin.schneider@arm.com>,
	Mark Rutland <mark.rutland@arm.com>,
	kernel-team@android.com,
	Valentin Schneider <Valentin.Schneider@arm.com>
Subject: [PATCH v9 10/20] sched: Introduce task_struct::user_cpus_ptr to track requested affinity
Date: Tue,  8 Jun 2021 19:03:03 +0100
Message-ID: <20210608180313.11502-11-will@kernel.org>
In-Reply-To: <20210608180313.11502-1-will@kernel.org>

In preparation for saving and restoring the user-requested CPU affinity
mask of a task, add a new cpumask_t pointer to 'struct task_struct'.

If the pointer is non-NULL, then the mask is copied across fork() and
freed on task exit.
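
As a rough sketch of how the new field is expected to be used once the
save/restore logic lands later in the series (the caller below is
hypothetical and not part of this patch; only set_cpus_allowed_ptr()
and the new pointer itself are real):

	/*
	 * Hypothetical illustration only: re-apply whatever affinity the
	 * user originally requested, if a mask was ever saved.
	 */
	static int example_restore_user_affinity(struct task_struct *p)
	{
		/* Nothing was saved, so there is nothing to restore. */
		if (!p->user_cpus_ptr)
			return 0;

		/* Hand the saved mask back to the scheduler. */
		return set_cpus_allowed_ptr(p, p->user_cpus_ptr);
	}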

Reviewed-by: Valentin Schneider <Valentin.Schneider@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
---
 include/linux/sched.h | 13 +++++++++++++
 init/init_task.c      |  1 +
 kernel/fork.c         |  2 ++
 kernel/sched/core.c   | 20 ++++++++++++++++++++
 4 files changed, 36 insertions(+)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index d2c881384517..db32d4f7e5b3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -730,6 +730,7 @@ struct task_struct {
 	unsigned int			policy;
 	int				nr_cpus_allowed;
 	const cpumask_t			*cpus_ptr;
+	cpumask_t			*user_cpus_ptr;
 	cpumask_t			cpus_mask;
 	void				*migration_pending;
 #ifdef CONFIG_SMP
@@ -1688,6 +1689,8 @@ extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_
 #ifdef CONFIG_SMP
 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
+extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
+extern void release_user_cpus_ptr(struct task_struct *p);
 #else
 static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
@@ -1698,6 +1701,16 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpuma
 		return -EINVAL;
 	return 0;
 }
+static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node)
+{
+	if (src->user_cpus_ptr)
+		return -EINVAL;
+	return 0;
+}
+static inline void release_user_cpus_ptr(struct task_struct *p)
+{
+	WARN_ON(p->user_cpus_ptr);
+}
 #endif
 
 extern int yield_to(struct task_struct *p, bool preempt);
diff --git a/init/init_task.c b/init/init_task.c
index 8b08c2e19cbb..158c2b1689e1 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -80,6 +80,7 @@ struct task_struct init_task
 	.normal_prio	= MAX_PRIO - 20,
 	.policy		= SCHED_NORMAL,
 	.cpus_ptr	= &init_task.cpus_mask,
+	.user_cpus_ptr	= NULL,
 	.cpus_mask	= CPU_MASK_ALL,
 	.nr_cpus_allowed= NR_CPUS,
 	.mm		= NULL,
diff --git a/kernel/fork.c b/kernel/fork.c
index dc06afd725cb..d3710e7f1686 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -446,6 +446,7 @@ void put_task_stack(struct task_struct *tsk)
 
 void free_task(struct task_struct *tsk)
 {
+	release_user_cpus_ptr(tsk);
 	scs_release(tsk);
 
 #ifndef CONFIG_THREAD_INFO_IN_TASK
@@ -918,6 +919,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 #endif
 	if (orig->cpus_ptr == &orig->cpus_mask)
 		tsk->cpus_ptr = &tsk->cpus_mask;
+	dup_user_cpus_ptr(tsk, orig, node);
 
 	/*
 	 * One for the user space visible state that goes away when reaped.
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 09fe20b3d6bb..f6ea3d7a07f2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2126,6 +2126,26 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 	__do_set_cpus_allowed(p, new_mask, 0);
 }
 
+int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
+		      int node)
+{
+	if (!src->user_cpus_ptr)
+		return 0;
+
+	dst->user_cpus_ptr = kmalloc_node(cpumask_size(), GFP_KERNEL, node);
+	if (!dst->user_cpus_ptr)
+		return -ENOMEM;
+
+	cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
+	return 0;
+}
+
+void release_user_cpus_ptr(struct task_struct *p)
+{
+	kfree(p->user_cpus_ptr);
+	p->user_cpus_ptr = NULL;
+}
+
 /*
  * This function is wildly self concurrent; here be dragons.
  *
-- 
2.32.0.rc1.229.g3e70b5a671-goog


Thread overview: 50+ messages
2021-06-08 18:02 [PATCH v9 00/20] Add support for 32-bit tasks on asymmetric AArch32 systems Will Deacon
2021-06-08 18:02 ` [PATCH v9 01/20] arm64: cpuinfo: Split AArch32 registers out into a separate struct Will Deacon
2021-06-08 18:02 ` [PATCH v9 02/20] arm64: Allow mismatched 32-bit EL0 support Will Deacon
2021-06-08 18:02 ` [PATCH v9 03/20] KVM: arm64: Kill 32-bit vCPUs on systems with mismatched " Will Deacon
2021-06-08 18:02 ` [PATCH v9 04/20] arm64: Kill 32-bit applications scheduled on 64-bit-only CPUs Will Deacon
2021-06-08 18:02 ` [PATCH v9 05/20] sched: Introduce task_cpu_possible_mask() to limit fallback rq selection Will Deacon
2021-06-08 18:02 ` [PATCH v9 06/20] cpuset: Don't use the cpu_possible_mask as a last resort for cgroup v1 Will Deacon
2021-06-10 12:59   ` Valentin Schneider
2021-06-08 18:03 ` [PATCH v9 07/20] cpuset: Honour task_cpu_possible_mask() in guarantee_online_cpus() Will Deacon
2021-06-08 18:03 ` [PATCH v9 08/20] cpuset: Cleanup cpuset_cpus_allowed_fallback() use in select_fallback_rq() Will Deacon
2021-06-10 12:59   ` Valentin Schneider
2021-06-08 18:03 ` [PATCH v9 09/20] sched: Reject CPU affinity changes based on task_cpu_possible_mask() Will Deacon
2021-06-08 18:03 ` [PATCH v9 10/20] sched: Introduce task_struct::user_cpus_ptr to track requested affinity Will Deacon [this message]
2021-06-08 18:03 ` [PATCH v9 11/20] sched: Split the guts of sched_setaffinity() into a helper function Will Deacon
2021-06-08 18:03 ` [PATCH v9 12/20] sched: Allow task CPU affinity to be restricted on asymmetric systems Will Deacon
2021-06-10 13:00   ` Valentin Schneider
2021-06-08 18:03 ` [PATCH v9 13/20] sched: Introduce dl_task_check_affinity() to check proposed affinity Will Deacon
2021-06-08 18:03 ` [PATCH v9 14/20] arm64: Implement task_cpu_possible_mask() Will Deacon
2021-06-08 18:03 ` [PATCH v9 15/20] arm64: exec: Adjust affinity for compat tasks with mismatched 32-bit EL0 Will Deacon
2021-06-08 18:03 ` [PATCH v9 16/20] arm64: Prevent offlining first CPU with 32-bit EL0 on mismatched system Will Deacon
2021-06-08 18:03 ` [PATCH v9 17/20] arm64: Advertise CPUs capable of running 32-bit applications in sysfs Will Deacon
2021-06-08 18:03 ` [PATCH v9 18/20] arm64: Hook up cmdline parameter to allow mismatched 32-bit EL0 Will Deacon
2021-06-08 18:03 ` [PATCH v9 19/20] arm64: Remove logic to kill 32-bit tasks on 64-bit-only cores Will Deacon
2021-06-08 18:03 ` [PATCH v9 20/20] Documentation: arm64: describe asymmetric 32-bit support Will Deacon
2021-06-11 16:15 ` [PATCH v9 00/20] Add support for 32-bit tasks on asymmetric AArch32 systems Will Deacon
