linux-kernel.vger.kernel.org archive mirror
From: James Morse <james.morse@arm.com>
To: x86@kernel.org, linux-kernel@vger.kernel.org
Cc: Fenghua Yu <fenghua.yu@intel.com>,
	Reinette Chatre <reinette.chatre@intel.com>,
	Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
	H Peter Anvin <hpa@zytor.com>, Babu Moger <Babu.Moger@amd.com>,
	James Morse <james.morse@arm.com>,
	shameerali.kolothum.thodi@huawei.com,
	D Scott Phillips OS <scott@os.amperecomputing.com>,
	carl@os.amperecomputing.com, lcherian@marvell.com,
	bobo.shaobowang@huawei.com, tan.shaopeng@fujitsu.com,
	xingxin.hx@openanolis.org, baolin.wang@linux.alibaba.com,
	Jamie Iles <quic_jiles@quicinc.com>,
	Xin Hao <xhao@linux.alibaba.com>,
	peternewman@google.com
Subject: [PATCH v3 18/19] x86/resctrl: Add cpu offline callback for resctrl work
Date: Mon, 20 Mar 2023 17:26:19 +0000	[thread overview]
Message-ID: <20230320172620.18254-19-james.morse@arm.com> (raw)
In-Reply-To: <20230320172620.18254-1-james.morse@arm.com>

The resctrl architecture-specific code may need to free a domain when
a CPU goes offline; it also needs to reset the CPU's PQR_ASSOC register.
The resctrl filesystem code needs to move the overflow and limbo work
to run on a different CPU, and to clear this CPU from the cpu_mask of
control and monitor groups.

Currently this is all done in core.c and called from
resctrl_offline_cpu(), making the split between architecture and
filesystem code unclear.

Move the filesystem work into a filesystem helper called
resctrl_offline_cpu(), and rename the one in core.c to
resctrl_arch_offline_cpu().
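
For orientation, a condensed view of the offline path this patch ends
up with (mirroring the hunks below, with error paths and details
trimmed):

	static int resctrl_arch_offline_cpu(unsigned int cpu)
	{
		struct rdt_resource *r;

		mutex_lock(&rdtgroup_mutex);
		/* Filesystem: move overflow/limbo work, clear cpu_mask bits */
		resctrl_offline_cpu(cpu);

		/* Architecture: remove the CPU from each resource's domains */
		for_each_capable_rdt_resource(r)
			domain_remove_cpu(cpu, r);
		/* Architecture: reset this CPU's PQR_ASSOC register */
		clear_closid_rmid(cpu);
		mutex_unlock(&rdtgroup_mutex);

		return 0;
	}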

The rdtgroup_mutex is unlocked and locked again in the call in
preparation for changing the locking rules for the architecture
code.

resctrl_offline_cpu() is called before any of the resources or domains
are updated, and makes use of the exclude_cpu feature that was
previously added.
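
As a rough illustration of what the exclude_cpu argument is for (the
helper name below is made up for this sketch; the series' real helper
is housekeeping-aware), the deferred work is expected to be moved to
any CPU in the domain other than the one going offline:

	/*
	 * Sketch only: pick a CPU for the limbo/overflow work, avoiding
	 * @exclude_cpu, which is on its way offline.
	 */
	static unsigned int pick_mon_cpu(struct rdt_domain *d, int exclude_cpu)
	{
		if (exclude_cpu == RESCTRL_PICK_ANY_CPU)
			return cpumask_any(&d->cpu_mask);

		return cpumask_any_but(&d->cpu_mask, exclude_cpu);
	}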

Tested-by: Shaopeng Tan <tan.shaopeng@fujitsu.com>
Signed-off-by: James Morse <james.morse@arm.com>
---
 arch/x86/kernel/cpu/resctrl/core.c     | 41 ++++----------------------
 arch/x86/kernel/cpu/resctrl/rdtgroup.c | 39 ++++++++++++++++++++++++
 include/linux/resctrl.h                |  1 +
 3 files changed, 45 insertions(+), 36 deletions(-)

diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index aafe4b74587c..4e5fc89dab6d 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -578,22 +578,6 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
 
 		return;
 	}
-
-	if (r == &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl) {
-		if (is_mbm_enabled() && cpu == d->mbm_work_cpu) {
-			cancel_delayed_work(&d->mbm_over);
-			/*
-			 * exclude_cpu=-1 as this CPU has already been removed
-			 * by cpumask_clear_cpu()d
-			 */
-			mbm_setup_overflow_handler(d, 0, RESCTRL_PICK_ANY_CPU);
-		}
-		if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu &&
-		    has_busy_rmid(r, d)) {
-			cancel_delayed_work(&d->cqm_limbo);
-			cqm_setup_limbo_handler(d, 0, RESCTRL_PICK_ANY_CPU);
-		}
-	}
 }
 
 static void clear_closid_rmid(int cpu)
@@ -623,31 +607,15 @@ static int resctrl_arch_online_cpu(unsigned int cpu)
 	return err;
 }
 
-static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
+static int resctrl_arch_offline_cpu(unsigned int cpu)
 {
-	struct rdtgroup *cr;
-
-	list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
-		if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) {
-			break;
-		}
-	}
-}
-
-static int resctrl_offline_cpu(unsigned int cpu)
-{
-	struct rdtgroup *rdtgrp;
 	struct rdt_resource *r;
 
 	mutex_lock(&rdtgroup_mutex);
+	resctrl_offline_cpu(cpu);
+
 	for_each_capable_rdt_resource(r)
 		domain_remove_cpu(cpu, r);
-	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
-		if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
-			clear_childcpus(rdtgrp, cpu);
-			break;
-		}
-	}
 	clear_closid_rmid(cpu);
 	mutex_unlock(&rdtgroup_mutex);
 
@@ -970,7 +938,8 @@ static int __init resctrl_late_init(void)
 
 	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
 				  "x86/resctrl/cat:online:",
-				  resctrl_arch_online_cpu, resctrl_offline_cpu);
+				  resctrl_arch_online_cpu,
+				  resctrl_arch_offline_cpu);
 	if (state < 0)
 		return state;
 
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index bf206bdb21ee..c27ec56c6c60 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -3710,6 +3710,45 @@ int resctrl_online_cpu(unsigned int cpu)
 	return 0;
 }
 
+static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
+{
+	struct rdtgroup *cr;
+
+	list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
+		if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask))
+			break;
+	}
+}
+
+void resctrl_offline_cpu(unsigned int cpu)
+{
+	struct rdt_domain *d;
+	struct rdtgroup *rdtgrp;
+	struct rdt_resource *l3 = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+
+	lockdep_assert_held(&rdtgroup_mutex);
+
+	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
+		if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
+			clear_childcpus(rdtgrp, cpu);
+			break;
+		}
+	}
+
+	d = get_domain_from_cpu(cpu, l3);
+	if (d) {
+		if (is_mbm_enabled() && cpu == d->mbm_work_cpu) {
+			cancel_delayed_work(&d->mbm_over);
+			mbm_setup_overflow_handler(d, 0, cpu);
+		}
+		if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu &&
+		    has_busy_rmid(l3, d)) {
+			cancel_delayed_work(&d->cqm_limbo);
+			cqm_setup_limbo_handler(d, 0, cpu);
+		}
+	}
+}
+
 /*
  * rdtgroup_init - rdtgroup initialization
  *
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
index 3ea7d618f33f..f053527aaa5b 100644
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -226,6 +226,7 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
 int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d);
 void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d);
 int resctrl_online_cpu(unsigned int cpu);
+void resctrl_offline_cpu(unsigned int cpu);
 
 /**
  * resctrl_arch_rmid_read() - Read the eventid counter corresponding to rmid
-- 
2.39.2


Thread overview: 70+ messages
2023-03-20 17:26 [PATCH v3 00/19] x86/resctrl: monitored closid+rmid together, separate arch/fs locking James Morse
2023-03-20 17:26 ` [PATCH v3 01/19] x86/resctrl: Track the closid with the rmid James Morse
2023-03-20 17:26 ` [PATCH v3 02/19] x86/resctrl: Access per-rmid structures by index James Morse
2023-03-21 10:57   ` Ilpo Järvinen
2023-03-31 23:19   ` Reinette Chatre
2023-04-24 13:06   ` Peter Newman
2023-05-25 17:32     ` James Morse
2023-03-20 17:26 ` [PATCH v3 03/19] x86/resctrl: Create helper for RMID allocation and mondata dir creation James Morse
2023-03-21 11:05   ` Ilpo Järvinen
2023-03-31 23:20   ` Reinette Chatre
2023-03-20 17:26 ` [PATCH v3 04/19] x86/resctrl: Move rmid allocation out of mkdir_rdt_prepare() James Morse
2023-03-20 17:26 ` [PATCH v3 05/19] x86/resctrl: Allow RMID allocation to be scoped by CLOSID James Morse
2023-03-21 11:29   ` Ilpo Järvinen
2023-03-20 17:26 ` [PATCH v3 06/19] x86/resctrl: Allow the allocator to check if a CLOSID can allocate clean RMID James Morse
2023-03-31 23:21   ` Reinette Chatre
2023-04-27 14:09     ` James Morse
2023-03-20 17:26 ` [PATCH v3 07/19] x86/resctrl: Move CLOSID/RMID matching and setting to use helpers James Morse
2023-03-20 17:26 ` [PATCH v3 08/19] x86/resctrl: Add cpumask_any_housekeeping() for limbo/overflow James Morse
2023-03-21 13:21   ` Ilpo Järvinen
2023-04-27 14:09     ` James Morse
2023-03-21 15:14   ` Ilpo Järvinen
2023-04-27 14:09     ` James Morse
2023-04-27 14:25       ` Ilpo Järvinen
2023-05-25 17:32         ` James Morse
2023-03-31 23:24   ` Reinette Chatre
2023-04-27 14:10     ` James Morse
2023-04-27 23:36       ` Reinette Chatre
2023-05-25 17:32         ` James Morse
2023-03-20 17:26 ` [PATCH v3 09/19] x86/resctrl: Queue mon_event_read() instead of sending an IPI James Morse
2023-03-22 14:07   ` Peter Newman
2023-03-23  9:09     ` Peter Newman
2023-04-27 14:12       ` James Morse
2023-04-27 14:11     ` James Morse
2023-03-31 23:25   ` Reinette Chatre
2023-04-27 14:12     ` James Morse
2023-03-20 17:26 ` [PATCH v3 10/19] x86/resctrl: Allow resctrl_arch_rmid_read() to sleep James Morse
2023-03-31 23:26   ` Reinette Chatre
2023-04-27 14:12     ` James Morse
2023-03-20 17:26 ` [PATCH v3 11/19] x86/resctrl: Allow arch to allocate memory needed in resctrl_arch_rmid_read() James Morse
2023-03-31 23:27   ` Reinette Chatre
2023-04-27 14:19     ` James Morse
2023-04-27 23:40       ` Reinette Chatre
2023-05-25 17:31         ` James Morse
2023-03-20 17:26 ` [PATCH v3 12/19] x86/resctrl: Make resctrl_mounted checks explicit James Morse
2023-03-31 23:28   ` Reinette Chatre
2023-04-27 14:19     ` James Morse
2023-04-27 23:37       ` Reinette Chatre
2023-05-25 17:31         ` James Morse
2023-03-20 17:26 ` [PATCH v3 13/19] x86/resctrl: Move alloc/mon static keys into helpers James Morse
2023-03-20 17:26 ` [PATCH v3 14/19] x86/resctrl: Make rdt_enable_key the arch's decision to switch James Morse
2023-03-20 17:26 ` [PATCH v3 15/19] x86/resctrl: Add helpers for system wide mon/alloc capable James Morse
2023-03-31 23:29   ` Reinette Chatre
2023-04-27 14:19     ` James Morse
2023-03-20 17:26 ` [PATCH v3 16/19] x86/resctrl: Add cpu online callback for resctrl work James Morse
2023-03-31 23:29   ` Reinette Chatre
2023-03-20 17:26 ` [PATCH v3 17/19] x86/resctrl: Allow overflow/limbo handlers to be scheduled on any-but cpu James Morse
2023-03-21 15:12   ` Ilpo Järvinen
2023-03-21 15:25     ` Ilpo Järvinen
2023-04-27 14:20       ` James Morse
2023-03-20 17:26 ` James Morse [this message]
2023-03-21 15:32   ` [PATCH v3 18/19] x86/resctrl: Add cpu offline callback for resctrl work Ilpo Järvinen
2023-04-27 14:20     ` James Morse
2023-04-27 14:51       ` Ilpo Järvinen
2023-04-05 23:48   ` Reinette Chatre
2023-04-27 14:20     ` James Morse
2023-03-20 17:26 ` [PATCH v3 19/19] x86/resctrl: Separate arch and fs resctrl locks James Morse
2023-05-23 17:14 ` [PATCH v3 00/19] x86/resctrl: monitored closid+rmid together, separate arch/fs locking Tony Luck
2023-05-25 17:31   ` James Morse
2023-05-25 21:00     ` Tony Luck
2023-05-28 20:52       ` Drew Fustini
