From: "Moger, Babu" <Babu.Moger@amd.com>
To: "tglx@linutronix.de" <tglx@linutronix.de>,
	"mingo@redhat.com" <mingo@redhat.com>,
	"hpa@zytor.com" <hpa@zytor.com>,
	"fenghua.yu@intel.com" <fenghua.yu@intel.com>,
	"reinette.chatre@intel.com" <reinette.chatre@intel.com>,
	"vikas.shivappa@linux.intel.com" <vikas.shivappa@linux.intel.com>,
	"tony.luck@intel.com" <tony.luck@intel.com>
Cc: "x86@kernel.org" <x86@kernel.org>,
	"peterz@infradead.org" <peterz@infradead.org>,
	"Moger, Babu" <Babu.Moger@amd.com>,
	"pombredanne@nexb.com" <pombredanne@nexb.com>,
	"gregkh@linuxfoundation.org" <gregkh@linuxfoundation.org>,
	"kstewart@linuxfoundation.org" <kstewart@linuxfoundation.org>,
	"bp@suse.de" <bp@suse.de>,
	"rafael.j.wysocki@intel.com" <rafael.j.wysocki@intel.com>,
	"ak@linux.intel.com" <ak@linux.intel.com>,
	"kirill.shutemov@linux.intel.com"
	<kirill.shutemov@linux.intel.com>,
	"xiaochen.shen@intel.com" <xiaochen.shen@intel.com>,
	"colin.king@canonical.com" <colin.king@canonical.com>,
	"Hurwitz, Sherry" <sherry.hurwitz@amd.com>,
	"Lendacky, Thomas" <Thomas.Lendacky@amd.com>,
	"pbonzini@redhat.com" <pbonzini@redhat.com>,
	"dwmw@amazon.co.uk" <dwmw@amazon.co.uk>,
	"luto@kernel.org" <luto@kernel.org>,
	"jroedel@suse.de" <jroedel@suse.de>,
	"jannh@google.com" <jannh@google.com>,
	"dima@arista.com" <dima@arista.com>,
	"jpoimboe@redhat.com" <jpoimboe@redhat.com>,
	"vkuznets@redhat.com" <vkuznets@redhat.com>,
	"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>
Subject: [RFC PATCH 02/10] arch/x86: Rename the RDT functions and definitions
Date: Mon, 24 Sep 2018 19:18:58 +0000	[thread overview]
Message-ID: <20180924191841.29111-3-babu.moger@amd.com> (raw)
In-Reply-To: <20180924191841.29111-1-babu.moger@amd.com>

As AMD is starting to support RDT (or QoS) features, rename the Intel-prefixed
RDT functions and definitions to more generic names.

Signed-off-by: Babu Moger <babu.moger@amd.com>
---
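
For quick orientation: the part of this rename that is visible outside the RDT
files themselves is the context-switch hook. As a minimal illustration, taken
from the diff below, the hook keeps its static-key guard and only loses the
"intel_" prefix:

	/* arch/x86/include/asm/rdt_sched.h, after this patch */
	static inline void rdt_sched_in(void)		/* was intel_rdt_sched_in() */
	{
		if (static_branch_likely(&rdt_enable_key))
			__rdt_sched_in();		/* was __intel_rdt_sched_in() */
	}

The __switch_to() callers in process_32.c and process_64.c are updated to
match; no functional change is intended.
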
 arch/x86/include/asm/rdt_sched.h   | 22 +++++++++++-----------
 arch/x86/kernel/cpu/rdt.c          | 24 ++++++++++++------------
 arch/x86/kernel/cpu/rdt.h          |  8 ++++----
 arch/x86/kernel/cpu/rdt_monitor.c  | 10 +++++-----
 arch/x86/kernel/cpu/rdt_rdtgroup.c | 10 +++++-----
 arch/x86/kernel/process_32.c       |  2 +-
 arch/x86/kernel/process_64.c       |  2 +-
 7 files changed, 39 insertions(+), 39 deletions(-)
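
A rename like this is largely mechanical. As a rough sketch (not part of the
patch; the patterns are illustrative and GNU sed is assumed), the bulk of it
can be generated and then checked for stale users with:

	# rewrite the renamed identifiers in place
	git grep -lzE 'intel_(rdt_|pqr_state|cqm_threshold)' -- arch/x86 | \
		xargs -0 sed -i -e 's/intel_rdt_/rdt_/g' \
			-e 's/intel_pqr_state/rdt_pqr_state/g' \
			-e 's/intel_cqm_threshold/rdt_cqm_threshold/g'
	# verify: should print nothing once the rename (and the header guards) is done
	git grep -nE 'intel_(rdt_|pqr_state|cqm_threshold)' -- arch/x86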

diff --git a/arch/x86/include/asm/rdt_sched.h b/arch/x86/include/asm/rdt_sched.h
index 9acb06b6f81e..666bf9acb41d 100644
--- a/arch/x86/include/asm/rdt_sched.h
+++ b/arch/x86/include/asm/rdt_sched.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_INTEL_RDT_SCHED_H
-#define _ASM_X86_INTEL_RDT_SCHED_H
+#ifndef _ASM_X86_RDT_SCHED_H
+#define _ASM_X86_RDT_SCHED_H
 
 #ifdef CONFIG_INTEL_RDT
 
@@ -24,21 +24,21 @@
  * The cache also helps to avoid pointless updates if the value does
  * not change.
  */
-struct intel_pqr_state {
+struct rdt_pqr_state {
 	u32			cur_rmid;
 	u32			cur_closid;
 	u32			default_rmid;
 	u32			default_closid;
 };
 
-DECLARE_PER_CPU(struct intel_pqr_state, pqr_state);
+DECLARE_PER_CPU(struct rdt_pqr_state, pqr_state);
 
 DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
 DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
 DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
 
 /*
- * __intel_rdt_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
+ * __rdt_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
  *
  * Following considerations are made so that this has minimal impact
  * on scheduler hot path:
@@ -51,9 +51,9 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
  *   simple as possible.
  * Must be called with preemption disabled.
  */
-static void __intel_rdt_sched_in(void)
+static void __rdt_sched_in(void)
 {
-	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+	struct rdt_pqr_state *state = this_cpu_ptr(&pqr_state);
 	u32 closid = state->default_closid;
 	u32 rmid = state->default_rmid;
 
@@ -78,16 +78,16 @@ static void __intel_rdt_sched_in(void)
 	}
 }
 
-static inline void intel_rdt_sched_in(void)
+static inline void rdt_sched_in(void)
 {
 	if (static_branch_likely(&rdt_enable_key))
-		__intel_rdt_sched_in();
+		__rdt_sched_in();
 }
 
 #else
 
-static inline void intel_rdt_sched_in(void) {}
+static inline void rdt_sched_in(void) {}
 
 #endif /* CONFIG_INTEL_RDT */
 
-#endif /* _ASM_X86_INTEL_RDT_SCHED_H */
+#endif /* _ASM_X86_RDT_SCHED_H */
diff --git a/arch/x86/kernel/cpu/rdt.c b/arch/x86/kernel/cpu/rdt.c
index 28d6cd254ba9..b361c63170d7 100644
--- a/arch/x86/kernel/cpu/rdt.c
+++ b/arch/x86/kernel/cpu/rdt.c
@@ -40,12 +40,12 @@
 DEFINE_MUTEX(rdtgroup_mutex);
 
 /*
- * The cached intel_pqr_state is strictly per CPU and can never be
+ * The cached rdt_pqr_state is strictly per CPU and can never be
  * updated from a remote CPU. Functions which modify the state
  * are called with interrupts disabled and no preemption, which
  * is sufficient for the protection.
  */
-DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
+DEFINE_PER_CPU(struct rdt_pqr_state, pqr_state);
 
 /*
  * Used to store the max resource name width and max resource data width
@@ -634,7 +634,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
 
 static void clear_closid_rmid(int cpu)
 {
-	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+	struct rdt_pqr_state *state = this_cpu_ptr(&pqr_state);
 
 	state->default_closid = 0;
 	state->default_rmid = 0;
@@ -643,7 +643,7 @@ static void clear_closid_rmid(int cpu)
 	wrmsr(IA32_PQR_ASSOC, 0, 0);
 }
 
-static int intel_rdt_online_cpu(unsigned int cpu)
+static int rdt_online_cpu(unsigned int cpu)
 {
 	struct rdt_resource *r;
 
@@ -669,7 +669,7 @@ static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
 	}
 }
 
-static int intel_rdt_offline_cpu(unsigned int cpu)
+static int rdt_offline_cpu(unsigned int cpu)
 {
 	struct rdtgroup *rdtgrp;
 	struct rdt_resource *r;
@@ -861,7 +861,7 @@ static __init bool get_rdt_resources(void)
 
 static enum cpuhp_state rdt_online;
 
-static int __init intel_rdt_late_init(void)
+static int __init rdt_late_init(void)
 {
 	struct rdt_resource *r;
 	int state, ret;
@@ -873,7 +873,7 @@ static int __init intel_rdt_late_init(void)
 
 	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
 				  "x86/rdt/cat:online:",
-				  intel_rdt_online_cpu, intel_rdt_offline_cpu);
+				  rdt_online_cpu, rdt_offline_cpu);
 	if (state < 0)
 		return state;
 
@@ -885,20 +885,20 @@ static int __init intel_rdt_late_init(void)
 	rdt_online = state;
 
 	for_each_alloc_capable_rdt_resource(r)
-		pr_info("Intel RDT %s allocation detected\n", r->name);
+		pr_info("RDT %s allocation detected\n", r->name);
 
 	for_each_mon_capable_rdt_resource(r)
-		pr_info("Intel RDT %s monitoring detected\n", r->name);
+		pr_info("RDT %s monitoring detected\n", r->name);
 
 	return 0;
 }
 
-late_initcall(intel_rdt_late_init);
+late_initcall(rdt_late_init);
 
-static void __exit intel_rdt_exit(void)
+static void __exit rdt_exit(void)
 {
 	cpuhp_remove_state(rdt_online);
 	rdtgroup_exit();
 }
 
-__exitcall(intel_rdt_exit);
+__exitcall(rdt_exit);
diff --git a/arch/x86/kernel/cpu/rdt.h b/arch/x86/kernel/cpu/rdt.h
index 4e588f36228f..c15417a6b1af 100644
--- a/arch/x86/kernel/cpu/rdt.h
+++ b/arch/x86/kernel/cpu/rdt.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_INTEL_RDT_H
-#define _ASM_X86_INTEL_RDT_H
+#ifndef _ASM_X86_RDT_H
+#define _ASM_X86_RDT_H
 
 #include <linux/sched.h>
 #include <linux/kernfs.h>
@@ -69,7 +69,7 @@ struct rmid_read {
 	u64			val;
 };
 
-extern unsigned int intel_cqm_threshold;
+extern unsigned int rdt_cqm_threshold;
 extern bool rdt_alloc_capable;
 extern bool rdt_mon_capable;
 extern unsigned int rdt_mon_features;
@@ -559,4 +559,4 @@ void cqm_handle_limbo(struct work_struct *work);
 bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d);
 void __check_limbo(struct rdt_domain *d, bool force_free);
 
-#endif /* _ASM_X86_INTEL_RDT_H */
+#endif /* _ASM_X86_RDT_H */
diff --git a/arch/x86/kernel/cpu/rdt_monitor.c b/arch/x86/kernel/cpu/rdt_monitor.c
index 2898a61cbdd9..577514cd4a71 100644
--- a/arch/x86/kernel/cpu/rdt_monitor.c
+++ b/arch/x86/kernel/cpu/rdt_monitor.c
@@ -73,7 +73,7 @@ unsigned int rdt_mon_features;
  * This is the threshold cache occupancy at which we will consider an
  * RMID available for re-allocation.
  */
-unsigned int intel_cqm_threshold;
+unsigned int rdt_cqm_threshold;
 
 static inline struct rmid_entry *__rmid_entry(u32 rmid)
 {
@@ -107,7 +107,7 @@ static bool rmid_dirty(struct rmid_entry *entry)
 {
 	u64 val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
 
-	return val >= intel_cqm_threshold;
+	return val >= rdt_cqm_threshold;
 }
 
 /*
@@ -187,7 +187,7 @@ static void add_rmid_to_limbo(struct rmid_entry *entry)
 	list_for_each_entry(d, &r->domains, list) {
 		if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
 			val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
-			if (val <= intel_cqm_threshold)
+			if (val <= rdt_cqm_threshold)
 				continue;
 		}
 
@@ -637,10 +637,10 @@ int rdt_get_mon_l3_config(struct rdt_resource *r)
 	 *
 	 * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
 	 */
-	intel_cqm_threshold = boot_cpu_data.x86_cache_size * 1024 / r->num_rmid;
+	rdt_cqm_threshold = boot_cpu_data.x86_cache_size * 1024 / r->num_rmid;
 
 	/* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
-	intel_cqm_threshold /= r->mon_scale;
+	rdt_cqm_threshold /= r->mon_scale;
 
 	ret = dom_data_init(r);
 	if (ret)
diff --git a/arch/x86/kernel/cpu/rdt_rdtgroup.c b/arch/x86/kernel/cpu/rdt_rdtgroup.c
index c4a226ebf4e9..159b56980715 100644
--- a/arch/x86/kernel/cpu/rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/rdt_rdtgroup.c
@@ -281,7 +281,7 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
 }
 
 /*
- * This is safe against intel_rdt_sched_in() called from __switch_to()
+ * This is safe against rdt_sched_in() called from __switch_to()
  * because __switch_to() is executed with interrupts disabled. A local call
  * from update_closid_rmid() is proteced against __switch_to() because
  * preemption is disabled.
@@ -300,7 +300,7 @@ static void update_cpu_closid_rmid(void *info)
 	 * executing task might have its own closid selected. Just reuse
 	 * the context switch code.
 	 */
-	intel_rdt_sched_in();
+	rdt_sched_in();
 }
 
 /*
@@ -525,7 +525,7 @@ static void move_myself(struct callback_head *head)
 
 	preempt_disable();
 	/* update PQR_ASSOC MSR to make resource group go into effect */
-	intel_rdt_sched_in();
+	rdt_sched_in();
 	preempt_enable();
 
 	kfree(callback);
@@ -909,7 +909,7 @@ static int max_threshold_occ_show(struct kernfs_open_file *of,
 {
 	struct rdt_resource *r = of->kn->parent->priv;
 
-	seq_printf(seq, "%u\n", intel_cqm_threshold * r->mon_scale);
+	seq_printf(seq, "%u\n", rdt_cqm_threshold * r->mon_scale);
 
 	return 0;
 }
@@ -928,7 +928,7 @@ static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
 	if (bytes > (boot_cpu_data.x86_cache_size * 1024))
 		return -EINVAL;
 
-	intel_cqm_threshold = bytes / r->mon_scale;
+	rdt_cqm_threshold = bytes / r->mon_scale;
 
 	return nbytes;
 }
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 931b2d0cb95e..d9e7e5668fe1 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -302,7 +302,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	this_cpu_write(current_task, next_p);
 
 	/* Load the Intel cache allocation PQR MSR. */
-	intel_rdt_sched_in();
+	rdt_sched_in();
 
 	return prev_p;
 }
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index c029782a9216..3b38d37b7742 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -536,7 +536,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	}
 
 	/* Load the Intel cache allocation PQR MSR. */
-	intel_rdt_sched_in();
+	rdt_sched_in();
 
 	return prev_p;
 }
-- 
2.17.1



Thread overview: 37+ messages
2018-09-24 19:18 [RFC PATCH 00/10] arch/x86: AMD QoS support Moger, Babu
2018-09-24 19:18 ` [RFC PATCH 01/10] arch/x86: Start renaming the rdt files to more generic names Moger, Babu
2018-09-24 19:18 ` Moger, Babu [this message]
2018-09-24 19:19 ` [RFC PATCH 03/10] arch/x86: Re-arrange RDT init code Moger, Babu
2018-10-02 19:21   ` Reinette Chatre
2018-10-02 23:41     ` Moger, Babu
2018-10-03 18:54       ` Reinette Chatre
2018-10-03 20:12         ` Moger, Babu
2018-09-24 19:19 ` [RFC PATCH 04/10] arch/x86: Introduce a new config parameter PLATFORM_QOS Moger, Babu
2018-09-24 19:19 ` [RFC PATCH 05/10] arch/x86: Use new config parameter PLATFORM_QOS for compilation Moger, Babu
2018-09-24 19:19 ` [RFC PATCH 06/10] arch/x86: Initialize the resource functions that are different Moger, Babu
2018-10-02 22:06   ` Reinette Chatre
2018-10-03 15:25     ` Moger, Babu
2018-09-24 19:19 ` [RFC PATCH 07/10] arch/x86: Bring few more functions into the resource structure Moger, Babu
2018-10-02 22:07   ` Reinette Chatre
2018-10-03 15:32     ` Moger, Babu
2018-09-24 19:19 ` [RFC PATCH 08/10] arch/x86: Introduce new config parameter AMD_QOS Moger, Babu
2018-09-24 19:19 ` [RFC PATCH 09/10] arch/x86: Add AMD feature bit X86_FEATURE_MBA in cpuid bits array Moger, Babu
2018-09-24 19:19 ` [RFC PATCH 10/10] arch/x86: Introduce QOS feature for AMD Moger, Babu
2018-10-02 18:27   ` Fenghua Yu
2018-10-03 15:56     ` Moger, Babu
2018-10-02 22:13   ` Reinette Chatre
2018-10-03 17:21     ` Moger, Babu
2018-10-05 16:20   ` James Morse
2018-10-05 17:18     ` Moger, Babu
2018-09-27 20:14 ` [RFC PATCH 00/10] arch/x86: AMD QoS support Thomas Gleixner
2018-09-28  1:57   ` Moger, Babu
2018-10-05 16:18     ` James Morse
2018-10-05 17:03       ` Moger, Babu
2018-10-02 17:06 ` Fenghua Yu
2018-10-02 17:44   ` Moger, Babu
2018-10-02 18:46     ` Fenghua Yu
2018-10-02 19:16       ` Moger, Babu
2018-10-03 18:52         ` Fenghua Yu
2018-10-03 19:48           ` Thomas Gleixner
2018-10-03 20:09           ` Moger, Babu
2018-10-05 16:19             ` James Morse
