From: "Moger, Babu" <Babu.Moger@amd.com>
To: "tglx@linutronix.de" <tglx@linutronix.de>,
"mingo@redhat.com" <mingo@redhat.com>,
"bp@alien8.de" <bp@alien8.de>, "corbet@lwn.net" <corbet@lwn.net>,
"fenghua.yu@intel.com" <fenghua.yu@intel.com>,
"reinette.chatre@intel.com" <reinette.chatre@intel.com>,
"peterz@infradead.org" <peterz@infradead.org>,
"gregkh@linuxfoundation.org" <gregkh@linuxfoundation.org>,
"davem@davemloft.net" <davem@davemloft.net>,
"akpm@linux-foundation.org" <akpm@linux-foundation.org>
Cc: "hpa@zytor.com" <hpa@zytor.com>,
"x86@kernel.org" <x86@kernel.org>,
"mchehab+samsung@kernel.org" <mchehab+samsung@kernel.org>,
"arnd@arndb.de" <arnd@arndb.de>,
"kstewart@linuxfoundation.org" <kstewart@linuxfoundation.org>,
"pombredanne@nexb.com" <pombredanne@nexb.com>,
"rafael@kernel.org" <rafael@kernel.org>,
"kirill.shutemov@linux.intel.com"
<kirill.shutemov@linux.intel.com>,
"tony.luck@intel.com" <tony.luck@intel.com>,
"qianyue.zj@alibaba-inc.com" <qianyue.zj@alibaba-inc.com>,
"xiaochen.shen@intel.com" <xiaochen.shen@intel.com>,
"pbonzini@redhat.com" <pbonzini@redhat.com>,
"Singh, Brijesh" <brijesh.singh@amd.com>,
"Hurwitz, Sherry" <sherry.hurwitz@amd.com>,
"dwmw2@infradead.org" <dwmw2@infradead.org>,
"Lendacky, Thomas" <Thomas.Lendacky@amd.com>,
"luto@kernel.org" <luto@kernel.org>,
"joro@8bytes.org" <joro@8bytes.org>,
"jannh@google.com" <jannh@google.com>,
"vkuznets@redhat.com" <vkuznets@redhat.com>,
"rian@alum.mit.edu" <rian@alum.mit.edu>,
"jpoimboe@redhat.com" <jpoimboe@redhat.com>,
"Moger, Babu" <Babu.Moger@amd.com>,
"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>,
"linux-doc@vger.kernel.org" <linux-doc@vger.kernel.org>
Subject: [PATCH v7 11/13] arch/x86: Introduce QOS feature for AMD
Date: Fri, 9 Nov 2018 20:52:45 +0000 [thread overview]
Message-ID: <20181109205153.14811-12-babu.moger@amd.com> (raw)
In-Reply-To: <20181109205153.14811-1-babu.moger@amd.com>
Enables the QoS feature on AMD.
The following QoS sub-features are supported on AMD if the underlying
hardware supports them.
- L3 Cache allocation enforcement
- L3 Cache occupancy monitoring
- L3 Code-Data Prioritization support
- Memory Bandwidth Enforcement (Allocation)
The specification for this feature is available at
https://developer.amd.com/wp-content/resources/56375.pdf
There are differences in the way some of the features are implemented.
Separate those functions and add those as vendor specific functions.
The major difference is in MBA feature.
- AMD uses CPUID leaf 0x80000020 to initialize the MBA features.
- AMD uses direct bandwidth value instead of delay based on bandwidth
values.
- MSR register base addresses are different for MBA.
- Also AMD allows non-contiguous L3 cache bit masks.
Adds the following functions to take care of the differences.
rdt_get_mem_config_amd: MBA initialization function
parse_bw_amd: Bandwidth parsing
mba_wrmsr_amd: Writes bandwidth value
cbm_validate_amd: L3 cache bitmask validation
Signed-off-by: Babu Moger <babu.moger@amd.com>
---
arch/x86/kernel/cpu/resctrl.c | 69 +++++++++++++++++++++-
arch/x86/kernel/cpu/resctrl.h | 5 ++
arch/x86/kernel/cpu/resctrl_ctrlmondata.c | 70 +++++++++++++++++++++++
3 files changed, 142 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kernel/cpu/resctrl.c b/arch/x86/kernel/cpu/resctrl.c
index 7e4bea3bba98..a0a08f8a87b5 100644
--- a/arch/x86/kernel/cpu/resctrl.c
+++ b/arch/x86/kernel/cpu/resctrl.c
@@ -61,6 +61,9 @@ mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
struct rdt_resource *r);
static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
+static void
+mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m,
+ struct rdt_resource *r);
#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)
@@ -280,6 +283,31 @@ static bool rdt_get_mem_config(struct rdt_resource *r)
return true;
}
+static bool rdt_get_mem_config_amd(struct rdt_resource *r)
+{
+ union cpuid_0x10_3_eax eax;
+ union cpuid_0x10_x_edx edx;
+ u32 ebx, ecx;
+
+ cpuid_count(0x80000020, 1, &eax.full, &ebx, &ecx, &edx.full);
+ r->num_closid = edx.split.cos_max + 1;
+ r->default_ctrl = MAX_MBA_BW_AMD;
+
+ /* AMD does not use delay. Set delay_linear to false by default */
+ r->membw.delay_linear = false;
+
+ /* FIX ME - May need to be read from MSR */
+ r->membw.min_bw = 0;
+ r->membw.bw_gran = 1;
+ /* Max value is 2048, Data width should be 4 in decimal */
+ r->data_width = 4;
+
+ r->alloc_capable = true;
+ r->alloc_enabled = true;
+
+ return true;
+}
+
static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
{
union cpuid_0x10_1_eax eax;
@@ -339,6 +367,16 @@ static int get_cache_id(int cpu, int level)
return -1;
}
+static void
+mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
+{
+ unsigned int i;
+
+ /* Write the bw values for mba. */
+ for (i = m->low; i < m->high; i++)
+ wrmsrl(r->msr_base + i, d->ctrl_val[i]);
+}
+
/*
* Map the memory b/w percentage value to delay values
* that can be written to QOS_MSRs.
@@ -793,8 +831,13 @@ static bool __init rdt_cpu_has(int flag)
static __init bool rdt_mba_config(void)
{
if (rdt_cpu_has(X86_FEATURE_MBA)) {
- if (rdt_get_mem_config(&rdt_resources_all[RDT_RESOURCE_MBA]))
- return true;
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+ if (rdt_get_mem_config(&rdt_resources_all[RDT_RESOURCE_MBA]))
+ return true;
+ } else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+ if (rdt_get_mem_config_amd(&rdt_resources_all[RDT_RESOURCE_MBA]))
+ return true;
+ }
}
return false;
@@ -894,10 +937,32 @@ static __init void rdt_init_res_defs_intel(void)
}
}
+static __init void rdt_init_res_defs_amd(void)
+{
+ struct rdt_resource *r;
+
+ for_each_rdt_resource(r) {
+ if (r->rid == RDT_RESOURCE_L3 ||
+ r->rid == RDT_RESOURCE_L3DATA ||
+ r->rid == RDT_RESOURCE_L3CODE ||
+ r->rid == RDT_RESOURCE_L2 ||
+ r->rid == RDT_RESOURCE_L2DATA ||
+ r->rid == RDT_RESOURCE_L2CODE)
+ r->cbm_validate = cbm_validate_amd;
+ else if (r->rid == RDT_RESOURCE_MBA) {
+ r->msr_base = IA32_MBA_BW_BASE;
+ r->msr_update = mba_wrmsr_amd;
+ r->parse_ctrlval = parse_bw_amd;
+ }
+ }
+}
+
static __init void rdt_init_res_defs(void)
{
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
rdt_init_res_defs_intel();
+ else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ rdt_init_res_defs_amd();
}
static enum cpuhp_state rdt_online;
diff --git a/arch/x86/kernel/cpu/resctrl.h b/arch/x86/kernel/cpu/resctrl.h
index 102bcffbefd7..54ba21b7de2c 100644
--- a/arch/x86/kernel/cpu/resctrl.h
+++ b/arch/x86/kernel/cpu/resctrl.h
@@ -11,6 +11,7 @@
#define IA32_L3_CBM_BASE 0xc90
#define IA32_L2_CBM_BASE 0xd10
#define IA32_MBA_THRTL_BASE 0xd50
+#define IA32_MBA_BW_BASE 0xc0000200
#define IA32_QM_CTR 0x0c8e
#define IA32_QM_EVTSEL 0x0c8d
@@ -34,6 +35,7 @@
#define MAX_MBA_BW 100u
#define MBA_IS_LINEAR 0x4
#define MBA_MAX_MBPS U32_MAX
+#define MAX_MBA_BW_AMD 0x800
#define RMID_VAL_ERROR BIT_ULL(63)
#define RMID_VAL_UNAVAIL BIT_ULL(62)
@@ -448,6 +450,8 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
struct rdt_domain *d);
int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r,
struct rdt_domain *d);
+int parse_bw_amd(struct rdt_parse_data *data, struct rdt_resource *r,
+ struct rdt_domain *d);
extern struct mutex rdtgroup_mutex;
@@ -579,5 +583,6 @@ void cqm_handle_limbo(struct work_struct *work);
bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d);
void __check_limbo(struct rdt_domain *d, bool force_free);
bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r);
+bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r);
#endif /* _ASM_X86_RESCTRL_H */
diff --git a/arch/x86/kernel/cpu/resctrl_ctrlmondata.c b/arch/x86/kernel/cpu/resctrl_ctrlmondata.c
index 71aa1d971430..b6ceb4db9322 100644
--- a/arch/x86/kernel/cpu/resctrl_ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl_ctrlmondata.c
@@ -28,6 +28,52 @@
#include <linux/slab.h>
#include "resctrl.h"
+/*
+ * Check whether MBA bandwidth percentage value is correct. The value is
+ * checked against the minimum and max bandwidth values specified by the
+ * hardware. The allocated bandwidth percentage is rounded to the next
+ * control step available on the hardware.
+ */
+static bool bw_validate_amd(char *buf, unsigned long *data,
+ struct rdt_resource *r)
+{
+ unsigned long bw;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &bw);
+ if (ret) {
+ rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
+ return false;
+ }
+
+ if (bw < r->membw.min_bw || bw > r->default_ctrl) {
+ rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
+ r->membw.min_bw, r->default_ctrl);
+ return false;
+ }
+
+ *data = roundup(bw, (unsigned long)r->membw.bw_gran);
+ return true;
+}
+
+int parse_bw_amd(struct rdt_parse_data *data, struct rdt_resource *r,
+ struct rdt_domain *d)
+{
+ unsigned long bw_val;
+
+ if (d->have_new_ctrl) {
+ rdt_last_cmd_printf("duplicate domain %d\n", d->id);
+ return -EINVAL;
+ }
+
+ if (!bw_validate_amd(data->buf, &bw_val, r))
+ return -EINVAL;
+ d->new_ctrl = bw_val;
+ d->have_new_ctrl = true;
+
+ return 0;
+}
+
/*
* Check whether MBA bandwidth percentage value is correct. The value is
* checked against the minimum and max bandwidth values specified by the
@@ -123,6 +169,30 @@ bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r)
return true;
}
+/*
+ * Check whether a cache bit mask is valid. AMD allows non-contiguous
+ * bitmasks
+ */
+bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r)
+{
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret) {
+ rdt_last_cmd_printf("non-hex character in mask %s\n", buf);
+ return false;
+ }
+
+ if (val > r->default_ctrl) {
+ rdt_last_cmd_puts("mask out of range\n");
+ return false;
+ }
+
+ *data = val;
+ return true;
+}
+
/*
* Read one cache bit mask (hex). Check that it is valid for the current
* resource type.
--
2.17.1
next prev parent reply other threads:[~2018-11-09 20:52 UTC|newest]
Thread overview: 31+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-11-09 20:52 [PATCH v7 00/13] arch/x86: AMD QoS support Moger, Babu
2018-11-09 20:52 ` [PATCH v7 01/13] arch/x86: Start renaming the rdt files to more generic names Moger, Babu
2018-11-12 17:16 ` Borislav Petkov
2018-11-12 19:16 ` Moger, Babu
2018-11-13 21:35 ` Yu, Fenghua
2018-11-13 21:40 ` Borislav Petkov
2018-11-09 20:52 ` [PATCH v7 02/13] arch/x86: Rename the RDT functions and definitions Moger, Babu
2018-11-12 17:56 ` Borislav Petkov
2018-11-12 19:25 ` Moger, Babu
2018-11-12 19:46 ` Borislav Petkov
2018-11-12 20:51 ` Moger, Babu
2018-11-09 20:52 ` [PATCH v7 03/13] arch/x86: Re-arrange RDT init code Moger, Babu
2018-11-14 19:05 ` Borislav Petkov
2018-11-14 20:07 ` Moger, Babu
2018-11-09 20:52 ` [PATCH v7 04/13] arch/x86: Bring all the macros to resctrl.h Moger, Babu
2018-11-09 20:52 ` [PATCH v7 05/13] arch/x86: Rename config parameter INTEL_RDT to RESCTRL Moger, Babu
2018-11-09 20:52 ` [PATCH v7 06/13] arch/x86: Initialize the resource functions that are different Moger, Babu
2018-11-09 20:52 ` [PATCH v7 07/13] arch/x86: Bring cbm_validate function into the resource structure Moger, Babu
2018-11-09 20:52 ` [PATCH v7 08/13] arch/x86: Add vendor check for MBA software controller Moger, Babu
2018-11-09 20:52 ` [PATCH v7 09/13] arch/x86: Update the RESCTRL config parameter Moger, Babu
2018-11-09 20:52 ` [PATCH v7 10/13] arch/x86: Add AMD feature bit X86_FEATURE_MBA in cpuid bits array Moger, Babu
2018-11-09 20:52 ` Moger, Babu [this message]
2018-11-13 21:49 ` [PATCH v7 11/13] arch/x86: Introduce QOS feature for AMD Yu, Fenghua
2018-11-14 16:38 ` Moger, Babu
2018-11-14 18:17 ` Moger, Babu
2018-11-14 19:31 ` Yu, Fenghua
2018-11-09 20:52 ` [PATCH v7 12/13] Documentation/x86: Rename and update intel_rdt_ui.txt Moger, Babu
2018-11-09 20:52 ` [PATCH v7 13/13] MAINTAINERS: Update the file and documentation names in arch/x86 Moger, Babu
2018-11-19 23:58 ` [PATCH v7 00/13] arch/x86: AMD QoS support Pavel Machek
2018-11-20 2:20 ` Yu, Fenghua
2018-11-20 9:13 ` Pavel Machek
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20181109205153.14811-12-babu.moger@amd.com \
--to=babu.moger@amd.com \
--cc=Thomas.Lendacky@amd.com \
--cc=akpm@linux-foundation.org \
--cc=arnd@arndb.de \
--cc=bp@alien8.de \
--cc=brijesh.singh@amd.com \
--cc=corbet@lwn.net \
--cc=davem@davemloft.net \
--cc=dwmw2@infradead.org \
--cc=fenghua.yu@intel.com \
--cc=gregkh@linuxfoundation.org \
--cc=hpa@zytor.com \
--cc=jannh@google.com \
--cc=joro@8bytes.org \
--cc=jpoimboe@redhat.com \
--cc=kirill.shutemov@linux.intel.com \
--cc=kstewart@linuxfoundation.org \
--cc=linux-doc@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=luto@kernel.org \
--cc=mchehab+samsung@kernel.org \
--cc=mingo@redhat.com \
--cc=pbonzini@redhat.com \
--cc=peterz@infradead.org \
--cc=pombredanne@nexb.com \
--cc=qianyue.zj@alibaba-inc.com \
--cc=rafael@kernel.org \
--cc=reinette.chatre@intel.com \
--cc=rian@alum.mit.edu \
--cc=sherry.hurwitz@amd.com \
--cc=tglx@linutronix.de \
--cc=tony.luck@intel.com \
--cc=vkuznets@redhat.com \
--cc=x86@kernel.org \
--cc=xiaochen.shen@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).