From: Vikas Shivappa <vikas.shivappa@linux.intel.com>
To: x86@kernel.org, linux-kernel@vger.kernel.org, tglx@linutronix.de
Cc: hpa@zytor.com, peterz@infradead.org, ravi.v.shankar@intel.com,
	vikas.shivappa@intel.com, tony.luck@intel.com, fenghua.yu@intel.com,
	andi.kleen@intel.com
Subject: [PATCH 06/21] x86/intel_rdt: Cleanup namespace to support RDT monitoring
Date: Mon, 26 Jun 2017 11:55:53 -0700
Message-Id: <1498503368-20173-7-git-send-email-vikas.shivappa@linux.intel.com>
X-Mailer: git-send-email 1.9.1
In-Reply-To: <1498503368-20173-1-git-send-email-vikas.shivappa@linux.intel.com>
References: <1498503368-20173-1-git-send-email-vikas.shivappa@linux.intel.com>

A few of the data structures have generic names although they are RDT
allocation specific. Rename them to be allocation specific to
accommodate RDT monitoring. E.g. s/enabled/alloc_enabled/

No functional change.

Signed-off-by: Vikas Shivappa <vikas.shivappa@linux.intel.com>
---
 arch/x86/include/asm/intel_rdt_sched.h   |  4 ++--
 arch/x86/kernel/cpu/intel_rdt.c          | 24 +++++++++++------------
 arch/x86/kernel/cpu/intel_rdt.h          | 18 ++++++++---------
 arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 33 ++++++++++++++++----------------
 arch/x86/kernel/cpu/intel_rdt_schemata.c |  8 ++++----
 5 files changed, 44 insertions(+), 43 deletions(-)

diff --git a/arch/x86/include/asm/intel_rdt_sched.h b/arch/x86/include/asm/intel_rdt_sched.h
index 62a70bc..4dee77b 100644
--- a/arch/x86/include/asm/intel_rdt_sched.h
+++ b/arch/x86/include/asm/intel_rdt_sched.h
@@ -27,7 +27,7 @@ struct intel_pqr_state {
 
 DECLARE_PER_CPU(struct intel_pqr_state, pqr_state);
 DECLARE_PER_CPU_READ_MOSTLY(int, cpu_closid);
-DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
+DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
 
 /*
  * intel_rdt_sched_in() - Writes the task's CLOSid to IA32_PQR_MSR
@@ -44,7 +44,7 @@ struct intel_pqr_state {
  */
 static inline void intel_rdt_sched_in(void)
 {
-	if (static_branch_likely(&rdt_enable_key)) {
+	if (static_branch_likely(&rdt_alloc_enable_key)) {
 		struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
 		int closid;
 
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 08872e9..59500f9 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -173,8 +173,8 @@ static inline bool cache_alloc_hsw_probe(void)
 	r->default_ctrl = max_cbm;
 	r->cache.cbm_len = 20;
 	r->cache.min_cbm_bits = 2;
-	r->capable = true;
-	r->enabled = true;
+	r->alloc_capable = true;
+	r->alloc_enabled = true;
 
 	return true;
 }
@@ -224,8 +224,8 @@ static bool rdt_get_mem_config(struct rdt_resource *r)
 	r->data_width = 3;
 
 	rdt_get_mba_infofile(r);
-	r->capable = true;
-	r->enabled = true;
+	r->alloc_capable = true;
+	r->alloc_enabled = true;
 
 	return true;
 }
@@ -242,8 +242,8 @@ static void rdt_get_cache_config(int idx, struct rdt_resource *r)
 	r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
 	r->data_width = (r->cache.cbm_len + 3) / 4;
 	rdt_get_cache_infofile(r);
-	r->capable = true;
-	r->enabled = true;
+	r->alloc_capable = true;
+	r->alloc_enabled = true;
 }
 
 static void rdt_get_cdp_l3_config(int type)
@@ -255,12 +255,12 @@ static void rdt_get_cdp_l3_config(int type)
 	r->cache.cbm_len = r_l3->cache.cbm_len;
 	r->default_ctrl = r_l3->default_ctrl;
 	r->data_width = (r->cache.cbm_len + 3) / 4;
-	r->capable = true;
+	r->alloc_capable = true;
 	/*
 	 * By default, CDP is disabled. CDP can be enabled by mount parameter
 	 * "cdp" during resctrl file system mount time.
 	 */
-	r->enabled = false;
+	r->alloc_enabled = false;
 }
 
 static int get_cache_id(int cpu, int level)
@@ -464,7 +464,7 @@ static int intel_rdt_online_cpu(unsigned int cpu)
 	struct rdt_resource *r;
 
 	mutex_lock(&rdtgroup_mutex);
-	for_each_capable_rdt_resource(r)
+	for_each_alloc_capable_rdt_resource(r)
 		domain_add_cpu(cpu, r);
 	/* The cpu is set in default rdtgroup after online. */
 	cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
@@ -480,7 +480,7 @@ static int intel_rdt_offline_cpu(unsigned int cpu)
 	struct rdt_resource *r;
 
 	mutex_lock(&rdtgroup_mutex);
-	for_each_capable_rdt_resource(r)
+	for_each_alloc_capable_rdt_resource(r)
 		domain_remove_cpu(cpu, r);
 	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
 		if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask))
@@ -501,7 +501,7 @@ static __init void rdt_init_padding(void)
 	struct rdt_resource *r;
 	int cl;
 
-	for_each_capable_rdt_resource(r) {
+	for_each_alloc_capable_rdt_resource(r) {
 		cl = strlen(r->name);
 		if (cl > max_name_width)
 			max_name_width = cl;
@@ -565,7 +565,7 @@ static int __init intel_rdt_late_init(void)
 		return ret;
 	}
 
-	for_each_capable_rdt_resource(r)
+	for_each_alloc_capable_rdt_resource(r)
 		pr_info("Intel RDT %s allocation detected\n", r->name);
 
 	return 0;
diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h
index 0e4852d..29630af 100644
--- a/arch/x86/kernel/cpu/intel_rdt.h
+++ b/arch/x86/kernel/cpu/intel_rdt.h
@@ -135,8 +135,8 @@ struct rdt_membw {
 
 /**
  * struct rdt_resource - attributes of an RDT resource
- * @enabled:			Is this feature enabled on this machine
- * @capable:			Is this feature available on this machine
+ * @alloc_enabled:		Is allocation enabled on this machine
+ * @alloc_capable:		Is allocation available on this machine
  * @name:			Name to use in "schemata" file
  * @num_closid:			Number of CLOSIDs available
  * @cache_level:		Which cache level defines scope of this resource
@@ -152,8 +152,8 @@ struct rdt_membw {
  * @parse_ctrlval:		Per resource function pointer to parse control values
  */
 struct rdt_resource {
-	bool			enabled;
-	bool			capable;
+	bool			alloc_enabled;
+	bool			alloc_capable;
 	char			*name;
 	int			num_closid;
 	int			cache_level;
@@ -181,7 +181,7 @@ struct rdt_resource {
 extern struct rdt_resource rdt_resources_all[];
 extern struct rdtgroup rdtgroup_default;
 
-DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
+DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
 
 int __init rdtgroup_init(void);
 
@@ -196,15 +196,15 @@ enum {
 	RDT_NUM_RESOURCES,
 };
 
-#define for_each_capable_rdt_resource(r)				      \
+#define for_each_alloc_capable_rdt_resource(r)				      \
 	for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
 	     r++)							      \
-		if (r->capable)
+		if (r->alloc_capable)
 
-#define for_each_enabled_rdt_resource(r)				      \
+#define for_each_alloc_enabled_rdt_resource(r)				      \
 	for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
 	     r++)							      \
-		if (r->enabled)
+		if (r->alloc_enabled)
 
 /* CPUID.(EAX=10H, ECX=ResID=1).EAX */
 union cpuid_0x10_1_eax {
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index fab8811..8ef9390 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -35,7 +35,7 @@
 #include <asm/intel_rdt_sched.h>
 #include "intel_rdt.h"
 
-DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
+DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
 struct kernfs_root *rdt_root;
 struct rdtgroup rdtgroup_default;
 LIST_HEAD(rdt_all_groups);
@@ -66,7 +66,7 @@ static void closid_init(void)
 	int rdt_min_closid = 32;
 
 	/* Compute rdt_min_closid across all resources */
-	for_each_enabled_rdt_resource(r)
+	for_each_alloc_enabled_rdt_resource(r)
 		rdt_min_closid = min(rdt_min_closid, r->num_closid);
 
 	closid_free_map = BIT_MASK(rdt_min_closid) - 1;
@@ -638,7 +638,7 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
 		return PTR_ERR(kn_info);
 	kernfs_get(kn_info);
 
-	for_each_enabled_rdt_resource(r) {
+	for_each_alloc_enabled_rdt_resource(r) {
 		kn_subdir = kernfs_create_dir(kn_info, r->name,
 					      kn_info->mode, r);
 		if (IS_ERR(kn_subdir)) {
@@ -718,14 +718,15 @@ static int cdp_enable(void)
 	struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3];
 	int ret;
 
-	if (!r_l3->capable || !r_l3data->capable || !r_l3code->capable)
+	if (!r_l3->alloc_capable || !r_l3data->alloc_capable ||
+	    !r_l3code->alloc_capable)
 		return -EINVAL;
 
 	ret = set_l3_qos_cfg(r_l3, true);
 	if (!ret) {
-		r_l3->enabled = false;
-		r_l3data->enabled = true;
-		r_l3code->enabled = true;
+		r_l3->alloc_enabled = false;
+		r_l3data->alloc_enabled = true;
+		r_l3code->alloc_enabled = true;
 	}
 	return ret;
 }
@@ -734,11 +735,11 @@ static void cdp_disable(void)
 {
 	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
 
-	r->enabled = r->capable;
+	r->alloc_enabled = r->alloc_capable;
 
-	if (rdt_resources_all[RDT_RESOURCE_L3DATA].enabled) {
-		rdt_resources_all[RDT_RESOURCE_L3DATA].enabled = false;
-		rdt_resources_all[RDT_RESOURCE_L3CODE].enabled = false;
+	if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled) {
+		rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled = false;
+		rdt_resources_all[RDT_RESOURCE_L3CODE].alloc_enabled = false;
 		set_l3_qos_cfg(r, false);
 	}
 }
@@ -834,7 +835,7 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
 	/*
 	 * resctrl file system can only be mounted once.
 	 */
-	if (static_branch_unlikely(&rdt_enable_key)) {
+	if (static_branch_unlikely(&rdt_alloc_enable_key)) {
 		dentry = ERR_PTR(-EBUSY);
 		goto out;
 	}
@@ -858,7 +859,7 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
 	if (IS_ERR(dentry))
 		goto out_destroy;
 
-	static_branch_enable(&rdt_enable_key);
+	static_branch_enable(&rdt_alloc_enable_key);
 	goto out;
 
 out_destroy:
@@ -986,11 +987,11 @@ static void rdt_kill_sb(struct super_block *sb)
 	mutex_lock(&rdtgroup_mutex);
 
 	/*Put everything back to default values. */
-	for_each_enabled_rdt_resource(r)
+	for_each_alloc_enabled_rdt_resource(r)
 		reset_all_ctrls(r);
 	cdp_disable();
 	rmdir_all_sub();
-	static_branch_disable(&rdt_enable_key);
+	static_branch_disable(&rdt_alloc_enable_key);
 	kernfs_kill_sb(sb);
 	mutex_unlock(&rdtgroup_mutex);
 }
@@ -1129,7 +1130,7 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
 
 static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
 {
-	if (rdt_resources_all[RDT_RESOURCE_L3DATA].enabled)
+	if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
 		seq_puts(seq, ",cdp");
 	return 0;
 }
diff --git a/arch/x86/kernel/cpu/intel_rdt_schemata.c b/arch/x86/kernel/cpu/intel_rdt_schemata.c
index 8cef1c8..952156c 100644
--- a/arch/x86/kernel/cpu/intel_rdt_schemata.c
+++ b/arch/x86/kernel/cpu/intel_rdt_schemata.c
@@ -192,7 +192,7 @@ static int rdtgroup_parse_resource(char *resname, char *tok, int closid)
 {
 	struct rdt_resource *r;
 
-	for_each_enabled_rdt_resource(r) {
+	for_each_alloc_enabled_rdt_resource(r) {
 		if (!strcmp(resname, r->name) && closid < r->num_closid)
 			return parse_line(tok, r);
 	}
@@ -221,7 +221,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 
 	closid = rdtgrp->closid;
 
-	for_each_enabled_rdt_resource(r) {
+	for_each_alloc_enabled_rdt_resource(r) {
 		list_for_each_entry(dom, &r->domains, list)
 			dom->have_new_ctrl = false;
 	}
@@ -237,7 +237,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 		goto out;
 	}
 
-	for_each_enabled_rdt_resource(r) {
+	for_each_alloc_enabled_rdt_resource(r) {
 		ret = update_domains(r, closid);
 		if (ret)
 			goto out;
@@ -274,7 +274,7 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
 	if (rdtgrp) {
 		closid = rdtgrp->closid;
-		for_each_enabled_rdt_resource(r) {
+		for_each_alloc_enabled_rdt_resource(r) {
 			if (closid < r->num_closid)
 				show_doms(s, r, closid);
 		}
-- 
1.9.1
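
For quick reference, the gist of the rename, condensed from the hunks above
(an illustrative sketch only, not part of the patch to be applied; the full
definitions live in arch/x86/kernel/cpu/intel_rdt.h):

  /* Condensed field subset of struct rdt_resource after this patch. */
  struct rdt_resource {
  	bool	alloc_enabled;	/* was "enabled":  is allocation enabled on this machine?  */
  	bool	alloc_capable;	/* was "capable":  is allocation available on this machine? */
  	char	*name;		/* unchanged */
  	int	num_closid;	/* unchanged */
  };

  /* The resource iterators follow the same s/enabled/alloc_enabled/ scheme: */
  #define for_each_alloc_capable_rdt_resource(r)	/* was for_each_capable_rdt_resource(r) */ \
  	for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES; r++) \
  		if (r->alloc_capable)

  /* ... as does the static key consulted by intel_rdt_sched_in(): */
  DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);	/* was rdt_enable_key */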