[v3,08/11] arch/x86: Bring a few more functions into the resource structure

Message ID: 20181011203223.18157-9-babu.moger@amd.com
State: New
Series: arch/x86: AMD QoS support

Commit Message

Moger, Babu Oct. 11, 2018, 8:33 p.m. UTC
Bring all the resource functions that differ between the vendors into
the resource structure and initialize them dynamically.
Add an _intel suffix to the Intel-specific functions.

Implement these functions separately for each vendor.
update_mba_bw : Feedback-loop bandwidth update for the MBA software
                controller; not needed on AMD.
cbm_validate  : Cache bitmask validation function. AMD allows
                non-contiguous masks, so separate functions are used
                for Intel and AMD (a sketch of a possible AMD-side
                validator follows below).

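For illustration only, here is a minimal sketch of what the AMD-side
validator could look like, given that non-contiguous masks are allowed.
The name cbm_validate_amd and the exact checks are assumptions made for
this sketch, not part of this patch:

/*
 * Hypothetical AMD mask check: hex parse and range check only, no
 * contiguity requirement (name and checks are assumed, not taken
 * from this patch).
 */
bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret) {
		rdt_last_cmd_printf("non-hex character in mask %s\n", buf);
		return false;
	}

	if (val > r->default_ctrl) {
		rdt_last_cmd_puts("mask out of range\n");
		return false;
	}

	*data = val;
	return true;
}

Such a validator would then be assigned to r->cbm_validate from the
AMD init path, mirroring rdt_init_res_defs_intel() below.
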
Signed-off-by: Babu Moger <babu.moger@amd.com>
---
 arch/x86/kernel/cpu/resctrl.c             | 10 +++++++++-
 arch/x86/kernel/cpu/resctrl.h             | 15 +++++++++++----
 arch/x86/kernel/cpu/resctrl_ctrlmondata.c |  4 ++--
 arch/x86/kernel/cpu/resctrl_monitor.c     | 10 +++++++---
 4 files changed, 29 insertions(+), 10 deletions(-)

Patch

diff --git a/arch/x86/kernel/cpu/resctrl.c b/arch/x86/kernel/cpu/resctrl.c
index cff5564921c3..9eb5c102c300 100644
--- a/arch/x86/kernel/cpu/resctrl.c
+++ b/arch/x86/kernel/cpu/resctrl.c
@@ -872,10 +872,18 @@  static __init void rdt_init_res_defs_intel(void)
 	struct rdt_resource *r;
 
 	for_each_rdt_resource(r) {
-		if (r->rid == RDT_RESOURCE_MBA) {
+		if ((r->rid == RDT_RESOURCE_L3) ||
+		    (r->rid == RDT_RESOURCE_L3DATA) ||
+		    (r->rid == RDT_RESOURCE_L3CODE) ||
+		    (r->rid == RDT_RESOURCE_L2) ||
+		    (r->rid == RDT_RESOURCE_L2DATA) ||
+		    (r->rid == RDT_RESOURCE_L2CODE))
+			r->cbm_validate = cbm_validate_intel;
+		else if (r->rid == RDT_RESOURCE_MBA) {
 			r->msr_base = IA32_MBA_THRTL_BASE;
 			r->msr_update = mba_wrmsr_intel;
 			r->parse_ctrlval = parse_bw_intel;
+			r->update_mba_bw = update_mba_bw_intel;
 		}
 	}
 }
diff --git a/arch/x86/kernel/cpu/resctrl.h b/arch/x86/kernel/cpu/resctrl.h
index 8731b7c91c28..825d5571539e 100644
--- a/arch/x86/kernel/cpu/resctrl.h
+++ b/arch/x86/kernel/cpu/resctrl.h
@@ -410,10 +410,12 @@  struct rdt_parse_data {
  * @cache:		Cache allocation related data
  * @format_str:		Per resource format string to show domain value
  * @parse_ctrlval:	Per resource function pointer to parse control values
- * @evt_list:			List of monitoring events
- * @num_rmid:			Number of RMIDs available
- * @mon_scale:			cqm counter * mon_scale = occupancy in bytes
- * @fflags:			flags to choose base and info files
+ * @update_mba_bw:	Feedback loop for MBA software controller function
+ * @cbm_validate:	Cache bitmask validate function
+ * @evt_list:		List of monitoring events
+ * @num_rmid:		Number of RMIDs available
+ * @mon_scale:		cqm counter * mon_scale = occupancy in bytes
+ * @fflags:		flags to choose base and info files
  */
 struct rdt_resource {
 	int			rid;
@@ -436,6 +438,9 @@  struct rdt_resource {
 	int (*parse_ctrlval)(struct rdt_parse_data *data,
 			     struct rdt_resource *r,
 			     struct rdt_domain *d);
+	void (*update_mba_bw)(struct rdtgroup *rgrp,
+			      struct rdt_domain *dom_mbm);
+	bool (*cbm_validate)(char *buf, u32 *data, struct rdt_resource *r);
 	struct list_head	evt_list;
 	int			num_rmid;
 	unsigned int		mon_scale;
@@ -576,5 +581,7 @@  void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms);
 void cqm_handle_limbo(struct work_struct *work);
 bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d);
 void __check_limbo(struct rdt_domain *d, bool force_free);
+void update_mba_bw_intel(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm);
+bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r);
 
 #endif /* _ASM_X86_RESCTRL_H */
diff --git a/arch/x86/kernel/cpu/resctrl_ctrlmondata.c b/arch/x86/kernel/cpu/resctrl_ctrlmondata.c
index 1da343b69f6e..867da06223b5 100644
--- a/arch/x86/kernel/cpu/resctrl_ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl_ctrlmondata.c
@@ -88,7 +88,7 @@  int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r,
  *	are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.).
  * Additionally Haswell requires at least two bits set.
  */
-static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
+bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r)
 {
 	unsigned long first_bit, zero_bit, val;
 	unsigned int cbm_len = r->cache.cbm_len;
@@ -148,7 +148,7 @@  int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
 		return -EINVAL;
 	}
 
-	if (!cbm_validate(data->buf, &cbm_val, r))
+	if (r->cbm_validate && !r->cbm_validate(data->buf, &cbm_val, r))
 		return -EINVAL;
 
 	if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
diff --git a/arch/x86/kernel/cpu/resctrl_monitor.c b/arch/x86/kernel/cpu/resctrl_monitor.c
index ad0107bc16a0..3c189e8624b9 100644
--- a/arch/x86/kernel/cpu/resctrl_monitor.c
+++ b/arch/x86/kernel/cpu/resctrl_monitor.c
@@ -358,7 +358,7 @@  void mon_event_count(void *info)
  * throttle MSRs already have low percentage values.  To avoid
  * unnecessarily restricting such rdtgroups, we also increase the bandwidth.
  */
-static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
+void update_mba_bw_intel(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
 {
 	u32 closid, rmid, cur_msr, cur_msr_val, new_msr_val;
 	struct mbm_state *pmbm_data, *cmbm_data;
@@ -517,6 +517,7 @@  void mbm_handle_overflow(struct work_struct *work)
 	unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL);
 	struct rdtgroup *prgrp, *crgrp;
 	int cpu = smp_processor_id();
+	struct rdt_resource *r_mba;
 	struct list_head *head;
 	struct rdt_domain *d;
 
@@ -536,8 +537,11 @@  void mbm_handle_overflow(struct work_struct *work)
 		list_for_each_entry(crgrp, head, mon.crdtgrp_list)
 			mbm_update(d, crgrp->mon.rmid);
 
-		if (is_mba_sc(NULL))
-			update_mba_bw(prgrp, d);
+		if (is_mba_sc(NULL)) {
+			r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
+			if (r_mba->update_mba_bw)
+				r_mba->update_mba_bw(prgrp, d);
+		}
 	}
 
 	schedule_delayed_work_on(cpu, &d->mbm_over, delay);
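
For completeness, an illustrative sketch of how the AMD-side init could
wire these hooks (not part of this patch; rdt_init_res_defs_amd and
cbm_validate_amd are assumed names). The cache resources would get a
validator that permits non-contiguous masks, while update_mba_bw is
simply left NULL so that the guarded call in mbm_handle_overflow()
above skips the feedback loop on AMD:

static __init void rdt_init_res_defs_amd(void)
{
	struct rdt_resource *r;

	/*
	 * Assumed wiring: the cache resources use the AMD validator;
	 * RDT_RESOURCE_MBA leaves update_mba_bw NULL, so the
	 * update_mba_bw() call in mbm_handle_overflow() is skipped.
	 */
	for_each_rdt_resource(r) {
		if ((r->rid == RDT_RESOURCE_L3) ||
		    (r->rid == RDT_RESOURCE_L3DATA) ||
		    (r->rid == RDT_RESOURCE_L3CODE) ||
		    (r->rid == RDT_RESOURCE_L2) ||
		    (r->rid == RDT_RESOURCE_L2DATA) ||
		    (r->rid == RDT_RESOURCE_L2CODE))
			r->cbm_validate = cbm_validate_amd;
	}
}

The NULL check added to mbm_handle_overflow() in this patch is what
makes leaving the hook unset sufficient on AMD.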