[RESEND,3/6] perf/x86/intel/uncore: Extract codes of box ref/unref

Message ID: 1556672028-119221-4-git-send-email-kan.liang@linux.intel.com
State: New
Series: Perf uncore support for Snow Ridge server

Commit Message

Liang, Kan May 1, 2019, 12:53 a.m. UTC
From: Kan Liang <kan.liang@linux.intel.com>

For an uncore box which can only be accessed by MSR, its reference
count, box->refcnt, is updated during CPU hotplug. The box has to be
initialized for the first CPU of a socket and exited for the last one.
Starting with the Snow Ridge server, a new type of uncore box is
introduced which can only be accessed by MMIO. The driver needs to
map/unmap the MMIO space for the first/last CPU of a socket.

Extract the box ref/unref and init/exit code into helpers so that it
can be reused later.

There is no functional change.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
---
 arch/x86/events/intel/uncore.c | 55 +++++++++++++++++++++++++++---------------
 1 file changed, 36 insertions(+), 19 deletions(-)
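
To see where the series is headed, here is a rough sketch of how the
extracted helper could let the online callback handle a second uncore
type array for the MMIO boxes. This is an assumption based on the
series cover letter, not code from this patch, and uncore_mmio_uncores
is hypothetical at this point in the series:

static int uncore_event_cpu_online(unsigned int cpu)
{
	int pkg = topology_logical_package_id(cpu);
	int ret;

	/* MSR boxes: the first CPU of a socket runs uncore_box_init() */
	ret = uncore_box_ref(uncore_msr_uncores, pkg, cpu);
	if (ret)
		return ret;

	/*
	 * Hypothetical MMIO type array, added later in the series;
	 * its ->init_box() would map the MMIO space on the first ref.
	 */
	ret = uncore_box_ref(uncore_mmio_uncores, pkg, cpu);
	if (ret)
		return ret;

	/* ... existing uncore_cpu_mask / target selection continues ... */
	return 0;
}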

Comments

Peter Zijlstra May 8, 2019, 11:51 a.m. UTC | #1
On Tue, Apr 30, 2019 at 05:53:45PM -0700, kan.liang@linux.intel.com wrote:
> +static void uncore_box_unref(struct intel_uncore_type **types, int id)
>  {
> +	struct intel_uncore_type *type;
>  	struct intel_uncore_pmu *pmu;
>  	struct intel_uncore_box *box;
> +	int i;
> +
> +	for (; *types; types++) {
> +		type = *types;
> +		pmu = type->pmus;
> +		for (i = 0; i < type->num_boxes; i++, pmu++) {
> +			box = pmu->boxes[id];
> +			if (box && atomic_dec_return(&box->refcnt) == 0)
> +				uncore_box_exit(box);
> +		}
> +	}
> +}

> +static int uncore_box_ref(struct intel_uncore_type **types,
> +			  int id, unsigned int cpu)
>  {
> +	struct intel_uncore_type *type;
>  	struct intel_uncore_pmu *pmu;
>  	struct intel_uncore_box *box;
> +	int i, ret;
>  
> +	ret = allocate_boxes(types, id, cpu);
>  	if (ret)
>  		return ret;
>  
> @@ -1232,11 +1238,22 @@ static int uncore_event_cpu_online(unsigned int cpu)
>  		type = *types;
>  		pmu = type->pmus;
>  		for (i = 0; i < type->num_boxes; i++, pmu++) {
> +			box = pmu->boxes[id];
>  			if (box && atomic_inc_return(&box->refcnt) == 1)
>  				uncore_box_init(box);
>  		}
>  	}
> +	return 0;
> +}

This relies on all online/offline events being globally serialized, and
they are. But that does make me wonder why we're using atomic_t.

Without that serialization there would be an online-online race where
the first online sets the refcount to 1 and lets the second online
continue without the first having completed the init().

Anyway, the code isn't wrong, just weird.
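
To make the race concrete, here is a minimal userspace sketch of the
pattern (plain C11 atomics and pthreads, not kernel code; all names are
invented for illustration). Without the global serialization that CPU
hotplug provides, the second "online" can observe refcnt > 1 and
proceed before the first has finished init():

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int refcnt;
static atomic_bool initialized;

static void box_init(void)
{
	usleep(1000);			/* pretend init takes a while */
	atomic_store(&initialized, true);
}

static void *cpu_online(void *arg)
{
	(void)arg;
	/* the first "online" takes the refcount from 0 to 1 and inits */
	if (atomic_fetch_add(&refcnt, 1) == 0)
		box_init();
	else if (!atomic_load(&initialized))
		printf("race: box used before init() completed\n");
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	/* run both "online" events concurrently to expose the race */
	pthread_create(&a, NULL, cpu_online, NULL);
	pthread_create(&b, NULL, cpu_online, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

In the kernel the hotplug callbacks are serialized, so this
interleaving cannot happen; as Peter notes, the atomic_t is then
stronger than strictly necessary.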

Patch

diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index ee23b50..0b72ca5 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1140,12 +1140,27 @@ static void uncore_change_context(struct intel_uncore_type **uncores,
 		uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
 }
 
-static int uncore_event_cpu_offline(unsigned int cpu)
+static void uncore_box_unref(struct intel_uncore_type **types, int id)
 {
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
+	struct intel_uncore_type *type;
 	struct intel_uncore_pmu *pmu;
 	struct intel_uncore_box *box;
-	int i, pkg, target;
+	int i;
+
+	for (; *types; types++) {
+		type = *types;
+		pmu = type->pmus;
+		for (i = 0; i < type->num_boxes; i++, pmu++) {
+			box = pmu->boxes[id];
+			if (box && atomic_dec_return(&box->refcnt) == 0)
+				uncore_box_exit(box);
+		}
+	}
+}
+
+static int uncore_event_cpu_offline(unsigned int cpu)
+{
+	int pkg, target;
 
 	/* Check if exiting cpu is used for collecting uncore events */
 	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
@@ -1165,15 +1180,7 @@ static int uncore_event_cpu_offline(unsigned int cpu)
 unref:
 	/* Clear the references */
 	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (box && atomic_dec_return(&box->refcnt) == 0)
-				uncore_box_exit(box);
-		}
-	}
+	uncore_box_unref(uncore_msr_uncores, pkg);
 	return 0;
 }
 
@@ -1215,16 +1222,15 @@ static int allocate_boxes(struct intel_uncore_type **types,
 	}
 	return -ENOMEM;
 }
-
-static int uncore_event_cpu_online(unsigned int cpu)
+static int uncore_box_ref(struct intel_uncore_type **types,
+			  int id, unsigned int cpu)
 {
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
+	struct intel_uncore_type *type;
 	struct intel_uncore_pmu *pmu;
 	struct intel_uncore_box *box;
-	int i, ret, pkg, target;
+	int i, ret;
 
-	pkg = topology_logical_package_id(cpu);
-	ret = allocate_boxes(types, pkg, cpu);
+	ret = allocate_boxes(types, id, cpu);
 	if (ret)
 		return ret;
 
@@ -1232,11 +1238,22 @@ static int uncore_event_cpu_online(unsigned int cpu)
 		type = *types;
 		pmu = type->pmus;
 		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
+			box = pmu->boxes[id];
 			if (box && atomic_inc_return(&box->refcnt) == 1)
 				uncore_box_init(box);
 		}
 	}
+	return 0;
+}
+
+static int uncore_event_cpu_online(unsigned int cpu)
+{
+	int ret, pkg, target;
+
+	pkg = topology_logical_package_id(cpu);
+	ret = uncore_box_ref(uncore_msr_uncores, pkg, cpu);
+	if (ret)
+		return ret;
 
 	/*
 	 * Check if there is an online cpu in the package