* [PATCH v2 0/6] Intel P states enhancements
@ 2015-09-01 11:53 Srinivas Pandruvada
  2015-09-01 11:53 ` [PATCH v2 1/6] cpufreq: intel_p_state: Fix limiting turbo sub states Srinivas Pandruvada
                   ` (5 more replies)
  0 siblings, 6 replies; 8+ messages in thread
From: Srinivas Pandruvada @ 2015-09-01 11:53 UTC (permalink / raw)
  To: kristen.c.accardi, rafael.j.wysocki; +Cc: linux-pm, Srinivas Pandruvada

This series enhances the Intel P state driver with the following features:
- When max_perf_pct is reduced into the turbo range, change the turbo
ratio limits when the platform allows it. This is particularly useful for
limiting performance with HWP, where the whole range is turbo.
- Use the Turbo Activation Ratio (TAR), when available, for calculating
the max non turbo P state, so the correct percentage is shown in the
turbo range.
- For calculating busy percent, the estimate is not correct when the max
non turbo P state is limited by TAR, so keep using the physical max non
turbo P state for scaling, as before.
- Use ACPI _PSS and _PPC in the intel_pstate driver.
- Avoid calculating the P state control value when the frequency limits
requested by the cpufreq policy match an entry in _PSS. The calculation
sometimes yields a reduced control value in boundary conditions.
Although these are independent patches, they are sent as a series to make
applying and testing easier.
Review and testing on multiple platforms is appreciated.

v2:
- When CONFIG_ACPI is not defined, acpi/processor.h can't be included,
and some variables become unused, which caused compiler warnings. Fixed
all of these compile issues.

v1:
- Changed the command line option to "no_acpi"
- Updated the kernel parameter documentation file to add the "no_acpi"
parameter
- Prefixed pr_debug output with "intel_pstate" as suggested by Doug
- Changed the logic for determining the turbo frequency in _PSS: it
previously relied on the odd/even convention, which is how this is
defined in _PSS, but at least two reviewers questioned the source of
this, since it is usually defined only in non public documents such as
BIOS writer's guides. The control field value is now used to determine
the turbo and non turbo max.
- Fixed the Kconfig dependency on ACPI for ACPI_PROCESSOR
- Fixed multi-line comment style

v0:
Base version

Srinivas Pandruvada (6):
  cpufreq: intel_p_state: Fix limiting turbo sub states
  cpufreq: intel_pstate: get P1 from TAR when available
  cpufreq: intel-pstate: Use separate max pstate for scaling
  cpufreq: intel_pstate: Use ACPI perf configuration
  Documentation: kernel_parameters for Intel P state driver
  cpufreq: intel_pstate: Avoid calculation for max/min

 Documentation/kernel-parameters.txt |   3 +
 arch/x86/include/asm/msr-index.h    |   7 +
 drivers/cpufreq/Kconfig.x86         |   1 +
 drivers/cpufreq/intel_pstate.c      | 341 ++++++++++++++++++++++++++++++++++--
 4 files changed, 341 insertions(+), 11 deletions(-)

-- 
2.4.3



* [PATCH v2 1/6] cpufreq: intel_p_state: Fix limiting turbo sub states
  2015-09-01 11:53 [PATCH v2 0/6] Intel P states enhancements Srinivas Pandruvada
@ 2015-09-01 11:53 ` Srinivas Pandruvada
  2015-09-01 11:53 ` [PATCH v2 2/6] cpufreq: intel_pstate: get P1 from TAR when available Srinivas Pandruvada
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 8+ messages in thread
From: Srinivas Pandruvada @ 2015-09-01 11:53 UTC (permalink / raw)
  To: kristen.c.accardi, rafael.j.wysocki; +Cc: linux-pm, Srinivas Pandruvada

Although max_perf_pct reflects sub states in the turbo range, we can't
really restrict the CPU to those states, which gives the wrong impression
that performance is being reduced.
Restricting to those sub states can be achieved by programming the turbo
ratio limits (MSR 0x1AD) when bit 28 of the platform info MSR (MSR 0xCE)
is 1, i.e. when the turbo ratio limits are programmable.

Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
---
 drivers/cpufreq/intel_pstate.c | 93 +++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 92 insertions(+), 1 deletion(-)

diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index fcb929e..bf5b9d9 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -80,6 +80,7 @@ struct pstate_data {
 	int	max_pstate;
 	int	scaling;
 	int	turbo_pstate;
+	u64	turbo_ratio_limit;
 };
 
 struct vid_data {
@@ -132,6 +133,8 @@ struct pstate_funcs {
 	int (*get_scaling)(void);
 	void (*set)(struct cpudata*, int pstate);
 	void (*get_vid)(struct cpudata *);
+	u64 (*get_turbo_ratio_limit)(struct cpudata *);
+	int (*set_turbo_ratio_limit)(struct cpudata *, u64, u64);
 };
 
 struct cpu_defaults {
@@ -425,6 +428,23 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
 	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
 
+	if (pstate_funcs.set_turbo_ratio_limit) {
+		int max_perf_adj;
+		struct cpudata *cpu = all_cpu_data[0];
+
+		if (limits.max_sysfs_pct == 100)
+			max_perf_adj = cpu->pstate.turbo_ratio_limit;
+		else
+			max_perf_adj = fp_toint(mul_fp(int_tofp(
+					cpu->pstate.turbo_ratio_limit & 0xff),
+					limits.max_perf));
+
+		if (max_perf_adj > cpu->pstate.max_pstate)
+			pstate_funcs.set_turbo_ratio_limit(cpu,
+						cpu->pstate.turbo_ratio_limit,
+						max_perf_adj);
+	}
+
 	if (hwp_active)
 		intel_pstate_hwp_set();
 	return count;
@@ -618,6 +638,55 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
 	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
 }
 
+static u64 core_get_turbo_ratio_limit(struct cpudata *cpudata)
+{
+	u64 value;
+
+	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
+
+	return value;
+}
+
+static int core_set_turbo_ratio_limit(struct cpudata *cpudata, u64 def_ratio,
+			       u64 new_ratio)
+{
+	u64 value;
+
+	rdmsrl(MSR_PLATFORM_INFO, value);
+	if (value & BIT(28)) {
+		u64 ratio = 0;
+		u64 out_ratio = 0;
+		u8 max_ratio = new_ratio & 0xff;
+		int i;
+		/*
+		 * If caller provided reduced max ratio (one core active)
+		 * then use this for all other ratios, which are more
+		 * than the default ratio for those many cores active
+		 * for example if default ratio is 0x1a1b1c1d and new ratio
+		 * is 0x1b, then resultant ratio will be 0x1a1b1b1b
+		 */
+		for (i = 0; i < sizeof(def_ratio); ++i) {
+			if (def_ratio & 0xff) {
+				if (new_ratio & 0xff)
+					ratio = new_ratio & 0xff;
+				else {
+					if ((def_ratio & 0xff) > max_ratio)
+						ratio = max_ratio;
+					else
+						ratio = def_ratio & 0xff;
+				}
+				out_ratio |= (ratio << (i * 8));
+			}
+			def_ratio >>= 8;
+			new_ratio >>= 8;
+		}
+		wrmsrl(MSR_NHM_TURBO_RATIO_LIMIT, out_ratio);
+		return 0;
+	}
+
+	return -EPERM;
+}
+
 static int knl_get_turbo_pstate(void)
 {
 	u64 value;
@@ -646,6 +715,8 @@ static struct cpu_defaults core_params = {
 		.get_turbo = core_get_turbo_pstate,
 		.get_scaling = core_get_scaling,
 		.set = core_set_pstate,
+		.get_turbo_ratio_limit = core_get_turbo_ratio_limit,
+		.set_turbo_ratio_limit = core_set_turbo_ratio_limit,
 	},
 };
 
@@ -735,7 +806,10 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 	cpu->pstate.max_pstate = pstate_funcs.get_max();
 	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
 	cpu->pstate.scaling = pstate_funcs.get_scaling();
-
+	if (pstate_funcs.get_turbo_ratio_limit &&
+	    !cpu->pstate.turbo_ratio_limit)
+		cpu->pstate.turbo_ratio_limit =
+			pstate_funcs.get_turbo_ratio_limit(cpu);
 	if (pstate_funcs.get_vid)
 		pstate_funcs.get_vid(cpu);
 	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
@@ -935,6 +1009,21 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 	cpu->cpu = cpunum;
 	intel_pstate_get_cpu_pstates(cpu);
 
+	/* readjust turbo limit ratio after resume or hotplug */
+	if (limits.max_sysfs_pct != 100 &&
+	    pstate_funcs.set_turbo_ratio_limit) {
+		int max_perf_adj;
+
+		max_perf_adj = fp_toint(mul_fp(int_tofp(
+					cpu->pstate.turbo_ratio_limit & 0xff),
+					limits.max_perf));
+
+		if (max_perf_adj > cpu->pstate.max_pstate)
+			pstate_funcs.set_turbo_ratio_limit(cpu,
+						cpu->pstate.turbo_ratio_limit,
+						max_perf_adj);
+	}
+
 	init_timer_deferrable(&cpu->timer);
 	cpu->timer.data = (unsigned long)cpu;
 	cpu->timer.expires = jiffies + HZ/100;
@@ -1096,6 +1185,8 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
 	pstate_funcs.get_scaling = funcs->get_scaling;
 	pstate_funcs.set       = funcs->set;
 	pstate_funcs.get_vid   = funcs->get_vid;
+	pstate_funcs.set_turbo_ratio_limit = funcs->set_turbo_ratio_limit;
+	pstate_funcs.get_turbo_ratio_limit = funcs->get_turbo_ratio_limit;
 }
 
 #if IS_ENABLED(CONFIG_ACPI)
-- 
2.4.3
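
For illustration, a stand-alone sketch of the byte-wise clamping that the
new core_set_turbo_ratio_limit() performs, using the values from the code
comment above (plain user-space C; illustrative only, not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	/* Clamp each per-core-count ratio byte to the new 1-core max ratio. */
	static uint64_t clamp_turbo_ratios(uint64_t def_ratio, uint64_t new_ratio)
	{
		uint8_t max_ratio = new_ratio & 0xff;
		uint64_t out = 0;
		int i;

		for (i = 0; i < 8; i++) {
			uint8_t def = def_ratio & 0xff;
			uint8_t want = new_ratio & 0xff;
			uint8_t ratio = want ? want :
					(def > max_ratio ? max_ratio : def);

			if (def)
				out |= (uint64_t)ratio << (i * 8);
			def_ratio >>= 8;
			new_ratio >>= 8;
		}
		return out;
	}

	int main(void)
	{
		/* default 0x1a1b1c1d, new 1-core ratio 0x1b -> 0x1a1b1b1b */
		printf("0x%llx\n",
		       (unsigned long long)clamp_turbo_ratios(0x1a1b1c1d, 0x1b));
		return 0;
	}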



* [PATCH v2 2/6] cpufreq: intel_pstate: get P1 from TAR when available
  2015-09-01 11:53 [PATCH v2 0/6] Intel P states enhancements Srinivas Pandruvada
  2015-09-01 11:53 ` [PATCH v2 1/6] cpufreq: intel_p_state: Fix limiting turbo sub states Srinivas Pandruvada
@ 2015-09-01 11:53 ` Srinivas Pandruvada
  2015-09-01 11:53 ` [PATCH v2 3/6] cpufreq: intel-pstate: Use separate max pstate for scaling Srinivas Pandruvada
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 8+ messages in thread
From: Srinivas Pandruvada @ 2015-09-01 11:53 UTC (permalink / raw)
  To: kristen.c.accardi, rafael.j.wysocki; +Cc: linux-pm, Srinivas Pandruvada

After Ivybridge, the max non turbo ratio obtained from the platform info
MSR is not always guaranteed to be P1 on client platforms. The turbo
activation ratio (TAR) determines the max non turbo ratio for the current
TDP level, while the ratio in the platform info MSR is the physical max.
The TAR MSR can be locked, so updating this value is not possible on all
platforms.
This change gets the ratio from MSR TURBO_ACTIVATION_RATIO when it is
available, but also does some sanity checking to make sure the value is
correct. The sanity check involves reading the TDP ratio for the current
TDP control value when the platform has configurable TDP, and matching
TAR against it.

Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
---
 arch/x86/include/asm/msr-index.h |  7 +++++++
 drivers/cpufreq/intel_pstate.c   | 39 +++++++++++++++++++++++++++++++++++----
 2 files changed, 42 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 9ebc3d0..ae868de 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -184,6 +184,13 @@
 #define MSR_GFX_PERF_LIMIT_REASONS	0x000006B0
 #define MSR_RING_PERF_LIMIT_REASONS	0x000006B1
 
+/* Config TDP MSRs */
+#define MSR_CONFIG_TDP_NOMINAL		0x00000648
+#define MSR_CONFIG_TDP_LEVEL1		0x00000649
+#define MSR_CONFIG_TDP_LEVEL2		0x0000064A
+#define MSR_CONFIG_TDP_CONTROL		0x0000064B
+#define MSR_TURBO_ACTIVATION_RATIO	0x0000064C
+
 /* Hardware P state interface */
 #define MSR_PPERF			0x0000064e
 #define MSR_PERF_LIMIT_REASONS		0x0000064f
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index bf5b9d9..6260cc7 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -43,7 +43,6 @@
 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
 #define fp_toint(X) ((X) >> FRAC_BITS)
 
-
 static inline int32_t mul_fp(int32_t x, int32_t y)
 {
 	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
@@ -603,10 +602,42 @@ static int core_get_min_pstate(void)
 
 static int core_get_max_pstate(void)
 {
-	u64 value;
+	u64 tar;
+	u64 plat_info;
+	int max_pstate;
+	int err;
+
+	rdmsrl(MSR_PLATFORM_INFO, plat_info);
+	max_pstate = (plat_info >> 8) & 0xFF;
+
+	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
+	if (!err) {
+		/* Do some sanity checking for safety */
+		if (plat_info & 0x600000000) {
+			u64 tdp_ctrl;
+			u64 tdp_ratio;
+			int tdp_msr;
+
+			err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
+			if (err)
+				goto skip_tar;
+
+			tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
+			err = rdmsrl_safe(tdp_msr, &tdp_ratio);
+			if (err)
+				goto skip_tar;
+
+			if (tdp_ratio - 1 == tar) {
+				max_pstate = tar;
+				pr_debug("max_pstate=TAC %x\n", max_pstate);
+			} else {
+				goto skip_tar;
+			}
+		}
+	}
 
-	rdmsrl(MSR_PLATFORM_INFO, value);
-	return (value >> 8) & 0xFF;
+skip_tar:
+	return max_pstate;
 }
 
 static int core_get_turbo_pstate(void)
-- 
2.4.3
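
In outline, the sanity check above works as follows (a sketch assuming the
usual MSR helpers from <asm/msr.h>; the function name is illustrative and
the bit-field comments describe the masks used in the patch):

	/*
	 * - plat_info bits 15:8  : physical max non turbo ratio (P1)
	 * - plat_info bits 34:33 : number of Config TDP levels (mask 0x600000000)
	 * - MSR_CONFIG_TDP_CONTROL selects the active level; the ratio for
	 *   that level is read from MSR_CONFIG_TDP_NOMINAL + level
	 * - TAR is trusted only if it equals that TDP ratio minus one
	 */
	static int example_get_max_pstate(void)
	{
		u64 plat_info, tar, tdp_ctrl, tdp_ratio;
		int max_pstate;

		rdmsrl(MSR_PLATFORM_INFO, plat_info);
		max_pstate = (plat_info >> 8) & 0xff;	/* physical P1 */

		if (rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar))
			return max_pstate;		/* no TAR */

		if (!(plat_info & 0x600000000ULL))
			return max_pstate;		/* no Config TDP */

		if (rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl) ||
		    rdmsrl_safe(MSR_CONFIG_TDP_NOMINAL + tdp_ctrl, &tdp_ratio))
			return max_pstate;

		return (tdp_ratio - 1 == tar) ? (int)tar : max_pstate;
	}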



* [PATCH v2 3/6] cpufreq: intel-pstate: Use separate max pstate for scaling
  2015-09-01 11:53 [PATCH v2 0/6] Intel P states enhancements Srinivas Pandruvada
  2015-09-01 11:53 ` [PATCH v2 1/6] cpufreq: intel_p_state: Fix limiting turbo sub states Srinivas Pandruvada
  2015-09-01 11:53 ` [PATCH v2 2/6] cpufreq: intel_pstate: get P1 from TAR when available Srinivas Pandruvada
@ 2015-09-01 11:53 ` Srinivas Pandruvada
  2015-09-01 11:53 ` [PATCH v2 4/6] cpufreq: intel_pstate: Use ACPI perf configuration Srinivas Pandruvada
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 8+ messages in thread
From: Srinivas Pandruvada @ 2015-09-01 11:53 UTC (permalink / raw)
  To: kristen.c.accardi, rafael.j.wysocki; +Cc: linux-pm, Srinivas Pandruvada

Systems with configurable TDP have multiple max non turbo P states. The
Intel P state driver uses the max non turbo P state for scaling, but
using the TDP-limited max non turbo P state causes underestimation of the
next P state. So keep using the physical max non turbo P state for
scaling, as before.

Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
---
 drivers/cpufreq/intel_pstate.c | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 6260cc7..e92a59f 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -77,6 +77,7 @@ struct pstate_data {
 	int	current_pstate;
 	int	min_pstate;
 	int	max_pstate;
+	int	max_pstate_physical;
 	int	scaling;
 	int	turbo_pstate;
 	u64	turbo_ratio_limit;
@@ -127,6 +128,7 @@ struct pstate_adjust_policy {
 
 struct pstate_funcs {
 	int (*get_max)(void);
+	int (*get_max_physical)(void);
 	int (*get_min)(void);
 	int (*get_turbo)(void);
 	int (*get_scaling)(void);
@@ -600,6 +602,14 @@ static int core_get_min_pstate(void)
 	return (value >> 40) & 0xFF;
 }
 
+static int core_get_max_pstate_physical(void)
+{
+	u64 value;
+
+	rdmsrl(MSR_PLATFORM_INFO, value);
+	return (value >> 8) & 0xFF;
+}
+
 static int core_get_max_pstate(void)
 {
 	u64 tar;
@@ -742,6 +752,7 @@ static struct cpu_defaults core_params = {
 	},
 	.funcs = {
 		.get_max = core_get_max_pstate,
+		.get_max_physical = core_get_max_pstate_physical,
 		.get_min = core_get_min_pstate,
 		.get_turbo = core_get_turbo_pstate,
 		.get_scaling = core_get_scaling,
@@ -762,6 +773,7 @@ static struct cpu_defaults byt_params = {
 	},
 	.funcs = {
 		.get_max = byt_get_max_pstate,
+		.get_max_physical = byt_get_max_pstate,
 		.get_min = byt_get_min_pstate,
 		.get_turbo = byt_get_turbo_pstate,
 		.set = byt_set_pstate,
@@ -781,6 +793,7 @@ static struct cpu_defaults knl_params = {
 	},
 	.funcs = {
 		.get_max = core_get_max_pstate,
+		.get_max_physical = core_get_max_pstate_physical,
 		.get_min = core_get_min_pstate,
 		.get_turbo = knl_get_turbo_pstate,
 		.get_scaling = core_get_scaling,
@@ -835,6 +848,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 {
 	cpu->pstate.min_pstate = pstate_funcs.get_min();
 	cpu->pstate.max_pstate = pstate_funcs.get_max();
+	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
 	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
 	cpu->pstate.scaling = pstate_funcs.get_scaling();
 	if (pstate_funcs.get_turbo_ratio_limit &&
@@ -856,7 +870,8 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu)
 
 	sample->freq = fp_toint(
 		mul_fp(int_tofp(
-			cpu->pstate.max_pstate * cpu->pstate.scaling / 100),
+			cpu->pstate.max_pstate_physical *
+			cpu->pstate.scaling / 100),
 			core_pct));
 
 	sample->core_pct_busy = (int32_t)core_pct;
@@ -924,7 +939,7 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
 	 * specified pstate.
 	 */
 	core_busy = cpu->sample.core_pct_busy;
-	max_pstate = int_tofp(cpu->pstate.max_pstate);
+	max_pstate = int_tofp(cpu->pstate.max_pstate_physical);
 	current_pstate = int_tofp(cpu->pstate.current_pstate);
 	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
 
@@ -1211,6 +1226,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
 static void copy_cpu_funcs(struct pstate_funcs *funcs)
 {
 	pstate_funcs.get_max   = funcs->get_max;
+	pstate_funcs.get_max_physical = funcs->get_max_physical;
 	pstate_funcs.get_min   = funcs->get_min;
 	pstate_funcs.get_turbo = funcs->get_turbo;
 	pstate_funcs.get_scaling = funcs->get_scaling;
-- 
2.4.3
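
A worked example of the underestimation (hypothetical ratios; MPERF is
assumed to tick at the physical max non turbo frequency):

	physical max non turbo P1  = 24  (2.4 GHz)
	TAR-limited max non turbo  = 20  (2.0 GHz)
	CPU fully busy at pstate 20:
	    core_pct_busy ~= aperf/mperf * 100 = 20/24 * 100 = 83.3
	scaled busy = core_pct_busy * max_pstate / current_pstate
	    with max_pstate = 24 (physical): 83.3 * 24/20 = 100.0  (correct)
	    with max_pstate = 20 (TAR):      83.3 * 20/20 =  83.3  (too low)

	The too-low estimate makes the governor pick a lower next P state,
	hence the separate max_pstate_physical used for scaling.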



* [PATCH v2 4/6] cpufreq: intel_pstate: Use ACPI perf configuration
  2015-09-01 11:53 [PATCH v2 0/6] Intel P states enhancements Srinivas Pandruvada
                   ` (2 preceding siblings ...)
  2015-09-01 11:53 ` [PATCH v2 3/6] cpufreq: intel-pstate: Use separate max pstate for scaling Srinivas Pandruvada
@ 2015-09-01 11:53 ` Srinivas Pandruvada
  2015-09-26  0:47   ` Rafael J. Wysocki
  2015-09-01 11:54 ` [PATCH v2 5/6] Documentation: kernel_parameters for Intel P state driver Srinivas Pandruvada
  2015-09-01 11:54 ` [PATCH v2 6/6] cpufreq: intel_pstate: Avoid calculation for max/min Srinivas Pandruvada
  5 siblings, 1 reply; 8+ messages in thread
From: Srinivas Pandruvada @ 2015-09-01 11:53 UTC (permalink / raw)
  To: kristen.c.accardi, rafael.j.wysocki; +Cc: linux-pm, Srinivas Pandruvada

Use ACPI _PSS to limit the Intel P state turbo, max and min ratios.
The driver uses the ACPI processor performance library calls to register
performance data. The following logic is used to adjust the Intel P state
driver limits:
- If there is no turbo entry in _PSS, disable Intel P state turbo and
limit to the non turbo max
- If the non turbo max ratio is higher than the _PSS max non turbo value,
set the max non turbo ratio to the _PSS non turbo max
- If the min ratio is lower than the _PSS min, change the min ratio to
match the _PSS min
- Scale the _PSS turbo frequency to the max turbo frequency based on the
control value.
This feature can be disabled with the kernel parameter:
intel_pstate=no_acpi

Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
---
 drivers/cpufreq/Kconfig.x86    |   1 +
 drivers/cpufreq/intel_pstate.c | 142 ++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 142 insertions(+), 1 deletion(-)

diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index c59bdcb..adbd1de 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -5,6 +5,7 @@
 config X86_INTEL_PSTATE
        bool "Intel P state control"
        depends on X86
+       select ACPI_PROCESSOR if ACPI
        help
           This driver provides a P state for Intel core processors.
 	  The driver implements an internal governor and will become
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index e92a59f..f5aa1da 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -34,6 +34,10 @@
 #include <asm/cpu_device_id.h>
 #include <asm/cpufeature.h>
 
+#if IS_ENABLED(CONFIG_ACPI)
+#include <acpi/processor.h>
+#endif
+
 #define BYT_RATIOS		0x66a
 #define BYT_VIDS		0x66b
 #define BYT_TURBO_RATIOS	0x66c
@@ -114,6 +118,9 @@ struct cpudata {
 	u64	prev_mperf;
 	u64	prev_tsc;
 	struct sample sample;
+#if IS_ENABLED(CONFIG_ACPI)
+	struct acpi_processor_performance acpi_perf_data;
+#endif
 };
 
 static struct cpudata **all_cpu_data;
@@ -146,6 +153,7 @@ struct cpu_defaults {
 static struct pstate_adjust_policy pid_params;
 static struct pstate_funcs pstate_funcs;
 static int hwp_active;
+static int no_acpi_perf;
 
 struct perf_limits {
 	int no_turbo;
@@ -173,6 +181,124 @@ static struct perf_limits limits = {
 	.min_sysfs_pct = 0,
 };
 
+#if IS_ENABLED(CONFIG_ACPI)
+static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
+{
+	struct cpudata *cpu;
+	int ret;
+	int min_perf;
+	bool turbo_absent = false;
+	int max_pstate_index;
+	int i;
+
+	cpu = all_cpu_data[policy->cpu];
+
+	if (!cpu->acpi_perf_data.shared_cpu_map &&
+	    zalloc_cpumask_var_node(&cpu->acpi_perf_data.shared_cpu_map,
+				    GFP_KERNEL, cpu_to_node(policy->cpu))) {
+		return -ENOMEM;
+	}
+
+	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
+						  policy->cpu);
+	if (ret)
+		return ret;
+
+	/* Check if the control value in _PSS is for PERF_CTL MSR */
+	if (cpu->acpi_perf_data.control_register.space_id !=
+						ACPI_ADR_SPACE_FIXED_HARDWARE)
+		return -EIO;
+
+	pr_debug("intel_pstate: CPU%u - ACPI _PSS perf data\n", policy->cpu);
+	for (i = 0; i < cpu->acpi_perf_data.state_count; i++)
+		pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
+			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
+			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
+			 (u32) cpu->acpi_perf_data.states[i].power,
+			 (u32) cpu->acpi_perf_data.states[i].control);
+
+	/*
+	 * If there is only one entry _PSS, simply ignore _PSS and continue as
+	 * usual without taking _PSS into account
+	 */
+	if (cpu->acpi_perf_data.state_count < 2)
+		return 0;
+
+	/* Check if there is a turbo freq in _PSS */
+	if ((cpu->acpi_perf_data.states[0].control >> 8) <=
+	    cpu->pstate.max_pstate) {
+		pr_debug("intel_pstate: no turbo range exists in _PSS\n");
+		limits.no_turbo = limits.turbo_disabled = 1;
+		cpu->pstate.turbo_pstate = cpu->pstate.max_pstate;
+		turbo_absent = true;
+	}
+
+	/* Check if the max non turbo p state < Intel P state max */
+	if (turbo_absent)
+		max_pstate_index = 0;
+	else
+		max_pstate_index = 1;
+	if ((cpu->acpi_perf_data.states[max_pstate_index].control >> 8) <
+	    cpu->pstate.max_pstate)
+		cpu->pstate.max_pstate =
+			cpu->acpi_perf_data.states[max_pstate_index].control;
+
+	/* check If min perf > Intel P State min */
+	min_perf = cpu->acpi_perf_data.states[
+			cpu->acpi_perf_data.state_count - 1].control >> 8;
+	if (min_perf > cpu->pstate.min_pstate &&
+	    min_perf < cpu->pstate.max_pstate) {
+		cpu->pstate.min_pstate = min_perf;
+		policy->cpuinfo.min_freq = cpu->pstate.min_pstate *
+						cpu->pstate.scaling;
+	}
+
+	if (turbo_absent)
+		policy->cpuinfo.max_freq =
+			cpu->pstate.max_pstate * cpu->pstate.scaling;
+	else {
+		policy->cpuinfo.max_freq =
+			cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+		/* scale freq to intel pstate turbo scale */
+		cpu->acpi_perf_data.states[0].core_frequency =
+				cpu->pstate.scaling *
+				(cpu->acpi_perf_data.states[0].control >> 8);
+	}
+
+	pr_debug("intel_pstate: Updated limits 0x%x 0x%x 0x%x\n",
+		 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
+		 cpu->pstate.turbo_pstate);
+	pr_debug("intel_pstate: policy max_freq=%d Khz min_freq = %d KHz\n",
+		 policy->cpuinfo.max_freq, policy->cpuinfo.min_freq);
+
+	return 0;
+}
+
+static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
+{
+	struct cpudata *cpu;
+
+	if (!no_acpi_perf)
+		return 0;
+
+	cpu = all_cpu_data[policy->cpu];
+	acpi_processor_unregister_performance(&cpu->acpi_perf_data,
+					      policy->cpu);
+	return 0;
+}
+
+#else
+static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
+{
+	return 0;
+}
+
+static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
+{
+	return 0;
+}
+#endif
+
 static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
 			     int deadband, int integral) {
 	pid->setpoint = setpoint;
@@ -1182,18 +1308,30 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
 	policy->cpuinfo.max_freq =
 		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+	if (!no_acpi_perf)
+		intel_pstate_init_perf_limits(policy);
+	/*
+	 * If there is no acpi perf data or error, we ignore and use Intel P
+	 * state calculated limits, So this is not fatal error.
+	 */
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
 	cpumask_set_cpu(policy->cpu, policy->cpus);
 
 	return 0;
 }
 
+static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+{
+	return intel_pstate_exit_perf_limits(policy);
+}
+
 static struct cpufreq_driver intel_pstate_driver = {
 	.flags		= CPUFREQ_CONST_LOOPS,
 	.verify		= intel_pstate_verify_policy,
 	.setpolicy	= intel_pstate_set_policy,
 	.get		= intel_pstate_get,
 	.init		= intel_pstate_cpu_init,
+	.exit		= intel_pstate_cpu_exit,
 	.stop_cpu	= intel_pstate_stop_cpu,
 	.name		= "intel_pstate",
 };
@@ -1237,7 +1375,6 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
 }
 
 #if IS_ENABLED(CONFIG_ACPI)
-#include <acpi/processor.h>
 
 static bool intel_pstate_no_acpi_pss(void)
 {
@@ -1425,6 +1562,9 @@ static int __init intel_pstate_setup(char *str)
 		force_load = 1;
 	if (!strcmp(str, "hwp_only"))
 		hwp_only = 1;
+	if (!strcmp(str, "no_acpi"))
+		no_acpi_perf = 1;
+
 	return 0;
 }
 early_param("intel_pstate", intel_pstate_setup);
-- 
2.4.3
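
For illustration, a hypothetical _PSS table and how the adjustment logic in
this patch reads it (all frequencies and control values are made up; the
ratio is taken from bits 15:8 of the control value, i.e. control >> 8, and
the core scaling of 100000 kHz per ratio unit is assumed):

	 P0: 2401000 kHz, control 0x1800  -> ratio 0x18 (24), turbo entry
	 P1: 2300000 kHz, control 0x1700  -> ratio 0x17 (23), max non turbo
	 ...
	 Pn:  800000 kHz, control 0x0800  -> ratio 0x08 (8),  min

	- states[0] ratio (24) > driver max_pstate (23): a turbo range exists
	- states[1] ratio (23) is not below the driver max_pstate: unchanged
	- states[n] ratio (8) raises the driver min_pstate only if it is higher
	- states[0].core_frequency is rescaled to ratio * scaling = 2400000 kHz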



* [PATCH v2 5/6] Documentation: kernel_parameters for Intel P state driver
  2015-09-01 11:53 [PATCH v2 0/6] Intel P states enhancements Srinivas Pandruvada
                   ` (3 preceding siblings ...)
  2015-09-01 11:53 ` [PATCH v2 4/6] cpufreq: intel_pstate: Use ACPI perf configuration Srinivas Pandruvada
@ 2015-09-01 11:54 ` Srinivas Pandruvada
  2015-09-01 11:54 ` [PATCH v2 6/6] cpufreq: intel_pstate: Avoid calculation for max/min Srinivas Pandruvada
  5 siblings, 0 replies; 8+ messages in thread
From: Srinivas Pandruvada @ 2015-09-01 11:54 UTC (permalink / raw)
  To: kristen.c.accardi, rafael.j.wysocki; +Cc: linux-pm, Srinivas Pandruvada

Added a new option, "no_acpi", to stop the Intel P state driver from
using ACPI processor performance control objects.

Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
---
 Documentation/kernel-parameters.txt | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 1d6f045..1c923c1 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1540,6 +1540,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 		hwp_only
 			Only load intel_pstate on systems which support
 			hardware P state control (HWP) if available.
+		no_acpi
+			Don't use ACPI processor performance control objects
+			_PSS and _PPC specified limits.
 
 	intremap=	[X86-64, Intel-IOMMU]
 			on	enable Interrupt Remapping (default)
-- 
2.4.3
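
Usage example (illustrative; how the kernel command line is edited depends
on the boot loader and distribution):

	... ro quiet intel_pstate=no_acpi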



* [PATCH v2 6/6] cpufreq: intel_pstate: Avoid calculation for max/min
  2015-09-01 11:53 [PATCH v2 0/6] Intel P states enhancements Srinivas Pandruvada
                   ` (4 preceding siblings ...)
  2015-09-01 11:54 ` [PATCH v2 5/6] Documentation: kernel_parameters for Intel P state driver Srinivas Pandruvada
@ 2015-09-01 11:54 ` Srinivas Pandruvada
  5 siblings, 0 replies; 8+ messages in thread
From: Srinivas Pandruvada @ 2015-09-01 11:54 UTC (permalink / raw)
  To: kristen.c.accardi, rafael.j.wysocki; +Cc: linux-pm, Srinivas Pandruvada

When cpufreq requests a policy change, look up the control values in _PSS
instead of using the max/min perf percentage calculations. Those
calculations can miss the next control state in boundary conditions.

Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
---
 drivers/cpufreq/intel_pstate.c | 51 +++++++++++++++++++++++++++++++++++++-----
 1 file changed, 46 insertions(+), 5 deletions(-)

diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index f5aa1da..e2acd69 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -166,6 +166,8 @@ struct perf_limits {
 	int max_sysfs_pct;
 	int min_policy_pct;
 	int min_sysfs_pct;
+	int max_perf_ctl;
+	int min_perf_ctl;
 };
 
 static struct perf_limits limits = {
@@ -179,6 +181,8 @@ static struct perf_limits limits = {
 	.max_sysfs_pct = 100,
 	.min_policy_pct = 0,
 	.min_sysfs_pct = 0,
+	.max_perf_ctl = 0,
+	.min_perf_ctl = 0,
 };
 
 #if IS_ENABLED(CONFIG_ACPI)
@@ -941,12 +945,23 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 	 * policy, or by cpu specific default values determined through
 	 * experimentation.
 	 */
-	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
-	*max = clamp_t(int, max_perf_adj,
-			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
+	if (limits.max_perf_ctl && limits.max_sysfs_pct >=
+						limits.max_policy_pct) {
+		*max = limits.max_perf_ctl;
+	} else {
+		max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf),
+					limits.max_perf));
+		*max = clamp_t(int, max_perf_adj, cpu->pstate.min_pstate,
+			       cpu->pstate.turbo_pstate);
+	}
 
-	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
-	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
+	if (limits.min_perf_ctl) {
+		*min = limits.min_perf_ctl;
+	} else {
+		min_perf = fp_toint(mul_fp(int_tofp(max_perf),
+				    limits.min_perf));
+		*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
+	}
 }
 
 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
@@ -1229,6 +1244,12 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
 
 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 {
+#if IS_ENABLED(CONFIG_ACPI)
+	struct cpudata *cpu;
+	int i;
+#endif
+	pr_debug("intel_pstate: %s max %u policy->max %u\n", __func__,
+		 policy->cpuinfo.max_freq, policy->max);
 	if (!policy->cpuinfo.max_freq)
 		return -ENODEV;
 
@@ -1241,6 +1262,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 		limits.max_perf_pct = 100;
 		limits.max_perf = int_tofp(1);
 		limits.no_turbo = 0;
+		limits.max_perf_ctl = 0;
+		limits.min_perf_ctl = 0;
 		return 0;
 	}
 
@@ -1254,6 +1277,24 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
 	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
 
+#if IS_ENABLED(CONFIG_ACPI)
+	cpu = all_cpu_data[policy->cpu];
+	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
+		if ((cpu->acpi_perf_data.states[i].control >> 8) *
+		    cpu->pstate.scaling == policy->max)
+			limits.max_perf_ctl =
+				cpu->acpi_perf_data.states[i].control >> 8;
+		if ((cpu->acpi_perf_data.states[i].control >> 8) *
+		    cpu->pstate.scaling == policy->min)
+			limits.min_perf_ctl =
+				cpu->acpi_perf_data.states[i].control >> 8;
+	}
+
+	pr_debug("intel_pstate: max %u policy_max %u perf_ctl [0x%x-0x%x]\n",
+		 policy->cpuinfo.max_freq, policy->max, limits.min_perf_ctl,
+		 limits.max_perf_ctl);
+#endif
+
 	if (hwp_active)
 		intel_pstate_hwp_set();
 
-- 
2.4.3
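
A worked example of the boundary condition (hypothetical values, assuming
the percentage and fixed-point conversions truncate as in the existing
max/min calculation):

	turbo ceiling: pstate 32 (3.2 GHz), policy->max requested: 2.3 GHz
	max_policy_pct = 2300000 * 100 / 3200000 = 71   (71.875 truncated)
	max pstate     = 32 * 71 / 100           = 22   (22.72 truncated)

	The percentage path caps the CPU at pstate 22 (2.2 GHz) instead of
	the requested 23; matching policy->max directly against the _PSS
	control values yields the perf_ctl ratio 23.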



* Re: [PATCH v2 4/6] cpufreq: intel_pstate: Use ACPI perf configuration
  2015-09-01 11:53 ` [PATCH v2 4/6] cpufreq: intel_pstate: Use ACPI perf configuration Srinivas Pandruvada
@ 2015-09-26  0:47   ` Rafael J. Wysocki
  0 siblings, 0 replies; 8+ messages in thread
From: Rafael J. Wysocki @ 2015-09-26  0:47 UTC (permalink / raw)
  To: Srinivas Pandruvada; +Cc: kristen.c.accardi, rafael.j.wysocki, linux-pm

On Tuesday, September 01, 2015 04:53:59 AM Srinivas Pandruvada wrote:
> Use ACPI _PSS to limit the Intel P state turbo, max and min ratios.
> The driver uses the ACPI processor performance library calls to register
> performance data. The following logic is used to adjust the Intel P state
> driver limits:
> - If there is no turbo entry in _PSS, disable Intel P state turbo and
> limit to the non turbo max
> - If the non turbo max ratio is higher than the _PSS max non turbo value,
> set the max non turbo ratio to the _PSS non turbo max
> - If the min ratio is lower than the _PSS min, change the min ratio to
> match the _PSS min
> - Scale the _PSS turbo frequency to the max turbo frequency based on the
> control value.
> This feature can be disabled with the kernel parameter:
> intel_pstate=no_acpi
> 
> Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
> ---
>  drivers/cpufreq/Kconfig.x86    |   1 +
>  drivers/cpufreq/intel_pstate.c | 142 ++++++++++++++++++++++++++++++++++++++++-
>  2 files changed, 142 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
> index c59bdcb..adbd1de 100644
> --- a/drivers/cpufreq/Kconfig.x86
> +++ b/drivers/cpufreq/Kconfig.x86
> @@ -5,6 +5,7 @@
>  config X86_INTEL_PSTATE
>         bool "Intel P state control"
>         depends on X86
> +       select ACPI_PROCESSOR if ACPI
>         help
>            This driver provides a P state for Intel core processors.
>  	  The driver implements an internal governor and will become
> diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
> index e92a59f..f5aa1da 100644
> --- a/drivers/cpufreq/intel_pstate.c
> +++ b/drivers/cpufreq/intel_pstate.c
> @@ -34,6 +34,10 @@
>  #include <asm/cpu_device_id.h>
>  #include <asm/cpufeature.h>
>  
> +#if IS_ENABLED(CONFIG_ACPI)
> +#include <acpi/processor.h>
> +#endif
> +
>  #define BYT_RATIOS		0x66a
>  #define BYT_VIDS		0x66b
>  #define BYT_TURBO_RATIOS	0x66c
> @@ -114,6 +118,9 @@ struct cpudata {
>  	u64	prev_mperf;
>  	u64	prev_tsc;
>  	struct sample sample;
> +#if IS_ENABLED(CONFIG_ACPI)
> +	struct acpi_processor_performance acpi_perf_data;
> +#endif
>  };
>  
>  static struct cpudata **all_cpu_data;
> @@ -146,6 +153,7 @@ struct cpu_defaults {
>  static struct pstate_adjust_policy pid_params;
>  static struct pstate_funcs pstate_funcs;
>  static int hwp_active;
> +static int no_acpi_perf;
>  
>  struct perf_limits {
>  	int no_turbo;
> @@ -173,6 +181,124 @@ static struct perf_limits limits = {
>  	.min_sysfs_pct = 0,
>  };
>  
> +#if IS_ENABLED(CONFIG_ACPI)
> +static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
> +{
> +	struct cpudata *cpu;
> +	int ret;
> +	int min_perf;
> +	bool turbo_absent = false;
> +	int max_pstate_index;
> +	int i;
> +
> +	cpu = all_cpu_data[policy->cpu];
> +
> +	if (!cpu->acpi_perf_data.shared_cpu_map &&
> +	    zalloc_cpumask_var_node(&cpu->acpi_perf_data.shared_cpu_map,
> +				    GFP_KERNEL, cpu_to_node(policy->cpu))) {
> +		return -ENOMEM;
> +	}
> +
> +	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
> +						  policy->cpu);
> +	if (ret)
> +		return ret;
> +
> +	/* Check if the control value in _PSS is for PERF_CTL MSR */

I'd add

	"which should guarantee that the states returned by it map to the states
	 in our list directly."

> +	if (cpu->acpi_perf_data.control_register.space_id !=
> +						ACPI_ADR_SPACE_FIXED_HARDWARE)
> +		return -EIO;
> +
> +	pr_debug("intel_pstate: CPU%u - ACPI _PSS perf data\n", policy->cpu);
> +	for (i = 0; i < cpu->acpi_perf_data.state_count; i++)
> +		pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
> +			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
> +			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
> +			 (u32) cpu->acpi_perf_data.states[i].power,
> +			 (u32) cpu->acpi_perf_data.states[i].control);
> +
> +	/*
> +	 * If there is only one entry _PSS, simply ignore _PSS and continue as
> +	 * usual without taking _PSS into account
> +	 */
> +	if (cpu->acpi_perf_data.state_count < 2)
> +		return 0;
> +
> +	/* Check if there is a turbo freq in _PSS */
> +	if ((cpu->acpi_perf_data.states[0].control >> 8) <=

I'd like the formula

	cpu->acpi_perf_data.states[i].control >> 8

to go into a separate function taking cpu and i as args and having a comment
explaining where this formula comes from next to it. 
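
For illustration, such a helper could look like this (the name and the
comment are illustrative, not taken from the patch):

	/*
	 * With ACPI_ADR_SPACE_FIXED_HARDWARE the _PSS control value is the
	 * PERF_CTL MSR payload, so the P state ratio sits in bits 15:8.
	 */
	static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
	{
		return cpu->acpi_perf_data.states[index].control >> 8;
	}
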

> +	    cpu->pstate.max_pstate) {
> +		pr_debug("intel_pstate: no turbo range exists in _PSS\n");
> +		limits.no_turbo = limits.turbo_disabled = 1;
> +		cpu->pstate.turbo_pstate = cpu->pstate.max_pstate;
> +		turbo_absent = true;
> +	}
> +
> +	/* Check if the max non turbo p state < Intel P state max */
> +	if (turbo_absent)
> +		max_pstate_index = 0;
> +	else
> +		max_pstate_index = 1;

What about

	max_pstate_index = turbo_absent ? 0 : 1;

> +	if ((cpu->acpi_perf_data.states[max_pstate_index].control >> 8) <
> +	    cpu->pstate.max_pstate)
> +		cpu->pstate.max_pstate =
> +			cpu->acpi_perf_data.states[max_pstate_index].control;

Something is suspicious here.  Did you forget about the >> 8?

If so, I'd probably use an auxiliary variable for that, ie

	acpi_perf_max_pstate = cpu->acpi_perf_data.states[max_pstate_index].control >> 8;
	if (acpi_perf_max_pstate < cpu->pstate.max_pstate)
		cpu->pstate.max_pstate = acpi_perf_max_pstate;

> +
> +	/* check If min perf > Intel P State min */
> +	min_perf = cpu->acpi_perf_data.states[
> +			cpu->acpi_perf_data.state_count - 1].control >> 8;
> +	if (min_perf > cpu->pstate.min_pstate &&
> +	    min_perf < cpu->pstate.max_pstate) {
> +		cpu->pstate.min_pstate = min_perf;
> +		policy->cpuinfo.min_freq = cpu->pstate.min_pstate *
> +						cpu->pstate.scaling;
> +	}
> +
> +	if (turbo_absent)
> +		policy->cpuinfo.max_freq =
> +			cpu->pstate.max_pstate * cpu->pstate.scaling;
> +	else {
> +		policy->cpuinfo.max_freq =
> +			cpu->pstate.turbo_pstate * cpu->pstate.scaling;
> +		/* scale freq to intel pstate turbo scale */
> +		cpu->acpi_perf_data.states[0].core_frequency =
> +				cpu->pstate.scaling *
> +				(cpu->acpi_perf_data.states[0].control >> 8);
> +	}
> +
> +	pr_debug("intel_pstate: Updated limits 0x%x 0x%x 0x%x\n",
> +		 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
> +		 cpu->pstate.turbo_pstate);
> +	pr_debug("intel_pstate: policy max_freq=%d Khz min_freq = %d KHz\n",
> +		 policy->cpuinfo.max_freq, policy->cpuinfo.min_freq);
> +
> +	return 0;
> +}
> +
> +static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
> +{
> +	struct cpudata *cpu;
> +
> +	if (!no_acpi_perf)
> +		return 0;
> +
> +	cpu = all_cpu_data[policy->cpu];
> +	acpi_processor_unregister_performance(&cpu->acpi_perf_data,
> +					      policy->cpu);
> +	return 0;
> +}
> +
> +#else
> +static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
> +{
> +	return 0;
> +}
> +
> +static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
> +{
> +	return 0;
> +}
> +#endif
> +
>  static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
>  			     int deadband, int integral) {
>  	pid->setpoint = setpoint;
> @@ -1182,18 +1308,30 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
>  	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
>  	policy->cpuinfo.max_freq =
>  		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
> +	if (!no_acpi_perf)
> +		intel_pstate_init_perf_limits(policy);
> +	/*
> +	 * If there is no acpi perf data or error, we ignore and use Intel P
> +	 * state calculated limits, So this is not fatal error.
> +	 */
>  	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
>  	cpumask_set_cpu(policy->cpu, policy->cpus);
>  
>  	return 0;
>  }
>  
> +static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
> +{
> +	return intel_pstate_exit_perf_limits(policy);
> +}
> +
>  static struct cpufreq_driver intel_pstate_driver = {
>  	.flags		= CPUFREQ_CONST_LOOPS,
>  	.verify		= intel_pstate_verify_policy,
>  	.setpolicy	= intel_pstate_set_policy,
>  	.get		= intel_pstate_get,
>  	.init		= intel_pstate_cpu_init,
> +	.exit		= intel_pstate_cpu_exit,
>  	.stop_cpu	= intel_pstate_stop_cpu,
>  	.name		= "intel_pstate",
>  };
> @@ -1237,7 +1375,6 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
>  }
>  
>  #if IS_ENABLED(CONFIG_ACPI)
> -#include <acpi/processor.h>
>  
>  static bool intel_pstate_no_acpi_pss(void)
>  {
> @@ -1425,6 +1562,9 @@ static int __init intel_pstate_setup(char *str)
>  		force_load = 1;
>  	if (!strcmp(str, "hwp_only"))
>  		hwp_only = 1;
> +	if (!strcmp(str, "no_acpi"))
> +		no_acpi_perf = 1;
> +
>  	return 0;
>  }
>  early_param("intel_pstate", intel_pstate_setup);

The other patches in this series need ACKs from Kristen.

Thanks,
Rafael


