All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] drm/etnaviv: add etnaviv cooling device
@ 2017-03-12 19:00 ` Russell King
  0 siblings, 0 replies; 16+ messages in thread
From: Russell King @ 2017-03-12 19:00 UTC (permalink / raw)
  To: linux-arm-kernel

Each Vivante GPU contains a clock divider which can divide the GPU clock
by 2^n, which can lower the power dissipation from the GPU.  It has been
suggested that the GC600 on Dove is responsible for 20-30% of the power
dissipation from the SoC, so lowering the GPU clock rate provides a way
to throttle the power dissipation, and reduce the temperature when the
SoC gets hot.

This patch hooks the Etnaviv driver into the kernel's thermal management
to allow the GPUs to be throttled when necessary, allowing a reduction in
GPU clock rate from /1 to /64 in power of 2 steps.

Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
---
 drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 84 ++++++++++++++++++++++++++++-------
 drivers/gpu/drm/etnaviv/etnaviv_gpu.h |  2 +
 2 files changed, 71 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 130d7d517a19..bd95182d0852 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -18,6 +18,7 @@
 #include <linux/dma-fence.h>
 #include <linux/moduleparam.h>
 #include <linux/of_device.h>
+#include <linux/thermal.h>
 
 #include "etnaviv_cmdbuf.h"
 #include "etnaviv_dump.h"
@@ -409,6 +410,17 @@ static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
 	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
 }
 
+static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
+{
+	unsigned int fscale = 1 << (6 - gpu->freq_scale);
+	u32 clock;
+
+	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
+		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
+
+	etnaviv_gpu_load_clock(gpu, clock);
+}
+
 static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
 {
 	u32 control, idle;
@@ -426,11 +438,10 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
 	timeout = jiffies + msecs_to_jiffies(1000);
 
 	while (time_is_after_jiffies(timeout)) {
-		control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
-			  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
-
 		/* enable clock */
-		etnaviv_gpu_load_clock(gpu, control);
+		etnaviv_gpu_update_clock(gpu);
+
+		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
 
 		/* Wait for stable clock.  Vivante's code waited for 1ms */
 		usleep_range(1000, 10000);
@@ -490,11 +501,7 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
 	}
 
 	/* We rely on the GPU running, so program the clock */
-	control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
-		  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
-
-	/* enable clock */
-	etnaviv_gpu_load_clock(gpu, control);
+	etnaviv_gpu_update_clock(gpu);
 
 	return 0;
 }
@@ -1526,17 +1533,13 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
 #ifdef CONFIG_PM
 static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
 {
-	u32 clock;
 	int ret;
 
 	ret = mutex_lock_killable(&gpu->lock);
 	if (ret)
 		return ret;
 
-	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
-		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
-
-	etnaviv_gpu_load_clock(gpu, clock);
+	etnaviv_gpu_update_clock(gpu);
 	etnaviv_gpu_hw_init(gpu);
 
 	gpu->switch_context = true;
@@ -1548,6 +1551,47 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
 }
 #endif
 
+static int
+etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
+				  unsigned long *state)
+{
+	*state = 6;
+
+	return 0;
+}
+
+static int
+etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev,
+				  unsigned long *state)
+{
+	struct etnaviv_gpu *gpu = cdev->devdata;
+
+	*state = gpu->freq_scale;
+
+	return 0;
+}
+
+static int
+etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
+				  unsigned long state)
+{
+	struct etnaviv_gpu *gpu = cdev->devdata;
+
+	mutex_lock(&gpu->lock);
+	gpu->freq_scale = state;
+	if (!pm_runtime_suspended(gpu->dev))
+		etnaviv_gpu_update_clock(gpu);
+	mutex_unlock(&gpu->lock);
+
+	return 0;
+}
+
+static struct thermal_cooling_device_ops cooling_ops = {
+	.get_max_state = etnaviv_gpu_cooling_get_max_state,
+	.get_cur_state = etnaviv_gpu_cooling_get_cur_state,
+	.set_cur_state = etnaviv_gpu_cooling_set_cur_state,
+};
+
 static int etnaviv_gpu_bind(struct device *dev, struct device *master,
 	void *data)
 {
@@ -1556,13 +1600,20 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
 	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
 	int ret;
 
+	gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
+				(char *)dev_name(dev), gpu, &cooling_ops);
+	if (IS_ERR(gpu->cooling))
+		return PTR_ERR(gpu->cooling);
+
 #ifdef CONFIG_PM
 	ret = pm_runtime_get_sync(gpu->dev);
 #else
 	ret = etnaviv_gpu_clk_enable(gpu);
 #endif
-	if (ret < 0)
+	if (ret < 0) {
+		thermal_cooling_device_unregister(gpu->cooling);
 		return ret;
+	}
 
 	gpu->drm = drm;
 	gpu->fence_context = dma_fence_context_alloc(1);
@@ -1616,6 +1667,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
 	}
 
 	gpu->drm = NULL;
+
+	thermal_cooling_device_unregister(gpu->cooling);
+	gpu->cooling = NULL;
 }
 
 static const struct component_ops gpu_ops = {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 1c0606ea7d5e..6a1e68eec24c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -97,6 +97,7 @@ struct etnaviv_cmdbuf;
 
 struct etnaviv_gpu {
 	struct drm_device *drm;
+	struct thermal_cooling_device *cooling;
 	struct device *dev;
 	struct mutex lock;
 	struct etnaviv_chip_identity identity;
@@ -150,6 +151,7 @@ struct etnaviv_gpu {
 	u32 hangcheck_fence;
 	u32 hangcheck_dma_addr;
 	struct work_struct recover_work;
+	unsigned int freq_scale;
 };
 
 static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH] drm/etnaviv: add etnaviv cooling device
@ 2017-03-12 19:00 ` Russell King
  0 siblings, 0 replies; 16+ messages in thread
From: Russell King @ 2017-03-12 19:00 UTC (permalink / raw)
  To: Lucas Stach, Christian Gmeiner
  Cc: David Airlie, etnaviv, dri-devel, linux-arm-kernel

Each Vivante GPU contains a clock divider which can divide the GPU clock
by 2^n, which can lower the power dissipation from the GPU.  It has been
suggested that the GC600 on Dove is responsible for 20-30% of the power
dissipation from the SoC, so lowering the GPU clock rate provides a way
to throttle the power dissipation, and reduce the temperature when the
SoC gets hot.

This patch hooks the Etnaviv driver into the kernel's thermal management
to allow the GPUs to be throttled when necessary, allowing a reduction in
GPU clock rate from /1 to /64 in power of 2 steps.

Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
---
 drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 84 ++++++++++++++++++++++++++++-------
 drivers/gpu/drm/etnaviv/etnaviv_gpu.h |  2 +
 2 files changed, 71 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 130d7d517a19..bd95182d0852 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -18,6 +18,7 @@
 #include <linux/dma-fence.h>
 #include <linux/moduleparam.h>
 #include <linux/of_device.h>
+#include <linux/thermal.h>
 
 #include "etnaviv_cmdbuf.h"
 #include "etnaviv_dump.h"
@@ -409,6 +410,17 @@ static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
 	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
 }
 
+static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
+{
+	unsigned int fscale = 1 << (6 - gpu->freq_scale);
+	u32 clock;
+
+	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
+		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
+
+	etnaviv_gpu_load_clock(gpu, clock);
+}
+
 static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
 {
 	u32 control, idle;
@@ -426,11 +438,10 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
 	timeout = jiffies + msecs_to_jiffies(1000);
 
 	while (time_is_after_jiffies(timeout)) {
-		control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
-			  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
-
 		/* enable clock */
-		etnaviv_gpu_load_clock(gpu, control);
+		etnaviv_gpu_update_clock(gpu);
+
+		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
 
 		/* Wait for stable clock.  Vivante's code waited for 1ms */
 		usleep_range(1000, 10000);
@@ -490,11 +501,7 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
 	}
 
 	/* We rely on the GPU running, so program the clock */
-	control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
-		  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
-
-	/* enable clock */
-	etnaviv_gpu_load_clock(gpu, control);
+	etnaviv_gpu_update_clock(gpu);
 
 	return 0;
 }
@@ -1526,17 +1533,13 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
 #ifdef CONFIG_PM
 static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
 {
-	u32 clock;
 	int ret;
 
 	ret = mutex_lock_killable(&gpu->lock);
 	if (ret)
 		return ret;
 
-	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
-		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
-
-	etnaviv_gpu_load_clock(gpu, clock);
+	etnaviv_gpu_update_clock(gpu);
 	etnaviv_gpu_hw_init(gpu);
 
 	gpu->switch_context = true;
@@ -1548,6 +1551,47 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
 }
 #endif
 
+static int
+etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
+				  unsigned long *state)
+{
+	*state = 6;
+
+	return 0;
+}
+
+static int
+etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev,
+				  unsigned long *state)
+{
+	struct etnaviv_gpu *gpu = cdev->devdata;
+
+	*state = gpu->freq_scale;
+
+	return 0;
+}
+
+static int
+etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
+				  unsigned long state)
+{
+	struct etnaviv_gpu *gpu = cdev->devdata;
+
+	mutex_lock(&gpu->lock);
+	gpu->freq_scale = state;
+	if (!pm_runtime_suspended(gpu->dev))
+		etnaviv_gpu_update_clock(gpu);
+	mutex_unlock(&gpu->lock);
+
+	return 0;
+}
+
+static struct thermal_cooling_device_ops cooling_ops = {
+	.get_max_state = etnaviv_gpu_cooling_get_max_state,
+	.get_cur_state = etnaviv_gpu_cooling_get_cur_state,
+	.set_cur_state = etnaviv_gpu_cooling_set_cur_state,
+};
+
 static int etnaviv_gpu_bind(struct device *dev, struct device *master,
 	void *data)
 {
@@ -1556,13 +1600,20 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
 	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
 	int ret;
 
+	gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
+				(char *)dev_name(dev), gpu, &cooling_ops);
+	if (IS_ERR(gpu->cooling))
+		return PTR_ERR(gpu->cooling);
+
 #ifdef CONFIG_PM
 	ret = pm_runtime_get_sync(gpu->dev);
 #else
 	ret = etnaviv_gpu_clk_enable(gpu);
 #endif
-	if (ret < 0)
+	if (ret < 0) {
+		thermal_cooling_device_unregister(gpu->cooling);
 		return ret;
+	}
 
 	gpu->drm = drm;
 	gpu->fence_context = dma_fence_context_alloc(1);
@@ -1616,6 +1667,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
 	}
 
 	gpu->drm = NULL;
+
+	thermal_cooling_device_unregister(gpu->cooling);
+	gpu->cooling = NULL;
 }
 
 static const struct component_ops gpu_ops = {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 1c0606ea7d5e..6a1e68eec24c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -97,6 +97,7 @@ struct etnaviv_cmdbuf;
 
 struct etnaviv_gpu {
 	struct drm_device *drm;
+	struct thermal_cooling_device *cooling;
 	struct device *dev;
 	struct mutex lock;
 	struct etnaviv_chip_identity identity;
@@ -150,6 +151,7 @@ struct etnaviv_gpu {
 	u32 hangcheck_fence;
 	u32 hangcheck_dma_addr;
 	struct work_struct recover_work;
+	unsigned int freq_scale;
 };
 
 static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH] drm/etnaviv: add etnaviv cooling device
  2017-03-12 19:00 ` Russell King
@ 2017-03-15 13:03   ` Lucas Stach
  -1 siblings, 0 replies; 16+ messages in thread
From: Lucas Stach @ 2017-03-15 13:03 UTC (permalink / raw)
  To: linux-arm-kernel

Am Sonntag, den 12.03.2017, 19:00 +0000 schrieb Russell King:
> Each Vivante GPU contains a clock divider which can divide the GPU clock
> by 2^n, which can lower the power dissipation from the GPU.  It has been
> suggested that the GC600 on Dove is responsible for 20-30% of the power
> dissipation from the SoC, so lowering the GPU clock rate provides a way
> to throttle the power dissipation, and reduce the temperature when the
> SoC gets hot.
> 
> This patch hooks the Etnaviv driver into the kernel's thermal management
> to allow the GPUs to be throttled when necessary, allowing a reduction in
> GPU clock rate from /1 to /64 in power of 2 steps.

Are those power of 2 steps a hardware limitation, or is it something you
implemented this way to get a smaller number of steps, with a more
meaningful difference in clock speed?
My understanding was that the FSCALE value is just a regular divider
with all step values in the range of 1-64 being usable.

Regards,
Lucas
> 
> Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
> ---
>  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 84 ++++++++++++++++++++++++++++-------
>  drivers/gpu/drm/etnaviv/etnaviv_gpu.h |  2 +
>  2 files changed, 71 insertions(+), 15 deletions(-)
> 
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
> index 130d7d517a19..bd95182d0852 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
> @@ -18,6 +18,7 @@
>  #include <linux/dma-fence.h>
>  #include <linux/moduleparam.h>
>  #include <linux/of_device.h>
> +#include <linux/thermal.h>
>  
>  #include "etnaviv_cmdbuf.h"
>  #include "etnaviv_dump.h"
> @@ -409,6 +410,17 @@ static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
>  	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
>  }
>  
> +static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
> +{
> +	unsigned int fscale = 1 << (6 - gpu->freq_scale);
> +	u32 clock;
> +
> +	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
> +		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
> +
> +	etnaviv_gpu_load_clock(gpu, clock);
> +}
> +
>  static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
>  {
>  	u32 control, idle;
> @@ -426,11 +438,10 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
>  	timeout = jiffies + msecs_to_jiffies(1000);
>  
>  	while (time_is_after_jiffies(timeout)) {
> -		control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
> -			  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
> -
>  		/* enable clock */
> -		etnaviv_gpu_load_clock(gpu, control);
> +		etnaviv_gpu_update_clock(gpu);
> +
> +		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
>  
>  		/* Wait for stable clock.  Vivante's code waited for 1ms */
>  		usleep_range(1000, 10000);
> @@ -490,11 +501,7 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
>  	}
>  
>  	/* We rely on the GPU running, so program the clock */
> -	control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
> -		  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
> -
> -	/* enable clock */
> -	etnaviv_gpu_load_clock(gpu, control);
> +	etnaviv_gpu_update_clock(gpu);
>  
>  	return 0;
>  }
> @@ -1526,17 +1533,13 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
>  #ifdef CONFIG_PM
>  static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
>  {
> -	u32 clock;
>  	int ret;
>  
>  	ret = mutex_lock_killable(&gpu->lock);
>  	if (ret)
>  		return ret;
>  
> -	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
> -		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
> -
> -	etnaviv_gpu_load_clock(gpu, clock);
> +	etnaviv_gpu_update_clock(gpu);
>  	etnaviv_gpu_hw_init(gpu);
>  
>  	gpu->switch_context = true;
> @@ -1548,6 +1551,47 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
>  }
>  #endif
>  
> +static int
> +etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
> +				  unsigned long *state)
> +{
> +	*state = 6;
> +
> +	return 0;
> +}
> +
> +static int
> +etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev,
> +				  unsigned long *state)
> +{
> +	struct etnaviv_gpu *gpu = cdev->devdata;
> +
> +	*state = gpu->freq_scale;
> +
> +	return 0;
> +}
> +
> +static int
> +etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
> +				  unsigned long state)
> +{
> +	struct etnaviv_gpu *gpu = cdev->devdata;
> +
> +	mutex_lock(&gpu->lock);
> +	gpu->freq_scale = state;
> +	if (!pm_runtime_suspended(gpu->dev))
> +		etnaviv_gpu_update_clock(gpu);
> +	mutex_unlock(&gpu->lock);
> +
> +	return 0;
> +}
> +
> +static struct thermal_cooling_device_ops cooling_ops = {
> +	.get_max_state = etnaviv_gpu_cooling_get_max_state,
> +	.get_cur_state = etnaviv_gpu_cooling_get_cur_state,
> +	.set_cur_state = etnaviv_gpu_cooling_set_cur_state,
> +};
> +
>  static int etnaviv_gpu_bind(struct device *dev, struct device *master,
>  	void *data)
>  {
> @@ -1556,13 +1600,20 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
>  	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
>  	int ret;
>  
> +	gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
> +				(char *)dev_name(dev), gpu, &cooling_ops);
> +	if (IS_ERR(gpu->cooling))
> +		return PTR_ERR(gpu->cooling);
> +
>  #ifdef CONFIG_PM
>  	ret = pm_runtime_get_sync(gpu->dev);
>  #else
>  	ret = etnaviv_gpu_clk_enable(gpu);
>  #endif
> -	if (ret < 0)
> +	if (ret < 0) {
> +		thermal_cooling_device_unregister(gpu->cooling);
>  		return ret;
> +	}
>  
>  	gpu->drm = drm;
>  	gpu->fence_context = dma_fence_context_alloc(1);
> @@ -1616,6 +1667,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
>  	}
>  
>  	gpu->drm = NULL;
> +
> +	thermal_cooling_device_unregister(gpu->cooling);
> +	gpu->cooling = NULL;
>  }
>  
>  static const struct component_ops gpu_ops = {
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
> index 1c0606ea7d5e..6a1e68eec24c 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
> @@ -97,6 +97,7 @@ struct etnaviv_cmdbuf;
>  
>  struct etnaviv_gpu {
>  	struct drm_device *drm;
> +	struct thermal_cooling_device *cooling;
>  	struct device *dev;
>  	struct mutex lock;
>  	struct etnaviv_chip_identity identity;
> @@ -150,6 +151,7 @@ struct etnaviv_gpu {
>  	u32 hangcheck_fence;
>  	u32 hangcheck_dma_addr;
>  	struct work_struct recover_work;
> +	unsigned int freq_scale;
>  };
>  
>  static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH] drm/etnaviv: add etnaviv cooling device
@ 2017-03-15 13:03   ` Lucas Stach
  0 siblings, 0 replies; 16+ messages in thread
From: Lucas Stach @ 2017-03-15 13:03 UTC (permalink / raw)
  To: Russell King; +Cc: etnaviv, dri-devel, linux-arm-kernel

Am Sonntag, den 12.03.2017, 19:00 +0000 schrieb Russell King:
> Each Vivante GPU contains a clock divider which can divide the GPU clock
> by 2^n, which can lower the power dissipation from the GPU.  It has been
> suggested that the GC600 on Dove is responsible for 20-30% of the power
> dissipation from the SoC, so lowering the GPU clock rate provides a way
> to throttle the power dissipation, and reduce the temperature when the
> SoC gets hot.
> 
> This patch hooks the Etnaviv driver into the kernel's thermal management
> to allow the GPUs to be throttled when necessary, allowing a reduction in
> GPU clock rate from /1 to /64 in power of 2 steps.

Are those power of 2 steps a hardware limitation, or is it something you
implemented this way to get a smaller number of steps, with a more
meaningful difference in clock speed?
My understanding was that the FSCALE value is just a regular divider
with all step values in the range of 1-64 being usable.

Regards,
Lucas
> 
> Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
> ---
>  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 84 ++++++++++++++++++++++++++++-------
>  drivers/gpu/drm/etnaviv/etnaviv_gpu.h |  2 +
>  2 files changed, 71 insertions(+), 15 deletions(-)
> 
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
> index 130d7d517a19..bd95182d0852 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
> @@ -18,6 +18,7 @@
>  #include <linux/dma-fence.h>
>  #include <linux/moduleparam.h>
>  #include <linux/of_device.h>
> +#include <linux/thermal.h>
>  
>  #include "etnaviv_cmdbuf.h"
>  #include "etnaviv_dump.h"
> @@ -409,6 +410,17 @@ static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
>  	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
>  }
>  
> +static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
> +{
> +	unsigned int fscale = 1 << (6 - gpu->freq_scale);
> +	u32 clock;
> +
> +	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
> +		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
> +
> +	etnaviv_gpu_load_clock(gpu, clock);
> +}
> +
>  static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
>  {
>  	u32 control, idle;
> @@ -426,11 +438,10 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
>  	timeout = jiffies + msecs_to_jiffies(1000);
>  
>  	while (time_is_after_jiffies(timeout)) {
> -		control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
> -			  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
> -
>  		/* enable clock */
> -		etnaviv_gpu_load_clock(gpu, control);
> +		etnaviv_gpu_update_clock(gpu);
> +
> +		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
>  
>  		/* Wait for stable clock.  Vivante's code waited for 1ms */
>  		usleep_range(1000, 10000);
> @@ -490,11 +501,7 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
>  	}
>  
>  	/* We rely on the GPU running, so program the clock */
> -	control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
> -		  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
> -
> -	/* enable clock */
> -	etnaviv_gpu_load_clock(gpu, control);
> +	etnaviv_gpu_update_clock(gpu);
>  
>  	return 0;
>  }
> @@ -1526,17 +1533,13 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
>  #ifdef CONFIG_PM
>  static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
>  {
> -	u32 clock;
>  	int ret;
>  
>  	ret = mutex_lock_killable(&gpu->lock);
>  	if (ret)
>  		return ret;
>  
> -	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
> -		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
> -
> -	etnaviv_gpu_load_clock(gpu, clock);
> +	etnaviv_gpu_update_clock(gpu);
>  	etnaviv_gpu_hw_init(gpu);
>  
>  	gpu->switch_context = true;
> @@ -1548,6 +1551,47 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
>  }
>  #endif
>  
> +static int
> +etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
> +				  unsigned long *state)
> +{
> +	*state = 6;
> +
> +	return 0;
> +}
> +
> +static int
> +etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev,
> +				  unsigned long *state)
> +{
> +	struct etnaviv_gpu *gpu = cdev->devdata;
> +
> +	*state = gpu->freq_scale;
> +
> +	return 0;
> +}
> +
> +static int
> +etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
> +				  unsigned long state)
> +{
> +	struct etnaviv_gpu *gpu = cdev->devdata;
> +
> +	mutex_lock(&gpu->lock);
> +	gpu->freq_scale = state;
> +	if (!pm_runtime_suspended(gpu->dev))
> +		etnaviv_gpu_update_clock(gpu);
> +	mutex_unlock(&gpu->lock);
> +
> +	return 0;
> +}
> +
> +static struct thermal_cooling_device_ops cooling_ops = {
> +	.get_max_state = etnaviv_gpu_cooling_get_max_state,
> +	.get_cur_state = etnaviv_gpu_cooling_get_cur_state,
> +	.set_cur_state = etnaviv_gpu_cooling_set_cur_state,
> +};
> +
>  static int etnaviv_gpu_bind(struct device *dev, struct device *master,
>  	void *data)
>  {
> @@ -1556,13 +1600,20 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
>  	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
>  	int ret;
>  
> +	gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
> +				(char *)dev_name(dev), gpu, &cooling_ops);
> +	if (IS_ERR(gpu->cooling))
> +		return PTR_ERR(gpu->cooling);
> +
>  #ifdef CONFIG_PM
>  	ret = pm_runtime_get_sync(gpu->dev);
>  #else
>  	ret = etnaviv_gpu_clk_enable(gpu);
>  #endif
> -	if (ret < 0)
> +	if (ret < 0) {
> +		thermal_cooling_device_unregister(gpu->cooling);
>  		return ret;
> +	}
>  
>  	gpu->drm = drm;
>  	gpu->fence_context = dma_fence_context_alloc(1);
> @@ -1616,6 +1667,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
>  	}
>  
>  	gpu->drm = NULL;
> +
> +	thermal_cooling_device_unregister(gpu->cooling);
> +	gpu->cooling = NULL;
>  }
>  
>  static const struct component_ops gpu_ops = {
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
> index 1c0606ea7d5e..6a1e68eec24c 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
> @@ -97,6 +97,7 @@ struct etnaviv_cmdbuf;
>  
>  struct etnaviv_gpu {
>  	struct drm_device *drm;
> +	struct thermal_cooling_device *cooling;
>  	struct device *dev;
>  	struct mutex lock;
>  	struct etnaviv_chip_identity identity;
> @@ -150,6 +151,7 @@ struct etnaviv_gpu {
>  	u32 hangcheck_fence;
>  	u32 hangcheck_dma_addr;
>  	struct work_struct recover_work;
> +	unsigned int freq_scale;
>  };
>  
>  static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)


_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH] drm/etnaviv: add etnaviv cooling device
  2017-03-15 13:03   ` Lucas Stach
@ 2017-03-15 14:05     ` Russell King - ARM Linux
  -1 siblings, 0 replies; 16+ messages in thread
From: Russell King - ARM Linux @ 2017-03-15 14:05 UTC (permalink / raw)
  To: linux-arm-kernel

On Wed, Mar 15, 2017 at 02:03:09PM +0100, Lucas Stach wrote:
> Am Sonntag, den 12.03.2017, 19:00 +0000 schrieb Russell King:
> > Each Vivante GPU contains a clock divider which can divide the GPU clock
> > by 2^n, which can lower the power dissipation from the GPU.  It has been
> > suggested that the GC600 on Dove is responsible for 20-30% of the power
> > dissipation from the SoC, so lowering the GPU clock rate provides a way
> > to throttle the power dissipation, and reduce the temperature when the
> > SoC gets hot.
> > 
> > This patch hooks the Etnaviv driver into the kernel's thermal management
> > to allow the GPUs to be throttled when necessary, allowing a reduction in
> > GPU clock rate from /1 to /64 in power of 2 steps.
> 
> Are those power of 2 steps a hardware limitation, or is it something you
> implemented this way to get a smaller number of steps, with a more
> meaningful difference in clock speed?
> My understanding was that the FSCALE value is just a regular divider
> with all step values in the range of 1-64 being usable.

I don't share your understanding.  The Vivante GAL kernel driver only
ever sets power-of-two values.  I have no evidence to support your
suggestion.

There's evidence that says your understanding is incorrect however.
It isn't a divider.  A value of 0x40 gives the fastest clock rate,
a value of 0x01 gives the slowest.  If it were a binary divider,
a value of 0x7f would give the slowest rate - so why doesn't Vivante
use that in galcore when putting the GPU into idle/lower power - why
do they just 0x01.

This all leads me to believe that it's not a binary divider, but a
set of bits that select the clock from a set of divide-by-two stages,
and having more than one bit set is invalid.

However, without definitive information from Vivante, we'll never
really know.  We're unlikely to get that.

-- 
RMK's Patch system: http://www.armlinux.org.uk/developer/patches/
FTTC broadband for 0.8mile line: currently at 9.6Mbps down 400kbps up
according to speedtest.net.

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH] drm/etnaviv: add etnaviv cooling device
@ 2017-03-15 14:05     ` Russell King - ARM Linux
  0 siblings, 0 replies; 16+ messages in thread
From: Russell King - ARM Linux @ 2017-03-15 14:05 UTC (permalink / raw)
  To: Lucas Stach
  Cc: David Airlie, Christian Gmeiner, etnaviv, dri-devel, linux-arm-kernel

On Wed, Mar 15, 2017 at 02:03:09PM +0100, Lucas Stach wrote:
> Am Sonntag, den 12.03.2017, 19:00 +0000 schrieb Russell King:
> > Each Vivante GPU contains a clock divider which can divide the GPU clock
> > by 2^n, which can lower the power dissipation from the GPU.  It has been
> > suggested that the GC600 on Dove is responsible for 20-30% of the power
> > dissipation from the SoC, so lowering the GPU clock rate provides a way
> > to throttle the power dissipation, and reduce the temperature when the
> > SoC gets hot.
> > 
> > This patch hooks the Etnaviv driver into the kernel's thermal management
> > to allow the GPUs to be throttled when necessary, allowing a reduction in
> > GPU clock rate from /1 to /64 in power of 2 steps.
> 
> Are those power of 2 steps a hardware limitation, or is it something you
> implemented this way to get a smaller number of steps, with a more
> meaningful difference in clock speed?
> My understanding was that the FSCALE value is just a regular divider
> with all step values in the range of 1-64 being usable.

I don't share your understanding.  The Vivante GAL kernel driver only
ever sets power-of-two values.  I have no evidence to support your
suggestion.

There's evidence that says your understanding is incorrect however.
It isn't a divider.  A value of 0x40 gives the fastest clock rate,
a value of 0x01 gives the slowest.  If it were a binary divider,
a value of 0x7f would give the slowest rate - so why doesn't Vivante
use that in galcore when putting the GPU into idle/lower power - why
do they just use 0x01.

This all leads me to believe that it's not a binary divider, but a
set of bits that select the clock from a set of divide-by-two stages,
and having more than one bit set is invalid.

However, without definitive information from Vivante, we'll never
really know.  We're unlikely to get that.

-- 
RMK's Patch system: http://www.armlinux.org.uk/developer/patches/
FTTC broadband for 0.8mile line: currently at 9.6Mbps down 400kbps up
according to speedtest.net.

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH] drm/etnaviv: add etnaviv cooling device
  2017-03-15 14:05     ` Russell King - ARM Linux
@ 2017-03-19 20:03       ` Chris Healy
  -1 siblings, 0 replies; 16+ messages in thread
From: Chris Healy @ 2017-03-19 20:03 UTC (permalink / raw)
  To: linux-arm-kernel

I don't have any input on this binary divider subject but I do want to
bring up some observations regarding Etnaviv GPU power management that
seem relevant.

I've done some comparisons between the Freescale Vivante GPU driver
stack (1) and the Marvell PXA1928 Vivante GPU driver stack (2) and see
more functionality in the PXA1928 stack than the Freescale i.MX6 stack
that may be of value for Etnaviv.  When I look at the Marvell PXA1928
Vivante GPU driver stack, (2) I see "gpufreq" code (3) that includes
support for conservative, ondemand, performance, powersave, and
userspace governors.  Additionally, AFAIK the key feature needed to
support a gpufreq driver and associated governors is to be able to
know what the load is on the GPU.  When looking at the PXA1928 driver,
it seems that it is looking at some load counters within the GPU that
are likely to be common across platforms.  (Check
"gpufreq_get_gpu_load" (4) in gpufreq.c.)

Also, given the wealth of counters present in the 3DGPU and my
understanding that there are 3 different controllable GPU frequencies
(at least with the i.MX6), it seems that one could dynamically adjust
each of these 3 different controllable frequencies independently based
on associated load counters.  The i.MX6 has 3 different frequencies,
IIRC, AXI, 3DGPU core, and 3DGPU shader.  I believe there are counters
associated with each of these GPU sub-blocks so it seems feasible to
adjust each of the 3 buses based on the sub-block load.  (I'm no
expert by any means with any of this so this may be crazy talk...)

If my observations are correct that the gpufreq functionality present
in the PXA1928 driver is portable across SoC platforms with the
Vivante 3D GPUs, does it make sense to add a gpufreq driver with the
Etnaviv driver?

What are the benefits and drawbacks of implementing a gpufreq driver
with associated governors in comparison to adding this cooling device
driver functionality?  (It seems to me that a gpufreq driver is more
proactive and the cooling device is more reactive.)

Can and should gpufreq driver functionality (such as that present in
the PXA1928 driver) and the proposed cooling device functionality
co-exist?

(1) - https://github.com/etnaviv/vivante_kernel_drivers/tree/master/imx6_v4_0_0
(2) - https://github.com/etnaviv/vivante_kernel_drivers/tree/master/pxa1928
(3) - https://github.com/etnaviv/vivante_kernel_drivers/tree/master/pxa1928/hal/os/linux/kernel/gpufreq
(4) - https://github.com/etnaviv/vivante_kernel_drivers/blob/master/pxa1928/hal/os/linux/kernel/gpufreq/gpufreq.c#L1294

On Wed, Mar 15, 2017 at 7:05 AM, Russell King - ARM Linux
<linux@armlinux.org.uk> wrote:
> On Wed, Mar 15, 2017 at 02:03:09PM +0100, Lucas Stach wrote:
>> Am Sonntag, den 12.03.2017, 19:00 +0000 schrieb Russell King:
>> > Each Vivante GPU contains a clock divider which can divide the GPU clock
>> > by 2^n, which can lower the power dissipation from the GPU.  It has been
>> > suggested that the GC600 on Dove is responsible for 20-30% of the power
>> > dissipation from the SoC, so lowering the GPU clock rate provides a way
>> > to throttle the power dissipation, and reduce the temperature when the
>> > SoC gets hot.
>> >
>> > This patch hooks the Etnaviv driver into the kernel's thermal management
>> > to allow the GPUs to be throttled when necessary, allowing a reduction in
>> > GPU clock rate from /1 to /64 in power of 2 steps.
>>
>> Are those power of 2 steps a hardware limitation, or is it something you
>> implemented this way to get a smaller number of steps, with a more
>> meaningful difference in clock speed?
>> My understanding was that the FSCALE value is just a regular divider
>> with all step values in the range of 1-64 being usable.
>
> I don't share your understanding.  The Vivante GAL kernel driver only
> ever sets power-of-two values.  I have no evidence to support your
> suggestion.
>
> There's evidence that says your understanding is incorrect however.
> It isn't a divider.  A value of 0x40 gives the fastest clock rate,
> a value of 0x01 gives the slowest.  If it were a binary divider,
> a value of 0x7f would give the slowest rate - so why doesn't Vivante
> use that in galcore when putting the GPU into idle/lower power - why
> do they just use 0x01?
>
> This all leads me to believe that it's not a binary divider, but a
> set of bits that select the clock from a set of divide-by-two stages,
> and having more than one bit set is invalid.
>
> However, without definitive information from Vivante, we'll never
> really know.  We're unlikely to get that.
>
> --
> RMK's Patch system: http://www.armlinux.org.uk/developer/patches/
> FTTC broadband for 0.8mile line: currently at 9.6Mbps down 400kbps up
> according to speedtest.net.
> _______________________________________________
> etnaviv mailing list
> etnaviv at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/etnaviv

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH] drm/etnaviv: add etnaviv cooling device
@ 2017-03-19 20:03       ` Chris Healy
  0 siblings, 0 replies; 16+ messages in thread
From: Chris Healy @ 2017-03-19 20:03 UTC (permalink / raw)
  To: Russell King - ARM Linux
  Cc: David Airlie, etnaviv, dri-devel, Gustavo Padovan,
	Christian Gmeiner, linux-arm-kernel, Lucas Stach

I don't have any input on this binary divider subject but I do want to
bring up some observations regarding Etnaviv GPU power management that
seem relevant.

I've done some comparisons between the Freescale Vivante GPU driver
stack (1) and the Marvell PXA1928 Vivante GPU driver stack (2) and see
more functionality in the PXA1928 stack than the Freescale i.MX6 stack
that may be of value for Etnaviv.  When I look at the Marvell PXA1928
Vivante GPU driver stack, (2) I see "gpufreq" code (3) that includes
support for conservative, ondemand, performance, powersave, and
userspace governors.  Additionally, AFAIK the key feature needed to
support a gpufreq driver and associated governors is to be able to
know what the load is on the GPU.  When looking at the PXA1928 driver,
it seems that it is looking at some load counters within the GPU that
are likely to be common across platforms.  (Check
"gpufreq_get_gpu_load" (4) in gpufreq.c.)

Also, given the wealth of counters present in the 3DGPU and my
understanding that there are 3 different controllable GPU frequencies
(at least with the i.MX6), it seems that one could dynamically adjust
each of these 3 different controllable frequencies independently based
on associated load counters.  The i.MX6 has 3 different frequencies,
IIRC, AXI, 3DGPU core, and 3DGPU shader.  I believe there are counters
associated with each of these GPU sub-blocks so it seems feasible to
adjust each of the 3 buses based on the sub-block load.  (I'm no
expert by any means with any of this so this may be crazy talk...)

If my observations are correct that the gpufreq functionality present
in the PXA1928 driver is portable across SoC platforms with the
Vivante 3D GPUs, does it make sense to add a gpufreq driver with the
Etnaviv driver?

What are the benefits and drawbacks of implementing a gpufreq driver
with associated governors in comparison to adding this cooling device
driver functionality?  (It seems to me that a gpufreq driver is more
proactive and the cooling device is more reactive.)

Can and should gpufreq driver functionality (such as that present in
the PXA1928 driver) and the proposed cooling device functionality
co-exist?

(1) - https://github.com/etnaviv/vivante_kernel_drivers/tree/master/imx6_v4_0_0
(2) - https://github.com/etnaviv/vivante_kernel_drivers/tree/master/pxa1928
(3) - https://github.com/etnaviv/vivante_kernel_drivers/tree/master/pxa1928/hal/os/linux/kernel/gpufreq
(4) - https://github.com/etnaviv/vivante_kernel_drivers/blob/master/pxa1928/hal/os/linux/kernel/gpufreq/gpufreq.c#L1294

On Wed, Mar 15, 2017 at 7:05 AM, Russell King - ARM Linux
<linux@armlinux.org.uk> wrote:
> On Wed, Mar 15, 2017 at 02:03:09PM +0100, Lucas Stach wrote:
>> Am Sonntag, den 12.03.2017, 19:00 +0000 schrieb Russell King:
>> > Each Vivante GPU contains a clock divider which can divide the GPU clock
>> > by 2^n, which can lower the power dissipation from the GPU.  It has been
>> > suggested that the GC600 on Dove is responsible for 20-30% of the power
>> > dissipation from the SoC, so lowering the GPU clock rate provides a way
>> > to throttle the power dissipation, and reduce the temperature when the
>> > SoC gets hot.
>> >
>> > This patch hooks the Etnaviv driver into the kernel's thermal management
>> > to allow the GPUs to be throttled when necessary, allowing a reduction in
>> > GPU clock rate from /1 to /64 in power of 2 steps.
>>
>> Are those power of 2 steps a hardware limitation, or is it something you
>> implemented this way to get a smaller number of steps, with a more
>> meaningful difference in clock speed?
>> My understanding was that the FSCALE value is just a regular divider
>> with all step values in the range of 1-64 being usable.
>
> I don't share your understanding.  The Vivante GAL kernel driver only
> ever sets power-of-two values.  I have no evidence to support your
> suggestion.
>
> There's evidence that says your understanding is incorrect however.
> It isn't a divider.  A value of 0x40 gives the fastest clock rate,
> a value of 0x01 gives the slowest.  If it were a binary divider,
> a value of 0x7f would give the slowest rate - so why doesn't Vivante
> use that in galcore when putting the GPU into idle/lower power - why
> do they just use 0x01?
>
> This all leads me to believe that it's not a binary divider, but a
> set of bits that select the clock from a set of divide-by-two stages,
> and having more than one bit set is invalid.
>
> However, without definitive information from Vivante, we'll never
> really know.  We're unlikely to get that.
>
> --
> RMK's Patch system: http://www.armlinux.org.uk/developer/patches/
> FTTC broadband for 0.8mile line: currently at 9.6Mbps down 400kbps up
> according to speedtest.net.
> _______________________________________________
> etnaviv mailing list
> etnaviv@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/etnaviv

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH] drm/etnaviv: add etnaviv cooling device
  2017-03-19 20:03       ` Chris Healy
@ 2017-03-19 20:50         ` Russell King - ARM Linux
  -1 siblings, 0 replies; 16+ messages in thread
From: Russell King - ARM Linux @ 2017-03-19 20:50 UTC (permalink / raw)
  To: linux-arm-kernel

On Sun, Mar 19, 2017 at 01:03:42PM -0700, Chris Healy wrote:
> I don't have any input on this binary divider subject but I do want to
> bring up some observations regarding Etnaviv GPU power management that
> seem relevant.

GPU cooling isn't really to do with GPU power management, it's more
to do with helping avoid the SoC overheating, and going into emergency
shutdown.

So, it's solving a completely different problem.

However, thank you for the links, it'll be something to be researched
at some point.

-- 
RMK's Patch system: http://www.armlinux.org.uk/developer/patches/
FTTC broadband for 0.8mile line: currently at 9.6Mbps down 400kbps up
according to speedtest.net.

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH] drm/etnaviv: add etnaviv cooling device
@ 2017-03-19 20:50         ` Russell King - ARM Linux
  0 siblings, 0 replies; 16+ messages in thread
From: Russell King - ARM Linux @ 2017-03-19 20:50 UTC (permalink / raw)
  To: Chris Healy
  Cc: David Airlie, etnaviv, dri-devel, Gustavo Padovan,
	Christian Gmeiner, linux-arm-kernel, Lucas Stach

On Sun, Mar 19, 2017 at 01:03:42PM -0700, Chris Healy wrote:
> I don't have any input on this binary divider subject but I do want to
> bring up some observations regarding Etnaviv GPU power management that
> seem relevant.

GPU cooling isn't really to do with GPU power management, it's more
to do with helping avoid the SoC overheating, and going into emergency
shutdown.

So, it's solving a completely different problem.

However, thank you for the links, it'll be something to be researched
at some point.

-- 
RMK's Patch system: http://www.armlinux.org.uk/developer/patches/
FTTC broadband for 0.8mile line: currently at 9.6Mbps down 400kbps up
according to speedtest.net.

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH] drm/etnaviv: add etnaviv cooling device
  2017-03-19 20:03       ` Chris Healy
@ 2017-03-20  9:19         ` Lucas Stach
  -1 siblings, 0 replies; 16+ messages in thread
From: Lucas Stach @ 2017-03-20  9:19 UTC (permalink / raw)
  To: linux-arm-kernel

Am Sonntag, den 19.03.2017, 13:03 -0700 schrieb Chris Healy:
> I don't have any input on this binary divider subject but I do want to
> bring up some observations regarding Etnaviv GPU power management that
> seem relevant.
> 
> I've done some comparisons between the Freescale Vivante GPU driver
> stack (1) and the Marvell PXA1928 Vivante GPU driver stack (2) and see
> more functionality in the PXA1928 stack than the Freescale i.MX6 stack
> that may be of value for Etnaviv.  When I look at the Marvell PXA1928
> Vivante GPU driver stack, (2) I see "gpufreq" code (3) that includes
> support for conservative, ondemand, performance, powersave, and
> userspace governors.  Additionally, AFAIK the key feature needed to
> support a gpufreq driver and associated governors is to be able to
> know what the load is on the GPU.  When looking at the PXA1928 driver,
> it seems that it is looking at some load counters within the GPU that
> are likely to be common across platforms.  (Check
> "gpufreq_get_gpu_load" (4) in gpufreq.c.)
> 
> Also, given the wealth of counters present in the 3DGPU and my
> understanding that there are 3 different controllable GPU frequencies
> (at least with the i.MX6), it seems that one could dynamically adjust
> each of these 3 different controllable frequencies independently based
> on associated load counters.  The i.MX6 has 3 different frequencies,
> IIRC, AXI, 3DGPU core, and 3DGPU shader.  I believe there are counters
> associated with each of these GPU sub-blocks so it seems feasible to
> adjust each of the 3 buses based on the sub-block load.  (I'm no
> expert by any means with any of this so this may be crazy talk...)
> 
> If my observations are correct that the gpufreq functionality present
> in the PXA1928 driver is portable across SoC platforms with the
> Vivante 3D GPUs, does it make sense to add a gpufreq driver with the
> Etnaviv driver?
> 
> What are the benefits and drawbacks of implementing a gpufreq driver
> with associated governors in comparison to adding this cooling device
> driver functionality?  (It seems to me that a gpufreq driver is more
> proactive and the cooling device is more reactive.)
> 
> Can and should gpufreq driver functionality (such as that present in
> the PXA1928 driver) and the proposed cooling device functionality
> co-exist?

Yes, probably we want to have both at some point. The cooling-device
stuff is about throttling the GPU when the SoC reaches critical
temperatures.

The devfreq governors are about providing exactly the right performance
point.
Though as I have not yet seen any SoCs where the voltage would be scaled
with GPU frequency, downclocking the GPU is of limited value. For most
of those SoCs a race-to-idle policy is probably okay, as this allows
other components of the system like the DRAM to go into lower power
operating modes when the GPU is idle.

Regards,
Lucas

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH] drm/etnaviv: add etnaviv cooling device
@ 2017-03-20  9:19         ` Lucas Stach
  0 siblings, 0 replies; 16+ messages in thread
From: Lucas Stach @ 2017-03-20  9:19 UTC (permalink / raw)
  To: Chris Healy
  Cc: Russell King - ARM Linux, dri-devel, etnaviv, Gustavo Padovan,
	linux-arm-kernel

Am Sonntag, den 19.03.2017, 13:03 -0700 schrieb Chris Healy:
> I don't have any input on this binary divider subject but I do want to
> bring up some observations regarding Etnaviv GPU power management that
> seem relevant.
> 
> I've done some comparisons between the Freescale Vivante GPU driver
> stack (1) and the Marvell PXA1928 Vivante GPU driver stack (2) and see
> more functionality in the PXA1928 stack than the Freescale i.MX6 stack
> that may be of value for Etnaviv.  When I look at the Marvell PXA1928
> Vivante GPU driver stack, (2) I see "gpufreq" code (3) that includes
> support for conservative, ondemand, performance, powersave, and
> userspace governors.  Additionally, AFAIK the key feature needed to
> support a gpufreq driver and associated governors is to be able to
> know what the load is on the GPU.  When looking at the PXA1928 driver,
> it seems that it is looking at some load counters within the GPU that
> are likely to be common across platforms.  (Check
> "gpufreq_get_gpu_load" (4) in gpufreq.c.)
> 
> Also, given the wealth of counters present in the 3DGPU and my
> understanding that there are 3 different controllable GPU frequencies
> (at least with the i.MX6), it seems that one could dynamically adjust
> each of these 3 different controllable frequencies independently based
> on associated load counters.  The i.MX6 has 3 different frequencies,
> IIRC, AXI, 3DGPU core, and 3DGPU shader.  I believe there are counters
> associated with each of these GPU sub-blocks so it seems feasible to
> adjust each of the 3 buses based on the sub-block load.  (I'm no
> expert by any means with any of this so this may be crazy talk...)
> 
> If my observations are correct that the gpufreq functionality present
> in the PXA1928 driver is portable across SoC platforms with the
> Vivante 3D GPUs, does it make sense to add a gpufreq driver with the
> Etnaviv driver?
> 
> What are the benefits and drawbacks of implementing a gpufreq driver
> with associated governors in comparison to adding this cooling device
> driver functionality?  (It seems to me that a gpufreq driver is more
> proactive and the cooling device is more reactive.)
> 
> Can and should gpufreq driver functionality (such as that present in
> the PXA1928 driver) and the proposed cooling device functionality
> co-exist?

Yes, probably we want to have both at some point. The cooling-device
stuff is about throttling the GPU when the SoC reaches critical
temperatures.

The devfreq governors are about providing exactly the right performance
point.
Though as I have not yet seen any SoCs where the voltage would be scaled
with GPU frequency, downclocking the GPU is of limited value. For most
of those SoCs a race-to-idle policy is probably okay, as this allows
other components of the system like the DRAM to go into lower power
operating modes when the GPU is idle.

Regards,
Lucas

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH] drm/etnaviv: add etnaviv cooling device
  2017-03-15 14:05     ` Russell King - ARM Linux
@ 2017-03-20  9:21       ` Lucas Stach
  -1 siblings, 0 replies; 16+ messages in thread
From: Lucas Stach @ 2017-03-20  9:21 UTC (permalink / raw)
  To: linux-arm-kernel

Am Mittwoch, den 15.03.2017, 14:05 +0000 schrieb Russell King - ARM
Linux:
> On Wed, Mar 15, 2017 at 02:03:09PM +0100, Lucas Stach wrote:
> > Am Sonntag, den 12.03.2017, 19:00 +0000 schrieb Russell King:
> > > Each Vivante GPU contains a clock divider which can divide the GPU clock
> > > by 2^n, which can lower the power dissipation from the GPU.  It has been
> > > suggested that the GC600 on Dove is responsible for 20-30% of the power
> > > dissipation from the SoC, so lowering the GPU clock rate provides a way
> > > to throttle the power dissipation, and reduce the temperature when the
> > > SoC gets hot.
> > > 
> > > This patch hooks the Etnaviv driver into the kernel's thermal management
> > > to allow the GPUs to be throttled when necessary, allowing a reduction in
> > > GPU clock rate from /1 to /64 in power of 2 steps.
> > 
> > Are those power of 2 steps a hardware limitation, or is it something you
> > implemented this way to get a smaller number of steps, with a more
> > meaningful difference in clock speed?
> > My understanding was that the FSCALE value is just a regular divider
> > with all step values in the range of 1-64 being usable.
> 
> I don't share your understanding.  The Vivante GAL kernel driver only
> ever sets power-of-two values.  I have no evidence to support your
> suggestion.
> 
> There's evidence that says your understanding is incorrect however.
> It isn't a divider.  A value of 0x40 gives the fastest clock rate,
> a value of 0x01 gives the slowest.  If it were a binary divider,
> a value of 0x7f would give the slowest rate - so why doesn't Vivante
> use that in galcore when putting the GPU into idle/lower power - why
> do they just use 0x01?
> 
> This all leads me to believe that it's not a binary divider, but a
> set of bits that select the clock from a set of divide-by-two stages,
> and having more than one bit set is invalid.
> 
> However, without definitive information from Vivante, we'll never
> really know.  We're unlikely to get that.
> 
Yes, it seems your understanding is correct and my mental model of this
FSCALE thing was off.

I'll pick up this patch for the next round of etnaviv features.

Regards,
Lucas

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH] drm/etnaviv: add etnaviv cooling device
@ 2017-03-20  9:21       ` Lucas Stach
  0 siblings, 0 replies; 16+ messages in thread
From: Lucas Stach @ 2017-03-20  9:21 UTC (permalink / raw)
  To: Russell King - ARM Linux; +Cc: etnaviv, dri-devel, linux-arm-kernel

Am Mittwoch, den 15.03.2017, 14:05 +0000 schrieb Russell King - ARM
Linux:
> On Wed, Mar 15, 2017 at 02:03:09PM +0100, Lucas Stach wrote:
> > Am Sonntag, den 12.03.2017, 19:00 +0000 schrieb Russell King:
> > > Each Vivante GPU contains a clock divider which can divide the GPU clock
> > > by 2^n, which can lower the power dissipation from the GPU.  It has been
> > > suggested that the GC600 on Dove is responsible for 20-30% of the power
> > > dissipation from the SoC, so lowering the GPU clock rate provides a way
> > > to throttle the power dissipation, and reduce the temperature when the
> > > SoC gets hot.
> > > 
> > > This patch hooks the Etnaviv driver into the kernel's thermal management
> > > to allow the GPUs to be throttled when necessary, allowing a reduction in
> > > GPU clock rate from /1 to /64 in power of 2 steps.
> > 
> > Are those power of 2 steps a hardware limitation, or is it something you
> > implemented this way to get a smaller number of steps, with a more
> > meaningful difference in clock speed?
> > My understanding was that the FSCALE value is just a regular divider
> > with all step values in the range of 1-64 being usable.
> 
> I don't share your understanding.  The Vivante GAL kernel driver only
> ever sets power-of-two values.  I have no evidence to support your
> suggestion.
> 
> There's evidence that says your understanding is incorrect however.
> It isn't a divider.  A value of 0x40 gives the fastest clock rate,
> a value of 0x01 gives the slowest.  If it were a binary divider,
> a value of 0x7f would give the slowest rate - so why doesn't Vivante
> use that in galcore when putting the GPU into idle/lower power - why
> do they just use 0x01?
> 
> This all leads me to believe that it's not a binary divider, but a
> set of bits that select the clock from a set of divide-by-two stages,
> and having more than one bit set is invalid.
> 
> However, without definitive information from Vivante, we'll never
> really know.  We're unlikely to get that.
> 
Yes, it seems your understanding is correct and my mental model of this
FSCALE thing was off.

I'll pick up this patch for the next round of etnaviv features.

Regards,
Lucas

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH] drm/etnaviv: add etnaviv cooling device
  2017-03-20  9:19         ` Lucas Stach
@ 2017-03-20  9:50           ` Russell King - ARM Linux
  -1 siblings, 0 replies; 16+ messages in thread
From: Russell King - ARM Linux @ 2017-03-20  9:50 UTC (permalink / raw)
  To: linux-arm-kernel

On Mon, Mar 20, 2017 at 10:19:35AM +0100, Lucas Stach wrote:
> Yes, probably we want to have both at some point. The cooling-device
> stuff is about throttling the GPU when the SoC reaches critical
> temperatures.
> 
> The devfreq governors are about providing exactly the right performance
> point.
> Though as I have not yet seen any SoCs where the voltage would be scaled
> with GPU frequency, downclocking the GPU is of limited value. For most
> of those SoCs a race-to-idle policy is probably okay, as this allows
> other components of the system like the DRAM to go into lower power
> operating modes when the GPU is idle.

However, since we already have runtime PM support in etnaviv (where we
even power the GPU off on some SoCs) the lack of voltage scaling does
suggest that GPU frequency scaling would be of limited value in terms
of overall power usage.

It's also worth bearing in mind that Vivante GPUs internally control
clock gates to various modules when they're on.

-- 
RMK's Patch system: http://www.armlinux.org.uk/developer/patches/
FTTC broadband for 0.8mile line: currently at 9.6Mbps down 400kbps up
according to speedtest.net.

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH] drm/etnaviv: add etnaviv cooling device
@ 2017-03-20  9:50           ` Russell King - ARM Linux
  0 siblings, 0 replies; 16+ messages in thread
From: Russell King - ARM Linux @ 2017-03-20  9:50 UTC (permalink / raw)
  To: Lucas Stach
  Cc: David Airlie, etnaviv, dri-devel, Gustavo Padovan,
	Christian Gmeiner, linux-arm-kernel, Chris Healy

On Mon, Mar 20, 2017 at 10:19:35AM +0100, Lucas Stach wrote:
> Yes, probably we want to have both at some point. The cooling-device
> stuff is about throttling the GPU when the SoC reaches critical
> temperatures.
> 
> The devfreq governors are about providing exactly the right performance
> point.
> Though as I have not yet seen any SoCs where the voltage would be scaled
> with GPU frequency, downclocking the GPU is of limited value. For most
> of those SoCs a race-to-idle policy is probably okay, as this allows
> other components of the system like the DRAM to go into lower power
> operating modes when the GPU is idle.

However, since we already have runtime PM support in etnaviv (where we
even power the GPU off on some SoCs) the lack of voltage scaling does
suggest that GPU frequency scaling would be of limited value in terms
of overall power usage.

It's also worth bearing in mind that Vivante GPUs internally control
clock gates to various modules when they're on.

-- 
RMK's Patch system: http://www.armlinux.org.uk/developer/patches/
FTTC broadband for 0.8mile line: currently at 9.6Mbps down 400kbps up
according to speedtest.net.

^ permalink raw reply	[flat|nested] 16+ messages in thread

end of thread, other threads:[~2017-03-20  9:50 UTC | newest]

Thread overview: 16+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-03-12 19:00 [PATCH] drm/etnaviv: add etnaviv cooling device Russell King
2017-03-12 19:00 ` Russell King
2017-03-15 13:03 ` Lucas Stach
2017-03-15 13:03   ` Lucas Stach
2017-03-15 14:05   ` Russell King - ARM Linux
2017-03-15 14:05     ` Russell King - ARM Linux
2017-03-19 20:03     ` Chris Healy
2017-03-19 20:03       ` Chris Healy
2017-03-19 20:50       ` Russell King - ARM Linux
2017-03-19 20:50         ` Russell King - ARM Linux
2017-03-20  9:19       ` Lucas Stach
2017-03-20  9:19         ` Lucas Stach
2017-03-20  9:50         ` Russell King - ARM Linux
2017-03-20  9:50           ` Russell King - ARM Linux
2017-03-20  9:21     ` Lucas Stach
2017-03-20  9:21       ` Lucas Stach

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.