* [PATCH 0/2] DGFX HWMON Enhancement
@ 2023-10-06 17:02 ` Badal Nilawar
0 siblings, 0 replies; 19+ messages in thread
From: Badal Nilawar @ 2023-10-06 17:02 UTC (permalink / raw)
To: intel-xe, linux-hwmon
Cc: anshuman.gupta, ashutosh.dixit, andi.shyti, riana.tauro, rodrigo.vivi
This series contains a couple of patches. The first patch handles locking.
The second patch, "Expose power1_max_interval", is, with a couple of fixes, from
the previous series https://patchwork.freedesktop.org/series/118934/
Badal Nilawar (2):
drm/xe/hwmon: Protect hwmon rw attributes with hwmon_lock
drm/xe/hwmon: Expose power1_max_interval
.../ABI/testing/sysfs-driver-intel-xe-hwmon | 9 +
drivers/gpu/drm/xe/regs/xe_mchbar_regs.h | 8 +
drivers/gpu/drm/xe/xe_hwmon.c | 277 +++++++++++++-----
3 files changed, 225 insertions(+), 69 deletions(-)
--
2.25.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* [Intel-xe] [PATCH 0/2] DGFX HWMON Enhancement
@ 2023-10-06 17:02 ` Badal Nilawar
0 siblings, 0 replies; 19+ messages in thread
From: Badal Nilawar @ 2023-10-06 17:02 UTC (permalink / raw)
To: intel-xe, linux-hwmon; +Cc: rodrigo.vivi
This series contains a couple of patches. The first patch handles locking.
The second patch, "Expose power1_max_interval", is, with a couple of fixes, from
the previous series https://patchwork.freedesktop.org/series/118934/
Badal Nilawar (2):
drm/xe/hwmon: Protect hwmon rw attributes with hwmon_lock
drm/xe/hwmon: Expose power1_max_interval
.../ABI/testing/sysfs-driver-intel-xe-hwmon | 9 +
drivers/gpu/drm/xe/regs/xe_mchbar_regs.h | 8 +
drivers/gpu/drm/xe/xe_hwmon.c | 277 +++++++++++++-----
3 files changed, 225 insertions(+), 69 deletions(-)
--
2.25.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH 1/2] drm/xe/hwmon: Protect hwmon rw attributes with hwmon_lock
2023-10-06 17:02 ` [Intel-xe] " Badal Nilawar
@ 2023-10-06 17:02 ` Badal Nilawar
-1 siblings, 0 replies; 19+ messages in thread
From: Badal Nilawar @ 2023-10-06 17:02 UTC (permalink / raw)
To: intel-xe, linux-hwmon
Cc: anshuman.gupta, ashutosh.dixit, andi.shyti, riana.tauro, rodrigo.vivi
Take hwmon_lock while accessing hwmon rw attributes. For read-only
attributes it's not required to take the lock, as reads are protected
by the sysfs layer and are therefore sequential.
Cc: Ashutosh Dixit <ashutosh.dixit@intel.com>
Cc: Anshuman Gupta <anshuman.gupta@intel.com>
Signed-off-by: Badal Nilawar <badal.nilawar@intel.com>
---
drivers/gpu/drm/xe/xe_hwmon.c | 143 +++++++++++++++++-----------------
1 file changed, 71 insertions(+), 72 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index 9ac05994a967..391f9a8dd9d7 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -47,7 +47,7 @@ struct xe_hwmon_energy_info {
struct xe_hwmon {
struct device *hwmon_dev;
struct xe_gt *gt;
- struct mutex hwmon_lock; /* rmw operations*/
+ struct mutex hwmon_lock; /* For rw attributes */
int scl_shift_power;
int scl_shift_energy;
struct xe_hwmon_energy_info ei; /* Energy info for energy1_input */
@@ -95,47 +95,45 @@ static u32 xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg)
return reg.raw;
}
-static int xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
- enum xe_hwmon_reg_operation operation, u32 *value,
- u32 clr, u32 set)
+static void xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
+ enum xe_hwmon_reg_operation operation, u32 *value,
+ u32 clr, u32 set)
{
struct xe_reg reg;
reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
if (!reg.raw)
- return -EOPNOTSUPP;
+ return;
switch (operation) {
case REG_READ:
*value = xe_mmio_read32(hwmon->gt, reg);
- return 0;
+ break;
case REG_WRITE:
xe_mmio_write32(hwmon->gt, reg, *value);
- return 0;
+ break;
case REG_RMW:
*value = xe_mmio_rmw32(hwmon->gt, reg, clr, set);
- return 0;
+ break;
default:
drm_warn(&gt_to_xe(hwmon->gt)->drm, "Invalid xe hwmon reg operation: %d\n",
operation);
- return -EOPNOTSUPP;
+ break;
}
}
-static int xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
- enum xe_hwmon_reg hwmon_reg, u64 *value)
+static void xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
+ enum xe_hwmon_reg hwmon_reg, u64 *value)
{
struct xe_reg reg;
reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
if (!reg.raw)
- return -EOPNOTSUPP;
+ return;
*value = xe_mmio_read64_2x32(hwmon->gt, reg);
-
- return 0;
}
#define PL1_DISABLE 0
@@ -146,16 +144,18 @@ static int xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
* same pattern for sysfs, allow arbitrary PL1 limits to be set but display
* clamped values when read.
*/
-static int xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
+static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
{
u32 reg_val;
u64 reg_val64, min, max;
+ mutex_lock(&hwmon->hwmon_lock);
+
xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ, &reg_val, 0, 0);
/* Check if PL1 limit is disabled */
if (!(reg_val & PKG_PWR_LIM_1_EN)) {
*value = PL1_DISABLE;
- return 0;
+ goto unlock;
}
reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
@@ -169,14 +169,17 @@ static int xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
if (min && max)
*value = clamp_t(u64, *value, min, max);
-
- return 0;
+unlock:
+ mutex_unlock(&hwmon->hwmon_lock);
}
static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
{
+ int ret = 0;
u32 reg_val;
+ mutex_lock(&hwmon->hwmon_lock);
+
/* Disable PL1 limit and verify, as limit cannot be disabled on all platforms */
if (value == PL1_DISABLE) {
xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW, &reg_val,
@@ -184,8 +187,10 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ, &reg_val,
PKG_PWR_LIM_1_EN, 0);
- if (reg_val & PKG_PWR_LIM_1_EN)
- return -EOPNOTSUPP;
+ if (reg_val & PKG_PWR_LIM_1_EN) {
+ ret = -EOPNOTSUPP;
+ goto unlock;
+ }
}
/* Computation in 64-bits to avoid overflow. Round to nearest. */
@@ -194,19 +199,18 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW, &reg_val,
PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
-
- return 0;
+unlock:
+ mutex_unlock(&hwmon->hwmon_lock);
+ return ret;
}
-static int xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, long *value)
+static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, long *value)
{
u32 reg_val;
xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ, &reg_val, 0, 0);
reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
-
- return 0;
}
/*
@@ -235,10 +239,6 @@ xe_hwmon_energy_get(struct xe_hwmon *hwmon, long *energy)
struct xe_hwmon_energy_info *ei = &hwmon->ei;
u32 reg_val;
- xe_device_mem_access_get(gt_to_xe(hwmon->gt));
-
- mutex_lock(&hwmon->hwmon_lock);
-
xe_hwmon_process_reg(hwmon, REG_PKG_ENERGY_STATUS, REG_READ,
&reg_val, 0, 0);
@@ -251,10 +251,6 @@ xe_hwmon_energy_get(struct xe_hwmon *hwmon, long *energy)
*energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
hwmon->scl_shift_energy);
-
- mutex_unlock(&hwmon->hwmon_lock);
-
- xe_device_mem_access_put(gt_to_xe(hwmon->gt));
}
static const struct hwmon_channel_info *hwmon_info[] = {
@@ -284,6 +280,38 @@ static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32 uval)
uval);
}
+static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, long *value, u32 scale_factor)
+{
+ int ret;
+ u32 uval;
+
+ mutex_lock(&hwmon->hwmon_lock);
+
+ ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
+ if (ret)
+ goto unlock;
+
+ *value = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
+ scale_factor, POWER_SETUP_I1_SHIFT);
+unlock:
+ mutex_unlock(&hwmon->hwmon_lock);
+ return ret;
+}
+
+static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, long value, u32 scale_factor)
+{
+ int ret;
+ u32 uval;
+
+ mutex_lock(&hwmon->hwmon_lock);
+
+ uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor);
+ ret = xe_hwmon_pcode_write_i1(hwmon->gt, uval);
+
+ mutex_unlock(&hwmon->hwmon_lock);
+ return ret;
+}
+
static int xe_hwmon_get_voltage(struct xe_hwmon *hwmon, long *value)
{
u32 reg_val;
@@ -317,23 +345,15 @@ xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int chan)
static int
xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int chan, long *val)
{
- int ret;
- u32 uval;
-
switch (attr) {
case hwmon_power_max:
- return xe_hwmon_power_max_read(hwmon, val);
+ xe_hwmon_power_max_read(hwmon, val);
+ return 0;
case hwmon_power_rated_max:
- return xe_hwmon_power_rated_max_read(hwmon, val);
- case hwmon_power_crit:
- ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
- if (ret)
- return ret;
- if (!(uval & POWER_SETUP_I1_WATTS))
- return -ENODEV;
- *val = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
- SF_POWER, POWER_SETUP_I1_SHIFT);
+ xe_hwmon_power_rated_max_read(hwmon, val);
return 0;
+ case hwmon_power_crit:
+ return xe_hwmon_power_curr_crit_read(hwmon, val, SF_POWER);
default:
return -EOPNOTSUPP;
}
@@ -342,14 +362,11 @@ xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int chan, long *val)
static int
xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int chan, long val)
{
- u32 uval;
-
switch (attr) {
case hwmon_power_max:
return xe_hwmon_power_max_write(hwmon, val);
case hwmon_power_crit:
- uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT, SF_POWER);
- return xe_hwmon_pcode_write_i1(hwmon->gt, uval);
+ return xe_hwmon_power_curr_crit_write(hwmon, val, SF_POWER);
default:
return -EOPNOTSUPP;
}
@@ -372,19 +389,9 @@ xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr)
static int
xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, long *val)
{
- int ret;
- u32 uval;
-
switch (attr) {
case hwmon_curr_crit:
- ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
- if (ret)
- return ret;
- if (uval & POWER_SETUP_I1_WATTS)
- return -ENODEV;
- *val = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
- SF_CURR, POWER_SETUP_I1_SHIFT);
- return 0;
+ return xe_hwmon_power_curr_crit_read(hwmon, val, SF_CURR);
default:
return -EOPNOTSUPP;
}
@@ -393,12 +400,9 @@ xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, long *val)
static int
xe_hwmon_curr_write(struct xe_hwmon *hwmon, u32 attr, long val)
{
- u32 uval;
-
switch (attr) {
case hwmon_curr_crit:
- uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT, SF_CURR);
- return xe_hwmon_pcode_write_i1(hwmon->gt, uval);
+ return xe_hwmon_power_curr_crit_write(hwmon, val, SF_CURR);
default:
return -EOPNOTSUPP;
}
@@ -420,8 +424,6 @@ xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, long *val)
{
int ret;
- xe_device_mem_access_get(gt_to_xe(hwmon->gt));
-
switch (attr) {
case hwmon_in_input:
ret = xe_hwmon_get_voltage(hwmon, val);
@@ -430,8 +432,6 @@ xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, long *val)
ret = -EOPNOTSUPP;
}
- xe_device_mem_access_put(gt_to_xe(hwmon->gt));
-
return ret;
}
@@ -565,14 +565,13 @@ xe_hwmon_get_preregistration_info(struct xe_device *xe)
struct xe_hwmon *hwmon = xe->hwmon;
long energy;
u32 val_sku_unit = 0;
- int ret;
- ret = xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT, REG_READ, &val_sku_unit, 0, 0);
/*
* The contents of register PKG_POWER_SKU_UNIT do not change,
* so read it once and store the shift values.
*/
- if (!ret) {
+ if (xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT)) {
+ xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT, REG_READ, &val_sku_unit, 0, 0);
hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
}
--
2.25.1
^ permalink raw reply related [flat|nested] 19+ messages in thread
* [Intel-xe] [PATCH 1/2] drm/xe/hwmon: Protect hwmon rw attributes with hwmon_lock
@ 2023-10-06 17:02 ` Badal Nilawar
0 siblings, 0 replies; 19+ messages in thread
From: Badal Nilawar @ 2023-10-06 17:02 UTC (permalink / raw)
To: intel-xe, linux-hwmon; +Cc: rodrigo.vivi
Take hwmon_lock while accessing hwmon rw attributes. For read-only
attributes it's not required to take the lock, as reads are protected
by the sysfs layer and are therefore sequential.
Cc: Ashutosh Dixit <ashutosh.dixit@intel.com>
Cc: Anshuman Gupta <anshuman.gupta@intel.com>
Signed-off-by: Badal Nilawar <badal.nilawar@intel.com>
---
drivers/gpu/drm/xe/xe_hwmon.c | 143 +++++++++++++++++-----------------
1 file changed, 71 insertions(+), 72 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index 9ac05994a967..391f9a8dd9d7 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -47,7 +47,7 @@ struct xe_hwmon_energy_info {
struct xe_hwmon {
struct device *hwmon_dev;
struct xe_gt *gt;
- struct mutex hwmon_lock; /* rmw operations*/
+ struct mutex hwmon_lock; /* For rw attributes */
int scl_shift_power;
int scl_shift_energy;
struct xe_hwmon_energy_info ei; /* Energy info for energy1_input */
@@ -95,47 +95,45 @@ static u32 xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg)
return reg.raw;
}
-static int xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
- enum xe_hwmon_reg_operation operation, u32 *value,
- u32 clr, u32 set)
+static void xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
+ enum xe_hwmon_reg_operation operation, u32 *value,
+ u32 clr, u32 set)
{
struct xe_reg reg;
reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
if (!reg.raw)
- return -EOPNOTSUPP;
+ return;
switch (operation) {
case REG_READ:
*value = xe_mmio_read32(hwmon->gt, reg);
- return 0;
+ break;
case REG_WRITE:
xe_mmio_write32(hwmon->gt, reg, *value);
- return 0;
+ break;
case REG_RMW:
*value = xe_mmio_rmw32(hwmon->gt, reg, clr, set);
- return 0;
+ break;
default:
drm_warn(&gt_to_xe(hwmon->gt)->drm, "Invalid xe hwmon reg operation: %d\n",
operation);
- return -EOPNOTSUPP;
+ break;
}
}
-static int xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
- enum xe_hwmon_reg hwmon_reg, u64 *value)
+static void xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
+ enum xe_hwmon_reg hwmon_reg, u64 *value)
{
struct xe_reg reg;
reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
if (!reg.raw)
- return -EOPNOTSUPP;
+ return;
*value = xe_mmio_read64_2x32(hwmon->gt, reg);
-
- return 0;
}
#define PL1_DISABLE 0
@@ -146,16 +144,18 @@ static int xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
* same pattern for sysfs, allow arbitrary PL1 limits to be set but display
* clamped values when read.
*/
-static int xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
+static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
{
u32 reg_val;
u64 reg_val64, min, max;
+ mutex_lock(&hwmon->hwmon_lock);
+
xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ, &reg_val, 0, 0);
/* Check if PL1 limit is disabled */
if (!(reg_val & PKG_PWR_LIM_1_EN)) {
*value = PL1_DISABLE;
- return 0;
+ goto unlock;
}
reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
@@ -169,14 +169,17 @@ static int xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
if (min && max)
*value = clamp_t(u64, *value, min, max);
-
- return 0;
+unlock:
+ mutex_unlock(&hwmon->hwmon_lock);
}
static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
{
+ int ret = 0;
u32 reg_val;
+ mutex_lock(&hwmon->hwmon_lock);
+
/* Disable PL1 limit and verify, as limit cannot be disabled on all platforms */
if (value == PL1_DISABLE) {
xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW, &reg_val,
@@ -184,8 +187,10 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ, &reg_val,
PKG_PWR_LIM_1_EN, 0);
- if (reg_val & PKG_PWR_LIM_1_EN)
- return -EOPNOTSUPP;
+ if (reg_val & PKG_PWR_LIM_1_EN) {
+ ret = -EOPNOTSUPP;
+ goto unlock;
+ }
}
/* Computation in 64-bits to avoid overflow. Round to nearest. */
@@ -194,19 +199,18 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW, &reg_val,
PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
-
- return 0;
+unlock:
+ mutex_unlock(&hwmon->hwmon_lock);
+ return ret;
}
-static int xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, long *value)
+static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, long *value)
{
u32 reg_val;
xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ, &reg_val, 0, 0);
reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
-
- return 0;
}
/*
@@ -235,10 +239,6 @@ xe_hwmon_energy_get(struct xe_hwmon *hwmon, long *energy)
struct xe_hwmon_energy_info *ei = &hwmon->ei;
u32 reg_val;
- xe_device_mem_access_get(gt_to_xe(hwmon->gt));
-
- mutex_lock(&hwmon->hwmon_lock);
-
xe_hwmon_process_reg(hwmon, REG_PKG_ENERGY_STATUS, REG_READ,
&reg_val, 0, 0);
@@ -251,10 +251,6 @@ xe_hwmon_energy_get(struct xe_hwmon *hwmon, long *energy)
*energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
hwmon->scl_shift_energy);
-
- mutex_unlock(&hwmon->hwmon_lock);
-
- xe_device_mem_access_put(gt_to_xe(hwmon->gt));
}
static const struct hwmon_channel_info *hwmon_info[] = {
@@ -284,6 +280,38 @@ static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32 uval)
uval);
}
+static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, long *value, u32 scale_factor)
+{
+ int ret;
+ u32 uval;
+
+ mutex_lock(&hwmon->hwmon_lock);
+
+ ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
+ if (ret)
+ goto unlock;
+
+ *value = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
+ scale_factor, POWER_SETUP_I1_SHIFT);
+unlock:
+ mutex_unlock(&hwmon->hwmon_lock);
+ return ret;
+}
+
+static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, long value, u32 scale_factor)
+{
+ int ret;
+ u32 uval;
+
+ mutex_lock(&hwmon->hwmon_lock);
+
+ uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor);
+ ret = xe_hwmon_pcode_write_i1(hwmon->gt, uval);
+
+ mutex_unlock(&hwmon->hwmon_lock);
+ return ret;
+}
+
static int xe_hwmon_get_voltage(struct xe_hwmon *hwmon, long *value)
{
u32 reg_val;
@@ -317,23 +345,15 @@ xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int chan)
static int
xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int chan, long *val)
{
- int ret;
- u32 uval;
-
switch (attr) {
case hwmon_power_max:
- return xe_hwmon_power_max_read(hwmon, val);
+ xe_hwmon_power_max_read(hwmon, val);
+ return 0;
case hwmon_power_rated_max:
- return xe_hwmon_power_rated_max_read(hwmon, val);
- case hwmon_power_crit:
- ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
- if (ret)
- return ret;
- if (!(uval & POWER_SETUP_I1_WATTS))
- return -ENODEV;
- *val = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
- SF_POWER, POWER_SETUP_I1_SHIFT);
+ xe_hwmon_power_rated_max_read(hwmon, val);
return 0;
+ case hwmon_power_crit:
+ return xe_hwmon_power_curr_crit_read(hwmon, val, SF_POWER);
default:
return -EOPNOTSUPP;
}
@@ -342,14 +362,11 @@ xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int chan, long *val)
static int
xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int chan, long val)
{
- u32 uval;
-
switch (attr) {
case hwmon_power_max:
return xe_hwmon_power_max_write(hwmon, val);
case hwmon_power_crit:
- uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT, SF_POWER);
- return xe_hwmon_pcode_write_i1(hwmon->gt, uval);
+ return xe_hwmon_power_curr_crit_write(hwmon, val, SF_POWER);
default:
return -EOPNOTSUPP;
}
@@ -372,19 +389,9 @@ xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr)
static int
xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, long *val)
{
- int ret;
- u32 uval;
-
switch (attr) {
case hwmon_curr_crit:
- ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
- if (ret)
- return ret;
- if (uval & POWER_SETUP_I1_WATTS)
- return -ENODEV;
- *val = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
- SF_CURR, POWER_SETUP_I1_SHIFT);
- return 0;
+ return xe_hwmon_power_curr_crit_read(hwmon, val, SF_CURR);
default:
return -EOPNOTSUPP;
}
@@ -393,12 +400,9 @@ xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, long *val)
static int
xe_hwmon_curr_write(struct xe_hwmon *hwmon, u32 attr, long val)
{
- u32 uval;
-
switch (attr) {
case hwmon_curr_crit:
- uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT, SF_CURR);
- return xe_hwmon_pcode_write_i1(hwmon->gt, uval);
+ return xe_hwmon_power_curr_crit_write(hwmon, val, SF_CURR);
default:
return -EOPNOTSUPP;
}
@@ -420,8 +424,6 @@ xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, long *val)
{
int ret;
- xe_device_mem_access_get(gt_to_xe(hwmon->gt));
-
switch (attr) {
case hwmon_in_input:
ret = xe_hwmon_get_voltage(hwmon, val);
@@ -430,8 +432,6 @@ xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, long *val)
ret = -EOPNOTSUPP;
}
- xe_device_mem_access_put(gt_to_xe(hwmon->gt));
-
return ret;
}
@@ -565,14 +565,13 @@ xe_hwmon_get_preregistration_info(struct xe_device *xe)
struct xe_hwmon *hwmon = xe->hwmon;
long energy;
u32 val_sku_unit = 0;
- int ret;
- ret = xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT, REG_READ, &val_sku_unit, 0, 0);
/*
* The contents of register PKG_POWER_SKU_UNIT do not change,
* so read it once and store the shift values.
*/
- if (!ret) {
+ if (xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT)) {
+ xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT, REG_READ, &val_sku_unit, 0, 0);
hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
}
--
2.25.1
^ permalink raw reply related [flat|nested] 19+ messages in thread
* [PATCH 2/2] drm/xe/hwmon: Expose power1_max_interval
2023-10-06 17:02 ` [Intel-xe] " Badal Nilawar
@ 2023-10-06 17:02 ` Badal Nilawar
-1 siblings, 0 replies; 19+ messages in thread
From: Badal Nilawar @ 2023-10-06 17:02 UTC (permalink / raw)
To: intel-xe, linux-hwmon
Cc: anshuman.gupta, ashutosh.dixit, andi.shyti, riana.tauro, rodrigo.vivi
Expose power1_max_interval, that is the tau corresponding to PL1, as a
custom hwmon attribute. Some bit manipulation is needed because of the
format of PKG_PWR_LIM_1_TIME in
PACKAGE_RAPL_LIMIT register (1.x * power(2,y))
v2: Get rpm wake ref while accessing power1_max_interval
v3: %s/hwmon/xe_hwmon/
v4:
- As power1_max_interval is rw attr take lock in read function as well
- Refine the comment about the val to fixed point conversion (Andi)
- Update kernel version and date in doc
Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Signed-off-by: Badal Nilawar <badal.nilawar@intel.com>
---
.../ABI/testing/sysfs-driver-intel-xe-hwmon | 9 ++
drivers/gpu/drm/xe/regs/xe_mchbar_regs.h | 8 +
drivers/gpu/drm/xe/xe_hwmon.c | 142 +++++++++++++++++-
3 files changed, 158 insertions(+), 1 deletion(-)
diff --git a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
index 1a7a6c23e141..8c321bc9dc04 100644
--- a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
+++ b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
@@ -59,3 +59,12 @@ Contact: intel-xe@lists.freedesktop.org
Description: RO. Energy input of device in microjoules.
Only supported for particular Intel xe graphics platforms.
+
+What: /sys/devices/.../hwmon/hwmon<i>/power1_max_interval
+Date: October 2023
+KernelVersion: 6.6
+Contact: intel-xe@lists.freedesktop.org
+Description: RW. Sustained power limit interval (Tau in PL1/Tau) in
+ milliseconds over which sustained power is averaged.
+
+ Only supported for particular Intel xe graphics platforms.
diff --git a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
index d8ecbe1858d1..519dd1067a19 100644
--- a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
@@ -22,15 +22,23 @@
#define PKG_TDP GENMASK_ULL(14, 0)
#define PKG_MIN_PWR GENMASK_ULL(30, 16)
#define PKG_MAX_PWR GENMASK_ULL(46, 32)
+#define PKG_MAX_WIN GENMASK_ULL(54, 48)
+#define PKG_MAX_WIN_X GENMASK_ULL(54, 53)
+#define PKG_MAX_WIN_Y GENMASK_ULL(52, 48)
+
#define PCU_CR_PACKAGE_POWER_SKU_UNIT XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5938)
#define PKG_PWR_UNIT REG_GENMASK(3, 0)
#define PKG_ENERGY_UNIT REG_GENMASK(12, 8)
+#define PKG_TIME_UNIT REG_GENMASK(19, 16)
#define PCU_CR_PACKAGE_ENERGY_STATUS XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x593c)
#define PCU_CR_PACKAGE_RAPL_LIMIT XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x59a0)
#define PKG_PWR_LIM_1 REG_GENMASK(14, 0)
#define PKG_PWR_LIM_1_EN REG_BIT(15)
+#define PKG_PWR_LIM_1_TIME REG_GENMASK(23, 17)
+#define PKG_PWR_LIM_1_TIME_X REG_GENMASK(23, 22)
+#define PKG_PWR_LIM_1_TIME_Y REG_GENMASK(21, 17)
#endif /* _XE_MCHBAR_REGS_H_ */
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index 391f9a8dd9d7..867481c91826 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -38,6 +38,7 @@ enum xe_hwmon_reg_operation {
#define SF_CURR 1000 /* milliamperes */
#define SF_VOLTAGE 1000 /* millivolts */
#define SF_ENERGY 1000000 /* microjoules */
+#define SF_TIME 1000 /* milliseconds */
struct xe_hwmon_energy_info {
u32 reg_val_prev;
@@ -50,6 +51,7 @@ struct xe_hwmon {
struct mutex hwmon_lock; /* For rw attributes */
int scl_shift_power;
int scl_shift_energy;
+ int scl_shift_time;
struct xe_hwmon_energy_info ei; /* Energy info for energy1_input */
};
@@ -253,6 +255,142 @@ xe_hwmon_energy_get(struct xe_hwmon *hwmon, long *energy)
hwmon->scl_shift_energy);
}
+static ssize_t
+xe_hwmon_power1_max_interval_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct xe_hwmon *hwmon = dev_get_drvdata(dev);
+ u32 r, x, y, x_w = 2; /* 2 bits */
+ u64 tau4, out;
+
+ xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+
+ mutex_lock(&hwmon->hwmon_lock);
+
+ xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT,
+ REG_READ, &r, 0, 0);
+
+ mutex_unlock(&hwmon->hwmon_lock);
+
+ xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+
+ x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
+ y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);
+ /*
+ * tau = 1.x * power(2,y), x = bits(23:22), y = bits(21:17)
+ * = (4 | x) << (y - 2)
+ * where (y - 2) ensures a 1.x fixed point representation of 1.x
+ * However because y can be < 2, we compute
+ * tau4 = (4 | x) << y
+ * but add 2 when doing the final right shift to account for units
+ */
+ tau4 = ((1 << x_w) | x) << y;
+ /* val in hwmon interface units (millisec) */
+ out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
+
+ return sysfs_emit(buf, "%llu\n", out);
+}
+
+static ssize_t
+xe_hwmon_power1_max_interval_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct xe_hwmon *hwmon = dev_get_drvdata(dev);
+ u32 x, y, rxy, x_w = 2; /* 2 bits */
+ u64 tau4, r, max_win;
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ /*
+ * Max HW supported tau in '1.x * power(2,y)' format, x = 0, y = 0x12
+ * The hwmon->scl_shift_time default of 0xa results in a max tau of 256 seconds
+ */
+#define PKG_MAX_WIN_DEFAULT 0x12ull
+
+ /*
+ * val must be < max in hwmon interface units. The steps below are
+ * explained in xe_hwmon_power1_max_interval_show()
+ */
+ r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
+ x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
+ y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
+ tau4 = ((1 << x_w) | x) << y;
+ max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
+
+ if (val > max_win)
+ return -EINVAL;
+
+ /* val in hw units */
+ val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME);
+ /*
+ * Convert val to 1.x * power(2,y)
+ * y = ilog2(val)
+ * x = (val - (1 << y)) >> (y - 2)
+ */
+ if (!val) {
+ y = 0;
+ x = 0;
+ } else {
+ y = ilog2(val);
+ x = (val - (1ul << y)) << x_w >> y;
+ }
+
+ rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);
+
+ xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+
+ mutex_lock(&hwmon->hwmon_lock);
+
+ xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW, (u32 *)&r,
+ PKG_PWR_LIM_1_TIME, rxy);
+
+ mutex_unlock(&hwmon->hwmon_lock);
+
+ xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
+ xe_hwmon_power1_max_interval_show,
+ xe_hwmon_power1_max_interval_store, 0);
+
+static struct attribute *hwmon_attributes[] = {
+ &sensor_dev_attr_power1_max_interval.dev_attr.attr,
+ NULL
+};
+
+static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct xe_hwmon *hwmon = dev_get_drvdata(dev);
+ int ret = 0;
+
+ xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+
+ if (attr == &sensor_dev_attr_power1_max_interval.dev_attr.attr)
+ ret = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT) ? attr->mode : 0;
+
+ xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+
+ return ret;
+}
+
+static const struct attribute_group hwmon_attrgroup = {
+ .attrs = hwmon_attributes,
+ .is_visible = xe_hwmon_attributes_visible,
+};
+
+static const struct attribute_group *hwmon_groups[] = {
+ &hwmon_attrgroup,
+ NULL
+};
+
static const struct hwmon_channel_info *hwmon_info[] = {
HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT),
HWMON_CHANNEL_INFO(curr, HWMON_C_CRIT),
@@ -574,6 +712,7 @@ xe_hwmon_get_preregistration_info(struct xe_device *xe)
xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT, REG_READ, &val_sku_unit, 0, 0);
hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
+ hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);
}
/*
@@ -620,7 +759,8 @@ void xe_hwmon_register(struct xe_device *xe)
/* hwmon_dev points to device hwmon<i> */
hwmon->hwmon_dev = devm_hwmon_device_register_with_info(dev, "xe", hwmon,
&hwmon_chip_info,
- NULL);
+ hwmon_groups);
+
if (IS_ERR(hwmon->hwmon_dev)) {
drm_warn(&xe->drm, "Failed to register xe hwmon (%pe)\n", hwmon->hwmon_dev);
xe->hwmon = NULL;
--
2.25.1
^ permalink raw reply related [flat|nested] 19+ messages in thread
* [Intel-xe] [PATCH 2/2] drm/xe/hwmon: Expose power1_max_interval
@ 2023-10-06 17:02 ` Badal Nilawar
0 siblings, 0 replies; 19+ messages in thread
From: Badal Nilawar @ 2023-10-06 17:02 UTC (permalink / raw)
To: intel-xe, linux-hwmon; +Cc: rodrigo.vivi
Expose power1_max_interval, that is the tau corresponding to PL1, as a
custom hwmon attribute. Some bit manipulation is needed because of the
format of PKG_PWR_LIM_1_TIME in
PACKAGE_RAPL_LIMIT register (1.x * power(2,y))
v2: Get rpm wake ref while accessing power1_max_interval
v3: %s/hwmon/xe_hwmon/
v4:
- As power1_max_interval is rw attr take lock in read function as well
- Refine the comment about the val to fixed point conversion (Andi)
- Update kernel version and date in doc
Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Signed-off-by: Badal Nilawar <badal.nilawar@intel.com>
---
.../ABI/testing/sysfs-driver-intel-xe-hwmon | 9 ++
drivers/gpu/drm/xe/regs/xe_mchbar_regs.h | 8 +
drivers/gpu/drm/xe/xe_hwmon.c | 142 +++++++++++++++++-
3 files changed, 158 insertions(+), 1 deletion(-)
diff --git a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
index 1a7a6c23e141..8c321bc9dc04 100644
--- a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
+++ b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
@@ -59,3 +59,12 @@ Contact: intel-xe@lists.freedesktop.org
Description: RO. Energy input of device in microjoules.
Only supported for particular Intel xe graphics platforms.
+
+What: /sys/devices/.../hwmon/hwmon<i>/power1_max_interval
+Date: October 2023
+KernelVersion: 6.6
+Contact: intel-xe@lists.freedesktop.org
+Description: RW. Sustained power limit interval (Tau in PL1/Tau) in
+ milliseconds over which sustained power is averaged.
+
+ Only supported for particular Intel xe graphics platforms.
diff --git a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
index d8ecbe1858d1..519dd1067a19 100644
--- a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
@@ -22,15 +22,23 @@
#define PKG_TDP GENMASK_ULL(14, 0)
#define PKG_MIN_PWR GENMASK_ULL(30, 16)
#define PKG_MAX_PWR GENMASK_ULL(46, 32)
+#define PKG_MAX_WIN GENMASK_ULL(54, 48)
+#define PKG_MAX_WIN_X GENMASK_ULL(54, 53)
+#define PKG_MAX_WIN_Y GENMASK_ULL(52, 48)
+
#define PCU_CR_PACKAGE_POWER_SKU_UNIT XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5938)
#define PKG_PWR_UNIT REG_GENMASK(3, 0)
#define PKG_ENERGY_UNIT REG_GENMASK(12, 8)
+#define PKG_TIME_UNIT REG_GENMASK(19, 16)
#define PCU_CR_PACKAGE_ENERGY_STATUS XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x593c)
#define PCU_CR_PACKAGE_RAPL_LIMIT XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x59a0)
#define PKG_PWR_LIM_1 REG_GENMASK(14, 0)
#define PKG_PWR_LIM_1_EN REG_BIT(15)
+#define PKG_PWR_LIM_1_TIME REG_GENMASK(23, 17)
+#define PKG_PWR_LIM_1_TIME_X REG_GENMASK(23, 22)
+#define PKG_PWR_LIM_1_TIME_Y REG_GENMASK(21, 17)
#endif /* _XE_MCHBAR_REGS_H_ */
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index 391f9a8dd9d7..867481c91826 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -38,6 +38,7 @@ enum xe_hwmon_reg_operation {
#define SF_CURR 1000 /* milliamperes */
#define SF_VOLTAGE 1000 /* millivolts */
#define SF_ENERGY 1000000 /* microjoules */
+#define SF_TIME 1000 /* milliseconds */
struct xe_hwmon_energy_info {
u32 reg_val_prev;
@@ -50,6 +51,7 @@ struct xe_hwmon {
struct mutex hwmon_lock; /* For rw attributes */
int scl_shift_power;
int scl_shift_energy;
+ int scl_shift_time;
struct xe_hwmon_energy_info ei; /* Energy info for energy1_input */
};
@@ -253,6 +255,142 @@ xe_hwmon_energy_get(struct xe_hwmon *hwmon, long *energy)
hwmon->scl_shift_energy);
}
+static ssize_t
+xe_hwmon_power1_max_interval_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct xe_hwmon *hwmon = dev_get_drvdata(dev);
+ u32 r, x, y, x_w = 2; /* 2 bits */
+ u64 tau4, out;
+
+ xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+
+ mutex_lock(&hwmon->hwmon_lock);
+
+ xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT,
+ REG_READ, &r, 0, 0);
+
+ mutex_unlock(&hwmon->hwmon_lock);
+
+ xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+
+ x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
+ y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);
+ /*
+ * tau = 1.x * power(2,y), x = bits(23:22), y = bits(21:17)
+ * = (4 | x) << (y - 2)
+ * where (y - 2) ensures a 1.x fixed point representation of 1.x
+ * However because y can be < 2, we compute
+ * tau4 = (4 | x) << y
+ * but add 2 when doing the final right shift to account for units
+ */
+ tau4 = ((1 << x_w) | x) << y;
+ /* val in hwmon interface units (millisec) */
+ out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
+
+ return sysfs_emit(buf, "%llu\n", out);
+}
+
+static ssize_t
+xe_hwmon_power1_max_interval_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct xe_hwmon *hwmon = dev_get_drvdata(dev);
+ u32 x, y, rxy, x_w = 2; /* 2 bits */
+ u64 tau4, r, max_win;
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ /*
+ * Max HW supported tau in '1.x * power(2,y)' format, x = 0, y = 0x12
+ * The hwmon->scl_shift_time default of 0xa results in a max tau of 256 seconds
+ */
+#define PKG_MAX_WIN_DEFAULT 0x12ull
+
+ /*
+ * val must be < max in hwmon interface units. The steps below are
+ * explained in xe_hwmon_power1_max_interval_show()
+ */
+ r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
+ x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
+ y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
+ tau4 = ((1 << x_w) | x) << y;
+ max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
+
+ if (val > max_win)
+ return -EINVAL;
+
+ /* val in hw units */
+ val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME);
+ /*
+ * Convert val to 1.x * power(2,y)
+ * y = ilog2(val)
+ * x = (val - (1 << y)) >> (y - 2)
+ */
+ if (!val) {
+ y = 0;
+ x = 0;
+ } else {
+ y = ilog2(val);
+ x = (val - (1ul << y)) << x_w >> y;
+ }
+
+ rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);
+
+ xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+
+ mutex_lock(&hwmon->hwmon_lock);
+
+ xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW, (u32 *)&r,
+ PKG_PWR_LIM_1_TIME, rxy);
+
+ mutex_unlock(&hwmon->hwmon_lock);
+
+ xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
+ xe_hwmon_power1_max_interval_show,
+ xe_hwmon_power1_max_interval_store, 0);
+
+static struct attribute *hwmon_attributes[] = {
+ &sensor_dev_attr_power1_max_interval.dev_attr.attr,
+ NULL
+};
+
+static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct xe_hwmon *hwmon = dev_get_drvdata(dev);
+ int ret = 0;
+
+ xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+
+ if (attr == &sensor_dev_attr_power1_max_interval.dev_attr.attr)
+ ret = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT) ? attr->mode : 0;
+
+ xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+
+ return ret;
+}
+
+static const struct attribute_group hwmon_attrgroup = {
+ .attrs = hwmon_attributes,
+ .is_visible = xe_hwmon_attributes_visible,
+};
+
+static const struct attribute_group *hwmon_groups[] = {
+ &hwmon_attrgroup,
+ NULL
+};
+
static const struct hwmon_channel_info *hwmon_info[] = {
HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT),
HWMON_CHANNEL_INFO(curr, HWMON_C_CRIT),
@@ -574,6 +712,7 @@ xe_hwmon_get_preregistration_info(struct xe_device *xe)
xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT, REG_READ, &val_sku_unit, 0, 0);
hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
+ hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);
}
/*
@@ -620,7 +759,8 @@ void xe_hwmon_register(struct xe_device *xe)
/* hwmon_dev points to device hwmon<i> */
hwmon->hwmon_dev = devm_hwmon_device_register_with_info(dev, "xe", hwmon,
&hwmon_chip_info,
- NULL);
+ hwmon_groups);
+
if (IS_ERR(hwmon->hwmon_dev)) {
drm_warn(&xe->drm, "Failed to register xe hwmon (%pe)\n", hwmon->hwmon_dev);
xe->hwmon = NULL;
--
2.25.1
^ permalink raw reply related [flat|nested] 19+ messages in thread
* [Intel-xe] ✗ CI.Patch_applied: failure for DGFX HWMON Enhancement
2023-10-06 17:02 ` [Intel-xe] " Badal Nilawar
` (2 preceding siblings ...)
(?)
@ 2023-10-06 18:43 ` Patchwork
-1 siblings, 0 replies; 19+ messages in thread
From: Patchwork @ 2023-10-06 18:43 UTC (permalink / raw)
To: Badal Nilawar; +Cc: intel-xe
== Series Details ==
Series: DGFX HWMON Enhancement
URL : https://patchwork.freedesktop.org/series/124740/
State : failure
== Summary ==
=== Applying kernel patches on branch 'drm-xe-next' with base: ===
Base commit: d4af396a2 fixup! drm/xe/pat: Prefer the arch/IP names
=== git am output follows ===
Applying: drm/xe/hwmon: Protect hwmon rw attributes with hwmon_lock
Applying: drm/xe/hwmon: Expose power1_max_interval
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [PATCH 1/2] drm/xe/hwmon: Protect hwmon rw attributes with hwmon_lock
2023-10-06 17:02 ` [Intel-xe] " Badal Nilawar
@ 2023-10-09 13:36 ` Gupta, Anshuman
-1 siblings, 0 replies; 19+ messages in thread
From: Gupta, Anshuman @ 2023-10-09 13:36 UTC (permalink / raw)
To: Badal Nilawar, intel-xe, linux-hwmon
Cc: ashutosh.dixit, andi.shyti, riana.tauro, rodrigo.vivi
On 10/6/2023 10:32 PM, Badal Nilawar wrote:
> Take hwmon_lock while accessing hwmon rw attributes. For readonly
> attributes it's not required to take lock as reads are protected
> by sysfs layer and therefore sequential.
>
> Cc: Ashutosh Dixit <ashutosh.dixit@intel.com>
> Cc: Anshuman Gupta <anshuman.gupta@intel.com>
> Signed-off-by: Badal Nilawar <badal.nilawar@intel.com>
> ---
> drivers/gpu/drm/xe/xe_hwmon.c | 143 +++++++++++++++++-----------------
> 1 file changed, 71 insertions(+), 72 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
> index 9ac05994a967..391f9a8dd9d7 100644
> --- a/drivers/gpu/drm/xe/xe_hwmon.c
> +++ b/drivers/gpu/drm/xe/xe_hwmon.c
> @@ -47,7 +47,7 @@ struct xe_hwmon_energy_info {
> struct xe_hwmon {
> struct device *hwmon_dev;
> struct xe_gt *gt;
> - struct mutex hwmon_lock; /* rmw operations*/
> + struct mutex hwmon_lock; /* For rw attributes */
> int scl_shift_power;
> int scl_shift_energy;
> struct xe_hwmon_energy_info ei; /* Energy info for energy1_input */
> @@ -95,47 +95,45 @@ static u32 xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg)
> return reg.raw;
> }
>
> -static int xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
> - enum xe_hwmon_reg_operation operation, u32 *value,
> - u32 clr, u32 set)
> +static void xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
> + enum xe_hwmon_reg_operation operation, u32 *value,
> + u32 clr, u32 set)
> {
> struct xe_reg reg;
>
> reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
>
> if (!reg.raw)
> - return -EOPNOTSUPP;
> + return;
Don't we need to report this as warning?
What is the possibility of the code path landing here.
>
> switch (operation) {
> case REG_READ:
> *value = xe_mmio_read32(hwmon->gt, reg);
> - return 0;
> + break;
> case REG_WRITE:
> xe_mmio_write32(hwmon->gt, reg, *value);
> - return 0;
> + break;
> case REG_RMW:
> *value = xe_mmio_rmw32(hwmon->gt, reg, clr, set);
> - return 0;
> + break;
> default:
> drm_warn(&gt_to_xe(hwmon->gt)->drm, "Invalid xe hwmon reg operation: %d\n",
> operation);
> - return -EOPNOTSUPP;
> + break;
> }
> }
>
> -static int xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
> - enum xe_hwmon_reg hwmon_reg, u64 *value)
> +static void xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
> + enum xe_hwmon_reg hwmon_reg, u64 *value)
Isn't it possible to pass len of void * value to xe_hwmon_process_reg()
so we can wrap this function in xe_hwmon_process_reg ?
> {
> struct xe_reg reg;
>
> reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
>
> if (!reg.raw)
> - return -EOPNOTSUPP;
> + return;
>
> *value = xe_mmio_read64_2x32(hwmon->gt, reg);
> -
> - return 0;
> }
>
> #define PL1_DISABLE 0
> @@ -146,16 +144,18 @@ static int xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
> * same pattern for sysfs, allow arbitrary PL1 limits to be set but display
> * clamped values when read.
> */
> -static int xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
> +static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
> {
> u32 reg_val;
> u64 reg_val64, min, max;
>
> + mutex_lock(&hwmon->hwmon_lock);
> +
> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ, &reg_val, 0, 0);
> /* Check if PL1 limit is disabled */
> if (!(reg_val & PKG_PWR_LIM_1_EN)) {
> *value = PL1_DISABLE;
> - return 0;
> + goto unlock;
> }
>
> reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
> @@ -169,14 +169,17 @@ static int xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
>
> if (min && max)
> *value = clamp_t(u64, *value, min, max);
> -
> - return 0;
> +unlock:
> + mutex_unlock(&hwmon->hwmon_lock);
> }
>
> static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
> {
> + int ret = 0;
> u32 reg_val;
>
> + mutex_lock(&hwmon->hwmon_lock);
> +
> /* Disable PL1 limit and verify, as limit cannot be disabled on all platforms */
> if (value == PL1_DISABLE) {
> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW, &reg_val,
> @@ -184,8 +187,10 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ, &reg_val,
> PKG_PWR_LIM_1_EN, 0);
>
> - if (reg_val & PKG_PWR_LIM_1_EN)
> - return -EOPNOTSUPP;
> + if (reg_val & PKG_PWR_LIM_1_EN) {
> + ret = -EOPNOTSUPP;
> + goto unlock;
> + }
> }
>
> /* Computation in 64-bits to avoid overflow. Round to nearest. */
> @@ -194,19 +199,18 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
>
> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW, &reg_val,
> PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
> -
> - return 0;
> +unlock:
> + mutex_unlock(&hwmon->hwmon_lock);
> + return ret;
> }
>
> -static int xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, long *value)
> +static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, long *value)
> {
> u32 reg_val;
>
> xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ, &reg_val, 0, 0);
> reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
> *value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
> -
> - return 0;
> }
>
> /*
> @@ -235,10 +239,6 @@ xe_hwmon_energy_get(struct xe_hwmon *hwmon, long *energy)
> struct xe_hwmon_energy_info *ei = &hwmon->ei;
> u32 reg_val;
>
> - xe_device_mem_access_get(gt_to_xe(hwmon->gt));
> -
> - mutex_lock(&hwmon->hwmon_lock);
> -
> xe_hwmon_process_reg(hwmon, REG_PKG_ENERGY_STATUS, REG_READ,
> &reg_val, 0, 0);
>
> @@ -251,10 +251,6 @@ xe_hwmon_energy_get(struct xe_hwmon *hwmon, long *energy)
>
> *energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
> hwmon->scl_shift_energy);
> -
> - mutex_unlock(&hwmon->hwmon_lock);
> -
> - xe_device_mem_access_put(gt_to_xe(hwmon->gt));
> }
>
> static const struct hwmon_channel_info *hwmon_info[] = {
> @@ -284,6 +280,38 @@ static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32 uval)
> uval);
> }
>
> +static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, long *value, u32 scale_factor)
> +{
> + int ret;
> + u32 uval;
> +
> + mutex_lock(&hwmon->hwmon_lock);
> +
> + ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
> + if (ret)
> + goto unlock;
> +
> + *value = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
> + scale_factor, POWER_SETUP_I1_SHIFT);
> +unlock:
> + mutex_unlock(&hwmon->hwmon_lock);
> + return ret;
> +}
> +
> +static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, long value, u32 scale_factor)
> +{
> + int ret;
> + u32 uval;
> +
> + mutex_lock(&hwmon->hwmon_lock);
> +
> + uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor);
> + ret = xe_hwmon_pcode_write_i1(hwmon->gt, uval);
> +
> + mutex_unlock(&hwmon->hwmon_lock);
> + return ret;
> +}
> +
> static int xe_hwmon_get_voltage(struct xe_hwmon *hwmon, long *value)
> {
> u32 reg_val;
> @@ -317,23 +345,15 @@ xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int chan)
> static int
> xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int chan, long *val)
> {
> - int ret;
> - u32 uval;
> -
> switch (attr) {
> case hwmon_power_max:
> - return xe_hwmon_power_max_read(hwmon, val);
> + xe_hwmon_power_max_read(hwmon, val);
> + return 0;
> case hwmon_power_rated_max:
> - return xe_hwmon_power_rated_max_read(hwmon, val);
> - case hwmon_power_crit:
> - ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
> - if (ret)
> - return ret;
> - if (!(uval & POWER_SETUP_I1_WATTS))
> - return -ENODEV;
> - *val = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
> - SF_POWER, POWER_SETUP_I1_SHIFT);
> + xe_hwmon_power_rated_max_read(hwmon, val);
> return 0;
> + case hwmon_power_crit:
> + return xe_hwmon_power_curr_crit_read(hwmon, val, SF_POWER);
> default:
> return -EOPNOTSUPP;
> }
> @@ -342,14 +362,11 @@ xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int chan, long *val)
> static int
> xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int chan, long val)
> {
> - u32 uval;
> -
> switch (attr) {
> case hwmon_power_max:
> return xe_hwmon_power_max_write(hwmon, val);
> case hwmon_power_crit:
> - uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT, SF_POWER);
> - return xe_hwmon_pcode_write_i1(hwmon->gt, uval);
> + return xe_hwmon_power_curr_crit_write(hwmon, val, SF_POWER);
> default:
> return -EOPNOTSUPP;
> }
> @@ -372,19 +389,9 @@ xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr)
> static int
> xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, long *val)
> {
> - int ret;
> - u32 uval;
> -
> switch (attr) {
> case hwmon_curr_crit:
> - ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
> - if (ret)
> - return ret;
> - if (uval & POWER_SETUP_I1_WATTS)
> - return -ENODEV;
> - *val = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
> - SF_CURR, POWER_SETUP_I1_SHIFT);
> - return 0;
> + return xe_hwmon_power_curr_crit_read(hwmon, val, SF_CURR);
> default:
> return -EOPNOTSUPP;
> }
> @@ -393,12 +400,9 @@ xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, long *val)
> static int
> xe_hwmon_curr_write(struct xe_hwmon *hwmon, u32 attr, long val)
> {
> - u32 uval;
> -
> switch (attr) {
> case hwmon_curr_crit:
> - uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT, SF_CURR);
> - return xe_hwmon_pcode_write_i1(hwmon->gt, uval);
> + return xe_hwmon_power_curr_crit_write(hwmon, val, SF_CURR);
> default:
> return -EOPNOTSUPP;
> }
> @@ -420,8 +424,6 @@ xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, long *val)
> {
> int ret;
>
> - xe_device_mem_access_get(gt_to_xe(hwmon->gt));
> -
> switch (attr) {
> case hwmon_in_input:
> ret = xe_hwmon_get_voltage(hwmon, val);
> @@ -430,8 +432,6 @@ xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, long *val)
> ret = -EOPNOTSUPP;
> }
>
> - xe_device_mem_access_put(gt_to_xe(hwmon->gt));
> -
> return ret;
> }
>
> @@ -565,14 +565,13 @@ xe_hwmon_get_preregistration_info(struct xe_device *xe)
> struct xe_hwmon *hwmon = xe->hwmon;
> long energy;
> u32 val_sku_unit = 0;
> - int ret;
>
> - ret = xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT, REG_READ, &val_sku_unit, 0, 0);
> /*
> * The contents of register PKG_POWER_SKU_UNIT do not change,
> * so read it once and store the shift values.
> */
> - if (!ret) {
> + if (xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT)) {
> + xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT, REG_READ, &val_sku_unit, 0, 0);
> hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
> hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
> }
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [Intel-xe] [PATCH 1/2] drm/xe/hwmon: Protect hwmon rw attributes with hwmon_lock
@ 2023-10-09 13:36 ` Gupta, Anshuman
0 siblings, 0 replies; 19+ messages in thread
From: Gupta, Anshuman @ 2023-10-09 13:36 UTC (permalink / raw)
To: Badal Nilawar, intel-xe, linux-hwmon; +Cc: rodrigo.vivi
On 10/6/2023 10:32 PM, Badal Nilawar wrote:
> Take hwmon_lock while accessing hwmon rw attributes. For readonly
> attributes it's not required to take lock as reads are protected
> by sysfs layer and therefore sequential.
>
> Cc: Ashutosh Dixit <ashutosh.dixit@intel.com>
> Cc: Anshuman Gupta <anshuman.gupta@intel.com>
> Signed-off-by: Badal Nilawar <badal.nilawar@intel.com>
> ---
> drivers/gpu/drm/xe/xe_hwmon.c | 143 +++++++++++++++++-----------------
> 1 file changed, 71 insertions(+), 72 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
> index 9ac05994a967..391f9a8dd9d7 100644
> --- a/drivers/gpu/drm/xe/xe_hwmon.c
> +++ b/drivers/gpu/drm/xe/xe_hwmon.c
> @@ -47,7 +47,7 @@ struct xe_hwmon_energy_info {
> struct xe_hwmon {
> struct device *hwmon_dev;
> struct xe_gt *gt;
> - struct mutex hwmon_lock; /* rmw operations*/
> + struct mutex hwmon_lock; /* For rw attributes */
> int scl_shift_power;
> int scl_shift_energy;
> struct xe_hwmon_energy_info ei; /* Energy info for energy1_input */
> @@ -95,47 +95,45 @@ static u32 xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg)
> return reg.raw;
> }
>
> -static int xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
> - enum xe_hwmon_reg_operation operation, u32 *value,
> - u32 clr, u32 set)
> +static void xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
> + enum xe_hwmon_reg_operation operation, u32 *value,
> + u32 clr, u32 set)
> {
> struct xe_reg reg;
>
> reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
>
> if (!reg.raw)
> - return -EOPNOTSUPP;
> + return;
Don't we need to report this as warning?
What is the possibility of the code path landing here.
>
> switch (operation) {
> case REG_READ:
> *value = xe_mmio_read32(hwmon->gt, reg);
> - return 0;
> + break;
> case REG_WRITE:
> xe_mmio_write32(hwmon->gt, reg, *value);
> - return 0;
> + break;
> case REG_RMW:
> *value = xe_mmio_rmw32(hwmon->gt, reg, clr, set);
> - return 0;
> + break;
> default:
> drm_warn(>_to_xe(hwmon->gt)->drm, "Invalid xe hwmon reg operation: %d\n",
> operation);
> - return -EOPNOTSUPP;
> + break;
> }
> }
>
> -static int xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
> - enum xe_hwmon_reg hwmon_reg, u64 *value)
> +static void xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
> + enum xe_hwmon_reg hwmon_reg, u64 *value)
Isn't it possible to pass len of void * value to xe_hwmon_process_reg()
so we can wrap this function in xe_hwmon_process_reg ?
> {
> struct xe_reg reg;
>
> reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
>
> if (!reg.raw)
> - return -EOPNOTSUPP;
> + return;
>
> *value = xe_mmio_read64_2x32(hwmon->gt, reg);
> -
> - return 0;
> }
>
> #define PL1_DISABLE 0
> @@ -146,16 +144,18 @@ static int xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
> * same pattern for sysfs, allow arbitrary PL1 limits to be set but display
> * clamped values when read.
> */
> -static int xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
> +static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
> {
> u32 reg_val;
> u64 reg_val64, min, max;
>
> + mutex_lock(&hwmon->hwmon_lock);
> +
> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ, &reg_val, 0, 0);
> /* Check if PL1 limit is disabled */
> if (!(reg_val & PKG_PWR_LIM_1_EN)) {
> *value = PL1_DISABLE;
> - return 0;
> + goto unlock;
> }
>
> reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
> @@ -169,14 +169,17 @@ static int xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
>
> if (min && max)
> *value = clamp_t(u64, *value, min, max);
> -
> - return 0;
> +unlock:
> + mutex_unlock(&hwmon->hwmon_lock);
> }
>
> static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
> {
> + int ret = 0;
> u32 reg_val;
>
> + mutex_lock(&hwmon->hwmon_lock);
> +
> /* Disable PL1 limit and verify, as limit cannot be disabled on all platforms */
> if (value == PL1_DISABLE) {
> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW, &reg_val,
> @@ -184,8 +187,10 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ, &reg_val,
> PKG_PWR_LIM_1_EN, 0);
>
> - if (reg_val & PKG_PWR_LIM_1_EN)
> - return -EOPNOTSUPP;
> + if (reg_val & PKG_PWR_LIM_1_EN) {
> + ret = -EOPNOTSUPP;
> + goto unlock;
> + }
> }
>
> /* Computation in 64-bits to avoid overflow. Round to nearest. */
> @@ -194,19 +199,18 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
>
> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW, &reg_val,
> PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
> -
> - return 0;
> +unlock:
> + mutex_unlock(&hwmon->hwmon_lock);
> + return ret;
> }
>
> -static int xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, long *value)
> +static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, long *value)
> {
> u32 reg_val;
>
> xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ, &reg_val, 0, 0);
> reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
> *value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
> -
> - return 0;
> }
>
> /*
> @@ -235,10 +239,6 @@ xe_hwmon_energy_get(struct xe_hwmon *hwmon, long *energy)
> struct xe_hwmon_energy_info *ei = &hwmon->ei;
> u32 reg_val;
>
> - xe_device_mem_access_get(gt_to_xe(hwmon->gt));
> -
> - mutex_lock(&hwmon->hwmon_lock);
> -
> xe_hwmon_process_reg(hwmon, REG_PKG_ENERGY_STATUS, REG_READ,
> &reg_val, 0, 0);
>
> @@ -251,10 +251,6 @@ xe_hwmon_energy_get(struct xe_hwmon *hwmon, long *energy)
>
> *energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
> hwmon->scl_shift_energy);
> -
> - mutex_unlock(&hwmon->hwmon_lock);
> -
> - xe_device_mem_access_put(gt_to_xe(hwmon->gt));
> }
>
> static const struct hwmon_channel_info *hwmon_info[] = {
> @@ -284,6 +280,38 @@ static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32 uval)
> uval);
> }
>
> +static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, long *value, u32 scale_factor)
> +{
> + int ret;
> + u32 uval;
> +
> + mutex_lock(&hwmon->hwmon_lock);
> +
> + ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
> + if (ret)
> + goto unlock;
> +
> + *value = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
> + scale_factor, POWER_SETUP_I1_SHIFT);
> +unlock:
> + mutex_unlock(&hwmon->hwmon_lock);
> + return ret;
> +}
> +
> +static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, long value, u32 scale_factor)
> +{
> + int ret;
> + u32 uval;
> +
> + mutex_lock(&hwmon->hwmon_lock);
> +
> + uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor);
> + ret = xe_hwmon_pcode_write_i1(hwmon->gt, uval);
> +
> + mutex_unlock(&hwmon->hwmon_lock);
> + return ret;
> +}
> +
> static int xe_hwmon_get_voltage(struct xe_hwmon *hwmon, long *value)
> {
> u32 reg_val;
> @@ -317,23 +345,15 @@ xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int chan)
> static int
> xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int chan, long *val)
> {
> - int ret;
> - u32 uval;
> -
> switch (attr) {
> case hwmon_power_max:
> - return xe_hwmon_power_max_read(hwmon, val);
> + xe_hwmon_power_max_read(hwmon, val);
> + return 0;
> case hwmon_power_rated_max:
> - return xe_hwmon_power_rated_max_read(hwmon, val);
> - case hwmon_power_crit:
> - ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
> - if (ret)
> - return ret;
> - if (!(uval & POWER_SETUP_I1_WATTS))
> - return -ENODEV;
> - *val = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
> - SF_POWER, POWER_SETUP_I1_SHIFT);
> + xe_hwmon_power_rated_max_read(hwmon, val);
> return 0;
> + case hwmon_power_crit:
> + return xe_hwmon_power_curr_crit_read(hwmon, val, SF_POWER);
> default:
> return -EOPNOTSUPP;
> }
> @@ -342,14 +362,11 @@ xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int chan, long *val)
> static int
> xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int chan, long val)
> {
> - u32 uval;
> -
> switch (attr) {
> case hwmon_power_max:
> return xe_hwmon_power_max_write(hwmon, val);
> case hwmon_power_crit:
> - uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT, SF_POWER);
> - return xe_hwmon_pcode_write_i1(hwmon->gt, uval);
> + return xe_hwmon_power_curr_crit_write(hwmon, val, SF_POWER);
> default:
> return -EOPNOTSUPP;
> }
> @@ -372,19 +389,9 @@ xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr)
> static int
> xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, long *val)
> {
> - int ret;
> - u32 uval;
> -
> switch (attr) {
> case hwmon_curr_crit:
> - ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
> - if (ret)
> - return ret;
> - if (uval & POWER_SETUP_I1_WATTS)
> - return -ENODEV;
> - *val = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
> - SF_CURR, POWER_SETUP_I1_SHIFT);
> - return 0;
> + return xe_hwmon_power_curr_crit_read(hwmon, val, SF_CURR);
> default:
> return -EOPNOTSUPP;
> }
> @@ -393,12 +400,9 @@ xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, long *val)
> static int
> xe_hwmon_curr_write(struct xe_hwmon *hwmon, u32 attr, long val)
> {
> - u32 uval;
> -
> switch (attr) {
> case hwmon_curr_crit:
> - uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT, SF_CURR);
> - return xe_hwmon_pcode_write_i1(hwmon->gt, uval);
> + return xe_hwmon_power_curr_crit_write(hwmon, val, SF_CURR);
> default:
> return -EOPNOTSUPP;
> }
> @@ -420,8 +424,6 @@ xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, long *val)
> {
> int ret;
>
> - xe_device_mem_access_get(gt_to_xe(hwmon->gt));
> -
> switch (attr) {
> case hwmon_in_input:
> ret = xe_hwmon_get_voltage(hwmon, val);
> @@ -430,8 +432,6 @@ xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, long *val)
> ret = -EOPNOTSUPP;
> }
>
> - xe_device_mem_access_put(gt_to_xe(hwmon->gt));
> -
> return ret;
> }
>
> @@ -565,14 +565,13 @@ xe_hwmon_get_preregistration_info(struct xe_device *xe)
> struct xe_hwmon *hwmon = xe->hwmon;
> long energy;
> u32 val_sku_unit = 0;
> - int ret;
>
> - ret = xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT, REG_READ, &val_sku_unit, 0, 0);
> /*
> * The contents of register PKG_POWER_SKU_UNIT do not change,
> * so read it once and store the shift values.
> */
> - if (!ret) {
> + if (xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT)) {
> + xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT, REG_READ, &val_sku_unit, 0, 0);
> hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
> hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
> }
^ permalink raw reply [flat|nested] 19+ messages in thread
* RE: [PATCH 2/2] drm/xe/hwmon: Expose power1_max_interval
2023-10-06 17:02 ` [Intel-xe] " Badal Nilawar
@ 2023-10-11 5:59 ` Gupta, Anshuman
-1 siblings, 0 replies; 19+ messages in thread
From: Gupta, Anshuman @ 2023-10-11 5:59 UTC (permalink / raw)
To: Nilawar, Badal, intel-xe, linux-hwmon
Cc: Dixit, Ashutosh, andi.shyti, Tauro, Riana, Vivi, Rodrigo
> -----Original Message-----
> From: Nilawar, Badal <badal.nilawar@intel.com>
> Sent: Friday, October 6, 2023 10:32 PM
> To: intel-xe@lists.freedesktop.org; linux-hwmon@vger.kernel.org
> Cc: Gupta, Anshuman <anshuman.gupta@intel.com>; Dixit, Ashutosh
> <ashutosh.dixit@intel.com>; andi.shyti@linux.intel.com; Tauro, Riana
> <riana.tauro@intel.com>; Vivi, Rodrigo <rodrigo.vivi@intel.com>
> Subject: [PATCH 2/2] drm/xe/hwmon: Expose power1_max_interval
>
> Expose power1_max_interval, that is the tau corresponding to PL1, as a
> custom hwmon attribute. Some bit manipulation is needed because of the
> format of PKG_PWR_LIM_1_TIME in PACKAGE_RAPL_LIMIT register (1.x *
> power(2,y))
>
> v2: Get rpm wake ref while accessing power1_max_interval
> v3: %s/hwmon/xe_hwmon/
> v4:
> - As power1_max_interval is rw attr take lock in read function as well
> - Refine comment about val to fix point conversion (Andi)
> - Update kernel version and date in doc
>
> Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
> Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> Signed-off-by: Badal Nilawar <badal.nilawar@intel.com>
> ---
> .../ABI/testing/sysfs-driver-intel-xe-hwmon | 9 ++
> drivers/gpu/drm/xe/regs/xe_mchbar_regs.h | 8 +
> drivers/gpu/drm/xe/xe_hwmon.c | 142 +++++++++++++++++-
> 3 files changed, 158 insertions(+), 1 deletion(-)
>
> diff --git a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
> b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
> index 1a7a6c23e141..8c321bc9dc04 100644
> --- a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
> +++ b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
> @@ -59,3 +59,12 @@ Contact: intel-xe@lists.freedesktop.org
> Description: RO. Energy input of device in microjoules.
>
> Only supported for particular Intel xe graphics platforms.
> +
> +What: /sys/devices/.../hwmon/hwmon<i>/power1_max_interval
> +Date: October 2023
> +KernelVersion: 6.6
> +Contact: intel-xe@lists.freedesktop.org
> +Description: RW. Sustained power limit interval (Tau in PL1/Tau) in
> + milliseconds over which sustained power is averaged.
> +
> + Only supported for particular Intel xe graphics platforms.
> diff --git a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
> b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
> index d8ecbe1858d1..519dd1067a19 100644
> --- a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
> +++ b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
> @@ -22,15 +22,23 @@
> #define PKG_TDP GENMASK_ULL(14, 0)
> #define PKG_MIN_PWR GENMASK_ULL(30,
> 16)
> #define PKG_MAX_PWR GENMASK_ULL(46,
> 32)
> +#define PKG_MAX_WIN GENMASK_ULL(54,
> 48)
> +#define PKG_MAX_WIN_X GENMASK_ULL(54, 53)
> +#define PKG_MAX_WIN_Y GENMASK_ULL(52, 48)
> +
>
> #define PCU_CR_PACKAGE_POWER_SKU_UNIT
> XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5938)
> #define PKG_PWR_UNIT REG_GENMASK(3, 0)
> #define PKG_ENERGY_UNIT REG_GENMASK(12, 8)
> +#define PKG_TIME_UNIT REG_GENMASK(19,
> 16)
>
> #define PCU_CR_PACKAGE_ENERGY_STATUS
> XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x593c)
>
> #define PCU_CR_PACKAGE_RAPL_LIMIT
> XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x59a0)
> #define PKG_PWR_LIM_1 REG_GENMASK(14,
> 0)
> #define PKG_PWR_LIM_1_EN REG_BIT(15)
> +#define PKG_PWR_LIM_1_TIME REG_GENMASK(23,
> 17)
> +#define PKG_PWR_LIM_1_TIME_X REG_GENMASK(23,
> 22)
> +#define PKG_PWR_LIM_1_TIME_Y REG_GENMASK(21,
> 17)
>
> #endif /* _XE_MCHBAR_REGS_H_ */
> diff --git a/drivers/gpu/drm/xe/xe_hwmon.c
> b/drivers/gpu/drm/xe/xe_hwmon.c index 391f9a8dd9d7..867481c91826
> 100644
> --- a/drivers/gpu/drm/xe/xe_hwmon.c
> +++ b/drivers/gpu/drm/xe/xe_hwmon.c
> @@ -38,6 +38,7 @@ enum xe_hwmon_reg_operation {
> #define SF_CURR 1000 /* milliamperes */
> #define SF_VOLTAGE 1000 /* millivolts */
> #define SF_ENERGY 1000000 /* microjoules */
> +#define SF_TIME 1000 /* milliseconds */
>
> struct xe_hwmon_energy_info {
> u32 reg_val_prev;
> @@ -50,6 +51,7 @@ struct xe_hwmon {
> struct mutex hwmon_lock; /* For rw attributes */
> int scl_shift_power;
> int scl_shift_energy;
> + int scl_shift_time;
> struct xe_hwmon_energy_info ei; /* Energy info for
> energy1_input */
> };
>
> @@ -253,6 +255,142 @@ xe_hwmon_energy_get(struct xe_hwmon
> *hwmon, long *energy)
> hwmon->scl_shift_energy);
> }
>
> +static ssize_t
> +xe_hwmon_power1_max_interval_show(struct device *dev, struct
> device_attribute *attr,
> + char *buf)
> +{
> + struct xe_hwmon *hwmon = dev_get_drvdata(dev);
> + u32 r, x, y, x_w = 2; /* 2 bits */
> + u64 tau4, out;
> +
> + xe_device_mem_access_get(gt_to_xe(hwmon->gt));
> +
> + mutex_lock(&hwmon->hwmon_lock);
> +
> + xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT,
> + REG_READ, &r, 0, 0);
> +
> + mutex_unlock(&hwmon->hwmon_lock);
> +
> + xe_device_mem_access_put(gt_to_xe(hwmon->gt));
> +
> + x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
> + y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);
Need space here.
> + /*
> + * tau = 1.x * power(2,y), x = bits(23:22), y = bits(21:17)
> + * = (4 | x) << (y - 2)
> + * where (y - 2) ensures a 1.x fixed point representation of 1.x
> + * However because y can be < 2, we compute
> + * tau4 = (4 | x) << y
> + * but add 2 when doing the final right shift to account for units
> + */
Please reformat the comment to make it readable.
> + tau4 = ((1 << x_w) | x) << y;
Need space here.
> + /* val in hwmon interface units (millisec) */
> + out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time +
> x_w);
> +
> + return sysfs_emit(buf, "%llu\n", out); }
> +
> +static ssize_t
> +xe_hwmon_power1_max_interval_store(struct device *dev, struct
> device_attribute *attr,
> + const char *buf, size_t count)
> +{
> + struct xe_hwmon *hwmon = dev_get_drvdata(dev);
> + u32 x, y, rxy, x_w = 2; /* 2 bits */
> + u64 tau4, r, max_win;
> + unsigned long val;
> + int ret;
> +
> + ret = kstrtoul(buf, 0, &val);
> + if (ret)
> + return ret;
> +
> + /*
> + * Max HW supported tau in '1.x * power(2,y)' format, x = 0, y = 0x12
> + * The hwmon->scl_shift_time default of 0xa results in a max tau of
> 256 seconds
> + */
> +#define PKG_MAX_WIN_DEFAULT 0x12ull
Is it a universal truth that we are always going to use this default MAX_WIN value?
AFAIR from the i915 review, it was due to some h/w limitation on DG2 that we were forced to use
this default value instead of reading it from the registers. If this is true, future platforms
will probably address such a h/w limitation — is it worth leaving a code comment here?
> +
> + /*
> + * val must be < max in hwmon interface units. The steps below are
> + * explained in xe_hwmon_power1_max_interval_show()
> + */
> + r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
> + x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
> + y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
> + tau4 = ((1 << x_w) | x) << y;
> + max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time
> + x_w);
> +
> + if (val > max_win)
> + return -EINVAL;
> +
> + /* val in hw units */
> + val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time,
> SF_TIME);
Need space here.
Please leave a blank line before a code comment.
Thanks,
Anshuman Gupta.
> + /*
> + * Convert val to 1.x * power(2,y)
> + * y = ilog2(val)
> + * x = (val - (1 << y)) >> (y - 2)
> + */
> + if (!val) {
> + y = 0;
> + x = 0;
> + } else {
> + y = ilog2(val);
> + x = (val - (1ul << y)) << x_w >> y;
> + }
> +
> + rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) |
> +REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);
> +
> + xe_device_mem_access_get(gt_to_xe(hwmon->gt));
> +
> + mutex_lock(&hwmon->hwmon_lock);
> +
> + xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW,
> (u32 *)&r,
> + PKG_PWR_LIM_1_TIME, rxy);
> +
> + mutex_unlock(&hwmon->hwmon_lock);
> +
> + xe_device_mem_access_put(gt_to_xe(hwmon->gt));
> +
> + return count;
> +}
> +
> +static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
> + xe_hwmon_power1_max_interval_show,
> + xe_hwmon_power1_max_interval_store, 0);
> +
> +static struct attribute *hwmon_attributes[] = {
> + &sensor_dev_attr_power1_max_interval.dev_attr.attr,
> + NULL
> +};
> +
> +static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
> + struct attribute *attr, int index) {
> + struct device *dev = kobj_to_dev(kobj);
> + struct xe_hwmon *hwmon = dev_get_drvdata(dev);
> + int ret = 0;
> +
> + xe_device_mem_access_get(gt_to_xe(hwmon->gt));
> +
> + if (attr == &sensor_dev_attr_power1_max_interval.dev_attr.attr)
> + ret = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT) ?
> attr->mode : 0;
> +
> + xe_device_mem_access_put(gt_to_xe(hwmon->gt));
> +
> + return ret;
> +}
> +
> +static const struct attribute_group hwmon_attrgroup = {
> + .attrs = hwmon_attributes,
> + .is_visible = xe_hwmon_attributes_visible, };
> +
> +static const struct attribute_group *hwmon_groups[] = {
> + &hwmon_attrgroup,
> + NULL
> +};
> +
> static const struct hwmon_channel_info *hwmon_info[] = {
> HWMON_CHANNEL_INFO(power, HWMON_P_MAX |
> HWMON_P_RATED_MAX | HWMON_P_CRIT),
> HWMON_CHANNEL_INFO(curr, HWMON_C_CRIT), @@ -574,6
> +712,7 @@ xe_hwmon_get_preregistration_info(struct xe_device *xe)
> xe_hwmon_process_reg(hwmon,
> REG_PKG_POWER_SKU_UNIT, REG_READ, &val_sku_unit, 0, 0);
> hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT,
> val_sku_unit);
> hwmon->scl_shift_energy =
> REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
> + hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT,
> val_sku_unit);
> }
>
> /*
> @@ -620,7 +759,8 @@ void xe_hwmon_register(struct xe_device *xe)
> /* hwmon_dev points to device hwmon<i> */
> hwmon->hwmon_dev =
> devm_hwmon_device_register_with_info(dev, "xe", hwmon,
>
> &hwmon_chip_info,
> - NULL);
> +
> hwmon_groups);
> +
> if (IS_ERR(hwmon->hwmon_dev)) {
> drm_warn(&xe->drm, "Failed to register xe hwmon (%pe)\n",
> hwmon->hwmon_dev);
> xe->hwmon = NULL;
> --
> 2.25.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [Intel-xe] [PATCH 2/2] drm/xe/hwmon: Expose power1_max_interval
@ 2023-10-11 5:59 ` Gupta, Anshuman
0 siblings, 0 replies; 19+ messages in thread
From: Gupta, Anshuman @ 2023-10-11 5:59 UTC (permalink / raw)
To: Nilawar, Badal, intel-xe, linux-hwmon; +Cc: Vivi, Rodrigo
> -----Original Message-----
> From: Nilawar, Badal <badal.nilawar@intel.com>
> Sent: Friday, October 6, 2023 10:32 PM
> To: intel-xe@lists.freedesktop.org; linux-hwmon@vger.kernel.org
> Cc: Gupta, Anshuman <anshuman.gupta@intel.com>; Dixit, Ashutosh
> <ashutosh.dixit@intel.com>; andi.shyti@linux.intel.com; Tauro, Riana
> <riana.tauro@intel.com>; Vivi, Rodrigo <rodrigo.vivi@intel.com>
> Subject: [PATCH 2/2] drm/xe/hwmon: Expose power1_max_interval
>
> Expose power1_max_interval, that is the tau corresponding to PL1, as a
> custom hwmon attribute. Some bit manipulation is needed because of the
> format of PKG_PWR_LIM_1_TIME in PACKAGE_RAPL_LIMIT register (1.x *
> power(2,y))
>
> v2: Get rpm wake ref while accessing power1_max_interval
> v3: %s/hwmon/xe_hwmon/
> v4:
> - As power1_max_interval is rw attr take lock in read function as well
> - Refine comment about val to fix point conversion (Andi)
> - Update kernel version and date in doc
>
> Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
> Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> Signed-off-by: Badal Nilawar <badal.nilawar@intel.com>
> ---
> .../ABI/testing/sysfs-driver-intel-xe-hwmon | 9 ++
> drivers/gpu/drm/xe/regs/xe_mchbar_regs.h | 8 +
> drivers/gpu/drm/xe/xe_hwmon.c | 142 +++++++++++++++++-
> 3 files changed, 158 insertions(+), 1 deletion(-)
>
> diff --git a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
> b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
> index 1a7a6c23e141..8c321bc9dc04 100644
> --- a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
> +++ b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
> @@ -59,3 +59,12 @@ Contact: intel-xe@lists.freedesktop.org
> Description: RO. Energy input of device in microjoules.
>
> Only supported for particular Intel xe graphics platforms.
> +
> +What: /sys/devices/.../hwmon/hwmon<i>/power1_max_interval
> +Date: October 2023
> +KernelVersion: 6.6
> +Contact: intel-xe@lists.freedesktop.org
> +Description: RW. Sustained power limit interval (Tau in PL1/Tau) in
> + milliseconds over which sustained power is averaged.
> +
> + Only supported for particular Intel xe graphics platforms.
> diff --git a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
> b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
> index d8ecbe1858d1..519dd1067a19 100644
> --- a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
> +++ b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
> @@ -22,15 +22,23 @@
> #define PKG_TDP GENMASK_ULL(14, 0)
> #define PKG_MIN_PWR GENMASK_ULL(30,
> 16)
> #define PKG_MAX_PWR GENMASK_ULL(46,
> 32)
> +#define PKG_MAX_WIN GENMASK_ULL(54,
> 48)
> +#define PKG_MAX_WIN_X GENMASK_ULL(54, 53)
> +#define PKG_MAX_WIN_Y GENMASK_ULL(52, 48)
> +
>
> #define PCU_CR_PACKAGE_POWER_SKU_UNIT
> XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5938)
> #define PKG_PWR_UNIT REG_GENMASK(3, 0)
> #define PKG_ENERGY_UNIT REG_GENMASK(12, 8)
> +#define PKG_TIME_UNIT REG_GENMASK(19,
> 16)
>
> #define PCU_CR_PACKAGE_ENERGY_STATUS
> XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x593c)
>
> #define PCU_CR_PACKAGE_RAPL_LIMIT
> XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x59a0)
> #define PKG_PWR_LIM_1 REG_GENMASK(14,
> 0)
> #define PKG_PWR_LIM_1_EN REG_BIT(15)
> +#define PKG_PWR_LIM_1_TIME REG_GENMASK(23,
> 17)
> +#define PKG_PWR_LIM_1_TIME_X REG_GENMASK(23,
> 22)
> +#define PKG_PWR_LIM_1_TIME_Y REG_GENMASK(21,
> 17)
>
> #endif /* _XE_MCHBAR_REGS_H_ */
> diff --git a/drivers/gpu/drm/xe/xe_hwmon.c
> b/drivers/gpu/drm/xe/xe_hwmon.c index 391f9a8dd9d7..867481c91826
> 100644
> --- a/drivers/gpu/drm/xe/xe_hwmon.c
> +++ b/drivers/gpu/drm/xe/xe_hwmon.c
> @@ -38,6 +38,7 @@ enum xe_hwmon_reg_operation {
> #define SF_CURR 1000 /* milliamperes */
> #define SF_VOLTAGE 1000 /* millivolts */
> #define SF_ENERGY 1000000 /* microjoules */
> +#define SF_TIME 1000 /* milliseconds */
>
> struct xe_hwmon_energy_info {
> u32 reg_val_prev;
> @@ -50,6 +51,7 @@ struct xe_hwmon {
> struct mutex hwmon_lock; /* For rw attributes */
> int scl_shift_power;
> int scl_shift_energy;
> + int scl_shift_time;
> struct xe_hwmon_energy_info ei; /* Energy info for
> energy1_input */
> };
>
> @@ -253,6 +255,142 @@ xe_hwmon_energy_get(struct xe_hwmon
> *hwmon, long *energy)
> hwmon->scl_shift_energy);
> }
>
> +static ssize_t
> +xe_hwmon_power1_max_interval_show(struct device *dev, struct
> device_attribute *attr,
> + char *buf)
> +{
> + struct xe_hwmon *hwmon = dev_get_drvdata(dev);
> + u32 r, x, y, x_w = 2; /* 2 bits */
> + u64 tau4, out;
> +
> + xe_device_mem_access_get(gt_to_xe(hwmon->gt));
> +
> + mutex_lock(&hwmon->hwmon_lock);
> +
> + xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT,
> + REG_READ, &r, 0, 0);
> +
> + mutex_unlock(&hwmon->hwmon_lock);
> +
> + xe_device_mem_access_put(gt_to_xe(hwmon->gt));
> +
> + x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
> + y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);
Need space here.
> + /*
> + * tau = 1.x * power(2,y), x = bits(23:22), y = bits(21:17)
> + * = (4 | x) << (y - 2)
> + * where (y - 2) ensures a 1.x fixed point representation of 1.x
> + * However because y can be < 2, we compute
> + * tau4 = (4 | x) << y
> + * but add 2 when doing the final right shift to account for units
> + */
Please reformat the comment to make it readable.
> + tau4 = ((1 << x_w) | x) << y;
Need space here.
> + /* val in hwmon interface units (millisec) */
> + out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time +
> x_w);
> +
> + return sysfs_emit(buf, "%llu\n", out); }
> +
> +static ssize_t
> +xe_hwmon_power1_max_interval_store(struct device *dev, struct
> device_attribute *attr,
> + const char *buf, size_t count)
> +{
> + struct xe_hwmon *hwmon = dev_get_drvdata(dev);
> + u32 x, y, rxy, x_w = 2; /* 2 bits */
> + u64 tau4, r, max_win;
> + unsigned long val;
> + int ret;
> +
> + ret = kstrtoul(buf, 0, &val);
> + if (ret)
> + return ret;
> +
> + /*
> + * Max HW supported tau in '1.x * power(2,y)' format, x = 0, y = 0x12
> + * The hwmon->scl_shift_time default of 0xa results in a max tau of
> 256 seconds
> + */
> +#define PKG_MAX_WIN_DEFAULT 0x12ull
Is it a universal truth that we are always going to use this default MAX_WIN value?
AFAIR from the i915 review, it was due to some h/w limitation on DG2 that we were forced to use
this default value instead of reading it from the registers. If this is true, future platforms
will probably address such a h/w limitation — is it worth leaving a code comment here?
> +
> + /*
> + * val must be < max in hwmon interface units. The steps below are
> + * explained in xe_hwmon_power1_max_interval_show()
> + */
> + r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
> + x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
> + y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
> + tau4 = ((1 << x_w) | x) << y;
> + max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time
> + x_w);
> +
> + if (val > max_win)
> + return -EINVAL;
> +
> + /* val in hw units */
> + val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time,
> SF_TIME);
Need space here.
Please leave a blank line before a code comment.
Thanks,
Anshuman Gupta.
> + /*
> + * Convert val to 1.x * power(2,y)
> + * y = ilog2(val)
> + * x = (val - (1 << y)) >> (y - 2)
> + */
> + if (!val) {
> + y = 0;
> + x = 0;
> + } else {
> + y = ilog2(val);
> + x = (val - (1ul << y)) << x_w >> y;
> + }
> +
> + rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) |
> +REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);
> +
> + xe_device_mem_access_get(gt_to_xe(hwmon->gt));
> +
> + mutex_lock(&hwmon->hwmon_lock);
> +
> + xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW,
> (u32 *)&r,
> + PKG_PWR_LIM_1_TIME, rxy);
> +
> + mutex_unlock(&hwmon->hwmon_lock);
> +
> + xe_device_mem_access_put(gt_to_xe(hwmon->gt));
> +
> + return count;
> +}
> +
> +static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
> + xe_hwmon_power1_max_interval_show,
> + xe_hwmon_power1_max_interval_store, 0);
> +
> +static struct attribute *hwmon_attributes[] = {
> + &sensor_dev_attr_power1_max_interval.dev_attr.attr,
> + NULL
> +};
> +
> +static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
> + struct attribute *attr, int index) {
> + struct device *dev = kobj_to_dev(kobj);
> + struct xe_hwmon *hwmon = dev_get_drvdata(dev);
> + int ret = 0;
> +
> + xe_device_mem_access_get(gt_to_xe(hwmon->gt));
> +
> + if (attr == &sensor_dev_attr_power1_max_interval.dev_attr.attr)
> + ret = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT) ?
> attr->mode : 0;
> +
> + xe_device_mem_access_put(gt_to_xe(hwmon->gt));
> +
> + return ret;
> +}
> +
> +static const struct attribute_group hwmon_attrgroup = {
> + .attrs = hwmon_attributes,
> + .is_visible = xe_hwmon_attributes_visible, };
> +
> +static const struct attribute_group *hwmon_groups[] = {
> + &hwmon_attrgroup,
> + NULL
> +};
> +
> static const struct hwmon_channel_info *hwmon_info[] = {
> HWMON_CHANNEL_INFO(power, HWMON_P_MAX |
> HWMON_P_RATED_MAX | HWMON_P_CRIT),
> HWMON_CHANNEL_INFO(curr, HWMON_C_CRIT), @@ -574,6
> +712,7 @@ xe_hwmon_get_preregistration_info(struct xe_device *xe)
> xe_hwmon_process_reg(hwmon,
> REG_PKG_POWER_SKU_UNIT, REG_READ, &val_sku_unit, 0, 0);
> hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT,
> val_sku_unit);
> hwmon->scl_shift_energy =
> REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
> + hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT,
> val_sku_unit);
> }
>
> /*
> @@ -620,7 +759,8 @@ void xe_hwmon_register(struct xe_device *xe)
> /* hwmon_dev points to device hwmon<i> */
> hwmon->hwmon_dev =
> devm_hwmon_device_register_with_info(dev, "xe", hwmon,
>
> &hwmon_chip_info,
> - NULL);
> +
> hwmon_groups);
> +
> if (IS_ERR(hwmon->hwmon_dev)) {
> drm_warn(&xe->drm, "Failed to register xe hwmon (%pe)\n",
> hwmon->hwmon_dev);
> xe->hwmon = NULL;
> --
> 2.25.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [PATCH 1/2] drm/xe/hwmon: Protect hwmon rw attributes with hwmon_lock
2023-10-09 13:36 ` [Intel-xe] " Gupta, Anshuman
@ 2023-10-16 7:33 ` Nilawar, Badal
-1 siblings, 0 replies; 19+ messages in thread
From: Nilawar, Badal @ 2023-10-16 7:33 UTC (permalink / raw)
To: Gupta, Anshuman, intel-xe, linux-hwmon
Cc: ashutosh.dixit, andi.shyti, riana.tauro, rodrigo.vivi
Hi Anshuman,
On 09-10-2023 19:06, Gupta, Anshuman wrote:
>
>
> On 10/6/2023 10:32 PM, Badal Nilawar wrote:
>> Take hwmon_lock while accessing hwmon rw attributes. For readonly
>> attributes its not required to take lock as reads are protected
>> by sysfs layer and therefore sequential.
>>
>> Cc: Ashutosh Dixit <ashutosh.dixit@intel.com>
>> Cc: Anshuman Gupta <anshuman.gupta@intel.com>
>> Signed-off-by: Badal Nilawar <badal.nilawar@intel.com>
>> ---
>> drivers/gpu/drm/xe/xe_hwmon.c | 143 +++++++++++++++++-----------------
>> 1 file changed, 71 insertions(+), 72 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_hwmon.c
>> b/drivers/gpu/drm/xe/xe_hwmon.c
>> index 9ac05994a967..391f9a8dd9d7 100644
>> --- a/drivers/gpu/drm/xe/xe_hwmon.c
>> +++ b/drivers/gpu/drm/xe/xe_hwmon.c
>> @@ -47,7 +47,7 @@ struct xe_hwmon_energy_info {
>> struct xe_hwmon {
>> struct device *hwmon_dev;
>> struct xe_gt *gt;
>> - struct mutex hwmon_lock; /* rmw operations*/
>> + struct mutex hwmon_lock; /* For rw attributes */
>> int scl_shift_power;
>> int scl_shift_energy;
>> struct xe_hwmon_energy_info ei; /* Energy info for
>> energy1_input */
>> @@ -95,47 +95,45 @@ static u32 xe_hwmon_get_reg(struct xe_hwmon
>> *hwmon, enum xe_hwmon_reg hwmon_reg)
>> return reg.raw;
>> }
>> -static int xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum
>> xe_hwmon_reg hwmon_reg,
>> - enum xe_hwmon_reg_operation operation, u32 *value,
>> - u32 clr, u32 set)
>> +static void xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum
>> xe_hwmon_reg hwmon_reg,
>> + enum xe_hwmon_reg_operation operation, u32 *value,
>> + u32 clr, u32 set)
>> {
>> struct xe_reg reg;
>> reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
>> if (!reg.raw)
>> - return -EOPNOTSUPP;
>> + return;
> Don't we need to report this as a warning?
> What is the possibility of the code path landing here?
A warning is added in xe_hwmon_get_reg when the reg is invalid. A warning is not
needed when the reg is not supported by the platform. During runtime the code path
will not land here, as the visibility functions take care of creating
hwmon entries only if the regs are supported. So it doesn't make sense to
add a warn here.
>> switch (operation) {
>> case REG_READ:
>> *value = xe_mmio_read32(hwmon->gt, reg);
>> - return 0;
>> + break;
>> case REG_WRITE:
>> xe_mmio_write32(hwmon->gt, reg, *value);
>> - return 0;
>> + break;
>> case REG_RMW:
>> *value = xe_mmio_rmw32(hwmon->gt, reg, clr, set);
>> - return 0;
>> + break;
>> default:
>> drm_warn(>_to_xe(hwmon->gt)->drm, "Invalid xe hwmon reg
>> operation: %d\n",
>> operation);
>> - return -EOPNOTSUPP;
>> + break;
>> }
>> }
>> -static int xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
>> - enum xe_hwmon_reg hwmon_reg, u64 *value)
>> +static void xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
>> + enum xe_hwmon_reg hwmon_reg, u64 *value)Isn't it
>> possible to pass len of void * value to xe_hwmon_process_reg()
> so we can wrap this function in xe_hwmon_process_reg?
Yes, it's possible, but I feel it would be more useful if there were regs of
variable lengths, i.e. other than 64 or 32 bit. As of now the hwmon regs are
32 and 64 bits in length, so I preferred 2 separate functions. If you insist,
I will wrap it.
Regards,
Badal
>
>> {
>> struct xe_reg reg;
>> reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
>> if (!reg.raw)
>> - return -EOPNOTSUPP;
>> + return;
>> *value = xe_mmio_read64_2x32(hwmon->gt, reg);
>> -
>> - return 0;
>> }
>> #define PL1_DISABLE 0
>> @@ -146,16 +144,18 @@ static int xe_hwmon_process_reg_read64(struct
>> xe_hwmon *hwmon,
>> * same pattern for sysfs, allow arbitrary PL1 limits to be set but
>> display
>> * clamped values when read.
>> */
>> -static int xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
>> +static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
>> {
>> u32 reg_val;
>> u64 reg_val64, min, max;
>> + mutex_lock(&hwmon->hwmon_lock);
>> +
>> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ,
>> ®_val, 0, 0);
>> /* Check if PL1 limit is disabled */
>> if (!(reg_val & PKG_PWR_LIM_1_EN)) {
>> *value = PL1_DISABLE;
>> - return 0;
>> + goto unlock;
>> }
>> reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
>> @@ -169,14 +169,17 @@ static int xe_hwmon_power_max_read(struct
>> xe_hwmon *hwmon, long *value)
>> if (min && max)
>> *value = clamp_t(u64, *value, min, max);
>> -
>> - return 0;
>> +unlock:
>> + mutex_unlock(&hwmon->hwmon_lock);
>> }
>> static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
>> {
>> + int ret = 0;
>> u32 reg_val;
>> + mutex_lock(&hwmon->hwmon_lock);
>> +
>> /* Disable PL1 limit and verify, as limit cannot be disabled on
>> all platforms */
>> if (value == PL1_DISABLE) {
>> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW,
>> ®_val,
>> @@ -184,8 +187,10 @@ static int xe_hwmon_power_max_write(struct
>> xe_hwmon *hwmon, long value)
>> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ,
>> ®_val,
>> PKG_PWR_LIM_1_EN, 0);
>> - if (reg_val & PKG_PWR_LIM_1_EN)
>> - return -EOPNOTSUPP;
>> + if (reg_val & PKG_PWR_LIM_1_EN) {
>> + ret = -EOPNOTSUPP;
>> + goto unlock;
>> + }
>> }
>> /* Computation in 64-bits to avoid overflow. Round to nearest. */
>> @@ -194,19 +199,18 @@ static int xe_hwmon_power_max_write(struct
>> xe_hwmon *hwmon, long value)
>> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW, ®_val,
>> PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
>> -
>> - return 0;
>> +unlock:
>> + mutex_unlock(&hwmon->hwmon_lock);
>> + return ret;
>> }
>> -static int xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, long
>> *value)
>> +static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon,
>> long *value)
>> {
>> u32 reg_val;
>> xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ,
>> ®_val, 0, 0);
>> reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
>> *value = mul_u64_u32_shr(reg_val, SF_POWER,
>> hwmon->scl_shift_power);
>> -
>> - return 0;
>> }
>> /*
>> @@ -235,10 +239,6 @@ xe_hwmon_energy_get(struct xe_hwmon *hwmon, long
>> *energy)
>> struct xe_hwmon_energy_info *ei = &hwmon->ei;
>> u32 reg_val;
>> - xe_device_mem_access_get(gt_to_xe(hwmon->gt));
>> -
>> - mutex_lock(&hwmon->hwmon_lock);
>> -
>> xe_hwmon_process_reg(hwmon, REG_PKG_ENERGY_STATUS, REG_READ,
>> ®_val, 0, 0);
>> @@ -251,10 +251,6 @@ xe_hwmon_energy_get(struct xe_hwmon *hwmon, long
>> *energy)
>> *energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
>> hwmon->scl_shift_energy);
>> -
>> - mutex_unlock(&hwmon->hwmon_lock);
>> -
>> - xe_device_mem_access_put(gt_to_xe(hwmon->gt));
>> }
>> static const struct hwmon_channel_info *hwmon_info[] = {
>> @@ -284,6 +280,38 @@ static int xe_hwmon_pcode_write_i1(struct xe_gt
>> *gt, u32 uval)
>> uval);
>> }
>> +static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, long
>> *value, u32 scale_factor)
>> +{
>> + int ret;
>> + u32 uval;
>> +
>> + mutex_lock(&hwmon->hwmon_lock);
>> +
>> + ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
>> + if (ret)
>> + goto unlock;
>> +
>> + *value = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK,
>> uval),
>> + scale_factor, POWER_SETUP_I1_SHIFT);
>> +unlock:
>> + mutex_unlock(&hwmon->hwmon_lock);
>> + return ret;
>> +}
>> +
>> +static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon,
>> long value, u32 scale_factor)
>> +{
>> + int ret;
>> + u32 uval;
>> +
>> + mutex_lock(&hwmon->hwmon_lock);
>> +
>> + uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT,
>> scale_factor);
>> + ret = xe_hwmon_pcode_write_i1(hwmon->gt, uval);
>> +
>> + mutex_unlock(&hwmon->hwmon_lock);
>> + return ret;
>> +}
>> +
>> static int xe_hwmon_get_voltage(struct xe_hwmon *hwmon, long *value)
>> {
>> u32 reg_val;
>> @@ -317,23 +345,15 @@ xe_hwmon_power_is_visible(struct xe_hwmon
>> *hwmon, u32 attr, int chan)
>> static int
>> xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int chan, long
>> *val)
>> {
>> - int ret;
>> - u32 uval;
>> -
>> switch (attr) {
>> case hwmon_power_max:
>> - return xe_hwmon_power_max_read(hwmon, val);
>> + xe_hwmon_power_max_read(hwmon, val);
>> + return 0;
>> case hwmon_power_rated_max:
>> - return xe_hwmon_power_rated_max_read(hwmon, val);
>> - case hwmon_power_crit:
>> - ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
>> - if (ret)
>> - return ret;
>> - if (!(uval & POWER_SETUP_I1_WATTS))
>> - return -ENODEV;
>> - *val =
>> mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
>> - SF_POWER, POWER_SETUP_I1_SHIFT);
>> + xe_hwmon_power_rated_max_read(hwmon, val);
>> return 0;
>> + case hwmon_power_crit:
>> + return xe_hwmon_power_curr_crit_read(hwmon, val, SF_POWER);
>> default:
>> return -EOPNOTSUPP;
>> }
>> @@ -342,14 +362,11 @@ xe_hwmon_power_read(struct xe_hwmon *hwmon, u32
>> attr, int chan, long *val)
>> static int
>> xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int chan,
>> long val)
>> {
>> - u32 uval;
>> -
>> switch (attr) {
>> case hwmon_power_max:
>> return xe_hwmon_power_max_write(hwmon, val);
>> case hwmon_power_crit:
>> - uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT,
>> SF_POWER);
>> - return xe_hwmon_pcode_write_i1(hwmon->gt, uval);
>> + return xe_hwmon_power_curr_crit_write(hwmon, val, SF_POWER);
>> default:
>> return -EOPNOTSUPP;
>> }
>> @@ -372,19 +389,9 @@ xe_hwmon_curr_is_visible(const struct xe_hwmon
>> *hwmon, u32 attr)
>> static int
>> xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, long *val)
>> {
>> - int ret;
>> - u32 uval;
>> -
>> switch (attr) {
>> case hwmon_curr_crit:
>> - ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
>> - if (ret)
>> - return ret;
>> - if (uval & POWER_SETUP_I1_WATTS)
>> - return -ENODEV;
>> - *val =
>> mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
>> - SF_CURR, POWER_SETUP_I1_SHIFT);
>> - return 0;
>> + return xe_hwmon_power_curr_crit_read(hwmon, val, SF_CURR);
>> default:
>> return -EOPNOTSUPP;
>> }
>> @@ -393,12 +400,9 @@ xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32
>> attr, long *val)
>> static int
>> xe_hwmon_curr_write(struct xe_hwmon *hwmon, u32 attr, long val)
>> {
>> - u32 uval;
>> -
>> switch (attr) {
>> case hwmon_curr_crit:
>> - uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT,
>> SF_CURR);
>> - return xe_hwmon_pcode_write_i1(hwmon->gt, uval);
>> + return xe_hwmon_power_curr_crit_write(hwmon, val, SF_CURR);
>> default:
>> return -EOPNOTSUPP;
>> }
>> @@ -420,8 +424,6 @@ xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr,
>> long *val)
>> {
>> int ret;
>> - xe_device_mem_access_get(gt_to_xe(hwmon->gt));
>> -
>> switch (attr) {
>> case hwmon_in_input:
>> ret = xe_hwmon_get_voltage(hwmon, val);
>> @@ -430,8 +432,6 @@ xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr,
>> long *val)
>> ret = -EOPNOTSUPP;
>> }
>> - xe_device_mem_access_put(gt_to_xe(hwmon->gt));
>> -
>> return ret;
>> }
>> @@ -565,14 +565,13 @@ xe_hwmon_get_preregistration_info(struct
>> xe_device *xe)
>> struct xe_hwmon *hwmon = xe->hwmon;
>> long energy;
>> u32 val_sku_unit = 0;
>> - int ret;
>> - ret = xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT,
>> REG_READ, &val_sku_unit, 0, 0);
>> /*
>> * The contents of register PKG_POWER_SKU_UNIT do not change,
>> * so read it once and store the shift values.
>> */
>> - if (!ret) {
>> + if (xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT)) {
>> + xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT, REG_READ,
>> &val_sku_unit, 0, 0);
>> hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT,
>> val_sku_unit);
>> hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT,
>> val_sku_unit);
>> }
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [Intel-xe] [PATCH 1/2] drm/xe/hwmon: Protect hwmon rw attributes with hwmon_lock
@ 2023-10-16 7:33 ` Nilawar, Badal
0 siblings, 0 replies; 19+ messages in thread
From: Nilawar, Badal @ 2023-10-16 7:33 UTC (permalink / raw)
To: Gupta, Anshuman, intel-xe, linux-hwmon; +Cc: rodrigo.vivi
Hi Anshuman,
On 09-10-2023 19:06, Gupta, Anshuman wrote:
>
>
> On 10/6/2023 10:32 PM, Badal Nilawar wrote:
>> Take hwmon_lock while accessing hwmon rw attributes. For readonly
>> attributes its not required to take lock as reads are protected
>> by sysfs layer and therefore sequential.
>>
>> Cc: Ashutosh Dixit <ashutosh.dixit@intel.com>
>> Cc: Anshuman Gupta <anshuman.gupta@intel.com>
>> Signed-off-by: Badal Nilawar <badal.nilawar@intel.com>
>> ---
>> drivers/gpu/drm/xe/xe_hwmon.c | 143 +++++++++++++++++-----------------
>> 1 file changed, 71 insertions(+), 72 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_hwmon.c
>> b/drivers/gpu/drm/xe/xe_hwmon.c
>> index 9ac05994a967..391f9a8dd9d7 100644
>> --- a/drivers/gpu/drm/xe/xe_hwmon.c
>> +++ b/drivers/gpu/drm/xe/xe_hwmon.c
>> @@ -47,7 +47,7 @@ struct xe_hwmon_energy_info {
>> struct xe_hwmon {
>> struct device *hwmon_dev;
>> struct xe_gt *gt;
>> - struct mutex hwmon_lock; /* rmw operations*/
>> + struct mutex hwmon_lock; /* For rw attributes */
>> int scl_shift_power;
>> int scl_shift_energy;
>> struct xe_hwmon_energy_info ei; /* Energy info for
>> energy1_input */
>> @@ -95,47 +95,45 @@ static u32 xe_hwmon_get_reg(struct xe_hwmon
>> *hwmon, enum xe_hwmon_reg hwmon_reg)
>> return reg.raw;
>> }
>> -static int xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum
>> xe_hwmon_reg hwmon_reg,
>> - enum xe_hwmon_reg_operation operation, u32 *value,
>> - u32 clr, u32 set)
>> +static void xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum
>> xe_hwmon_reg hwmon_reg,
>> + enum xe_hwmon_reg_operation operation, u32 *value,
>> + u32 clr, u32 set)
>> {
>> struct xe_reg reg;
>> reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
>> if (!reg.raw)
>> - return -EOPNOTSUPP;
>> + return;
> Don't we need to report this as a warning?
> What is the possibility of the code path landing here?
Warning is added in xe_hwmon_get_reg when reg is invalid. Warning is not
needed when reg is not supported by platform. During runtime code path
will not fall here as visible functions are taking care of creating
hwmon entries only if regs are supported. So it doesn't make sense to
add warn here.
>> switch (operation) {
>> case REG_READ:
>> *value = xe_mmio_read32(hwmon->gt, reg);
>> - return 0;
>> + break;
>> case REG_WRITE:
>> xe_mmio_write32(hwmon->gt, reg, *value);
>> - return 0;
>> + break;
>> case REG_RMW:
>> *value = xe_mmio_rmw32(hwmon->gt, reg, clr, set);
>> - return 0;
>> + break;
>> default:
>> drm_warn(>_to_xe(hwmon->gt)->drm, "Invalid xe hwmon reg
>> operation: %d\n",
>> operation);
>> - return -EOPNOTSUPP;
>> + break;
>> }
>> }
>> -static int xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
>> - enum xe_hwmon_reg hwmon_reg, u64 *value)
>> +static void xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
>> + enum xe_hwmon_reg hwmon_reg, u64 *value)
> Isn't it possible to pass the length of the void *value to xe_hwmon_process_reg()
> so we can wrap this function in xe_hwmon_process_reg?
Yes, it's possible, but I feel it would be more useful if there were regs of
variable lengths, i.e. other than 64 or 32 bit. As of now hwmon regs are
32 and 64 bit lengths, so I preferred 2 separate functions. If you insist
I will wrap.
Regards,
Badal
>
>> {
>> struct xe_reg reg;
>> reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
>> if (!reg.raw)
>> - return -EOPNOTSUPP;
>> + return;
>> *value = xe_mmio_read64_2x32(hwmon->gt, reg);
>> -
>> - return 0;
>> }
>> #define PL1_DISABLE 0
>> @@ -146,16 +144,18 @@ static int xe_hwmon_process_reg_read64(struct
>> xe_hwmon *hwmon,
>> * same pattern for sysfs, allow arbitrary PL1 limits to be set but
>> display
>> * clamped values when read.
>> */
>> -static int xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
>> +static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
>> {
>> u32 reg_val;
>> u64 reg_val64, min, max;
>> + mutex_lock(&hwmon->hwmon_lock);
>> +
>> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ,
>> ®_val, 0, 0);
>> /* Check if PL1 limit is disabled */
>> if (!(reg_val & PKG_PWR_LIM_1_EN)) {
>> *value = PL1_DISABLE;
>> - return 0;
>> + goto unlock;
>> }
>> reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
>> @@ -169,14 +169,17 @@ static int xe_hwmon_power_max_read(struct
>> xe_hwmon *hwmon, long *value)
>> if (min && max)
>> *value = clamp_t(u64, *value, min, max);
>> -
>> - return 0;
>> +unlock:
>> + mutex_unlock(&hwmon->hwmon_lock);
>> }
>> static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
>> {
>> + int ret = 0;
>> u32 reg_val;
>> + mutex_lock(&hwmon->hwmon_lock);
>> +
>> /* Disable PL1 limit and verify, as limit cannot be disabled on
>> all platforms */
>> if (value == PL1_DISABLE) {
>> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW,
>> ®_val,
>> @@ -184,8 +187,10 @@ static int xe_hwmon_power_max_write(struct
>> xe_hwmon *hwmon, long value)
>> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ,
>> ®_val,
>> PKG_PWR_LIM_1_EN, 0);
>> - if (reg_val & PKG_PWR_LIM_1_EN)
>> - return -EOPNOTSUPP;
>> + if (reg_val & PKG_PWR_LIM_1_EN) {
>> + ret = -EOPNOTSUPP;
>> + goto unlock;
>> + }
>> }
>> /* Computation in 64-bits to avoid overflow. Round to nearest. */
>> @@ -194,19 +199,18 @@ static int xe_hwmon_power_max_write(struct
>> xe_hwmon *hwmon, long value)
>> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW, ®_val,
>> PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
>> -
>> - return 0;
>> +unlock:
>> + mutex_unlock(&hwmon->hwmon_lock);
>> + return ret;
>> }
>> -static int xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, long
>> *value)
>> +static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon,
>> long *value)
>> {
>> u32 reg_val;
>> xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ,
>> ®_val, 0, 0);
>> reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
>> *value = mul_u64_u32_shr(reg_val, SF_POWER,
>> hwmon->scl_shift_power);
>> -
>> - return 0;
>> }
>> /*
>> @@ -235,10 +239,6 @@ xe_hwmon_energy_get(struct xe_hwmon *hwmon, long
>> *energy)
>> struct xe_hwmon_energy_info *ei = &hwmon->ei;
>> u32 reg_val;
>> - xe_device_mem_access_get(gt_to_xe(hwmon->gt));
>> -
>> - mutex_lock(&hwmon->hwmon_lock);
>> -
>> xe_hwmon_process_reg(hwmon, REG_PKG_ENERGY_STATUS, REG_READ,
>> ®_val, 0, 0);
>> @@ -251,10 +251,6 @@ xe_hwmon_energy_get(struct xe_hwmon *hwmon, long
>> *energy)
>> *energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
>> hwmon->scl_shift_energy);
>> -
>> - mutex_unlock(&hwmon->hwmon_lock);
>> -
>> - xe_device_mem_access_put(gt_to_xe(hwmon->gt));
>> }
>> static const struct hwmon_channel_info *hwmon_info[] = {
>> @@ -284,6 +280,38 @@ static int xe_hwmon_pcode_write_i1(struct xe_gt
>> *gt, u32 uval)
>> uval);
>> }
>> +static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, long
>> *value, u32 scale_factor)
>> +{
>> + int ret;
>> + u32 uval;
>> +
>> + mutex_lock(&hwmon->hwmon_lock);
>> +
>> + ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
>> + if (ret)
>> + goto unlock;
>> +
>> + *value = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK,
>> uval),
>> + scale_factor, POWER_SETUP_I1_SHIFT);
>> +unlock:
>> + mutex_unlock(&hwmon->hwmon_lock);
>> + return ret;
>> +}
>> +
>> +static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon,
>> long value, u32 scale_factor)
>> +{
>> + int ret;
>> + u32 uval;
>> +
>> + mutex_lock(&hwmon->hwmon_lock);
>> +
>> + uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT,
>> scale_factor);
>> + ret = xe_hwmon_pcode_write_i1(hwmon->gt, uval);
>> +
>> + mutex_unlock(&hwmon->hwmon_lock);
>> + return ret;
>> +}
>> +
>> static int xe_hwmon_get_voltage(struct xe_hwmon *hwmon, long *value)
>> {
>> u32 reg_val;
>> @@ -317,23 +345,15 @@ xe_hwmon_power_is_visible(struct xe_hwmon
>> *hwmon, u32 attr, int chan)
>> static int
>> xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int chan, long
>> *val)
>> {
>> - int ret;
>> - u32 uval;
>> -
>> switch (attr) {
>> case hwmon_power_max:
>> - return xe_hwmon_power_max_read(hwmon, val);
>> + xe_hwmon_power_max_read(hwmon, val);
>> + return 0;
>> case hwmon_power_rated_max:
>> - return xe_hwmon_power_rated_max_read(hwmon, val);
>> - case hwmon_power_crit:
>> - ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
>> - if (ret)
>> - return ret;
>> - if (!(uval & POWER_SETUP_I1_WATTS))
>> - return -ENODEV;
>> - *val =
>> mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
>> - SF_POWER, POWER_SETUP_I1_SHIFT);
>> + xe_hwmon_power_rated_max_read(hwmon, val);
>> return 0;
>> + case hwmon_power_crit:
>> + return xe_hwmon_power_curr_crit_read(hwmon, val, SF_POWER);
>> default:
>> return -EOPNOTSUPP;
>> }
>> @@ -342,14 +362,11 @@ xe_hwmon_power_read(struct xe_hwmon *hwmon, u32
>> attr, int chan, long *val)
>> static int
>> xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int chan,
>> long val)
>> {
>> - u32 uval;
>> -
>> switch (attr) {
>> case hwmon_power_max:
>> return xe_hwmon_power_max_write(hwmon, val);
>> case hwmon_power_crit:
>> - uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT,
>> SF_POWER);
>> - return xe_hwmon_pcode_write_i1(hwmon->gt, uval);
>> + return xe_hwmon_power_curr_crit_write(hwmon, val, SF_POWER);
>> default:
>> return -EOPNOTSUPP;
>> }
>> @@ -372,19 +389,9 @@ xe_hwmon_curr_is_visible(const struct xe_hwmon
>> *hwmon, u32 attr)
>> static int
>> xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, long *val)
>> {
>> - int ret;
>> - u32 uval;
>> -
>> switch (attr) {
>> case hwmon_curr_crit:
>> - ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
>> - if (ret)
>> - return ret;
>> - if (uval & POWER_SETUP_I1_WATTS)
>> - return -ENODEV;
>> - *val =
>> mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
>> - SF_CURR, POWER_SETUP_I1_SHIFT);
>> - return 0;
>> + return xe_hwmon_power_curr_crit_read(hwmon, val, SF_CURR);
>> default:
>> return -EOPNOTSUPP;
>> }
>> @@ -393,12 +400,9 @@ xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32
>> attr, long *val)
>> static int
>> xe_hwmon_curr_write(struct xe_hwmon *hwmon, u32 attr, long val)
>> {
>> - u32 uval;
>> -
>> switch (attr) {
>> case hwmon_curr_crit:
>> - uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT,
>> SF_CURR);
>> - return xe_hwmon_pcode_write_i1(hwmon->gt, uval);
>> + return xe_hwmon_power_curr_crit_write(hwmon, val, SF_CURR);
>> default:
>> return -EOPNOTSUPP;
>> }
>> @@ -420,8 +424,6 @@ xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr,
>> long *val)
>> {
>> int ret;
>> - xe_device_mem_access_get(gt_to_xe(hwmon->gt));
>> -
>> switch (attr) {
>> case hwmon_in_input:
>> ret = xe_hwmon_get_voltage(hwmon, val);
>> @@ -430,8 +432,6 @@ xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr,
>> long *val)
>> ret = -EOPNOTSUPP;
>> }
>> - xe_device_mem_access_put(gt_to_xe(hwmon->gt));
>> -
>> return ret;
>> }
>> @@ -565,14 +565,13 @@ xe_hwmon_get_preregistration_info(struct
>> xe_device *xe)
>> struct xe_hwmon *hwmon = xe->hwmon;
>> long energy;
>> u32 val_sku_unit = 0;
>> - int ret;
>> - ret = xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT,
>> REG_READ, &val_sku_unit, 0, 0);
>> /*
>> * The contents of register PKG_POWER_SKU_UNIT do not change,
>> * so read it once and store the shift values.
>> */
>> - if (!ret) {
>> + if (xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT)) {
>> + xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT, REG_READ,
>> &val_sku_unit, 0, 0);
>> hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT,
>> val_sku_unit);
>> hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT,
>> val_sku_unit);
>> }
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [PATCH 2/2] drm/xe/hwmon: Expose power1_max_interval
2023-10-11 5:59 ` [Intel-xe] " Gupta, Anshuman
@ 2023-10-16 7:39 ` Nilawar, Badal
-1 siblings, 0 replies; 19+ messages in thread
From: Nilawar, Badal @ 2023-10-16 7:39 UTC (permalink / raw)
To: Gupta, Anshuman, intel-xe, linux-hwmon
Cc: Dixit, Ashutosh, andi.shyti, Tauro, Riana, Vivi, Rodrigo
Hi Anshuman,
On 11-10-2023 11:29, Gupta, Anshuman wrote:
>
>
>> -----Original Message-----
>> From: Nilawar, Badal <badal.nilawar@intel.com>
>> Sent: Friday, October 6, 2023 10:32 PM
>> To: intel-xe@lists.freedesktop.org; linux-hwmon@vger.kernel.org
>> Cc: Gupta, Anshuman <anshuman.gupta@intel.com>; Dixit, Ashutosh
>> <ashutosh.dixit@intel.com>; andi.shyti@linux.intel.com; Tauro, Riana
>> <riana.tauro@intel.com>; Vivi, Rodrigo <rodrigo.vivi@intel.com>
>> Subject: [PATCH 2/2] drm/xe/hwmon: Expose power1_max_interval
>>
>> Expose power1_max_interval, that is the tau corresponding to PL1, as a
>> custom hwmon attribute. Some bit manipulation is needed because of the
>> format of PKG_PWR_LIM_1_TIME in PACKAGE_RAPL_LIMIT register (1.x *
>> power(2,y))
>>
>> v2: Get rpm wake ref while accessing power1_max_interval
>> v3: %s/hwmon/xe_hwmon/
>> v4:
>> - As power1_max_interval is rw attr take lock in read function as well
>> - Refine comment about val to fix point conversion (Andi)
>> - Update kernel version and date in doc
>>
>> Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
>> Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
>> Signed-off-by: Badal Nilawar <badal.nilawar@intel.com>
>> ---
>> .../ABI/testing/sysfs-driver-intel-xe-hwmon | 9 ++
>> drivers/gpu/drm/xe/regs/xe_mchbar_regs.h | 8 +
>> drivers/gpu/drm/xe/xe_hwmon.c | 142 +++++++++++++++++-
>> 3 files changed, 158 insertions(+), 1 deletion(-)
>>
>> diff --git a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
>> b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
>> index 1a7a6c23e141..8c321bc9dc04 100644
>> --- a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
>> +++ b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
>> @@ -59,3 +59,12 @@ Contact: intel-xe@lists.freedesktop.org
>> Description: RO. Energy input of device in microjoules.
>>
>> Only supported for particular Intel xe graphics platforms.
>> +
>> +What: /sys/devices/.../hwmon/hwmon<i>/power1_max_interval
>> +Date: October 2023
>> +KernelVersion: 6.6
>> +Contact: intel-xe@lists.freedesktop.org
>> +Description: RW. Sustained power limit interval (Tau in PL1/Tau) in
>> + milliseconds over which sustained power is averaged.
>> +
>> + Only supported for particular Intel xe graphics platforms.
>> diff --git a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
>> b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
>> index d8ecbe1858d1..519dd1067a19 100644
>> --- a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
>> +++ b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
>> @@ -22,15 +22,23 @@
>> #define PKG_TDP GENMASK_ULL(14, 0)
>> #define PKG_MIN_PWR GENMASK_ULL(30,
>> 16)
>> #define PKG_MAX_PWR GENMASK_ULL(46,
>> 32)
>> +#define PKG_MAX_WIN GENMASK_ULL(54,
>> 48)
>> +#define PKG_MAX_WIN_X GENMASK_ULL(54, 53)
>> +#define PKG_MAX_WIN_Y GENMASK_ULL(52, 48)
>> +
>>
>> #define PCU_CR_PACKAGE_POWER_SKU_UNIT
>> XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5938)
>> #define PKG_PWR_UNIT REG_GENMASK(3, 0)
>> #define PKG_ENERGY_UNIT REG_GENMASK(12, 8)
>> +#define PKG_TIME_UNIT REG_GENMASK(19,
>> 16)
>>
>> #define PCU_CR_PACKAGE_ENERGY_STATUS
>> XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x593c)
>>
>> #define PCU_CR_PACKAGE_RAPL_LIMIT
>> XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x59a0)
>> #define PKG_PWR_LIM_1 REG_GENMASK(14,
>> 0)
>> #define PKG_PWR_LIM_1_EN REG_BIT(15)
>> +#define PKG_PWR_LIM_1_TIME REG_GENMASK(23,
>> 17)
>> +#define PKG_PWR_LIM_1_TIME_X REG_GENMASK(23,
>> 22)
>> +#define PKG_PWR_LIM_1_TIME_Y REG_GENMASK(21,
>> 17)
>>
>> #endif /* _XE_MCHBAR_REGS_H_ */
>> diff --git a/drivers/gpu/drm/xe/xe_hwmon.c
>> b/drivers/gpu/drm/xe/xe_hwmon.c index 391f9a8dd9d7..867481c91826
>> 100644
>> --- a/drivers/gpu/drm/xe/xe_hwmon.c
>> +++ b/drivers/gpu/drm/xe/xe_hwmon.c
>> @@ -38,6 +38,7 @@ enum xe_hwmon_reg_operation {
>> #define SF_CURR 1000 /* milliamperes */
>> #define SF_VOLTAGE 1000 /* millivolts */
>> #define SF_ENERGY 1000000 /* microjoules */
>> +#define SF_TIME 1000 /* milliseconds */
>>
>> struct xe_hwmon_energy_info {
>> u32 reg_val_prev;
>> @@ -50,6 +51,7 @@ struct xe_hwmon {
>> struct mutex hwmon_lock; /* For rw attributes */
>> int scl_shift_power;
>> int scl_shift_energy;
>> + int scl_shift_time;
>> struct xe_hwmon_energy_info ei; /* Energy info for
>> energy1_input */
>> };
>>
>> @@ -253,6 +255,142 @@ xe_hwmon_energy_get(struct xe_hwmon
>> *hwmon, long *energy)
>> hwmon->scl_shift_energy);
>> }
>>
>> +static ssize_t
>> +xe_hwmon_power1_max_interval_show(struct device *dev, struct
>> device_attribute *attr,
>> + char *buf)
>> +{
>> + struct xe_hwmon *hwmon = dev_get_drvdata(dev);
>> + u32 r, x, y, x_w = 2; /* 2 bits */
>> + u64 tau4, out;
>> +
>> + xe_device_mem_access_get(gt_to_xe(hwmon->gt));
>> +
>> + mutex_lock(&hwmon->hwmon_lock);
>> +
>> + xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT,
>> + REG_READ, &r, 0, 0);
>> +
>> + mutex_unlock(&hwmon->hwmon_lock);
>> +
>> + xe_device_mem_access_put(gt_to_xe(hwmon->gt));
>> +
>> + x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
>> + y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);
> Need space here.
ok
>> + /*
>> + * tau = 1.x * power(2,y), x = bits(23:22), y = bits(21:17)
>> + * = (4 | x) << (y - 2)
>> + * where (y - 2) ensures a 1.x fixed point representation of 1.x
>> + * However because y can be < 2, we compute
>> + * tau4 = (4 | x) << y
>> + * but add 2 when doing the final right shift to account for units
>> + */
> Please reformat the comment to make it readable.
ok
>> + tau4 = ((1 << x_w) | x) << y;
> Need space here.
ok
>> + /* val in hwmon interface units (millisec) */
>> + out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time +
>> x_w);
>> +
>> + return sysfs_emit(buf, "%llu\n", out); }
>> +
>> +static ssize_t
>> +xe_hwmon_power1_max_interval_store(struct device *dev, struct
>> device_attribute *attr,
>> + const char *buf, size_t count)
>> +{
>> + struct xe_hwmon *hwmon = dev_get_drvdata(dev);
>> + u32 x, y, rxy, x_w = 2; /* 2 bits */
>> + u64 tau4, r, max_win;
>> + unsigned long val;
>> + int ret;
>> +
>> + ret = kstrtoul(buf, 0, &val);
>> + if (ret)
>> + return ret;
>> +
>> + /*
>> + * Max HW supported tau in '1.x * power(2,y)' format, x = 0, y = 0x12
>> + * The hwmon->scl_shift_time default of 0xa results in a max tau of
>> 256 seconds
>> + */
>> +#define PKG_MAX_WIN_DEFAULT 0x12ull
> Is it a universal truth that we are always going to use these default MAX_WIN values?
Not sure about future platforms.
> AFAIR from the i915 review, it was due to some h/w limitation on DG2 that we were forced to use
> these default values instead of reading them from registers. If this is true, future
> platforms will probably address such a h/w limitation; is it worth leaving a code comment here?
Sure will add comment here.
>> +
>> + /*
>> + * val must be < max in hwmon interface units. The steps below are
>> + * explained in xe_hwmon_power1_max_interval_show()
>> + */
>> + r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
>> + x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
>> + y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
>> + tau4 = ((1 << x_w) | x) << y;
>> + max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time
>> + x_w);
>> +
>> + if (val > max_win)
>> + return -EINVAL;
>> +
>> + /* val in hw units */
>> + val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time,
>> SF_TIME);
> Need Space here.
> Please leave a line of space before a code comment.
Ok
Regards,
Badal
> Thanks,
> Anshuman Gupta.
>> + /*
>> + * Convert val to 1.x * power(2,y)
>> + * y = ilog2(val)
>> + * x = (val - (1 << y)) >> (y - 2)
>> + */
>> + if (!val) {
>> + y = 0;
>> + x = 0;
>> + } else {
>> + y = ilog2(val);
>> + x = (val - (1ul << y)) << x_w >> y;
>> + }
>> +
>> + rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) |
>> +REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);
>> +
>> + xe_device_mem_access_get(gt_to_xe(hwmon->gt));
>> +
>> + mutex_lock(&hwmon->hwmon_lock);
>> +
>> + xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW,
>> (u32 *)&r,
>> + PKG_PWR_LIM_1_TIME, rxy);
>> +
>> + mutex_unlock(&hwmon->hwmon_lock);
>> +
>> + xe_device_mem_access_put(gt_to_xe(hwmon->gt));
>> +
>> + return count;
>> +}
>> +
>> +static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
>> + xe_hwmon_power1_max_interval_show,
>> + xe_hwmon_power1_max_interval_store, 0);
>> +
>> +static struct attribute *hwmon_attributes[] = {
>> + &sensor_dev_attr_power1_max_interval.dev_attr.attr,
>> + NULL
>> +};
>> +
>> +static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
>> + struct attribute *attr, int index) {
>> + struct device *dev = kobj_to_dev(kobj);
>> + struct xe_hwmon *hwmon = dev_get_drvdata(dev);
>> + int ret = 0;
>> +
>> + xe_device_mem_access_get(gt_to_xe(hwmon->gt));
>> +
>> + if (attr == &sensor_dev_attr_power1_max_interval.dev_attr.attr)
>> + ret = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT) ?
>> attr->mode : 0;
>> +
>> + xe_device_mem_access_put(gt_to_xe(hwmon->gt));
>> +
>> + return ret;
>> +}
>> +
>> +static const struct attribute_group hwmon_attrgroup = {
>> + .attrs = hwmon_attributes,
>> + .is_visible = xe_hwmon_attributes_visible, };
>> +
>> +static const struct attribute_group *hwmon_groups[] = {
>> + &hwmon_attrgroup,
>> + NULL
>> +};
>> +
>> static const struct hwmon_channel_info *hwmon_info[] = {
>> HWMON_CHANNEL_INFO(power, HWMON_P_MAX |
>> HWMON_P_RATED_MAX | HWMON_P_CRIT),
>> HWMON_CHANNEL_INFO(curr, HWMON_C_CRIT), @@ -574,6
>> +712,7 @@ xe_hwmon_get_preregistration_info(struct xe_device *xe)
>> xe_hwmon_process_reg(hwmon,
>> REG_PKG_POWER_SKU_UNIT, REG_READ, &val_sku_unit, 0, 0);
>> hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT,
>> val_sku_unit);
>> hwmon->scl_shift_energy =
>> REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
>> + hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT,
>> val_sku_unit);
>> }
>>
>> /*
>> @@ -620,7 +759,8 @@ void xe_hwmon_register(struct xe_device *xe)
>> /* hwmon_dev points to device hwmon<i> */
>> hwmon->hwmon_dev =
>> devm_hwmon_device_register_with_info(dev, "xe", hwmon,
>>
>> &hwmon_chip_info,
>> - NULL);
>> +
>> hwmon_groups);
>> +
>> if (IS_ERR(hwmon->hwmon_dev)) {
>> drm_warn(&xe->drm, "Failed to register xe hwmon (%pe)\n",
>> hwmon->hwmon_dev);
>> xe->hwmon = NULL;
>> --
>> 2.25.1
>
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [Intel-xe] [PATCH 2/2] drm/xe/hwmon: Expose power1_max_interval
@ 2023-10-16 7:39 ` Nilawar, Badal
0 siblings, 0 replies; 19+ messages in thread
From: Nilawar, Badal @ 2023-10-16 7:39 UTC (permalink / raw)
To: Gupta, Anshuman, intel-xe, linux-hwmon; +Cc: Vivi, Rodrigo
Hi Anshuman,
On 11-10-2023 11:29, Gupta, Anshuman wrote:
>
>
>> -----Original Message-----
>> From: Nilawar, Badal <badal.nilawar@intel.com>
>> Sent: Friday, October 6, 2023 10:32 PM
>> To: intel-xe@lists.freedesktop.org; linux-hwmon@vger.kernel.org
>> Cc: Gupta, Anshuman <anshuman.gupta@intel.com>; Dixit, Ashutosh
>> <ashutosh.dixit@intel.com>; andi.shyti@linux.intel.com; Tauro, Riana
>> <riana.tauro@intel.com>; Vivi, Rodrigo <rodrigo.vivi@intel.com>
>> Subject: [PATCH 2/2] drm/xe/hwmon: Expose power1_max_interval
>>
>> Expose power1_max_interval, that is the tau corresponding to PL1, as a
>> custom hwmon attribute. Some bit manipulation is needed because of the
>> format of PKG_PWR_LIM_1_TIME in PACKAGE_RAPL_LIMIT register (1.x *
>> power(2,y))
>>
>> v2: Get rpm wake ref while accessing power1_max_interval
>> v3: %s/hwmon/xe_hwmon/
>> v4:
>> - As power1_max_interval is rw attr take lock in read function as well
>> - Refine comment about val to fix point conversion (Andi)
>> - Update kernel version and date in doc
>>
>> Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
>> Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
>> Signed-off-by: Badal Nilawar <badal.nilawar@intel.com>
>> ---
>> .../ABI/testing/sysfs-driver-intel-xe-hwmon | 9 ++
>> drivers/gpu/drm/xe/regs/xe_mchbar_regs.h | 8 +
>> drivers/gpu/drm/xe/xe_hwmon.c | 142 +++++++++++++++++-
>> 3 files changed, 158 insertions(+), 1 deletion(-)
>>
>> diff --git a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
>> b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
>> index 1a7a6c23e141..8c321bc9dc04 100644
>> --- a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
>> +++ b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
>> @@ -59,3 +59,12 @@ Contact: intel-xe@lists.freedesktop.org
>> Description: RO. Energy input of device in microjoules.
>>
>> Only supported for particular Intel xe graphics platforms.
>> +
>> +What: /sys/devices/.../hwmon/hwmon<i>/power1_max_interval
>> +Date: October 2023
>> +KernelVersion: 6.6
>> +Contact: intel-xe@lists.freedesktop.org
>> +Description: RW. Sustained power limit interval (Tau in PL1/Tau) in
>> + milliseconds over which sustained power is averaged.
>> +
>> + Only supported for particular Intel xe graphics platforms.
>> diff --git a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
>> b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
>> index d8ecbe1858d1..519dd1067a19 100644
>> --- a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
>> +++ b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
>> @@ -22,15 +22,23 @@
>> #define PKG_TDP GENMASK_ULL(14, 0)
>> #define PKG_MIN_PWR GENMASK_ULL(30,
>> 16)
>> #define PKG_MAX_PWR GENMASK_ULL(46,
>> 32)
>> +#define PKG_MAX_WIN GENMASK_ULL(54,
>> 48)
>> +#define PKG_MAX_WIN_X GENMASK_ULL(54, 53)
>> +#define PKG_MAX_WIN_Y GENMASK_ULL(52, 48)
>> +
>>
>> #define PCU_CR_PACKAGE_POWER_SKU_UNIT
>> XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5938)
>> #define PKG_PWR_UNIT REG_GENMASK(3, 0)
>> #define PKG_ENERGY_UNIT REG_GENMASK(12, 8)
>> +#define PKG_TIME_UNIT REG_GENMASK(19,
>> 16)
>>
>> #define PCU_CR_PACKAGE_ENERGY_STATUS
>> XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x593c)
>>
>> #define PCU_CR_PACKAGE_RAPL_LIMIT
>> XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x59a0)
>> #define PKG_PWR_LIM_1 REG_GENMASK(14,
>> 0)
>> #define PKG_PWR_LIM_1_EN REG_BIT(15)
>> +#define PKG_PWR_LIM_1_TIME REG_GENMASK(23,
>> 17)
>> +#define PKG_PWR_LIM_1_TIME_X REG_GENMASK(23,
>> 22)
>> +#define PKG_PWR_LIM_1_TIME_Y REG_GENMASK(21,
>> 17)
>>
>> #endif /* _XE_MCHBAR_REGS_H_ */
>> diff --git a/drivers/gpu/drm/xe/xe_hwmon.c
>> b/drivers/gpu/drm/xe/xe_hwmon.c index 391f9a8dd9d7..867481c91826
>> 100644
>> --- a/drivers/gpu/drm/xe/xe_hwmon.c
>> +++ b/drivers/gpu/drm/xe/xe_hwmon.c
>> @@ -38,6 +38,7 @@ enum xe_hwmon_reg_operation {
>> #define SF_CURR 1000 /* milliamperes */
>> #define SF_VOLTAGE 1000 /* millivolts */
>> #define SF_ENERGY 1000000 /* microjoules */
>> +#define SF_TIME 1000 /* milliseconds */
>>
>> struct xe_hwmon_energy_info {
>> u32 reg_val_prev;
>> @@ -50,6 +51,7 @@ struct xe_hwmon {
>> struct mutex hwmon_lock; /* For rw attributes */
>> int scl_shift_power;
>> int scl_shift_energy;
>> + int scl_shift_time;
>> struct xe_hwmon_energy_info ei; /* Energy info for
>> energy1_input */
>> };
>>
>> @@ -253,6 +255,142 @@ xe_hwmon_energy_get(struct xe_hwmon
>> *hwmon, long *energy)
>> hwmon->scl_shift_energy);
>> }
>>
>> +static ssize_t
>> +xe_hwmon_power1_max_interval_show(struct device *dev, struct
>> device_attribute *attr,
>> + char *buf)
>> +{
>> + struct xe_hwmon *hwmon = dev_get_drvdata(dev);
>> + u32 r, x, y, x_w = 2; /* 2 bits */
>> + u64 tau4, out;
>> +
>> + xe_device_mem_access_get(gt_to_xe(hwmon->gt));
>> +
>> + mutex_lock(&hwmon->hwmon_lock);
>> +
>> + xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT,
>> + REG_READ, &r, 0, 0);
>> +
>> + mutex_unlock(&hwmon->hwmon_lock);
>> +
>> + xe_device_mem_access_put(gt_to_xe(hwmon->gt));
>> +
>> + x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
>> + y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);
> Need space here.
ok
>> + /*
>> + * tau = 1.x * power(2,y), x = bits(23:22), y = bits(21:17)
>> + * = (4 | x) << (y - 2)
>> + * where (y - 2) ensures a 1.x fixed point representation of 1.x
>> + * However because y can be < 2, we compute
>> + * tau4 = (4 | x) << y
>> + * but add 2 when doing the final right shift to account for units
>> + */
> Please reformat the comment to make it readable.
ok
>> + tau4 = ((1 << x_w) | x) << y;
> Need space here.
ok
>> + /* val in hwmon interface units (millisec) */
>> + out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time +
>> x_w);
>> +
>> + return sysfs_emit(buf, "%llu\n", out); }
>> +
>> +static ssize_t
>> +xe_hwmon_power1_max_interval_store(struct device *dev, struct
>> device_attribute *attr,
>> + const char *buf, size_t count)
>> +{
>> + struct xe_hwmon *hwmon = dev_get_drvdata(dev);
>> + u32 x, y, rxy, x_w = 2; /* 2 bits */
>> + u64 tau4, r, max_win;
>> + unsigned long val;
>> + int ret;
>> +
>> + ret = kstrtoul(buf, 0, &val);
>> + if (ret)
>> + return ret;
>> +
>> + /*
>> + * Max HW supported tau in '1.x * power(2,y)' format, x = 0, y = 0x12
>> + * The hwmon->scl_shift_time default of 0xa results in a max tau of
>> 256 seconds
>> + */
>> +#define PKG_MAX_WIN_DEFAULT 0x12ull
> Is it a universal truth that we are always going to use these default MAX_WIN values?
Not sure about future platforms.
> AFAIR from the i915 review, it was due to some h/w limitation on DG2 that we were forced to use
> these default values instead of reading them from registers. If this is true, future
> platforms will probably address such a h/w limitation; is it worth leaving a code comment here?
Sure will add comment here.
>> +
>> + /*
>> + * val must be < max in hwmon interface units. The steps below are
>> + * explained in xe_hwmon_power1_max_interval_show()
>> + */
>> + r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
>> + x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
>> + y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
>> + tau4 = ((1 << x_w) | x) << y;
>> + max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time
>> + x_w);
>> +
>> + if (val > max_win)
>> + return -EINVAL;
>> +
>> + /* val in hw units */
>> + val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time,
>> SF_TIME);
> Need Space here.
> Please leave a line of space before a code comment.
Ok
Regards,
Badal
> Thanks,
> Anshuman Gupta.
>> + /*
>> + * Convert val to 1.x * power(2,y)
>> + * y = ilog2(val)
>> + * x = (val - (1 << y)) >> (y - 2)
>> + */
>> + if (!val) {
>> + y = 0;
>> + x = 0;
>> + } else {
>> + y = ilog2(val);
>> + x = (val - (1ul << y)) << x_w >> y;
>> + }
>> +
>> + rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) |
>> +REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);
>> +
>> + xe_device_mem_access_get(gt_to_xe(hwmon->gt));
>> +
>> + mutex_lock(&hwmon->hwmon_lock);
>> +
>> + xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW,
>> (u32 *)&r,
>> + PKG_PWR_LIM_1_TIME, rxy);
>> +
>> + mutex_unlock(&hwmon->hwmon_lock);
>> +
>> + xe_device_mem_access_put(gt_to_xe(hwmon->gt));
>> +
>> + return count;
>> +}
>> +
>> +static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
>> + xe_hwmon_power1_max_interval_show,
>> + xe_hwmon_power1_max_interval_store, 0);
>> +
>> +static struct attribute *hwmon_attributes[] = {
>> + &sensor_dev_attr_power1_max_interval.dev_attr.attr,
>> + NULL
>> +};
>> +
>> +static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
>> + struct attribute *attr, int index) {
>> + struct device *dev = kobj_to_dev(kobj);
>> + struct xe_hwmon *hwmon = dev_get_drvdata(dev);
>> + int ret = 0;
>> +
>> + xe_device_mem_access_get(gt_to_xe(hwmon->gt));
>> +
>> + if (attr == &sensor_dev_attr_power1_max_interval.dev_attr.attr)
>> + ret = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT) ?
>> attr->mode : 0;
>> +
>> + xe_device_mem_access_put(gt_to_xe(hwmon->gt));
>> +
>> + return ret;
>> +}
>> +
>> +static const struct attribute_group hwmon_attrgroup = {
>> + .attrs = hwmon_attributes,
>> + .is_visible = xe_hwmon_attributes_visible, };
>> +
>> +static const struct attribute_group *hwmon_groups[] = {
>> + &hwmon_attrgroup,
>> + NULL
>> +};
>> +
>> static const struct hwmon_channel_info *hwmon_info[] = {
>> HWMON_CHANNEL_INFO(power, HWMON_P_MAX |
>> HWMON_P_RATED_MAX | HWMON_P_CRIT),
>> HWMON_CHANNEL_INFO(curr, HWMON_C_CRIT), @@ -574,6
>> +712,7 @@ xe_hwmon_get_preregistration_info(struct xe_device *xe)
>> xe_hwmon_process_reg(hwmon,
>> REG_PKG_POWER_SKU_UNIT, REG_READ, &val_sku_unit, 0, 0);
>> hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT,
>> val_sku_unit);
>> hwmon->scl_shift_energy =
>> REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
>> + hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT,
>> val_sku_unit);
>> }
>>
>> /*
>> @@ -620,7 +759,8 @@ void xe_hwmon_register(struct xe_device *xe)
>> /* hwmon_dev points to device hwmon<i> */
>> hwmon->hwmon_dev =
>> devm_hwmon_device_register_with_info(dev, "xe", hwmon,
>>
>> &hwmon_chip_info,
>> - NULL);
>> +
>> hwmon_groups);
>> +
>> if (IS_ERR(hwmon->hwmon_dev)) {
>> drm_warn(&xe->drm, "Failed to register xe hwmon (%pe)\n",
>> hwmon->hwmon_dev);
>> xe->hwmon = NULL;
>> --
>> 2.25.1
>
^ permalink raw reply [flat|nested] 19+ messages in thread
* RE: [PATCH 1/2] drm/xe/hwmon: Protect hwmon rw attributes with hwmon_lock
2023-10-16 7:33 ` [Intel-xe] " Nilawar, Badal
@ 2023-10-16 7:51 ` Gupta, Anshuman
-1 siblings, 0 replies; 19+ messages in thread
From: Gupta, Anshuman @ 2023-10-16 7:51 UTC (permalink / raw)
To: Nilawar, Badal, intel-xe, linux-hwmon
Cc: Dixit, Ashutosh, andi.shyti, Tauro, Riana, Vivi, Rodrigo
> -----Original Message-----
> From: Nilawar, Badal <badal.nilawar@intel.com>
> Sent: Monday, October 16, 2023 1:03 PM
> To: Gupta, Anshuman <anshuman.gupta@intel.com>; intel-
> xe@lists.freedesktop.org; linux-hwmon@vger.kernel.org
> Cc: Dixit, Ashutosh <ashutosh.dixit@intel.com>; andi.shyti@linux.intel.com;
> Tauro, Riana <riana.tauro@intel.com>; Vivi, Rodrigo <rodrigo.vivi@intel.com>
> Subject: Re: [PATCH 1/2] drm/xe/hwmon: Protect hwmon rw attributes with
> hwmon_lock
>
> Hi Anshuman,
>
> On 09-10-2023 19:06, Gupta, Anshuman wrote:
> >
> >
> > On 10/6/2023 10:32 PM, Badal Nilawar wrote:
> >> Take hwmon_lock while accessing hwmon rw attributes. For readonly
> >> attributes its not required to take lock as reads are protected by
> >> sysfs layer and therefore sequential.
> >>
> >> Cc: Ashutosh Dixit <ashutosh.dixit@intel.com>
> >> Cc: Anshuman Gupta <anshuman.gupta@intel.com>
> >> Signed-off-by: Badal Nilawar <badal.nilawar@intel.com>
> >> ---
> >> drivers/gpu/drm/xe/xe_hwmon.c | 143
> >> +++++++++++++++++-----------------
> >> 1 file changed, 71 insertions(+), 72 deletions(-)
> >>
> >> diff --git a/drivers/gpu/drm/xe/xe_hwmon.c
> >> b/drivers/gpu/drm/xe/xe_hwmon.c index 9ac05994a967..391f9a8dd9d7
> >> 100644
> >> --- a/drivers/gpu/drm/xe/xe_hwmon.c
> >> +++ b/drivers/gpu/drm/xe/xe_hwmon.c
> >> @@ -47,7 +47,7 @@ struct xe_hwmon_energy_info {
> >> struct xe_hwmon {
> >> struct device *hwmon_dev;
> >> struct xe_gt *gt;
> >> - struct mutex hwmon_lock; /* rmw operations*/
> >> + struct mutex hwmon_lock; /* For rw attributes */
> >> int scl_shift_power;
> >> int scl_shift_energy;
> >> struct xe_hwmon_energy_info ei; /* Energy info for
> >> energy1_input */ @@ -95,47 +95,45 @@ static u32
> >> xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
> hwmon_reg)
> >> return reg.raw;
> >> }
> >> -static int xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum
> >> xe_hwmon_reg hwmon_reg,
> >> - enum xe_hwmon_reg_operation operation, u32 *value,
> >> - u32 clr, u32 set)
> >> +static void xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum
> >> xe_hwmon_reg hwmon_reg,
> >> + enum xe_hwmon_reg_operation operation, u32 *value,
> >> + u32 clr, u32 set)
> >> {
> >> struct xe_reg reg;
> >> reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
> >> if (!reg.raw)
> >> - return -EOPNOTSUPP;
> >> + return;
> > Don't we need to report this as warning?
> > What is the possibility of the code path landing here?
> Warning is added in xe_hwmon_get_reg when reg is invalid. Warning is not
> needed when reg is not supported by platform. During runtime code path will
> not fall here as visible functions are taking care of creating hwmon entries only
> if regs are supported. So it doesn't make sense to add warn here.
> >> switch (operation) {
> >> case REG_READ:
> >> *value = xe_mmio_read32(hwmon->gt, reg);
> >> - return 0;
> >> + break;
> >> case REG_WRITE:
> >> xe_mmio_write32(hwmon->gt, reg, *value);
> >> - return 0;
> >> + break;
> >> case REG_RMW:
> >> *value = xe_mmio_rmw32(hwmon->gt, reg, clr, set);
> >> - return 0;
> >> + break;
> >> default:
> >> drm_warn(&gt_to_xe(hwmon->gt)->drm, "Invalid xe hwmon reg
> >> operation: %d\n",
> >> operation);
> >> - return -EOPNOTSUPP;
> >> + break;
> >> }
> >> }
> >> -static int xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
> >> - enum xe_hwmon_reg hwmon_reg, u64 *value)
> >> +static void xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
> >> + enum xe_hwmon_reg hwmon_reg, u64 *value)
> > Isn't it possible to pass the len of the void *value to xe_hwmon_process_reg()
> > so we can wrap this function in xe_hwmon_process_reg()?
> Yes, it's possible, but I feel it would be more useful if there were regs of variable
> lengths, i.e. other than 64 or 32 bit. As of now hwmon regs are
> 32 and 64 bit lengths, so I preferred 2 separate functions. If you insist I will
> wrap.
Another thing: do we have any consumer of REG_WRITE? I don't see any caller (it is dead code). Considering the above, there is no real benefit in keeping the abstraction with
xe_hwmon_process_reg() and xe_hwmon_process_reg_read64(). If keeping xe_hwmon_process_reg_read64() separate is what keeps things simple, then it defeats the purpose of
xe_hwmon_process_reg().
IMHO the only benefit xe_hwmon_process_reg() provides is scalability for future platforms where, e.g., forcewake is needed.
If the intention is to keep it simple, then calling xe_mmio_{read32, read64_2x32, rmw32} directly is much simpler than having multiple wrappers?
Br,
Anshuman Gupta.
>
> Regards,
> Badal
> >
> >> {
> >> struct xe_reg reg;
> >> reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
> >> if (!reg.raw)
> >> - return -EOPNOTSUPP;
> >> + return;
> >> *value = xe_mmio_read64_2x32(hwmon->gt, reg);
> >> -
> >> - return 0;
> >> }
> >> #define PL1_DISABLE 0
> >> @@ -146,16 +144,18 @@ static int
> xe_hwmon_process_reg_read64(struct
> >> xe_hwmon *hwmon,
> >> * same pattern for sysfs, allow arbitrary PL1 limits to be set but
> >> display
> >> * clamped values when read.
> >> */
> >> -static int xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long
> >> *value)
> >> +static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long
> >> +*value)
> >> {
> >> u32 reg_val;
> >> u64 reg_val64, min, max;
> >> + mutex_lock(&hwmon->hwmon_lock);
> >> +
> >> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ,
> >> &reg_val, 0, 0);
> >> /* Check if PL1 limit is disabled */
> >> if (!(reg_val & PKG_PWR_LIM_1_EN)) {
> >> *value = PL1_DISABLE;
> >> - return 0;
> >> + goto unlock;
> >> }
> >> reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val); @@ -169,14
> >> +169,17 @@ static int xe_hwmon_power_max_read(struct xe_hwmon
> *hwmon,
> >> long *value)
> >> if (min && max)
> >> *value = clamp_t(u64, *value, min, max);
> >> -
> >> - return 0;
> >> +unlock:
> >> + mutex_unlock(&hwmon->hwmon_lock);
> >> }
> >> static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long
> >> value)
> >> {
> >> + int ret = 0;
> >> u32 reg_val;
> >> + mutex_lock(&hwmon->hwmon_lock);
> >> +
> >> /* Disable PL1 limit and verify, as limit cannot be disabled on
> >> all platforms */
> >> if (value == PL1_DISABLE) {
> >> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW,
> >> ®_val, @@ -184,8 +187,10 @@ static int
> >> xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
> >> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ,
> >> &reg_val,
> >> PKG_PWR_LIM_1_EN, 0);
> >> - if (reg_val & PKG_PWR_LIM_1_EN)
> >> - return -EOPNOTSUPP;
> >> + if (reg_val & PKG_PWR_LIM_1_EN) {
> >> + ret = -EOPNOTSUPP;
> >> + goto unlock;
> >> + }
> >> }
> >> /* Computation in 64-bits to avoid overflow. Round to nearest.
> >> */ @@ -194,19 +199,18 @@ static int
> xe_hwmon_power_max_write(struct
> >> xe_hwmon *hwmon, long value)
> >> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW,
> >> &reg_val,
> >> PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
> >> -
> >> - return 0;
> >> +unlock:
> >> + mutex_unlock(&hwmon->hwmon_lock);
> >> + return ret;
> >> }
> >> -static int xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon,
> >> long
> >> *value)
> >> +static void xe_hwmon_power_rated_max_read(struct xe_hwmon
> *hwmon,
> >> long *value)
> >> {
> >> u32 reg_val;
> >> xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ,
> >> &reg_val, 0, 0);
> >> reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
> >> *value = mul_u64_u32_shr(reg_val, SF_POWER,
> >> hwmon->scl_shift_power);
> >> -
> >> - return 0;
> >> }
> >> /*
> >> @@ -235,10 +239,6 @@ xe_hwmon_energy_get(struct xe_hwmon
> *hwmon, long
> >> *energy)
> >> struct xe_hwmon_energy_info *ei = &hwmon->ei;
> >> u32 reg_val;
> >> - xe_device_mem_access_get(gt_to_xe(hwmon->gt));
> >> -
> >> - mutex_lock(&hwmon->hwmon_lock);
> >> -
> >> xe_hwmon_process_reg(hwmon, REG_PKG_ENERGY_STATUS,
> REG_READ,
> >> &reg_val, 0, 0);
> >> @@ -251,10 +251,6 @@ xe_hwmon_energy_get(struct xe_hwmon
> *hwmon, long
> >> *energy)
> >> *energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
> >> hwmon->scl_shift_energy);
> >> -
> >> - mutex_unlock(&hwmon->hwmon_lock);
> >> -
> >> - xe_device_mem_access_put(gt_to_xe(hwmon->gt));
> >> }
> >> static const struct hwmon_channel_info *hwmon_info[] = { @@ -284,6
> >> +280,38 @@ static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32
> >> uval)
> >> uval);
> >> }
> >> +static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon,
> >> +long
> >> *value, u32 scale_factor)
> >> +{
> >> + int ret;
> >> + u32 uval;
> >> +
> >> + mutex_lock(&hwmon->hwmon_lock);
> >> +
> >> + ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
> >> + if (ret)
> >> + goto unlock;
> >> +
> >> + *value =
> mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK,
> >> uval),
> >> + scale_factor, POWER_SETUP_I1_SHIFT);
> >> +unlock:
> >> + mutex_unlock(&hwmon->hwmon_lock);
> >> + return ret;
> >> +}
> >> +
> >> +static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon,
> >> long value, u32 scale_factor)
> >> +{
> >> + int ret;
> >> + u32 uval;
> >> +
> >> + mutex_lock(&hwmon->hwmon_lock);
> >> +
> >> + uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT,
> >> scale_factor);
> >> + ret = xe_hwmon_pcode_write_i1(hwmon->gt, uval);
> >> +
> >> + mutex_unlock(&hwmon->hwmon_lock);
> >> + return ret;
> >> +}
> >> +
> >> static int xe_hwmon_get_voltage(struct xe_hwmon *hwmon, long
> >> *value)
> >> {
> >> u32 reg_val;
> >> @@ -317,23 +345,15 @@ xe_hwmon_power_is_visible(struct xe_hwmon
> >> *hwmon, u32 attr, int chan)
> >> static int
> >> xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int chan,
> >> long
> >> *val)
> >> {
> >> - int ret;
> >> - u32 uval;
> >> -
> >> switch (attr) {
> >> case hwmon_power_max:
> >> - return xe_hwmon_power_max_read(hwmon, val);
> >> + xe_hwmon_power_max_read(hwmon, val);
> >> + return 0;
> >> case hwmon_power_rated_max:
> >> - return xe_hwmon_power_rated_max_read(hwmon, val);
> >> - case hwmon_power_crit:
> >> - ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
> >> - if (ret)
> >> - return ret;
> >> - if (!(uval & POWER_SETUP_I1_WATTS))
> >> - return -ENODEV;
> >> - *val =
> >> mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
> >> - SF_POWER, POWER_SETUP_I1_SHIFT);
> >> + xe_hwmon_power_rated_max_read(hwmon, val);
> >> return 0;
> >> + case hwmon_power_crit:
> >> + return xe_hwmon_power_curr_crit_read(hwmon, val, SF_POWER);
> >> default:
> >> return -EOPNOTSUPP;
> >> }
> >> @@ -342,14 +362,11 @@ xe_hwmon_power_read(struct xe_hwmon
> *hwmon, u32
> >> attr, int chan, long *val)
> >> static int
> >> xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int chan,
> >> long val)
> >> {
> >> - u32 uval;
> >> -
> >> switch (attr) {
> >> case hwmon_power_max:
> >> return xe_hwmon_power_max_write(hwmon, val);
> >> case hwmon_power_crit:
> >> - uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT,
> >> SF_POWER);
> >> - return xe_hwmon_pcode_write_i1(hwmon->gt, uval);
> >> + return xe_hwmon_power_curr_crit_write(hwmon, val, SF_POWER);
> >> default:
> >> return -EOPNOTSUPP;
> >> }
> >> @@ -372,19 +389,9 @@ xe_hwmon_curr_is_visible(const struct
> xe_hwmon
> >> *hwmon, u32 attr)
> >> static int
> >> xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, long *val)
> >> {
> >> - int ret;
> >> - u32 uval;
> >> -
> >> switch (attr) {
> >> case hwmon_curr_crit:
> >> - ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
> >> - if (ret)
> >> - return ret;
> >> - if (uval & POWER_SETUP_I1_WATTS)
> >> - return -ENODEV;
> >> - *val =
> >> mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
> >> - SF_CURR, POWER_SETUP_I1_SHIFT);
> >> - return 0;
> >> + return xe_hwmon_power_curr_crit_read(hwmon, val, SF_CURR);
> >> default:
> >> return -EOPNOTSUPP;
> >> }
> >> @@ -393,12 +400,9 @@ xe_hwmon_curr_read(struct xe_hwmon
> *hwmon, u32
> >> attr, long *val)
> >> static int
> >> xe_hwmon_curr_write(struct xe_hwmon *hwmon, u32 attr, long val)
> >> {
> >> - u32 uval;
> >> -
> >> switch (attr) {
> >> case hwmon_curr_crit:
> >> - uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT,
> >> SF_CURR);
> >> - return xe_hwmon_pcode_write_i1(hwmon->gt, uval);
> >> + return xe_hwmon_power_curr_crit_write(hwmon, val, SF_CURR);
> >> default:
> >> return -EOPNOTSUPP;
> >> }
> >> @@ -420,8 +424,6 @@ xe_hwmon_in_read(struct xe_hwmon *hwmon,
> u32
> >> attr, long *val)
> >> {
> >> int ret;
> >> - xe_device_mem_access_get(gt_to_xe(hwmon->gt));
> >> -
> >> switch (attr) {
> >> case hwmon_in_input:
> >> ret = xe_hwmon_get_voltage(hwmon, val); @@ -430,8 +432,6 @@
> >> xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, long *val)
> >> ret = -EOPNOTSUPP;
> >> }
> >> - xe_device_mem_access_put(gt_to_xe(hwmon->gt));
> >> -
> >> return ret;
> >> }
> >> @@ -565,14 +565,13 @@ xe_hwmon_get_preregistration_info(struct
> >> xe_device *xe)
> >> struct xe_hwmon *hwmon = xe->hwmon;
> >> long energy;
> >> u32 val_sku_unit = 0;
> >> - int ret;
> >> - ret = xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT,
> >> REG_READ, &val_sku_unit, 0, 0);
> >> /*
> >> * The contents of register PKG_POWER_SKU_UNIT do not change,
> >> * so read it once and store the shift values.
> >> */
> >> - if (!ret) {
> >> + if (xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT)) {
> >> + xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT,
> >> +REG_READ,
> >> &val_sku_unit, 0, 0);
> >> hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT,
> >> val_sku_unit);
> >> hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT,
> >> val_sku_unit);
> >> }
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [Intel-xe] [PATCH 1/2] drm/xe/hwmon: Protect hwmon rw attributes with hwmon_lock
@ 2023-10-16 7:51 ` Gupta, Anshuman
0 siblings, 0 replies; 19+ messages in thread
From: Gupta, Anshuman @ 2023-10-16 7:51 UTC (permalink / raw)
To: Nilawar, Badal, intel-xe, linux-hwmon; +Cc: Vivi, Rodrigo
> -----Original Message-----
> From: Nilawar, Badal <badal.nilawar@intel.com>
> Sent: Monday, October 16, 2023 1:03 PM
> To: Gupta, Anshuman <anshuman.gupta@intel.com>; intel-
> xe@lists.freedesktop.org; linux-hwmon@vger.kernel.org
> Cc: Dixit, Ashutosh <ashutosh.dixit@intel.com>; andi.shyti@linux.intel.com;
> Tauro, Riana <riana.tauro@intel.com>; Vivi, Rodrigo <rodrigo.vivi@intel.com>
> Subject: Re: [PATCH 1/2] drm/xe/hwmon: Protect hwmon rw attributes with
> hwmon_lock
>
> Hi Anshuman,
>
> On 09-10-2023 19:06, Gupta, Anshuman wrote:
> >
> >
> > On 10/6/2023 10:32 PM, Badal Nilawar wrote:
> >> Take hwmon_lock while accessing hwmon rw attributes. For readonly
> >> attributes its not required to take lock as reads are protected by
> >> sysfs layer and therefore sequential.
> >>
> >> Cc: Ashutosh Dixit <ashutosh.dixit@intel.com>
> >> Cc: Anshuman Gupta <anshuman.gupta@intel.com>
> >> Signed-off-by: Badal Nilawar <badal.nilawar@intel.com>
> >> ---
> >> drivers/gpu/drm/xe/xe_hwmon.c | 143
> >> +++++++++++++++++-----------------
> >> 1 file changed, 71 insertions(+), 72 deletions(-)
> >>
> >> diff --git a/drivers/gpu/drm/xe/xe_hwmon.c
> >> b/drivers/gpu/drm/xe/xe_hwmon.c index 9ac05994a967..391f9a8dd9d7
> >> 100644
> >> --- a/drivers/gpu/drm/xe/xe_hwmon.c
> >> +++ b/drivers/gpu/drm/xe/xe_hwmon.c
> >> @@ -47,7 +47,7 @@ struct xe_hwmon_energy_info {
> >> struct xe_hwmon {
> >> struct device *hwmon_dev;
> >> struct xe_gt *gt;
> >> - struct mutex hwmon_lock; /* rmw operations*/
> >> + struct mutex hwmon_lock; /* For rw attributes */
> >> int scl_shift_power;
> >> int scl_shift_energy;
> >> struct xe_hwmon_energy_info ei; /* Energy info for
> >> energy1_input */ @@ -95,47 +95,45 @@ static u32
> >> xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
> hwmon_reg)
> >> return reg.raw;
> >> }
> >> -static int xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum
> >> xe_hwmon_reg hwmon_reg,
> >> - enum xe_hwmon_reg_operation operation, u32 *value,
> >> - u32 clr, u32 set)
> >> +static void xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum
> >> xe_hwmon_reg hwmon_reg,
> >> + enum xe_hwmon_reg_operation operation, u32 *value,
> >> + u32 clr, u32 set)
> >> {
> >> struct xe_reg reg;
> >> reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
> >> if (!reg.raw)
> >> - return -EOPNOTSUPP;
> >> + return;
> > Don't we need to report this as warning?
> > What is the possibility of the code path landing here?
> Warning is added in xe_hwmon_get_reg when reg is invalid. Warning is not
> needed when reg is not supported by platform. During runtime code path will
> not fall here as visible functions are taking care of creating hwmon entries only
> if regs are supported. So it doesn't make sense to add warn here.
> >> switch (operation) {
> >> case REG_READ:
> >> *value = xe_mmio_read32(hwmon->gt, reg);
> >> - return 0;
> >> + break;
> >> case REG_WRITE:
> >> xe_mmio_write32(hwmon->gt, reg, *value);
> >> - return 0;
> >> + break;
> >> case REG_RMW:
> >> *value = xe_mmio_rmw32(hwmon->gt, reg, clr, set);
> >> - return 0;
> >> + break;
> >> default:
> >> drm_warn(&gt_to_xe(hwmon->gt)->drm, "Invalid xe hwmon reg
> >> operation: %d\n",
> >> operation);
> >> - return -EOPNOTSUPP;
> >> + break;
> >> }
> >> }
> >> -static int xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
> >> - enum xe_hwmon_reg hwmon_reg, u64 *value)
> >> +static void xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
> >> + enum xe_hwmon_reg hwmon_reg, u64 *value)Isn't it
> >> possible to pass len of void * value to xe_hwmon_process_reg()
> > so we can wrap this fucntion in xe_hwmon_process_reg ?
> Yes, it's possible, but I feel it would be more useful if there were regs of variable
> lengths, i.e. other than 64 or 32 bit. As of now hwmon regs are
> 32 and 64 bit lengths, so I preferred 2 separate functions. If you insist I will
> wrap.
Another thing: do we have any consumer of REG_WRITE? I don't see any caller (it is dead code). Considering the above, there is no real benefit in keeping the abstraction with
xe_hwmon_process_reg() and xe_hwmon_process_reg_read64(). If keeping xe_hwmon_process_reg_read64() separate is what keeps things simple, then it defeats the purpose of
xe_hwmon_process_reg().
IMHO the only benefit xe_hwmon_process_reg() provides is scalability for future platforms where, e.g., forcewake is needed.
If the intention is to keep it simple, then calling xe_mmio_{read32, read64_2x32, rmw32} directly is much simpler than having multiple wrappers?
Br,
Anshuman Gupta.
>
> Regards,
> Badal
> >
> >> {
> >> struct xe_reg reg;
> >> reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
> >> if (!reg.raw)
> >> - return -EOPNOTSUPP;
> >> + return;
> >> *value = xe_mmio_read64_2x32(hwmon->gt, reg);
> >> -
> >> - return 0;
> >> }
> >> #define PL1_DISABLE 0
> >> @@ -146,16 +144,18 @@ static int
> xe_hwmon_process_reg_read64(struct
> >> xe_hwmon *hwmon,
> >> * same pattern for sysfs, allow arbitrary PL1 limits to be set but
> >> display
> >> * clamped values when read.
> >> */
> >> -static int xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long
> >> *value)
> >> +static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long
> >> +*value)
> >> {
> >> u32 reg_val;
> >> u64 reg_val64, min, max;
> >> + mutex_lock(&hwmon->hwmon_lock);
> >> +
> >> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ,
> >> &reg_val, 0, 0);
> >> /* Check if PL1 limit is disabled */
> >> if (!(reg_val & PKG_PWR_LIM_1_EN)) {
> >> *value = PL1_DISABLE;
> >> - return 0;
> >> + goto unlock;
> >> }
> >> reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val); @@ -169,14
> >> +169,17 @@ static int xe_hwmon_power_max_read(struct xe_hwmon
> *hwmon,
> >> long *value)
> >> if (min && max)
> >> *value = clamp_t(u64, *value, min, max);
> >> -
> >> - return 0;
> >> +unlock:
> >> + mutex_unlock(&hwmon->hwmon_lock);
> >> }
> >> static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long
> >> value)
> >> {
> >> + int ret = 0;
> >> u32 reg_val;
> >> + mutex_lock(&hwmon->hwmon_lock);
> >> +
> >> /* Disable PL1 limit and verify, as limit cannot be disabled on
> >> all platforms */
> >> if (value == PL1_DISABLE) {
> >> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW,
> >> ®_val, @@ -184,8 +187,10 @@ static int
> >> xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
> >> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ,
> >> &reg_val,
> >> PKG_PWR_LIM_1_EN, 0);
> >> - if (reg_val & PKG_PWR_LIM_1_EN)
> >> - return -EOPNOTSUPP;
> >> + if (reg_val & PKG_PWR_LIM_1_EN) {
> >> + ret = -EOPNOTSUPP;
> >> + goto unlock;
> >> + }
> >> }
> >> /* Computation in 64-bits to avoid overflow. Round to nearest.
> >> */ @@ -194,19 +199,18 @@ static int
> xe_hwmon_power_max_write(struct
> >> xe_hwmon *hwmon, long value)
> >> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW,
> >> &reg_val,
> >> PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
> >> -
> >> - return 0;
> >> +unlock:
> >> + mutex_unlock(&hwmon->hwmon_lock);
> >> + return ret;
> >> }
> >> -static int xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon,
> >> long
> >> *value)
> >> +static void xe_hwmon_power_rated_max_read(struct xe_hwmon
> *hwmon,
> >> long *value)
> >> {
> >> u32 reg_val;
> >> xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ,
> >> &reg_val, 0, 0);
> >> reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
> >> *value = mul_u64_u32_shr(reg_val, SF_POWER,
> >> hwmon->scl_shift_power);
> >> -
> >> - return 0;
> >> }
> >> /*
> >> @@ -235,10 +239,6 @@ xe_hwmon_energy_get(struct xe_hwmon
> *hwmon, long
> >> *energy)
> >> struct xe_hwmon_energy_info *ei = &hwmon->ei;
> >> u32 reg_val;
> >> - xe_device_mem_access_get(gt_to_xe(hwmon->gt));
> >> -
> >> - mutex_lock(&hwmon->hwmon_lock);
> >> -
> >> xe_hwmon_process_reg(hwmon, REG_PKG_ENERGY_STATUS,
> REG_READ,
> >> &reg_val, 0, 0);
> >> @@ -251,10 +251,6 @@ xe_hwmon_energy_get(struct xe_hwmon
> *hwmon, long
> >> *energy)
> >> *energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
> >> hwmon->scl_shift_energy);
> >> -
> >> - mutex_unlock(&hwmon->hwmon_lock);
> >> -
> >> - xe_device_mem_access_put(gt_to_xe(hwmon->gt));
> >> }
> >> static const struct hwmon_channel_info *hwmon_info[] = { @@ -284,6
> >> +280,38 @@ static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32
> >> uval)
> >> uval);
> >> }
> >> +static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon,
> >> +long
> >> *value, u32 scale_factor)
> >> +{
> >> + int ret;
> >> + u32 uval;
> >> +
> >> + mutex_lock(&hwmon->hwmon_lock);
> >> +
> >> + ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
> >> + if (ret)
> >> + goto unlock;
> >> +
> >> + *value =
> mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK,
> >> uval),
> >> + scale_factor, POWER_SETUP_I1_SHIFT);
> >> +unlock:
> >> + mutex_unlock(&hwmon->hwmon_lock);
> >> + return ret;
> >> +}
> >> +
> >> +static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon,
> >> long value, u32 scale_factor)
> >> +{
> >> + int ret;
> >> + u32 uval;
> >> +
> >> + mutex_lock(&hwmon->hwmon_lock);
> >> +
> >> + uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT,
> >> scale_factor);
> >> + ret = xe_hwmon_pcode_write_i1(hwmon->gt, uval);
> >> +
> >> + mutex_unlock(&hwmon->hwmon_lock);
> >> + return ret;
> >> +}
> >> +
> >> static int xe_hwmon_get_voltage(struct xe_hwmon *hwmon, long
> >> *value)
> >> {
> >> u32 reg_val;
> >> @@ -317,23 +345,15 @@ xe_hwmon_power_is_visible(struct xe_hwmon
> >> *hwmon, u32 attr, int chan)
> >> static int
> >> xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int chan,
> >> long
> >> *val)
> >> {
> >> - int ret;
> >> - u32 uval;
> >> -
> >> switch (attr) {
> >> case hwmon_power_max:
> >> - return xe_hwmon_power_max_read(hwmon, val);
> >> + xe_hwmon_power_max_read(hwmon, val);
> >> + return 0;
> >> case hwmon_power_rated_max:
> >> - return xe_hwmon_power_rated_max_read(hwmon, val);
> >> - case hwmon_power_crit:
> >> - ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
> >> - if (ret)
> >> - return ret;
> >> - if (!(uval & POWER_SETUP_I1_WATTS))
> >> - return -ENODEV;
> >> - *val =
> >> mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
> >> - SF_POWER, POWER_SETUP_I1_SHIFT);
> >> + xe_hwmon_power_rated_max_read(hwmon, val);
> >> return 0;
> >> + case hwmon_power_crit:
> >> + return xe_hwmon_power_curr_crit_read(hwmon, val, SF_POWER);
> >> default:
> >> return -EOPNOTSUPP;
> >> }
> >> @@ -342,14 +362,11 @@ xe_hwmon_power_read(struct xe_hwmon
> *hwmon, u32
> >> attr, int chan, long *val)
> >> static int
> >> xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int chan,
> >> long val)
> >> {
> >> - u32 uval;
> >> -
> >> switch (attr) {
> >> case hwmon_power_max:
> >> return xe_hwmon_power_max_write(hwmon, val);
> >> case hwmon_power_crit:
> >> - uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT,
> >> SF_POWER);
> >> - return xe_hwmon_pcode_write_i1(hwmon->gt, uval);
> >> + return xe_hwmon_power_curr_crit_write(hwmon, val, SF_POWER);
> >> default:
> >> return -EOPNOTSUPP;
> >> }
> >> @@ -372,19 +389,9 @@ xe_hwmon_curr_is_visible(const struct
> xe_hwmon
> >> *hwmon, u32 attr)
> >> static int
> >> xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, long *val)
> >> {
> >> - int ret;
> >> - u32 uval;
> >> -
> >> switch (attr) {
> >> case hwmon_curr_crit:
> >> - ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
> >> - if (ret)
> >> - return ret;
> >> - if (uval & POWER_SETUP_I1_WATTS)
> >> - return -ENODEV;
> >> - *val =
> >> mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
> >> - SF_CURR, POWER_SETUP_I1_SHIFT);
> >> - return 0;
> >> + return xe_hwmon_power_curr_crit_read(hwmon, val, SF_CURR);
> >> default:
> >> return -EOPNOTSUPP;
> >> }
> >> @@ -393,12 +400,9 @@ xe_hwmon_curr_read(struct xe_hwmon
> *hwmon, u32
> >> attr, long *val)
> >> static int
> >> xe_hwmon_curr_write(struct xe_hwmon *hwmon, u32 attr, long val)
> >> {
> >> - u32 uval;
> >> -
> >> switch (attr) {
> >> case hwmon_curr_crit:
> >> - uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT,
> >> SF_CURR);
> >> - return xe_hwmon_pcode_write_i1(hwmon->gt, uval);
> >> + return xe_hwmon_power_curr_crit_write(hwmon, val, SF_CURR);
> >> default:
> >> return -EOPNOTSUPP;
> >> }
> >> @@ -420,8 +424,6 @@ xe_hwmon_in_read(struct xe_hwmon *hwmon,
> u32
> >> attr, long *val)
> >> {
> >> int ret;
> >> - xe_device_mem_access_get(gt_to_xe(hwmon->gt));
> >> -
> >> switch (attr) {
> >> case hwmon_in_input:
> >> ret = xe_hwmon_get_voltage(hwmon, val); @@ -430,8 +432,6 @@
> >> xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, long *val)
> >> ret = -EOPNOTSUPP;
> >> }
> >> - xe_device_mem_access_put(gt_to_xe(hwmon->gt));
> >> -
> >> return ret;
> >> }
> >> @@ -565,14 +565,13 @@ xe_hwmon_get_preregistration_info(struct
> >> xe_device *xe)
> >> struct xe_hwmon *hwmon = xe->hwmon;
> >> long energy;
> >> u32 val_sku_unit = 0;
> >> - int ret;
> >> - ret = xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT,
> >> REG_READ, &val_sku_unit, 0, 0);
> >> /*
> >> * The contents of register PKG_POWER_SKU_UNIT do not change,
> >> * so read it once and store the shift values.
> >> */
> >> - if (!ret) {
> >> + if (xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT)) {
> >> + xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT,
> >> +REG_READ,
> >> &val_sku_unit, 0, 0);
> >> hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT,
> >> val_sku_unit);
> >> hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT,
> >> val_sku_unit);
> >> }
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [PATCH 1/2] drm/xe/hwmon: Protect hwmon rw attributes with hwmon_lock
2023-10-16 7:51 ` [Intel-xe] " Gupta, Anshuman
@ 2023-10-16 13:37 ` Nilawar, Badal
-1 siblings, 0 replies; 19+ messages in thread
From: Nilawar, Badal @ 2023-10-16 13:37 UTC (permalink / raw)
To: Gupta, Anshuman, intel-xe, linux-hwmon
Cc: Dixit, Ashutosh, andi.shyti, Tauro, Riana, Vivi, Rodrigo
On 16-10-2023 13:21, Gupta, Anshuman wrote:
>
>
>> -----Original Message-----
>> From: Nilawar, Badal <badal.nilawar@intel.com>
>> Sent: Monday, October 16, 2023 1:03 PM
>> To: Gupta, Anshuman <anshuman.gupta@intel.com>; intel-
>> xe@lists.freedesktop.org; linux-hwmon@vger.kernel.org
>> Cc: Dixit, Ashutosh <ashutosh.dixit@intel.com>; andi.shyti@linux.intel.com;
>> Tauro, Riana <riana.tauro@intel.com>; Vivi, Rodrigo <rodrigo.vivi@intel.com>
>> Subject: Re: [PATCH 1/2] drm/xe/hwmon: Protect hwmon rw attributes with
>> hwmon_lock
>>
>> Hi Anshuman,
>>
>> On 09-10-2023 19:06, Gupta, Anshuman wrote:
>>>
>>>
>>> On 10/6/2023 10:32 PM, Badal Nilawar wrote:
>>>> Take hwmon_lock while accessing hwmon rw attributes. For readonly
>>>> attributes its not required to take lock as reads are protected by
>>>> sysfs layer and therefore sequential.
>>>>
>>>> Cc: Ashutosh Dixit <ashutosh.dixit@intel.com>
>>>> Cc: Anshuman Gupta <anshuman.gupta@intel.com>
>>>> Signed-off-by: Badal Nilawar <badal.nilawar@intel.com>
>>>> ---
>>>> drivers/gpu/drm/xe/xe_hwmon.c | 143
>>>> +++++++++++++++++-----------------
>>>> 1 file changed, 71 insertions(+), 72 deletions(-)
>>>>
>>>> diff --git a/drivers/gpu/drm/xe/xe_hwmon.c
>>>> b/drivers/gpu/drm/xe/xe_hwmon.c index 9ac05994a967..391f9a8dd9d7
>>>> 100644
>>>> --- a/drivers/gpu/drm/xe/xe_hwmon.c
>>>> +++ b/drivers/gpu/drm/xe/xe_hwmon.c
>>>> @@ -47,7 +47,7 @@ struct xe_hwmon_energy_info {
>>>> struct xe_hwmon {
>>>> struct device *hwmon_dev;
>>>> struct xe_gt *gt;
>>>> - struct mutex hwmon_lock; /* rmw operations*/
>>>> + struct mutex hwmon_lock; /* For rw attributes */
>>>> int scl_shift_power;
>>>> int scl_shift_energy;
>>>> struct xe_hwmon_energy_info ei; /* Energy info for
>>>> energy1_input */ @@ -95,47 +95,45 @@ static u32
>>>> xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
>> hwmon_reg)
>>>> return reg.raw;
>>>> }
>>>> -static int xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum
>>>> xe_hwmon_reg hwmon_reg,
>>>> - enum xe_hwmon_reg_operation operation, u32 *value,
>>>> - u32 clr, u32 set)
>>>> +static void xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum
>>>> xe_hwmon_reg hwmon_reg,
>>>> + enum xe_hwmon_reg_operation operation, u32 *value,
>>>> + u32 clr, u32 set)
>>>> {
>>>> struct xe_reg reg;
>>>> reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
>>>> if (!reg.raw)
>>>> - return -EOPNOTSUPP;
>>>> + return;
>>> Don't we need to report this as a warning?
>>> What is the possibility of the code path landing here?
>> A warning is added in xe_hwmon_get_reg when the reg is invalid. A warning is
>> not needed when the reg is not supported by the platform. During runtime the code
>> path will not fall here, as the visible functions take care of creating hwmon entries
>> only if the regs are supported. So it doesn't make sense to add a warn here.
>>>> switch (operation) {
>>>> case REG_READ:
>>>> *value = xe_mmio_read32(hwmon->gt, reg);
>>>> - return 0;
>>>> + break;
>>>> case REG_WRITE:
>>>> xe_mmio_write32(hwmon->gt, reg, *value);
>>>> - return 0;
>>>> + break;
>>>> case REG_RMW:
>>>> *value = xe_mmio_rmw32(hwmon->gt, reg, clr, set);
>>>> - return 0;
>>>> + break;
>>>> default:
>>>> drm_warn(&gt_to_xe(hwmon->gt)->drm, "Invalid xe hwmon reg
>>>> operation: %d\n",
>>>> operation);
>>>> - return -EOPNOTSUPP;
>>>> + break;
>>>> }
>>>> }
>>>> -static int xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
>>>> - enum xe_hwmon_reg hwmon_reg, u64 *value)
>>>> +static void xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
>>>> + enum xe_hwmon_reg hwmon_reg, u64 *value)
>>> Isn't it possible to pass the len of the void *value to xe_hwmon_process_reg()
>>> so we can wrap this function in xe_hwmon_process_reg()?
>> Yes, it's possible, but I feel it would be more useful if there were regs of variable
>> lengths, i.e. other than 64 or 32 bit. As of now hwmon regs are
>> 32 and 64 bit lengths, so I preferred 2 separate functions. If you insist I will
>> wrap.
> Another thing: do we have any consumer of REG_WRITE? I don't see any caller (it is dead code); considering the above, there is no real benefit of keeping the abstraction with
Yes, REG_WRITE is not used; I will remove it.
> xe_hwmon_process_reg() and hwmon_process_reg_read64(). If keeping hwmon_process_reg_read64() is simple then it is defeating the purpose of
> xe_hwmon_process_reg().
I don't completely agree on this. hwmon_process_reg_read64 is looking
simple since it is handling only 1 operation, i.e. read64. Considering it
may get scaled in the future, I should have written it as
hwmon_process_reg64() and passed the REG_READ64 operation as an argument.
> IMHO the only benefit xe_hwmon_process_reg() provides scalability for future platforms like forcewake is needed,
> if intention is to keep it simple then Calling xe_mmio_{read32, read64_2x32, xe_mmio_rmw32} directly is much simpler than having multiple wrappers ?
You mean use
xe_mmio_{read32, read64_2x32, xe_mmio_rmw32}(hwmon->gt,
xe_hwmon_get_reg(hwmon, hwmon_reg)); ?
Regards,
Badal
>
> Br,
> Anshuman Gupta.
>
>>
>> Regards,
>> Badal
>>>
>>>> {
>>>> struct xe_reg reg;
>>>> reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
>>>> if (!reg.raw)
>>>> - return -EOPNOTSUPP;
>>>> + return;
>>>> *value = xe_mmio_read64_2x32(hwmon->gt, reg);
>>>> -
>>>> - return 0;
>>>> }
>>>> #define PL1_DISABLE 0
>>>> @@ -146,16 +144,18 @@ static int
>> xe_hwmon_process_reg_read64(struct
>>>> xe_hwmon *hwmon,
>>>> * same pattern for sysfs, allow arbitrary PL1 limits to be set but
>>>> display
>>>> * clamped values when read.
>>>> */
>>>> -static int xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long
>>>> *value)
>>>> +static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long
>>>> +*value)
>>>> {
>>>> u32 reg_val;
>>>> u64 reg_val64, min, max;
>>>> + mutex_lock(&hwmon->hwmon_lock);
>>>> +
>>>> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ,
>>>> &reg_val, 0, 0);
>>>> /* Check if PL1 limit is disabled */
>>>> if (!(reg_val & PKG_PWR_LIM_1_EN)) {
>>>> *value = PL1_DISABLE;
>>>> - return 0;
>>>> + goto unlock;
>>>> }
>>>> reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val); @@ -169,14
>>>> +169,17 @@ static int xe_hwmon_power_max_read(struct xe_hwmon
>> *hwmon,
>>>> long *value)
>>>> if (min && max)
>>>> *value = clamp_t(u64, *value, min, max);
>>>> -
>>>> - return 0;
>>>> +unlock:
>>>> + mutex_unlock(&hwmon->hwmon_lock);
>>>> }
>>>> static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long
>>>> value)
>>>> {
>>>> + int ret = 0;
>>>> u32 reg_val;
>>>> + mutex_lock(&hwmon->hwmon_lock);
>>>> +
>>>> /* Disable PL1 limit and verify, as limit cannot be disabled on
>>>> all platforms */
>>>> if (value == PL1_DISABLE) {
>>>> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW,
>>>> &reg_val, @@ -184,8 +187,10 @@ static int
>>>> xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
>>>> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ,
>>>> &reg_val,
>>>> PKG_PWR_LIM_1_EN, 0);
>>>> - if (reg_val & PKG_PWR_LIM_1_EN)
>>>> - return -EOPNOTSUPP;
>>>> + if (reg_val & PKG_PWR_LIM_1_EN) {
>>>> + ret = -EOPNOTSUPP;
>>>> + goto unlock;
>>>> + }
>>>> }
>>>> /* Computation in 64-bits to avoid overflow. Round to nearest.
>>>> */ @@ -194,19 +199,18 @@ static int
>> xe_hwmon_power_max_write(struct
>>>> xe_hwmon *hwmon, long value)
>>>> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW,
>>>> &reg_val,
>>>> PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
>>>> -
>>>> - return 0;
>>>> +unlock:
>>>> + mutex_unlock(&hwmon->hwmon_lock);
>>>> + return ret;
>>>> }
>>>> -static int xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon,
>>>> long
>>>> *value)
>>>> +static void xe_hwmon_power_rated_max_read(struct xe_hwmon
>> *hwmon,
>>>> long *value)
>>>> {
>>>> u32 reg_val;
>>>> xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ,
>>>> &reg_val, 0, 0);
>>>> reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
>>>> *value = mul_u64_u32_shr(reg_val, SF_POWER,
>>>> hwmon->scl_shift_power);
>>>> -
>>>> - return 0;
>>>> }
>>>> /*
>>>> @@ -235,10 +239,6 @@ xe_hwmon_energy_get(struct xe_hwmon
>> *hwmon, long
>>>> *energy)
>>>> struct xe_hwmon_energy_info *ei = &hwmon->ei;
>>>> u32 reg_val;
>>>> - xe_device_mem_access_get(gt_to_xe(hwmon->gt));
>>>> -
>>>> - mutex_lock(&hwmon->hwmon_lock);
>>>> -
>>>> xe_hwmon_process_reg(hwmon, REG_PKG_ENERGY_STATUS,
>> REG_READ,
>>>> &reg_val, 0, 0);
>>>> @@ -251,10 +251,6 @@ xe_hwmon_energy_get(struct xe_hwmon
>> *hwmon, long
>>>> *energy)
>>>> *energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
>>>> hwmon->scl_shift_energy);
>>>> -
>>>> - mutex_unlock(&hwmon->hwmon_lock);
>>>> -
>>>> - xe_device_mem_access_put(gt_to_xe(hwmon->gt));
>>>> }
>>>> static const struct hwmon_channel_info *hwmon_info[] = { @@ -284,6
>>>> +280,38 @@ static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32
>>>> uval)
>>>> uval);
>>>> }
>>>> +static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon,
>>>> +long
>>>> *value, u32 scale_factor)
>>>> +{
>>>> + int ret;
>>>> + u32 uval;
>>>> +
>>>> + mutex_lock(&hwmon->hwmon_lock);
>>>> +
>>>> + ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
>>>> + if (ret)
>>>> + goto unlock;
>>>> +
>>>> + *value =
>> mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK,
>>>> uval),
>>>> + scale_factor, POWER_SETUP_I1_SHIFT);
>>>> +unlock:
>>>> + mutex_unlock(&hwmon->hwmon_lock);
>>>> + return ret;
>>>> +}
>>>> +
>>>> +static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon,
>>>> long value, u32 scale_factor)
>>>> +{
>>>> + int ret;
>>>> + u32 uval;
>>>> +
>>>> + mutex_lock(&hwmon->hwmon_lock);
>>>> +
>>>> + uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT,
>>>> scale_factor);
>>>> + ret = xe_hwmon_pcode_write_i1(hwmon->gt, uval);
>>>> +
>>>> + mutex_unlock(&hwmon->hwmon_lock);
>>>> + return ret;
>>>> +}
>>>> +
>>>> static int xe_hwmon_get_voltage(struct xe_hwmon *hwmon, long
>>>> *value)
>>>> {
>>>> u32 reg_val;
>>>> @@ -317,23 +345,15 @@ xe_hwmon_power_is_visible(struct xe_hwmon
>>>> *hwmon, u32 attr, int chan)
>>>> static int
>>>> xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int chan,
>>>> long
>>>> *val)
>>>> {
>>>> - int ret;
>>>> - u32 uval;
>>>> -
>>>> switch (attr) {
>>>> case hwmon_power_max:
>>>> - return xe_hwmon_power_max_read(hwmon, val);
>>>> + xe_hwmon_power_max_read(hwmon, val);
>>>> + return 0;
>>>> case hwmon_power_rated_max:
>>>> - return xe_hwmon_power_rated_max_read(hwmon, val);
>>>> - case hwmon_power_crit:
>>>> - ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
>>>> - if (ret)
>>>> - return ret;
>>>> - if (!(uval & POWER_SETUP_I1_WATTS))
>>>> - return -ENODEV;
>>>> - *val =
>>>> mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
>>>> - SF_POWER, POWER_SETUP_I1_SHIFT);
>>>> + xe_hwmon_power_rated_max_read(hwmon, val);
>>>> return 0;
>>>> + case hwmon_power_crit:
>>>> + return xe_hwmon_power_curr_crit_read(hwmon, val, SF_POWER);
>>>> default:
>>>> return -EOPNOTSUPP;
>>>> }
>>>> @@ -342,14 +362,11 @@ xe_hwmon_power_read(struct xe_hwmon
>> *hwmon, u32
>>>> attr, int chan, long *val)
>>>> static int
>>>> xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int chan,
>>>> long val)
>>>> {
>>>> - u32 uval;
>>>> -
>>>> switch (attr) {
>>>> case hwmon_power_max:
>>>> return xe_hwmon_power_max_write(hwmon, val);
>>>> case hwmon_power_crit:
>>>> - uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT,
>>>> SF_POWER);
>>>> - return xe_hwmon_pcode_write_i1(hwmon->gt, uval);
>>>> + return xe_hwmon_power_curr_crit_write(hwmon, val, SF_POWER);
>>>> default:
>>>> return -EOPNOTSUPP;
>>>> }
>>>> @@ -372,19 +389,9 @@ xe_hwmon_curr_is_visible(const struct
>> xe_hwmon
>>>> *hwmon, u32 attr)
>>>> static int
>>>> xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, long *val)
>>>> {
>>>> - int ret;
>>>> - u32 uval;
>>>> -
>>>> switch (attr) {
>>>> case hwmon_curr_crit:
>>>> - ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
>>>> - if (ret)
>>>> - return ret;
>>>> - if (uval & POWER_SETUP_I1_WATTS)
>>>> - return -ENODEV;
>>>> - *val =
>>>> mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
>>>> - SF_CURR, POWER_SETUP_I1_SHIFT);
>>>> - return 0;
>>>> + return xe_hwmon_power_curr_crit_read(hwmon, val, SF_CURR);
>>>> default:
>>>> return -EOPNOTSUPP;
>>>> }
>>>> @@ -393,12 +400,9 @@ xe_hwmon_curr_read(struct xe_hwmon
>> *hwmon, u32
>>>> attr, long *val)
>>>> static int
>>>> xe_hwmon_curr_write(struct xe_hwmon *hwmon, u32 attr, long val)
>>>> {
>>>> - u32 uval;
>>>> -
>>>> switch (attr) {
>>>> case hwmon_curr_crit:
>>>> - uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT,
>>>> SF_CURR);
>>>> - return xe_hwmon_pcode_write_i1(hwmon->gt, uval);
>>>> + return xe_hwmon_power_curr_crit_write(hwmon, val, SF_CURR);
>>>> default:
>>>> return -EOPNOTSUPP;
>>>> }
>>>> @@ -420,8 +424,6 @@ xe_hwmon_in_read(struct xe_hwmon *hwmon,
>> u32
>>>> attr, long *val)
>>>> {
>>>> int ret;
>>>> - xe_device_mem_access_get(gt_to_xe(hwmon->gt));
>>>> -
>>>> switch (attr) {
>>>> case hwmon_in_input:
>>>> ret = xe_hwmon_get_voltage(hwmon, val); @@ -430,8 +432,6 @@
>>>> xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, long *val)
>>>> ret = -EOPNOTSUPP;
>>>> }
>>>> - xe_device_mem_access_put(gt_to_xe(hwmon->gt));
>>>> -
>>>> return ret;
>>>> }
>>>> @@ -565,14 +565,13 @@ xe_hwmon_get_preregistration_info(struct
>>>> xe_device *xe)
>>>> struct xe_hwmon *hwmon = xe->hwmon;
>>>> long energy;
>>>> u32 val_sku_unit = 0;
>>>> - int ret;
>>>> - ret = xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT,
>>>> REG_READ, &val_sku_unit, 0, 0);
>>>> /*
>>>> * The contents of register PKG_POWER_SKU_UNIT do not change,
>>>> * so read it once and store the shift values.
>>>> */
>>>> - if (!ret) {
>>>> + if (xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT)) {
>>>> + xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT,
>>>> +REG_READ,
>>>> &val_sku_unit, 0, 0);
>>>> hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT,
>>>> val_sku_unit);
>>>> hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT,
>>>> val_sku_unit);
>>>> }
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [Intel-xe] [PATCH 1/2] drm/xe/hwmon: Protect hwmon rw attributes with hwmon_lock
@ 2023-10-16 13:37 ` Nilawar, Badal
0 siblings, 0 replies; 19+ messages in thread
From: Nilawar, Badal @ 2023-10-16 13:37 UTC (permalink / raw)
To: Gupta, Anshuman, intel-xe, linux-hwmon; +Cc: Vivi, Rodrigo
On 16-10-2023 13:21, Gupta, Anshuman wrote:
>
>
>> -----Original Message-----
>> From: Nilawar, Badal <badal.nilawar@intel.com>
>> Sent: Monday, October 16, 2023 1:03 PM
>> To: Gupta, Anshuman <anshuman.gupta@intel.com>; intel-
>> xe@lists.freedesktop.org; linux-hwmon@vger.kernel.org
>> Cc: Dixit, Ashutosh <ashutosh.dixit@intel.com>; andi.shyti@linux.intel.com;
>> Tauro, Riana <riana.tauro@intel.com>; Vivi, Rodrigo <rodrigo.vivi@intel.com>
>> Subject: Re: [PATCH 1/2] drm/xe/hwmon: Protect hwmon rw attributes with
>> hwmon_lock
>>
>> Hi Anshuman,
>>
>> On 09-10-2023 19:06, Gupta, Anshuman wrote:
>>>
>>>
>>> On 10/6/2023 10:32 PM, Badal Nilawar wrote:
>>>> Take hwmon_lock while accessing hwmon rw attributes. For readonly
>>>> attributes its not required to take lock as reads are protected by
>>>> sysfs layer and therefore sequential.
>>>>
>>>> Cc: Ashutosh Dixit <ashutosh.dixit@intel.com>
>>>> Cc: Anshuman Gupta <anshuman.gupta@intel.com>
>>>> Signed-off-by: Badal Nilawar <badal.nilawar@intel.com>
>>>> ---
>>>> drivers/gpu/drm/xe/xe_hwmon.c | 143
>>>> +++++++++++++++++-----------------
>>>> 1 file changed, 71 insertions(+), 72 deletions(-)
>>>>
>>>> diff --git a/drivers/gpu/drm/xe/xe_hwmon.c
>>>> b/drivers/gpu/drm/xe/xe_hwmon.c index 9ac05994a967..391f9a8dd9d7
>>>> 100644
>>>> --- a/drivers/gpu/drm/xe/xe_hwmon.c
>>>> +++ b/drivers/gpu/drm/xe/xe_hwmon.c
>>>> @@ -47,7 +47,7 @@ struct xe_hwmon_energy_info {
>>>> struct xe_hwmon {
>>>> struct device *hwmon_dev;
>>>> struct xe_gt *gt;
>>>> - struct mutex hwmon_lock; /* rmw operations*/
>>>> + struct mutex hwmon_lock; /* For rw attributes */
>>>> int scl_shift_power;
>>>> int scl_shift_energy;
>>>> struct xe_hwmon_energy_info ei; /* Energy info for
>>>> energy1_input */ @@ -95,47 +95,45 @@ static u32
>>>> xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
>> hwmon_reg)
>>>> return reg.raw;
>>>> }
>>>> -static int xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum
>>>> xe_hwmon_reg hwmon_reg,
>>>> - enum xe_hwmon_reg_operation operation, u32 *value,
>>>> - u32 clr, u32 set)
>>>> +static void xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum
>>>> xe_hwmon_reg hwmon_reg,
>>>> + enum xe_hwmon_reg_operation operation, u32 *value,
>>>> + u32 clr, u32 set)
>>>> {
>>>> struct xe_reg reg;
>>>> reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
>>>> if (!reg.raw)
>>>> - return -EOPNOTSUPP;
>>>> + return;
>>> Don't we need to report this as a warning?
>>> What is the possibility of the code path landing here?
>> A warning is added in xe_hwmon_get_reg when the reg is invalid. A warning is
>> not needed when the reg is not supported by the platform. During runtime the code
>> path will not fall here, as the visible functions take care of creating hwmon entries
>> only if the regs are supported. So it doesn't make sense to add a warn here.
>>>> switch (operation) {
>>>> case REG_READ:
>>>> *value = xe_mmio_read32(hwmon->gt, reg);
>>>> - return 0;
>>>> + break;
>>>> case REG_WRITE:
>>>> xe_mmio_write32(hwmon->gt, reg, *value);
>>>> - return 0;
>>>> + break;
>>>> case REG_RMW:
>>>> *value = xe_mmio_rmw32(hwmon->gt, reg, clr, set);
>>>> - return 0;
>>>> + break;
>>>> default:
>>>> drm_warn(&gt_to_xe(hwmon->gt)->drm, "Invalid xe hwmon reg
>>>> operation: %d\n",
>>>> operation);
>>>> - return -EOPNOTSUPP;
>>>> + break;
>>>> }
>>>> }
>>>> -static int xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
>>>> - enum xe_hwmon_reg hwmon_reg, u64 *value)
>>>> +static void xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
>>>> + enum xe_hwmon_reg hwmon_reg, u64 *value)
>>> Isn't it possible to pass the len of the void *value to xe_hwmon_process_reg()
>>> so we can wrap this function in xe_hwmon_process_reg()?
>> Yes, it's possible, but I feel it would be more useful if there were regs of variable
>> lengths, i.e. other than 64 or 32 bit. As of now hwmon regs are
>> 32 and 64 bit lengths, so I preferred 2 separate functions. If you insist I will
>> wrap.
> Another thing: do we have any consumer of REG_WRITE? I don't see any caller (it is dead code); considering the above, there is no real benefit of keeping the abstraction with
Yes, REG_WRITE is not used; I will remove it.
> xe_hwmon_process_reg() and hwmon_process_reg_read64(). If keeping hwmon_process_reg_read64() is simple then it is defeating the purpose of
> xe_hwmon_process_reg().
I don't completely agree on this. hwmon_process_reg_read64 is looking
simple since it is handling only 1 operation, i.e. read64. Considering it
may get scaled in the future, I should have written it as
hwmon_process_reg64() and passed the REG_READ64 operation as an argument.
> IMHO the only benefit xe_hwmon_process_reg() provides scalability for future platforms like forcewake is needed,
> if intention is to keep it simple then Calling xe_mmio_{read32, read64_2x32, xe_mmio_rmw32} directly is much simpler than having multiple wrappers ?
You mean use
xe_mmio_{read32, read64_2x32, xe_mmio_rmw32}(hwmon->gt,
xe_hwmon_get_reg(hwmon, hwmon_reg)); ?
Regards,
Badal
>
> Br,
> Anshuman Gupta.
>
>>
>> Regards,
>> Badal
>>>
>>>> {
>>>> struct xe_reg reg;
>>>> reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
>>>> if (!reg.raw)
>>>> - return -EOPNOTSUPP;
>>>> + return;
>>>> *value = xe_mmio_read64_2x32(hwmon->gt, reg);
>>>> -
>>>> - return 0;
>>>> }
>>>> #define PL1_DISABLE 0
>>>> @@ -146,16 +144,18 @@ static int
>> xe_hwmon_process_reg_read64(struct
>>>> xe_hwmon *hwmon,
>>>> * same pattern for sysfs, allow arbitrary PL1 limits to be set but
>>>> display
>>>> * clamped values when read.
>>>> */
>>>> -static int xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long
>>>> *value)
>>>> +static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long
>>>> +*value)
>>>> {
>>>> u32 reg_val;
>>>> u64 reg_val64, min, max;
>>>> + mutex_lock(&hwmon->hwmon_lock);
>>>> +
>>>> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ,
>>>> &reg_val, 0, 0);
>>>> /* Check if PL1 limit is disabled */
>>>> if (!(reg_val & PKG_PWR_LIM_1_EN)) {
>>>> *value = PL1_DISABLE;
>>>> - return 0;
>>>> + goto unlock;
>>>> }
>>>> reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val); @@ -169,14
>>>> +169,17 @@ static int xe_hwmon_power_max_read(struct xe_hwmon
>> *hwmon,
>>>> long *value)
>>>> if (min && max)
>>>> *value = clamp_t(u64, *value, min, max);
>>>> -
>>>> - return 0;
>>>> +unlock:
>>>> + mutex_unlock(&hwmon->hwmon_lock);
>>>> }
>>>> static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long
>>>> value)
>>>> {
>>>> + int ret = 0;
>>>> u32 reg_val;
>>>> + mutex_lock(&hwmon->hwmon_lock);
>>>> +
>>>> /* Disable PL1 limit and verify, as limit cannot be disabled on
>>>> all platforms */
>>>> if (value == PL1_DISABLE) {
>>>> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW,
>>>> &reg_val, @@ -184,8 +187,10 @@ static int
>>>> xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
>>>> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ,
>>>> &reg_val,
>>>> PKG_PWR_LIM_1_EN, 0);
>>>> - if (reg_val & PKG_PWR_LIM_1_EN)
>>>> - return -EOPNOTSUPP;
>>>> + if (reg_val & PKG_PWR_LIM_1_EN) {
>>>> + ret = -EOPNOTSUPP;
>>>> + goto unlock;
>>>> + }
>>>> }
>>>> /* Computation in 64-bits to avoid overflow. Round to nearest.
>>>> */ @@ -194,19 +199,18 @@ static int
>> xe_hwmon_power_max_write(struct
>>>> xe_hwmon *hwmon, long value)
>>>> xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW,
>>>> &reg_val,
>>>> PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
>>>> -
>>>> - return 0;
>>>> +unlock:
>>>> + mutex_unlock(&hwmon->hwmon_lock);
>>>> + return ret;
>>>> }
>>>> -static int xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon,
>>>> long
>>>> *value)
>>>> +static void xe_hwmon_power_rated_max_read(struct xe_hwmon
>> *hwmon,
>>>> long *value)
>>>> {
>>>> u32 reg_val;
>>>> xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ,
>>>> &reg_val, 0, 0);
>>>> reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
>>>> *value = mul_u64_u32_shr(reg_val, SF_POWER,
>>>> hwmon->scl_shift_power);
>>>> -
>>>> - return 0;
>>>> }
>>>> /*
>>>> @@ -235,10 +239,6 @@ xe_hwmon_energy_get(struct xe_hwmon
>> *hwmon, long
>>>> *energy)
>>>> struct xe_hwmon_energy_info *ei = &hwmon->ei;
>>>> u32 reg_val;
>>>> - xe_device_mem_access_get(gt_to_xe(hwmon->gt));
>>>> -
>>>> - mutex_lock(&hwmon->hwmon_lock);
>>>> -
>>>> xe_hwmon_process_reg(hwmon, REG_PKG_ENERGY_STATUS,
>> REG_READ,
>>>> &reg_val, 0, 0);
>>>> @@ -251,10 +251,6 @@ xe_hwmon_energy_get(struct xe_hwmon
>> *hwmon, long
>>>> *energy)
>>>> *energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
>>>> hwmon->scl_shift_energy);
>>>> -
>>>> - mutex_unlock(&hwmon->hwmon_lock);
>>>> -
>>>> - xe_device_mem_access_put(gt_to_xe(hwmon->gt));
>>>> }
>>>> static const struct hwmon_channel_info *hwmon_info[] = { @@ -284,6
>>>> +280,38 @@ static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32
>>>> uval)
>>>> uval);
>>>> }
>>>> +static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon,
>>>> +long
>>>> *value, u32 scale_factor)
>>>> +{
>>>> + int ret;
>>>> + u32 uval;
>>>> +
>>>> + mutex_lock(&hwmon->hwmon_lock);
>>>> +
>>>> + ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
>>>> + if (ret)
>>>> + goto unlock;
>>>> +
>>>> + *value =
>> mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK,
>>>> uval),
>>>> + scale_factor, POWER_SETUP_I1_SHIFT);
>>>> +unlock:
>>>> + mutex_unlock(&hwmon->hwmon_lock);
>>>> + return ret;
>>>> +}
>>>> +
>>>> +static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon,
>>>> long value, u32 scale_factor)
>>>> +{
>>>> + int ret;
>>>> + u32 uval;
>>>> +
>>>> + mutex_lock(&hwmon->hwmon_lock);
>>>> +
>>>> + uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT,
>>>> scale_factor);
>>>> + ret = xe_hwmon_pcode_write_i1(hwmon->gt, uval);
>>>> +
>>>> + mutex_unlock(&hwmon->hwmon_lock);
>>>> + return ret;
>>>> +}
>>>> +
>>>> static int xe_hwmon_get_voltage(struct xe_hwmon *hwmon, long
>>>> *value)
>>>> {
>>>> u32 reg_val;
>>>> @@ -317,23 +345,15 @@ xe_hwmon_power_is_visible(struct xe_hwmon
>>>> *hwmon, u32 attr, int chan)
>>>> static int
>>>> xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int chan,
>>>> long
>>>> *val)
>>>> {
>>>> - int ret;
>>>> - u32 uval;
>>>> -
>>>> switch (attr) {
>>>> case hwmon_power_max:
>>>> - return xe_hwmon_power_max_read(hwmon, val);
>>>> + xe_hwmon_power_max_read(hwmon, val);
>>>> + return 0;
>>>> case hwmon_power_rated_max:
>>>> - return xe_hwmon_power_rated_max_read(hwmon, val);
>>>> - case hwmon_power_crit:
>>>> - ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
>>>> - if (ret)
>>>> - return ret;
>>>> - if (!(uval & POWER_SETUP_I1_WATTS))
>>>> - return -ENODEV;
>>>> - *val =
>>>> mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
>>>> - SF_POWER, POWER_SETUP_I1_SHIFT);
>>>> + xe_hwmon_power_rated_max_read(hwmon, val);
>>>> return 0;
>>>> + case hwmon_power_crit:
>>>> + return xe_hwmon_power_curr_crit_read(hwmon, val, SF_POWER);
>>>> default:
>>>> return -EOPNOTSUPP;
>>>> }
>>>> @@ -342,14 +362,11 @@ xe_hwmon_power_read(struct xe_hwmon
>> *hwmon, u32
>>>> attr, int chan, long *val)
>>>> static int
>>>> xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int chan,
>>>> long val)
>>>> {
>>>> - u32 uval;
>>>> -
>>>> switch (attr) {
>>>> case hwmon_power_max:
>>>> return xe_hwmon_power_max_write(hwmon, val);
>>>> case hwmon_power_crit:
>>>> - uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT,
>>>> SF_POWER);
>>>> - return xe_hwmon_pcode_write_i1(hwmon->gt, uval);
>>>> + return xe_hwmon_power_curr_crit_write(hwmon, val, SF_POWER);
>>>> default:
>>>> return -EOPNOTSUPP;
>>>> }
>>>> @@ -372,19 +389,9 @@ xe_hwmon_curr_is_visible(const struct
>> xe_hwmon
>>>> *hwmon, u32 attr)
>>>> static int
>>>> xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, long *val)
>>>> {
>>>> - int ret;
>>>> - u32 uval;
>>>> -
>>>> switch (attr) {
>>>> case hwmon_curr_crit:
>>>> - ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
>>>> - if (ret)
>>>> - return ret;
>>>> - if (uval & POWER_SETUP_I1_WATTS)
>>>> - return -ENODEV;
>>>> - *val =
>>>> mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
>>>> - SF_CURR, POWER_SETUP_I1_SHIFT);
>>>> - return 0;
>>>> + return xe_hwmon_power_curr_crit_read(hwmon, val, SF_CURR);
>>>> default:
>>>> return -EOPNOTSUPP;
>>>> }
>>>> @@ -393,12 +400,9 @@ xe_hwmon_curr_read(struct xe_hwmon
>> *hwmon, u32
>>>> attr, long *val)
>>>> static int
>>>> xe_hwmon_curr_write(struct xe_hwmon *hwmon, u32 attr, long val)
>>>> {
>>>> - u32 uval;
>>>> -
>>>> switch (attr) {
>>>> case hwmon_curr_crit:
>>>> - uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT,
>>>> SF_CURR);
>>>> - return xe_hwmon_pcode_write_i1(hwmon->gt, uval);
>>>> + return xe_hwmon_power_curr_crit_write(hwmon, val, SF_CURR);
>>>> default:
>>>> return -EOPNOTSUPP;
>>>> }
>>>> @@ -420,8 +424,6 @@ xe_hwmon_in_read(struct xe_hwmon *hwmon,
>> u32
>>>> attr, long *val)
>>>> {
>>>> int ret;
>>>> - xe_device_mem_access_get(gt_to_xe(hwmon->gt));
>>>> -
>>>> switch (attr) {
>>>> case hwmon_in_input:
>>>> ret = xe_hwmon_get_voltage(hwmon, val); @@ -430,8 +432,6 @@
>>>> xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, long *val)
>>>> ret = -EOPNOTSUPP;
>>>> }
>>>> - xe_device_mem_access_put(gt_to_xe(hwmon->gt));
>>>> -
>>>> return ret;
>>>> }
>>>> @@ -565,14 +565,13 @@ xe_hwmon_get_preregistration_info(struct
>>>> xe_device *xe)
>>>> struct xe_hwmon *hwmon = xe->hwmon;
>>>> long energy;
>>>> u32 val_sku_unit = 0;
>>>> - int ret;
>>>> - ret = xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT,
>>>> REG_READ, &val_sku_unit, 0, 0);
>>>> /*
>>>> * The contents of register PKG_POWER_SKU_UNIT do not change,
>>>> * so read it once and store the shift values.
>>>> */
>>>> - if (!ret) {
>>>> + if (xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT)) {
>>>> + xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT,
>>>> +REG_READ,
>>>> &val_sku_unit, 0, 0);
>>>> hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT,
>>>> val_sku_unit);
>>>> hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT,
>>>> val_sku_unit);
>>>> }
^ permalink raw reply [flat|nested] 19+ messages in thread
end of thread, other threads:[~2023-10-16 13:38 UTC | newest]
Thread overview: 19+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-10-06 17:02 [PATCH 0/2] DGFX HWMON Enhancement Badal Nilawar
2023-10-06 17:02 ` [Intel-xe] " Badal Nilawar
2023-10-06 17:02 ` [PATCH 1/2] drm/xe/hwmon: Protect hwmon rw attributes with hwmon_lock Badal Nilawar
2023-10-06 17:02 ` [Intel-xe] " Badal Nilawar
2023-10-09 13:36 ` Gupta, Anshuman
2023-10-09 13:36 ` [Intel-xe] " Gupta, Anshuman
2023-10-16 7:33 ` Nilawar, Badal
2023-10-16 7:33 ` [Intel-xe] " Nilawar, Badal
2023-10-16 7:51 ` Gupta, Anshuman
2023-10-16 7:51 ` [Intel-xe] " Gupta, Anshuman
2023-10-16 13:37 ` Nilawar, Badal
2023-10-16 13:37 ` [Intel-xe] " Nilawar, Badal
2023-10-06 17:02 ` [PATCH 2/2] drm/xe/hwmon: Expose power1_max_interval Badal Nilawar
2023-10-06 17:02 ` [Intel-xe] " Badal Nilawar
2023-10-11 5:59 ` Gupta, Anshuman
2023-10-11 5:59 ` [Intel-xe] " Gupta, Anshuman
2023-10-16 7:39 ` Nilawar, Badal
2023-10-16 7:39 ` [Intel-xe] " Nilawar, Badal
2023-10-06 18:43 ` [Intel-xe] ✗ CI.Patch_applied: failure for DGFX HWMON Enhancement Patchwork
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.