diff mbox series

[v2,2/6] drm/xe/hwmon: Expose power attributes

Message ID 20230627183043.2024530-3-badal.nilawar@intel.com (mailing list archive)
State Handled Elsewhere
Headers show
Series Add HWMON support for DGFX | expand

Commit Message

Nilawar, Badal June 27, 2023, 6:30 p.m. UTC
Expose power_max (pl1) and power_rated_max (tdp) attributes.
This is ported from the i915 hwmon.

v2:
  - Move rpm calls (xe_device_mem_access_get/put) to hwmon functions from
    process_hwmon_reg to avoid multiple rpm entry exits during consecutive
    reg accesses
  - Fix review comments (Riana)

Signed-off-by: Badal Nilawar <badal.nilawar@intel.com>
---
 .../ABI/testing/sysfs-driver-intel-xe-hwmon   |  22 ++
 drivers/gpu/drm/xe/regs/xe_gt_regs.h          |   4 +
 drivers/gpu/drm/xe/regs/xe_mchbar_regs.h      |  34 ++
 drivers/gpu/drm/xe/xe_hwmon.c                 | 372 +++++++++++++++++-
 drivers/gpu/drm/xe/xe_hwmon.h                 |   4 +
 drivers/gpu/drm/xe/xe_uc.c                    |   6 +
 6 files changed, 435 insertions(+), 7 deletions(-)
 create mode 100644 Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
 create mode 100644 drivers/gpu/drm/xe/regs/xe_mchbar_regs.h

Comments

Matthew Brost June 29, 2023, 12:18 a.m. UTC | #1
On Wed, Jun 28, 2023 at 12:00:39AM +0530, Badal Nilawar wrote:
> Expose power_max (pl1) and power_rated_max (tdp) attributes.
> This is port from i915 hwmon.
> 

Not an expert in this domain, but this statement makes me nervous as
we generally shouldn't just blindly port things from i915. Doing an
initial review, but this will need a domain expert too.

> v2:
>   - Move rpm calls (xe_device_mem_access_get/put) to hwmon functions from
>     process_hwmon_reg to avoid multiple rpm entry exits during consecutive
>     reg accesses
>   - Fix review comments (Riana)
> 
> Signed-off-by: Badal Nilawar <badal.nilawar@intel.com>
> ---
>  .../ABI/testing/sysfs-driver-intel-xe-hwmon   |  22 ++
>  drivers/gpu/drm/xe/regs/xe_gt_regs.h          |   4 +
>  drivers/gpu/drm/xe/regs/xe_mchbar_regs.h      |  34 ++
>  drivers/gpu/drm/xe/xe_hwmon.c                 | 372 +++++++++++++++++-
>  drivers/gpu/drm/xe/xe_hwmon.h                 |   4 +
>  drivers/gpu/drm/xe/xe_uc.c                    |   6 +
>  6 files changed, 435 insertions(+), 7 deletions(-)
>  create mode 100644 Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
>  create mode 100644 drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
> 
> diff --git a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
> new file mode 100644
> index 000000000000..ff3465195870
> --- /dev/null
> +++ b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
> @@ -0,0 +1,22 @@
> +What:		/sys/devices/.../hwmon/hwmon<i>/power1_max
> +Date:		July 2023

Future?

> +KernelVersion:	6.3
> +Contact:	intel-gfx@lists.freedesktop.org

s/intel-gfx/intel-xe

> +Description:	RW. Card reactive sustained  (PL1/Tau) power limit in microwatts.
> +
> +		The power controller will throttle the operating frequency
> +		if the power averaged over a window (typically seconds)
> +		exceeds this limit. A read value of 0 means that the PL1
> +		power limit is disabled, writing 0 disables the
> +		limit. Writing values > 0 will enable the power limit.
> +
> +		Only supported for particular Intel xe graphics platforms.
> +
> +What:		/sys/devices/.../hwmon/hwmon<i>/power1_rated_max
> +Date:		July 2023
> +KernelVersion:	6.3
> +Contact:	intel-gfx@lists.freedesktop.org
> +Description:	RO. Card default power limit (default TDP setting).
> +
> +		Only supported for particular Intel xe graphics platforms.
> +
> diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
> index d654f3311351..eb7210afbd2c 100644
> --- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
> +++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
> @@ -397,4 +397,8 @@
>  #define XEHPC_BCS5_BCS6_INTR_MASK		XE_REG(0x190118)
>  #define XEHPC_BCS7_BCS8_INTR_MASK		XE_REG(0x19011c)
>  
> +#define PVC_GT0_PACKAGE_RAPL_LIMIT		XE_REG(0x281008)
> +#define PVC_GT0_PACKAGE_POWER_SKU_UNIT		XE_REG(0x281068)
> +#define PVC_GT0_PACKAGE_POWER_SKU		XE_REG(0x281080)
> +
>  #endif
> diff --git a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
> new file mode 100644
> index 000000000000..cb2d49b5c8a9
> --- /dev/null
> +++ b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
> @@ -0,0 +1,34 @@
> +/* SPDX-License-Identifier: MIT */
> +/*
> + * Copyright © 2023 Intel Corporation
> + */
> +
> +#ifndef _XE_MCHBAR_REGS_H__
> +#define _XE_MCHBAR_REGS_H_
> +
> +#include "regs/xe_reg_defs.h"
> +
> +/*
> + * MCHBAR mirror.
> + *
> + * This mirrors the MCHBAR MMIO space whose location is determined by
> + * device 0 function 0's pci config register 0x44 or 0x48 and matches it in
> + * every way.
> + */
> +
> +#define MCHBAR_MIRROR_BASE_SNB			0x140000
> +
> +#define PCU_CR_PACKAGE_POWER_SKU		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5930)
> +#define   PKG_PKG_TDP				GENMASK_ULL(14, 0)
> +#define   PKG_MIN_PWR				GENMASK_ULL(30, 16)
> +#define   PKG_MAX_PWR				GENMASK_ULL(46, 32)
> +
> +#define PCU_CR_PACKAGE_POWER_SKU_UNIT		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5938)
> +#define   PKG_PWR_UNIT				REG_GENMASK(3, 0)
> +
> +#define PCU_CR_PACKAGE_RAPL_LIMIT		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x59a0)
> +#define   PKG_PWR_LIM_1				REG_GENMASK(14, 0)
> +#define   PKG_PWR_LIM_1_EN			REG_BIT(15)
> +
> +#endif
> +
> diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
> index 8f653fdf4ad5..a4fba29d5d5a 100644
> --- a/drivers/gpu/drm/xe/xe_hwmon.c
> +++ b/drivers/gpu/drm/xe/xe_hwmon.c
> @@ -5,53 +5,394 @@
>  
>  #include <linux/hwmon.h>
>  
> +#include "regs/xe_mchbar_regs.h"
>  #include "regs/xe_gt_regs.h"
>  #include "xe_device.h"
>  #include "xe_hwmon.h"
> +#include "xe_mmio.h"
> +#include "xe_gt.h"

Nit - includes should be in alphabetical order.

> +
> +enum hwm_reg_name {
> +	pkg_rapl_limit,
> +	pkg_power_sku,
> +	pkg_power_sku_unit,

Upper case? I guess hwmon_power_max is lower case so is that the
convention?

> +};
> +
> +enum hwm_reg_operation {
> +	reg_read,
> +	reg_write,
> +	reg_rmw,

Upper case?

> +};
> +

s/hwm_reg/xe_hwm_reg

> +/*
> + * SF_* - scale factors for particular quantities according to hwmon spec.
> + * - power  - microwatts
> + */
> +#define SF_POWER	1000000
>  
>  struct hwm_drvdata {
>  	struct xe_hwmon *hwmon;
>  	struct device *hwmon_dev;
> +	struct xe_gt *gt;
>  	char name[12];
> +	bool reset_in_progress;
> +	wait_queue_head_t waitq;
>  };
>  
>  struct xe_hwmon {
>  	struct hwm_drvdata ddat;
>  	struct mutex hwmon_lock;
> +	int scl_shift_power;
>  };
>

Same as previous patch, 1 struct seems like a better idea to me.

> +struct xe_reg hwm_get_reg(struct hwm_drvdata *ddat, enum hwm_reg_name reg_name)

s/hwm_get_reg/xe_hwmon_get_reg

> +{
> +	struct xe_device *xe = gt_to_xe(ddat->gt);
> +
> +	switch (reg_name) {
> +	case pkg_rapl_limit:
> +		if (xe->info.platform == XE_DG2)
> +			return PCU_CR_PACKAGE_RAPL_LIMIT;
> +		else if (xe->info.platform == XE_PVC)
> +			return PVC_GT0_PACKAGE_RAPL_LIMIT;
> +		break;
> +	case pkg_power_sku:
> +		if (xe->info.platform == XE_DG2)
> +			return PCU_CR_PACKAGE_POWER_SKU;
> +		else if (xe->info.platform == XE_PVC)
> +			return PVC_GT0_PACKAGE_POWER_SKU;
> +		break;
> +	case pkg_power_sku_unit:
> +		if (xe->info.platform == XE_DG2)
> +			return PCU_CR_PACKAGE_POWER_SKU_UNIT;
> +		else if (xe->info.platform == XE_PVC)
> +			return PVC_GT0_PACKAGE_POWER_SKU_UNIT;
> +		break;
> +	default:

BUG_ON or WARN_ON saying not possible?

> +		break;
> +	}
> +
> +	return XE_REG(0);
> +}
> +
> +int process_hwmon_reg(struct hwm_drvdata *ddat, enum hwm_reg_name reg_name,
> +		      enum hwm_reg_operation operation, u32 *value,
> +		      u32 clr, u32 set)
> +{
> +	struct xe_reg reg;
> +	int ret = 0;
> +
> +	reg = hwm_get_reg(ddat, reg_name);
> +
> +	if (!reg.raw)
> +		return -EOPNOTSUPP;
> +
> +	switch (operation) {
> +	case reg_read:
> +		*value = xe_mmio_read32(ddat->gt, reg);
> +		break;
> +	case reg_write:
> +		xe_mmio_write32(ddat->gt, reg, *value);
> +		break;
> +	case reg_rmw:
> +		*value = xe_mmio_rmw32(ddat->gt, reg, clr, set);
> +		break;
> +	default:

BUG_ON or WARN_ON saying not possible?

> +		ret = -EOPNOTSUPP;
> +	}
> +
> +	return ret;
> +}
> +
> +int process_hwmon_reg_read64(struct hwm_drvdata *ddat, enum hwm_reg_name reg_name, u64 *value)
> +{
> +	struct xe_reg reg;
> +
> +	reg = hwm_get_reg(ddat, reg_name);
> +
> +	if (!reg.raw)
> +		return -EOPNOTSUPP;
> +
> +	xe_device_mem_access_get(gt_to_xe(ddat->gt));

Don't need this, the upper levels of the stack should have this. You
could just do a xe_device_assert_mem_access here. 

> +
> +	*value = xe_mmio_read64(ddat->gt, reg);
> +
> +	xe_device_mem_access_put(gt_to_xe(ddat->gt));
> +
> +	return 0;
> +}
> +
> +#define PL1_DISABLE 0
> +
> +/*
> + * HW allows arbitrary PL1 limits to be set but silently clamps these values to
> + * "typical but not guaranteed" min/max values in rg.pkg_power_sku. Follow the
> + * same pattern for sysfs, allow arbitrary PL1 limits to be set but display
> + * clamped values when read.
> + */
> +static int hwm_power_max_read(struct hwm_drvdata *ddat, long *value)

The return value is always 0, why not return value?

s/hwm_power_max_read/xe_hwmon_power_max_read

> +{
> +	struct xe_hwmon *hwmon = ddat->hwmon;
> +	u32 reg_val;
> +	u64 r, min, max;
> +
> +	xe_device_mem_access_get(gt_to_xe(ddat->gt));
> +

Same as above, use xe_device_assert_mem_access.

> +	process_hwmon_reg(ddat, pkg_rapl_limit, reg_read, &reg_val, 0, 0);
> +	/* Check if PL1 limit is disabled */
> +	if (!(reg_val & PKG_PWR_LIM_1_EN)) {
> +		*value = PL1_DISABLE;
> +		xe_device_mem_access_put(gt_to_xe(ddat->gt));
> +		return 0;
> +	}
> +
> +	reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
> +	*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
> +
> +	process_hwmon_reg_read64(ddat, pkg_power_sku, &r);
> +	min = REG_FIELD_GET(PKG_MIN_PWR, r);
> +	min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
> +	max = REG_FIELD_GET(PKG_MAX_PWR, r);
> +	max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);
> +
> +	if (min && max)
> +		*value = clamp_t(u64, *value, min, max);
> +
> +	xe_device_mem_access_put(gt_to_xe(ddat->gt));
> +	return 0;
> +}
> +
> +static int hwm_power_max_write(struct hwm_drvdata *ddat, long value)
> +{
> +	struct xe_hwmon *hwmon = ddat->hwmon;
> +	DEFINE_WAIT(wait);
> +	int ret = 0;
> +	u32 nval;
> +
> +	/* Block waiting for GuC reset to complete when needed */
> +	for (;;) {
> +		mutex_lock(&hwmon->hwmon_lock);
> +
> +		prepare_to_wait(&ddat->waitq, &wait, TASK_INTERRUPTIBLE);
> +
> +		if (!hwmon->ddat.reset_in_progress)
> +			break;
> +
> +		if (signal_pending(current)) {
> +			ret = -EINTR;
> +			break;
> +		}
> +
> +		mutex_unlock(&hwmon->hwmon_lock);
> +
> +		schedule();
> +	}
> +	finish_wait(&ddat->waitq, &wait);
> +	if (ret)
> +		goto unlock;

Any way to not open code this? We have something similar with
xe_guc_submit_reset_wait — could we add a global reset-in-progress
function which we can expose at the gt level?

> +
> +	xe_device_mem_access_get(gt_to_xe(ddat->gt));
> +

This certainly is an outermost thing, e.g. doing this under
hwmon->hwmon_lock seems dangerous. Again the upper levels of the stack
should do the xe_device_mem_access_get, which it does. Just pointing out
doing xe_device_mem_access_get/put under a lock isn't a good idea.

Also, the loop which acquires hwmon->hwmon_lock is confusing too.

> +	/* Disable PL1 limit and verify, as limit cannot be disabled on all platforms */
> +	if (value == PL1_DISABLE) {
> +		process_hwmon_reg(ddat, pkg_rapl_limit, reg_rmw, &nval,
> +				  PKG_PWR_LIM_1_EN, 0);
> +		process_hwmon_reg(ddat, pkg_rapl_limit, reg_read, &nval,
> +				  PKG_PWR_LIM_1_EN, 0);
> +
> +		if (nval & PKG_PWR_LIM_1_EN)
> +			ret = -ENODEV;
> +		goto exit;
> +	}
> +
> +	/* Computation in 64-bits to avoid overflow. Round to nearest. */
> +	nval = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
> +	nval = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, nval);
> +
> +	process_hwmon_reg(ddat, pkg_rapl_limit, reg_rmw, &nval,
> +			  PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, nval);
> +exit:
> +	xe_device_mem_access_put(gt_to_xe(ddat->gt));
> +unlock:
> +	mutex_unlock(&hwmon->hwmon_lock);
> +

Again a clear lock / unlock is desirable.

> +	return 0;
> +}
> +
> +static int hwm_power_rated_max_read(struct hwm_drvdata *ddat, long *value)
> +{

s/hwm_power_rated_max_read/xe_hwmon_power_rated_max_read

> +	struct xe_hwmon *hwmon = ddat->hwmon;
> +	u32 reg_val;
> +
> +	process_hwmon_reg(ddat, pkg_power_sku, reg_read, &reg_val, 0, 0);
> +	reg_val = REG_FIELD_GET(PKG_PKG_TDP, reg_val);
> +	*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
> +
> +	return 0;
> +}
> +
>  static const struct hwmon_channel_info *hwm_info[] = {

s/hwm_info/xe_hwmon_info

> +	HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX),
>  	NULL
>  };
>  
> +static umode_t
> +hwm_power_is_visible(struct hwm_drvdata *ddat, u32 attr, int chan)
> +{

s/hwm_power_is_visible/xe_hwmon_power_is_visible

> +	u32 reg_val;
> +
> +	switch (attr) {
> +	case hwmon_power_max:
> +		return process_hwmon_reg(ddat, pkg_rapl_limit,
> +					 reg_read, &reg_val, 0, 0) ? 0 : 0664;
> +	case hwmon_power_rated_max:
> +		return process_hwmon_reg(ddat, pkg_power_sku,
> +					 reg_read, &reg_val, 0, 0) ? 0 : 0444;
> +	default:
> +		return 0;
> +	}
> +}
> +
> +static int
> +hwm_power_read(struct hwm_drvdata *ddat, u32 attr, int chan, long *val)
> +{

s/hwm_power_read/xe_hwmon_power_read

> +	switch (attr) {
> +	case hwmon_power_max:
> +		return hwm_power_max_read(ddat, val);
> +	case hwmon_power_rated_max:
> +		return hwm_power_rated_max_read(ddat, val);
> +	default:
> +		return -EOPNOTSUPP;
> +	}
> +}
> +
> +static int
> +hwm_power_write(struct hwm_drvdata *ddat, u32 attr, int chan, long val)
> +{

s/hwm_power_write/xe_hwmon_power_write

> +	switch (attr) {
> +	case hwmon_power_max:
> +		return hwm_power_max_write(ddat, val);
> +	default:
> +		return -EOPNOTSUPP;
> +	}
> +}
> +
> +void xe_hwmon_power_max_disable(struct xe_device *xe, bool *old)
> +{
> +	struct xe_hwmon *hwmon = xe->hwmon;
> +	struct hwm_drvdata *ddat = &hwmon->ddat;
> +	u32 r;
> +
> +	xe_device_mem_access_get(gt_to_xe(ddat->gt));
> +

Upper level should have, use an assert if anything.

> +	if (!hwmon || process_hwmon_reg(ddat, pkg_rapl_limit,
> +					reg_read, &r, 0, 0))
> +		return;
> +
> +	mutex_lock(&hwmon->hwmon_lock);
> +
> +	hwmon->ddat.reset_in_progress = true;
> +
> +	process_hwmon_reg(ddat, pkg_rapl_limit, reg_rmw, &r,
> +			  PKG_PWR_LIM_1_EN, 0);
> +	*old = !!(r & PKG_PWR_LIM_1_EN);
> +
> +	mutex_unlock(&hwmon->hwmon_lock);
> +
> +	xe_device_mem_access_put(gt_to_xe(ddat->gt));
> +}
> +
> +void xe_hwmon_power_max_restore(struct xe_device *xe, bool old)
> +{
> +	struct xe_hwmon *hwmon = xe->hwmon;
> +	struct hwm_drvdata *ddat = &hwmon->ddat;
> +	u32 r;
> +
> +	xe_device_mem_access_get(gt_to_xe(ddat->gt));

Upper level should have, use an assert if anything.

> +
> +	if (!hwmon || process_hwmon_reg(ddat, pkg_rapl_limit,
> +					reg_read, &r, 0, 0))
> +		return;
> +
> +	mutex_lock(&hwmon->hwmon_lock);
> +
> +	process_hwmon_reg(ddat, pkg_rapl_limit, reg_rmw, &r,
> +			  PKG_PWR_LIM_1_EN, old ? PKG_PWR_LIM_1_EN : 0);
> +
> +	hwmon->ddat.reset_in_progress = false;
> +	wake_up_all(&hwmon->ddat.waitq);
> +
> +	mutex_unlock(&hwmon->hwmon_lock);
> +
> +	xe_device_mem_access_put(gt_to_xe(ddat->gt));
> +}
> +
>  static umode_t
>  hwm_is_visible(const void *drvdata, enum hwmon_sensor_types type,
>  	       u32 attr, int channel)
>  {
> +	struct hwm_drvdata *ddat = (struct hwm_drvdata *)drvdata;
> +	int ret;
> +
> +	xe_device_mem_access_get(gt_to_xe(ddat->gt));
> +

Nit, include the get / put in previous patch.

>  	switch (type) {
> +	case hwmon_power:
> +		ret = hwm_power_is_visible(ddat, attr, channel);
> +		break;
>  	default:
> -		return 0;
> +		ret = 0;
>  	}
> +
> +	xe_device_mem_access_put(gt_to_xe(ddat->gt));
> +
> +	return ret;
>  }
>  
>  static int
>  hwm_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
>  	 int channel, long *val)
>  {
> +	struct hwm_drvdata *ddat = dev_get_drvdata(dev);
> +	int ret;
> +
> +	xe_device_mem_access_get(gt_to_xe(ddat->gt));

Nit, include the get / put in previous patch.

> +
>  	switch (type) {
> +	case hwmon_power:
> +		ret = hwm_power_read(ddat, attr, channel, val);
> +		break;
>  	default:
> -		return -EOPNOTSUPP;
> +		ret = -EOPNOTSUPP;
> +		break;
>  	}
> +
> +	xe_device_mem_access_put(gt_to_xe(ddat->gt));
> +
> +	return ret;
>  }
>  
>  static int
>  hwm_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
>  	  int channel, long val)
>  {
> +	struct hwm_drvdata *ddat = dev_get_drvdata(dev);
> +	int ret;
> +
> +	xe_device_mem_access_get(gt_to_xe(ddat->gt));

Nit, include the get / put in previous patch.

> +
>  	switch (type) {
> +	case hwmon_power:
> +		ret = hwm_power_write(ddat, attr, channel, val);
> +		break;
>  	default:
> -		return -EOPNOTSUPP;
> +		ret = -EOPNOTSUPP;
> +		break;
>  	}
> +
> +	xe_device_mem_access_put(gt_to_xe(ddat->gt));
> +
> +	return ret;
>  }
>  
>  static const struct hwmon_ops hwm_ops = {
> @@ -66,8 +407,19 @@ static const struct hwmon_chip_info hwm_chip_info = {
>  };
>  
>  static void
> -hwm_get_preregistration_info(struct xe_device *xe)
> +hwm_get_preregistration_info(struct hwm_drvdata *ddat)

Why change the function argument?

>  {
> +	struct xe_hwmon *hwmon = ddat->hwmon;
> +	u32 val_sku_unit = 0;
> +	int ret;
> +
> +	ret = process_hwmon_reg(ddat, pkg_power_sku_unit, reg_read, &val_sku_unit, 0, 0);
> +	/*
> +	 * The contents of register pkg_power_sku_unit do not change,
> +	 * so read it once and store the shift values.
> +	 */
> +	if (!ret)
> +		hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
>  }
>  
>  void xe_hwmon_register(struct xe_device *xe)
> @@ -89,18 +441,24 @@ void xe_hwmon_register(struct xe_device *xe)
>  	mutex_init(&hwmon->hwmon_lock);
>  	ddat = &hwmon->ddat;
>  
> +	/* primary GT to access device level properties */
> +	ddat->gt = xe->tiles[0].primary_gt;
> +
>  	ddat->hwmon = hwmon;
>  	snprintf(ddat->name, sizeof(ddat->name), "xe");
>  
> -	hwm_get_preregistration_info(xe);
> +	init_waitqueue_head(&ddat->waitq);
>  
> -	drm_dbg(&xe->drm, "Register HWMON interface\n");
> +	hwm_get_preregistration_info(ddat);
>  
> -	/*  hwmon_dev points to device hwmon<i> */
> +	drm_dbg(&xe->drm, "Register xe hwmon interface\n");
> +
> +	/* hwmon_dev points to device hwmon<i> */
>  	hwmon_dev = devm_hwmon_device_register_with_info(dev, ddat->name,
>  							 ddat,
>  							 &hwm_chip_info,
>  							 NULL);
> +

Unrelated, probably delete,

>  	if (IS_ERR(hwmon_dev)) {
>  		drm_warn(&xe->drm, "Fail to register xe hwmon\n");

Missed this the prior patch but include the error value in the print.

>  		xe->hwmon = NULL;
> diff --git a/drivers/gpu/drm/xe/xe_hwmon.h b/drivers/gpu/drm/xe/xe_hwmon.h
> index a078eeb0a68b..a5dc693569c5 100644
> --- a/drivers/gpu/drm/xe/xe_hwmon.h
> +++ b/drivers/gpu/drm/xe/xe_hwmon.h
> @@ -14,9 +14,13 @@ struct xe_device;
>  #if IS_REACHABLE(CONFIG_HWMON)
>  void xe_hwmon_register(struct xe_device *xe);
>  void xe_hwmon_unregister(struct xe_device *xe);
> +void xe_hwmon_power_max_disable(struct xe_device *xe, bool *old);
> +void xe_hwmon_power_max_restore(struct xe_device *xe, bool old);
>  #else
>  static inline void xe_hwmon_register(struct xe_device *xe) { };
>  static inline void xe_hwmon_unregister(struct xe_device *xe) { };
> +static inline void xe_hwmon_power_max_disable(struct xe_device *xe, bool *old) { };
> +static inline void xe_hwmon_power_max_restore(struct xe_device *xe, bool old) { };
>  #endif
>  
>  #endif /* __XE_HWMON_H__ */
> diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c
> index 70eabf567156..9df5a3a85dc3 100644
> --- a/drivers/gpu/drm/xe/xe_uc.c
> +++ b/drivers/gpu/drm/xe/xe_uc.c
> @@ -13,6 +13,7 @@
>  #include "xe_huc.h"
>  #include "xe_uc_fw.h"
>  #include "xe_wopcm.h"
> +#include "xe_hwmon.h"
>  
>  static struct xe_gt *
>  uc_to_gt(struct xe_uc *uc)
> @@ -127,11 +128,15 @@ int xe_uc_init_hwconfig(struct xe_uc *uc)
>  int xe_uc_init_hw(struct xe_uc *uc)
>  {
>  	int ret;
> +	bool pl1en;
>  
>  	/* GuC submission not enabled, nothing to do */
>  	if (!xe_device_guc_submission_enabled(uc_to_xe(uc)))
>  		return 0;
>  
> +	/* Disable a potentially low PL1 power limit to allow freq to be raised */
> +	xe_hwmon_power_max_disable(uc_to_xe(uc), &pl1en);

Don't love the pass by reference, how about 'pl1en =
xe_hwmon_power_max_disable(...'

Matt

> +
>  	ret = xe_uc_sanitize_reset(uc);
>  	if (ret)
>  		return ret;
> @@ -160,6 +165,7 @@ int xe_uc_init_hw(struct xe_uc *uc)
>  	if (ret)
>  		return ret;
>  
> +	xe_hwmon_power_max_restore(uc_to_xe(uc), pl1en);

Add line of white space.

Matt

>  	/* We don't fail the driver load if HuC fails to auth, but let's warn */
>  	ret = xe_huc_auth(&uc->huc);
>  	XE_WARN_ON(ret);
> -- 
> 2.25.1
>
Andi Shyti June 29, 2023, 2:09 p.m. UTC | #2
Hi Matt and Badal,

[...]

> > +};
> > +
> > +enum hwm_reg_operation {
> > +	reg_read,
> > +	reg_write,
> > +	reg_rmw,
> 
> Upper case?
> 
> > +};
> > +
> 
> s/hwm_reg/xe_hwm_reg

I agree with Matt here... throughout this series of patches the
naming is very confusing, sometimes starting with xe_*, sometimes
with hwm_*, sometimes with hwmon_*... there is no consistency.

Please, Badal, choose a consistent prefix and stick with it for
every function and global definition.

Matt suggested xe_hwmon_*, I'm fine with it.

[...]

> >  struct hwm_drvdata {
> >  	struct xe_hwmon *hwmon;
> >  	struct device *hwmon_dev;
> > +	struct xe_gt *gt;
> >  	char name[12];
> > +	bool reset_in_progress;
> > +	wait_queue_head_t waitq;
> >  };
> >  
> >  struct xe_hwmon {
> >  	struct hwm_drvdata ddat;
> >  	struct mutex hwmon_lock;
> > +	int scl_shift_power;
> >  };
> >
> 
> Same as previous patch, 1 struct seems like a better idea to me.

I made the same comment in the previous patch.

[...]

> > +	switch (reg_name) {
> > +	case pkg_rapl_limit:
> > +		if (xe->info.platform == XE_DG2)
> > +			return PCU_CR_PACKAGE_RAPL_LIMIT;
> > +		else if (xe->info.platform == XE_PVC)
> > +			return PVC_GT0_PACKAGE_RAPL_LIMIT;
> > +		break;
> > +	case pkg_power_sku:
> > +		if (xe->info.platform == XE_DG2)
> > +			return PCU_CR_PACKAGE_POWER_SKU;
> > +		else if (xe->info.platform == XE_PVC)
> > +			return PVC_GT0_PACKAGE_POWER_SKU;
> > +		break;
> > +	case pkg_power_sku_unit:
> > +		if (xe->info.platform == XE_DG2)
> > +			return PCU_CR_PACKAGE_POWER_SKU_UNIT;
> > +		else if (xe->info.platform == XE_PVC)
> > +			return PVC_GT0_PACKAGE_POWER_SKU_UNIT;
> > +		break;
> > +	default:
> 
> BUG_ON or WARN_ON saying not possible?

MISSING_CASE() is in i915_utils.h, perhaps we can move it to a
more generic place... it would come in handy here.

> > +static int hwm_power_max_read(struct hwm_drvdata *ddat, long *value)
> 
> The return value is always 0, why not return value?
> 
> s/hwm_power_max_read/xe_hwmon_power_max_read
> 
> > +{
> > +	struct xe_hwmon *hwmon = ddat->hwmon;
> > +	u32 reg_val;
> > +	u64 r, min, max;
> > +
> > +	xe_device_mem_access_get(gt_to_xe(ddat->gt));
> > +
> 
> Same as above, use xe_device_assert_mem_access.
> 
> > +	process_hwmon_reg(ddat, pkg_rapl_limit, reg_read, &reg_val, 0, 0);
> > +	/* Check if PL1 limit is disabled */
> > +	if (!(reg_val & PKG_PWR_LIM_1_EN)) {
> > +		*value = PL1_DISABLE;
> > +		xe_device_mem_access_put(gt_to_xe(ddat->gt));
> > +		return 0;
> > +	}
> > +
> > +	reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
> > +	*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
> > +
> > +	process_hwmon_reg_read64(ddat, pkg_power_sku, &r);
> > +	min = REG_FIELD_GET(PKG_MIN_PWR, r);
> > +	min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
> > +	max = REG_FIELD_GET(PKG_MAX_PWR, r);
> > +	max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);
> > +
> > +	if (min && max)
> > +		*value = clamp_t(u64, *value, min, max);
> > +
> > +	xe_device_mem_access_put(gt_to_xe(ddat->gt));
> > +	return 0;
> > +}
> > +
> > +static int hwm_power_max_write(struct hwm_drvdata *ddat, long value)
> > +{
> > +	struct xe_hwmon *hwmon = ddat->hwmon;
> > +	DEFINE_WAIT(wait);
> > +	int ret = 0;
> > +	u32 nval;
> > +
> > +	/* Block waiting for GuC reset to complete when needed */
> > +	for (;;) {

with a do...while() you shouldn't need a for(;;)... your choice,
not going to beat on that.

> > +		mutex_lock(&hwmon->hwmon_lock);
> > +
> > +		prepare_to_wait(&ddat->waitq, &wait, TASK_INTERRUPTIBLE);
> > +
> > +		if (!hwmon->ddat.reset_in_progress)
> > +			break;
> > +
> > +		if (signal_pending(current)) {
> > +			ret = -EINTR;
> > +			break;

cough! cough! unlock! cough! cough!

> > +		}
> > +
> > +		mutex_unlock(&hwmon->hwmon_lock);
> > +
> > +		schedule();
> > +	}
> > +	finish_wait(&ddat->waitq, &wait);
> > +	if (ret)
> > +		goto unlock;
> 
> Anyway to not open code this? We similar in with
> xe_guc_submit_reset_wait, could we expose a global reset in progress in
> function which we can expose at the gt level?
> 
> > +
> > +	xe_device_mem_access_get(gt_to_xe(ddat->gt));
> > +
> 
> This certainly is an outer most thing, e.g. doing this under
> hwmon->hwmon_lock seems dangerous. Again the upper levels of the stack
> should do the xe_device_mem_access_get, which it does. Just pointing out
> doing xe_device_mem_access_get/put under a lock isn't a good idea.
> 
> Also the the loop which acquires hwmon->hwmon_lock is confusing too.
> 
> > +	/* Disable PL1 limit and verify, as limit cannot be disabled on all platforms */
> > +	if (value == PL1_DISABLE) {
> > +		process_hwmon_reg(ddat, pkg_rapl_limit, reg_rmw, &nval,
> > +				  PKG_PWR_LIM_1_EN, 0);
> > +		process_hwmon_reg(ddat, pkg_rapl_limit, reg_read, &nval,
> > +				  PKG_PWR_LIM_1_EN, 0);
> > +
> > +		if (nval & PKG_PWR_LIM_1_EN)
> > +			ret = -ENODEV;
> > +		goto exit;

cough! cough! lock! cough! cough!

> > +	}
> > +
> > +	/* Computation in 64-bits to avoid overflow. Round to nearest. */
> > +	nval = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
> > +	nval = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, nval);
> > +
> > +	process_hwmon_reg(ddat, pkg_rapl_limit, reg_rmw, &nval,
> > +			  PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, nval);
> > +exit:
> > +	xe_device_mem_access_put(gt_to_xe(ddat->gt));
> > +unlock:
> > +	mutex_unlock(&hwmon->hwmon_lock);
> > +

mmhhh???... jokes apart, this is so error prone that it will
deadlock as soon as someone thinks of editing this file :)

It already worried me in the first part.

Please, as Matt said, have a more linear locking here.

[...]

Andi
Nilawar, Badal July 6, 2023, 10:36 a.m. UTC | #3
Hi Matt,

On 29-06-2023 05:48, Matthew Brost wrote:
> On Wed, Jun 28, 2023 at 12:00:39AM +0530, Badal Nilawar wrote:
>> Expose power_max (pl1) and power_rated_max (tdp) attributes.
>> This is port from i915 hwmon.
>>
> 
> Not expert in this domain but this statement makes be nervous as
> generally shouldn't just blindly port things from the i915. Doing an
> initial review but will need a domain expert too.
This is not blindly ported. Just for reference I added that comment in
case someone wants to see the i915 implementation. I will move that
comment to the cover letter.
> 
>> v2:
>>    - Move rpm calls (xe_device_mem_access_get/put) to hwmon functions from
>>      process_hwmon_reg to avoid multiple rpm entry exits during consecutive
>>      reg accesses
>>    - Fix review comments (Riana)
>>
>> Signed-off-by: Badal Nilawar <badal.nilawar@intel.com>
>> ---
>>   .../ABI/testing/sysfs-driver-intel-xe-hwmon   |  22 ++
>>   drivers/gpu/drm/xe/regs/xe_gt_regs.h          |   4 +
>>   drivers/gpu/drm/xe/regs/xe_mchbar_regs.h      |  34 ++
>>   drivers/gpu/drm/xe/xe_hwmon.c                 | 372 +++++++++++++++++-
>>   drivers/gpu/drm/xe/xe_hwmon.h                 |   4 +
>>   drivers/gpu/drm/xe/xe_uc.c                    |   6 +
>>   6 files changed, 435 insertions(+), 7 deletions(-)
>>   create mode 100644 Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
>>   create mode 100644 drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
>>
>> diff --git a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
>> new file mode 100644
>> index 000000000000..ff3465195870
>> --- /dev/null
>> +++ b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
>> @@ -0,0 +1,22 @@
>> +What:		/sys/devices/.../hwmon/hwmon<i>/power1_max
>> +Date:		July 2023
> 
> Future?
Considering the review time frame, I added a future date.
> 
>> +KernelVersion:	6.3
>> +Contact:	intel-gfx@lists.freedesktop.org
> 
> s/intel-gfx/intel-xe
Sure.
> 
>> +Description:	RW. Card reactive sustained  (PL1/Tau) power limit in microwatts.
>> +
>> +		The power controller will throttle the operating frequency
>> +		if the power averaged over a window (typically seconds)
>> +		exceeds this limit. A read value of 0 means that the PL1
>> +		power limit is disabled, writing 0 disables the
>> +		limit. Writing values > 0 will enable the power limit.
>> +
>> +		Only supported for particular Intel xe graphics platforms.
>> +
>> +What:		/sys/devices/.../hwmon/hwmon<i>/power1_rated_max
>> +Date:		July 2023
>> +KernelVersion:	6.3
>> +Contact:	intel-gfx@lists.freedesktop.org
>> +Description:	RO. Card default power limit (default TDP setting).
>> +
>> +		Only supported for particular Intel xe graphics platforms.
>> +
>> diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
>> index d654f3311351..eb7210afbd2c 100644
>> --- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
>> +++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
>> @@ -397,4 +397,8 @@
>>   #define XEHPC_BCS5_BCS6_INTR_MASK		XE_REG(0x190118)
>>   #define XEHPC_BCS7_BCS8_INTR_MASK		XE_REG(0x19011c)
>>   
>> +#define PVC_GT0_PACKAGE_RAPL_LIMIT		XE_REG(0x281008)
>> +#define PVC_GT0_PACKAGE_POWER_SKU_UNIT		XE_REG(0x281068)
>> +#define PVC_GT0_PACKAGE_POWER_SKU		XE_REG(0x281080)
>> +
>>   #endif
>> diff --git a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
>> new file mode 100644
>> index 000000000000..cb2d49b5c8a9
>> --- /dev/null
>> +++ b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
>> @@ -0,0 +1,34 @@
>> +/* SPDX-License-Identifier: MIT */
>> +/*
>> + * Copyright © 2023 Intel Corporation
>> + */
>> +
>> +#ifndef _XE_MCHBAR_REGS_H__
>> +#define _XE_MCHBAR_REGS_H_
>> +
>> +#include "regs/xe_reg_defs.h"
>> +
>> +/*
>> + * MCHBAR mirror.
>> + *
>> + * This mirrors the MCHBAR MMIO space whose location is determined by
>> + * device 0 function 0's pci config register 0x44 or 0x48 and matches it in
>> + * every way.
>> + */
>> +
>> +#define MCHBAR_MIRROR_BASE_SNB			0x140000
>> +
>> +#define PCU_CR_PACKAGE_POWER_SKU		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5930)
>> +#define   PKG_PKG_TDP				GENMASK_ULL(14, 0)
>> +#define   PKG_MIN_PWR				GENMASK_ULL(30, 16)
>> +#define   PKG_MAX_PWR				GENMASK_ULL(46, 32)
>> +
>> +#define PCU_CR_PACKAGE_POWER_SKU_UNIT		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5938)
>> +#define   PKG_PWR_UNIT				REG_GENMASK(3, 0)
>> +
>> +#define PCU_CR_PACKAGE_RAPL_LIMIT		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x59a0)
>> +#define   PKG_PWR_LIM_1				REG_GENMASK(14, 0)
>> +#define   PKG_PWR_LIM_1_EN			REG_BIT(15)
>> +
>> +#endif
>> +
>> diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
>> index 8f653fdf4ad5..a4fba29d5d5a 100644
>> --- a/drivers/gpu/drm/xe/xe_hwmon.c
>> +++ b/drivers/gpu/drm/xe/xe_hwmon.c
>> @@ -5,53 +5,394 @@
>>   
>>   #include <linux/hwmon.h>
>>   
>> +#include "regs/xe_mchbar_regs.h"
>>   #include "regs/xe_gt_regs.h"
>>   #include "xe_device.h"
>>   #include "xe_hwmon.h"
>> +#include "xe_mmio.h"
>> +#include "xe_gt.h"
> 
> Nit - includes should be in alphabetical order.
Ok
> 
>> +
>> +enum hwm_reg_name {
>> +	pkg_rapl_limit,
>> +	pkg_power_sku,
>> +	pkg_power_sku_unit,
> 
> Upper case? I guess hwmon_power_max is lower case so is that the
> convention?
Yes, but can be made Upper case.
> 
>> +};
>> +
>> +enum hwm_reg_operation {
>> +	reg_read,
>> +	reg_write,
>> +	reg_rmw,
> 
> Upper case?
Ok, I will change this to upper case.
> 
>> +};
>> +
> 
> s/hwm_reg/xe_hwm_reg
> 
>> +/*
>> + * SF_* - scale factors for particular quantities according to hwmon spec.
>> + * - power  - microwatts
>> + */
>> +#define SF_POWER	1000000
>>   
>>   struct hwm_drvdata {
>>   	struct xe_hwmon *hwmon;
>>   	struct device *hwmon_dev;
>> +	struct xe_gt *gt;
>>   	char name[12];
>> +	bool reset_in_progress;
>> +	wait_queue_head_t waitq;
>>   };
>>   
>>   struct xe_hwmon {
>>   	struct hwm_drvdata ddat;
>>   	struct mutex hwmon_lock;
>> +	int scl_shift_power;
>>   };
>>
> 
> Same as previous patch, 1 struct seems like a better idea to me.
> 
>> +struct xe_reg hwm_get_reg(struct hwm_drvdata *ddat, enum hwm_reg_name reg_name)
> 
> s/hwm_get_reg/xe_hwmon_get_reg
As mentioned in previous patch will follow xe_hwmon_ for global functions.
> 
>> +{
>> +	struct xe_device *xe = gt_to_xe(ddat->gt);
>> +
>> +	switch (reg_name) {
>> +	case pkg_rapl_limit:
>> +		if (xe->info.platform == XE_DG2)
>> +			return PCU_CR_PACKAGE_RAPL_LIMIT;
>> +		else if (xe->info.platform == XE_PVC)
>> +			return PVC_GT0_PACKAGE_RAPL_LIMIT;
>> +		break;
>> +	case pkg_power_sku:
>> +		if (xe->info.platform == XE_DG2)
>> +			return PCU_CR_PACKAGE_POWER_SKU;
>> +		else if (xe->info.platform == XE_PVC)
>> +			return PVC_GT0_PACKAGE_POWER_SKU;
>> +		break;
>> +	case pkg_power_sku_unit:
>> +		if (xe->info.platform == XE_DG2)
>> +			return PCU_CR_PACKAGE_POWER_SKU_UNIT;
>> +		else if (xe->info.platform == XE_PVC)
>> +			return PVC_GT0_PACKAGE_POWER_SKU_UNIT;
>> +		break;
>> +	default:
> 
> BUG_ON or WARN_ON saying not possible?
Not needed. For an attribute that isn't present it should return XE_REG(0).
> 
>> +		break;
>> +	}
>> +
>> +	return XE_REG(0);
>> +}
>> +
>> +int process_hwmon_reg(struct hwm_drvdata *ddat, enum hwm_reg_name reg_name,
>> +		      enum hwm_reg_operation operation, u32 *value,
>> +		      u32 clr, u32 set)
s/process_hwmon_reg/hwmon_process_reg
>> +{
>> +	struct xe_reg reg;
>> +	int ret = 0;
>> +
>> +	reg = hwm_get_reg(ddat, reg_name);
>> +
>> +	if (!reg.raw)
>> +		return -EOPNOTSUPP;
>> +
>> +	switch (operation) {
>> +	case reg_read:
>> +		*value = xe_mmio_read32(ddat->gt, reg);
>> +		break;
>> +	case reg_write:
>> +		xe_mmio_write32(ddat->gt, reg, *value);
>> +		break;
>> +	case reg_rmw:
>> +		*value = xe_mmio_rmw32(ddat->gt, reg, clr, set);
>> +		break;
>> +	default:
> 
> BUG_ON or WARN_ON saying not possible?
Will add WARN_ON here.
> 
>> +		ret = -EOPNOTSUPP;
>> +	}
>> +
>> +	return ret;
>> +}
>> +
>> +int process_hwmon_reg_read64(struct hwm_drvdata *ddat, enum hwm_reg_name reg_name, u64 *value)
>> +{
>> +	struct xe_reg reg;
>> +
>> +	reg = hwm_get_reg(ddat, reg_name);
>> +
>> +	if (!reg.raw)
>> +		return -EOPNOTSUPP;
>> +
>> +	xe_device_mem_access_get(gt_to_xe(ddat->gt));
> 
> Don't need this, the upper levels of the stack should have this. You
> could just do a xe_device_assert_mem_access here.
Agreed, I will remove this and wherever needed will use
xe_device_assert_mem_access.
> 
>> +
>> +	*value = xe_mmio_read64(ddat->gt, reg);
>> +
>> +	xe_device_mem_access_put(gt_to_xe(ddat->gt));
>> +
>> +	return 0;
>> +}
>> +
>> +#define PL1_DISABLE 0
>> +
>> +/*
>> + * HW allows arbitrary PL1 limits to be set but silently clamps these values to
>> + * "typical but not guaranteed" min/max values in rg.pkg_power_sku. Follow the
>> + * same pattern for sysfs, allow arbitrary PL1 limits to be set but display
>> + * clamped values when read.
>> + */
>> +static int hwm_power_max_read(struct hwm_drvdata *ddat, long *value)
> 
> The return value is always 0, why not return value?
0 is success.
static ssize_t hwmon_attr_show(struct device *dev,
                                struct device_attribute *devattr, char *buf)
{
         struct hwmon_device_attribute *hattr = to_hwmon_attr(devattr);
         long val;
         int ret;

         ret = hattr->ops->read(dev, hattr->type, hattr->attr, hattr->index,
                                &val);
         if (ret < 0)
                 return ret;

         trace_hwmon_attr_show(hattr->index + hwmon_attr_base(hattr->type),
                               hattr->name, val);

         return sprintf(buf, "%ld\n", val);
}
> 
> s/hwm_power_max_read/xe_hwmon_power_max_read
> 
>> +{
>> +	struct xe_hwmon *hwmon = ddat->hwmon;
>> +	u32 reg_val;
>> +	u64 r, min, max;
>> +
>> +	xe_device_mem_access_get(gt_to_xe(ddat->gt));
>> +
> 
> Same as above, use xe_device_assert_mem_access.
Not required here as it's added at the top-level function hwm_read. As 
mentioned in v2, I moved the rpm calls to the top-level functions but I 
forgot to remove them from here and other places. Will fix this in the 
next rev.
> 
>> +	process_hwmon_reg(ddat, pkg_rapl_limit, reg_read, &reg_val, 0, 0);
>> +	/* Check if PL1 limit is disabled */
>> +	if (!(reg_val & PKG_PWR_LIM_1_EN)) {
>> +		*value = PL1_DISABLE;
>> +		xe_device_mem_access_put(gt_to_xe(ddat->gt));
>> +		return 0;
>> +	}
>> +
>> +	reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
>> +	*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
>> +
>> +	process_hwmon_reg_read64(ddat, pkg_power_sku, &r);
>> +	min = REG_FIELD_GET(PKG_MIN_PWR, r);
>> +	min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
>> +	max = REG_FIELD_GET(PKG_MAX_PWR, r);
>> +	max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);
>> +
>> +	if (min && max)
>> +		*value = clamp_t(u64, *value, min, max);
>> +
>> +	xe_device_mem_access_put(gt_to_xe(ddat->gt));
This is also not needed.
>> +	return 0;
>> +}
>> +
>> +static int hwm_power_max_write(struct hwm_drvdata *ddat, long value)
>> +{
>> +	struct xe_hwmon *hwmon = ddat->hwmon;
>> +	DEFINE_WAIT(wait);
>> +	int ret = 0;
>> +	u32 nval;
>> +
>> +	/* Block waiting for GuC reset to complete when needed */
>> +	for (;;) {
>> +		mutex_lock(&hwmon->hwmon_lock);
>> +
>> +		prepare_to_wait(&ddat->waitq, &wait, TASK_INTERRUPTIBLE);
>> +
>> +		if (!hwmon->ddat.reset_in_progress)
>> +			break;
>> +
>> +		if (signal_pending(current)) {
>> +			ret = -EINTR;
>> +			break;
>> +		}
>> +
>> +		mutex_unlock(&hwmon->hwmon_lock);
>> +
>> +		schedule();
>> +	}
>> +	finish_wait(&ddat->waitq, &wait);
>> +	if (ret)
>> +		goto unlock;
> 
> Anyway to not open code this? We similar in with
> xe_guc_submit_reset_wait, could we expose a global reset in progress in
> function which we can expose at the gt level?
Sure, I will fix this.
> 
>> +
>> +	xe_device_mem_access_get(gt_to_xe(ddat->gt));
>> +
> 
> This certainly is an outer most thing, e.g. doing this under
> hwmon->hwmon_lock seems dangerous. Again the upper levels of the stack
> should do the xe_device_mem_access_get, which it does. Just pointing out
> doing xe_device_mem_access_get/put under a lock isn't a good idea.
Agreed, this is not needed as its called in top level function.
> 
> Also the the loop which acquires hwmon->hwmon_lock is confusing too.
> 
>> +	/* Disable PL1 limit and verify, as limit cannot be disabled on all platforms */
>> +	if (value == PL1_DISABLE) {
>> +		process_hwmon_reg(ddat, pkg_rapl_limit, reg_rmw, &nval,
>> +				  PKG_PWR_LIM_1_EN, 0);
>> +		process_hwmon_reg(ddat, pkg_rapl_limit, reg_read, &nval,
>> +				  PKG_PWR_LIM_1_EN, 0);
>> +
>> +		if (nval & PKG_PWR_LIM_1_EN)
>> +			ret = -ENODEV;
>> +		goto exit;
>> +	}
>> +
>> +	/* Computation in 64-bits to avoid overflow. Round to nearest. */
>> +	nval = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
>> +	nval = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, nval);
>> +
>> +	process_hwmon_reg(ddat, pkg_rapl_limit, reg_rmw, &nval,
>> +			  PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, nval);
>> +exit:
>> +	xe_device_mem_access_put(gt_to_xe(ddat->gt));
Not needed here.
>> +unlock:
>> +	mutex_unlock(&hwmon->hwmon_lock);
>> +
> 
> Again a clear lock / unlock is desirable.
> 
>> +	return 0;
>> +}
>> +
>> +static int hwm_power_rated_max_read(struct hwm_drvdata *ddat, long *value)
>> +{
> 
> s/hwm_power_rated_max_read/xe_hwmon_power_rated_max_read
> 
>> +	struct xe_hwmon *hwmon = ddat->hwmon;
>> +	u32 reg_val;
>> +
>> +	process_hwmon_reg(ddat, pkg_power_sku, reg_read, &reg_val, 0, 0);
>> +	reg_val = REG_FIELD_GET(PKG_PKG_TDP, reg_val);
>> +	*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
>> +
>> +	return 0;
>> +}
>> +
>>   static const struct hwmon_channel_info *hwm_info[] = {
> 
> s/hwm_info/xe_hwmon_info
Sure.
> 
>> +	HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX),
>>   	NULL
>>   };
>>   
>> +static umode_t
>> +hwm_power_is_visible(struct hwm_drvdata *ddat, u32 attr, int chan)
>> +{
> 
> s/hwm_power_is_visible/xe_hwmon_power_is_visible
Will prefer xe_hwmon prefix for global functions.
> 
>> +	u32 reg_val;
>> +
>> +	switch (attr) {
>> +	case hwmon_power_max:
>> +		return process_hwmon_reg(ddat, pkg_rapl_limit,
>> +					 reg_read, &reg_val, 0, 0) ? 0 : 0664;
>> +	case hwmon_power_rated_max:
>> +		return process_hwmon_reg(ddat, pkg_power_sku,
>> +					 reg_read, &reg_val, 0, 0) ? 0 : 0444;
>> +	default:
>> +		return 0;
>> +	}
>> +}
>> +
>> +static int
>> +hwm_power_read(struct hwm_drvdata *ddat, u32 attr, int chan, long *val)
>> +{
> 
> s/hwm_power_read/xe_hwmon_power_read
> 
>> +	switch (attr) {
>> +	case hwmon_power_max:
>> +		return hwm_power_max_read(ddat, val);
>> +	case hwmon_power_rated_max:
>> +		return hwm_power_rated_max_read(ddat, val);
>> +	default:
>> +		return -EOPNOTSUPP;
>> +	}
>> +}
>> +
>> +static int
>> +hwm_power_write(struct hwm_drvdata *ddat, u32 attr, int chan, long val)
>> +{
> 
> s/hwm_power_write/xe_hwmon_power_write
> 
>> +	switch (attr) {
>> +	case hwmon_power_max:
>> +		return hwm_power_max_write(ddat, val);
>> +	default:
>> +		return -EOPNOTSUPP;
>> +	}
>> +}
>> +
>> +void xe_hwmon_power_max_disable(struct xe_device *xe, bool *old)
>> +{
>> +	struct xe_hwmon *hwmon = xe->hwmon;
>> +	struct hwm_drvdata *ddat = &hwmon->ddat;
>> +	u32 r;
>> +
>> +	xe_device_mem_access_get(gt_to_xe(ddat->gt));
>> +
> 
> Upper level should have, use an assert if anything.
Agree. In fact I am thinking of removing this, as it is already called 
in the top-level functions.
> 
>> +	if (!hwmon || process_hwmon_reg(ddat, pkg_rapl_limit,
>> +					reg_read, &r, 0, 0))
>> +		return;
>> +
>> +	mutex_lock(&hwmon->hwmon_lock);
>> +
>> +	hwmon->ddat.reset_in_progress = true;
>> +
>> +	process_hwmon_reg(ddat, pkg_rapl_limit, reg_rmw, &r,
>> +			  PKG_PWR_LIM_1_EN, 0);
>> +	*old = !!(r & PKG_PWR_LIM_1_EN);
>> +
>> +	mutex_unlock(&hwmon->hwmon_lock);
>> +
>> +	xe_device_mem_access_put(gt_to_xe(ddat->gt));
>> +}
>> +
>> +void xe_hwmon_power_max_restore(struct xe_device *xe, bool old)
>> +{
>> +	struct xe_hwmon *hwmon = xe->hwmon;
>> +	struct hwm_drvdata *ddat = &hwmon->ddat;
>> +	u32 r;
>> +
>> +	xe_device_mem_access_get(gt_to_xe(ddat->gt));
> 
> Upper level should have, use an assert if anything.
Agree. Will remove this as the top-level function is already calling it.
> 
>> +
>> +	if (!hwmon || process_hwmon_reg(ddat, pkg_rapl_limit,
>> +					reg_read, &r, 0, 0))
>> +		return;
>> +
>> +	mutex_lock(&hwmon->hwmon_lock);
>> +
>> +	process_hwmon_reg(ddat, pkg_rapl_limit, reg_rmw, &r,
>> +			  PKG_PWR_LIM_1_EN, old ? PKG_PWR_LIM_1_EN : 0);
>> +
>> +	hwmon->ddat.reset_in_progress = false;
>> +	wake_up_all(&hwmon->ddat.waitq);
>> +
>> +	mutex_unlock(&hwmon->hwmon_lock);
>> +
>> +	xe_device_mem_access_put(gt_to_xe(ddat->gt));
>> +}
>> +
>>   static umode_t
>>   hwm_is_visible(const void *drvdata, enum hwmon_sensor_types type,
>>   	       u32 attr, int channel)
>>   {
>> +	struct hwm_drvdata *ddat = (struct hwm_drvdata *)drvdata;
>> +	int ret;
>> +
>> +	xe_device_mem_access_get(gt_to_xe(ddat->gt));
>> +
> 
> Nit, include the get / put in previous patch.
Ok.
> 
>>   	switch (type) {
>> +	case hwmon_power:
>> +		ret = hwm_power_is_visible(ddat, attr, channel);
>> +		break;
>>   	default:
>> -		return 0;
>> +		ret = 0;
>>   	}
>> +
>> +	xe_device_mem_access_put(gt_to_xe(ddat->gt));
>> +
>> +	return ret;
>>   }
>>   
>>   static int
>>   hwm_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
>>   	 int channel, long *val)
>>   {
>> +	struct hwm_drvdata *ddat = dev_get_drvdata(dev);
>> +	int ret;
>> +
>> +	xe_device_mem_access_get(gt_to_xe(ddat->gt));
> 
> Nit, include the get / put in previous patch.
Ok
> 
>> +
>>   	switch (type) {
>> +	case hwmon_power:
>> +		ret = hwm_power_read(ddat, attr, channel, val);
>> +		break;
>>   	default:
>> -		return -EOPNOTSUPP;
>> +		ret = -EOPNOTSUPP;
>> +		break;
>>   	}
>> +
>> +	xe_device_mem_access_put(gt_to_xe(ddat->gt));
>> +
>> +	return ret;
>>   }
>>   
>>   static int
>>   hwm_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
>>   	  int channel, long val)
>>   {
>> +	struct hwm_drvdata *ddat = dev_get_drvdata(dev);
>> +	int ret;
>> +
>> +	xe_device_mem_access_get(gt_to_xe(ddat->gt));
> 
> Nit, include the get / put in previous patch.
Ok
> 
>> +
>>   	switch (type) {
>> +	case hwmon_power:
>> +		ret = hwm_power_write(ddat, attr, channel, val);
>> +		break;
>>   	default:
>> -		return -EOPNOTSUPP;
>> +		ret = -EOPNOTSUPP;
>> +		break;
>>   	}
>> +
>> +	xe_device_mem_access_put(gt_to_xe(ddat->gt));
>> +
>> +	return ret;
>>   }
>>   
>>   static const struct hwmon_ops hwm_ops = {
>> @@ -66,8 +407,19 @@ static const struct hwmon_chip_info hwm_chip_info = {
>>   };
>>   
>>   static void
>> -hwm_get_preregistration_info(struct xe_device *xe)
>> +hwm_get_preregistration_info(struct hwm_drvdata *ddat)
> 
> Why change the function argument?
> 
>>   {
>> +	struct xe_hwmon *hwmon = ddat->hwmon;
>> +	u32 val_sku_unit = 0;
>> +	int ret;
>> +
>> +	ret = process_hwmon_reg(ddat, pkg_power_sku_unit, reg_read, &val_sku_unit, 0, 0);
>> +	/*
>> +	 * The contents of register pkg_power_sku_unit do not change,
>> +	 * so read it once and store the shift values.
>> +	 */
>> +	if (!ret)
>> +		hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
>>   }
>>   
>>   void xe_hwmon_register(struct xe_device *xe)
>> @@ -89,18 +441,24 @@ void xe_hwmon_register(struct xe_device *xe)
>>   	mutex_init(&hwmon->hwmon_lock);
>>   	ddat = &hwmon->ddat;
>>   
>> +	/* primary GT to access device level properties */
>> +	ddat->gt = xe->tiles[0].primary_gt;
>> +
>>   	ddat->hwmon = hwmon;
>>   	snprintf(ddat->name, sizeof(ddat->name), "xe");
>>   
>> -	hwm_get_preregistration_info(xe);
>> +	init_waitqueue_head(&ddat->waitq);
>>   
>> -	drm_dbg(&xe->drm, "Register HWMON interface\n");
>> +	hwm_get_preregistration_info(ddat);
>>   
>> -	/*  hwmon_dev points to device hwmon<i> */
>> +	drm_dbg(&xe->drm, "Register xe hwmon interface\n");
>> +
>> +	/* hwmon_dev points to device hwmon<i> */
>>   	hwmon_dev = devm_hwmon_device_register_with_info(dev, ddat->name,
>>   							 ddat,
>>   							 &hwm_chip_info,
>>   							 NULL);
>> +
> 
> Unrelated, probably delete,
> 
>>   	if (IS_ERR(hwmon_dev)) {
>>   		drm_warn(&xe->drm, "Fail to register xe hwmon\n");
> 
> Missed this the prior patch but include the error value in the print.
Sure.
> 
>>   		xe->hwmon = NULL;
>> diff --git a/drivers/gpu/drm/xe/xe_hwmon.h b/drivers/gpu/drm/xe/xe_hwmon.h
>> index a078eeb0a68b..a5dc693569c5 100644
>> --- a/drivers/gpu/drm/xe/xe_hwmon.h
>> +++ b/drivers/gpu/drm/xe/xe_hwmon.h
>> @@ -14,9 +14,13 @@ struct xe_device;
>>   #if IS_REACHABLE(CONFIG_HWMON)
>>   void xe_hwmon_register(struct xe_device *xe);
>>   void xe_hwmon_unregister(struct xe_device *xe);
>> +void xe_hwmon_power_max_disable(struct xe_device *xe, bool *old);
>> +void xe_hwmon_power_max_restore(struct xe_device *xe, bool old);
>>   #else
>>   static inline void xe_hwmon_register(struct xe_device *xe) { };
>>   static inline void xe_hwmon_unregister(struct xe_device *xe) { };
>> +static inline void xe_hwmon_power_max_disable(struct xe_device *xe, bool *old) { };
>> +static inline void xe_hwmon_power_max_restore(struct xe_device *xe, bool old) { };
>>   #endif
>>   
>>   #endif /* __XE_HWMON_H__ */
>> diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c
>> index 70eabf567156..9df5a3a85dc3 100644
>> --- a/drivers/gpu/drm/xe/xe_uc.c
>> +++ b/drivers/gpu/drm/xe/xe_uc.c
>> @@ -13,6 +13,7 @@
>>   #include "xe_huc.h"
>>   #include "xe_uc_fw.h"
>>   #include "xe_wopcm.h"
>> +#include "xe_hwmon.h"
>>   
>>   static struct xe_gt *
>>   uc_to_gt(struct xe_uc *uc)
>> @@ -127,11 +128,15 @@ int xe_uc_init_hwconfig(struct xe_uc *uc)
>>   int xe_uc_init_hw(struct xe_uc *uc)
>>   {
>>   	int ret;
>> +	bool pl1en;
>>   
>>   	/* GuC submission not enabled, nothing to do */
>>   	if (!xe_device_guc_submission_enabled(uc_to_xe(uc)))
>>   		return 0;
>>   
>> +	/* Disable a potentially low PL1 power limit to allow freq to be raised */
>> +	xe_hwmon_power_max_disable(uc_to_xe(uc), &pl1en);
> 
> Don't love the pass by reference, how about 'pl1en =
> xe_hwmon_power_max_disable(...'
Sure, I will remove pass by reference.
> 
> Matt
> 
>> +
>>   	ret = xe_uc_sanitize_reset(uc);
>>   	if (ret)
>>   		return ret;
>> @@ -160,6 +165,7 @@ int xe_uc_init_hw(struct xe_uc *uc)
>>   	if (ret)
>>   		return ret;
>>   
>> +	xe_hwmon_power_max_restore(uc_to_xe(uc), pl1en);
> 
> Add line of white space.
Ok.

Regards,
Badal
> 
> Matt
> 
>>   	/* We don't fail the driver load if HuC fails to auth, but let's warn */
>>   	ret = xe_huc_auth(&uc->huc);
>>   	XE_WARN_ON(ret);
>> -- 
>> 2.25.1
>>
Dixit, Ashutosh Aug. 15, 2023, 11:20 p.m. UTC | #4
On Thu, 29 Jun 2023 07:09:31 -0700, Andi Shyti wrote:
>

Hi Badal/Andi/Matt,

> > > +static int hwm_power_max_write(struct hwm_drvdata *ddat, long value)
> > > +{
> > > +	struct xe_hwmon *hwmon = ddat->hwmon;
> > > +	DEFINE_WAIT(wait);
> > > +	int ret = 0;
> > > +	u32 nval;
> > > +
> > > +	/* Block waiting for GuC reset to complete when needed */
> > > +	for (;;) {
>
> with a do...while() you shouldn't need a for(;;)... your choice,
> not going to beat on that.
>
> > > +		mutex_lock(&hwmon->hwmon_lock);
> > > +
> > > +		prepare_to_wait(&ddat->waitq, &wait, TASK_INTERRUPTIBLE);
> > > +
> > > +		if (!hwmon->ddat.reset_in_progress)
> > > +			break;
> > > +
> > > +		if (signal_pending(current)) {
> > > +			ret = -EINTR;
> > > +			break;
>
> cough! cough! unlock! cough! cough!

Why? It's fine as is.

>
> > > +		}
> > > +
> > > +		mutex_unlock(&hwmon->hwmon_lock);
> > > +
> > > +		schedule();
> > > +	}
> > > +	finish_wait(&ddat->waitq, &wait);
> > > +	if (ret)
> > > +		goto unlock;
> >
> > Anyway to not open code this? We similar in with
> > xe_guc_submit_reset_wait, could we expose a global reset in progress in
> > function which we can expose at the gt level?

I don't know of any way to not open code this which is guaranteed to not
deadlock (not to say there are no other ways).

> >
> > > +
> > > +	xe_device_mem_access_get(gt_to_xe(ddat->gt));
> > > +
> >
> > This certainly is an outer most thing, e.g. doing this under
> > hwmon->hwmon_lock seems dangerous. Again the upper levels of the stack
> > should do the xe_device_mem_access_get, which it does. Just pointing out
> > doing xe_device_mem_access_get/put under a lock isn't a good idea.

Agree, this is the only change we should make to this code.

> >
> > Also the the loop which acquires hwmon->hwmon_lock is confusing too.

Confusing but correct.

> >
> > > +	/* Disable PL1 limit and verify, as limit cannot be disabled on all platforms */
> > > +	if (value == PL1_DISABLE) {
> > > +		process_hwmon_reg(ddat, pkg_rapl_limit, reg_rmw, &nval,
> > > +				  PKG_PWR_LIM_1_EN, 0);
> > > +		process_hwmon_reg(ddat, pkg_rapl_limit, reg_read, &nval,
> > > +				  PKG_PWR_LIM_1_EN, 0);
> > > +
> > > +		if (nval & PKG_PWR_LIM_1_EN)
> > > +			ret = -ENODEV;
> > > +		goto exit;
>
> cough! cough! lock! cough! cough!

Why? It's fine as is.

>
> > > +	}
> > > +
> > > +	/* Computation in 64-bits to avoid overflow. Round to nearest. */
> > > +	nval = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
> > > +	nval = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, nval);
> > > +
> > > +	process_hwmon_reg(ddat, pkg_rapl_limit, reg_rmw, &nval,
> > > +			  PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, nval);
> > > +exit:
> > > +	xe_device_mem_access_put(gt_to_xe(ddat->gt));
> > > +unlock:
> > > +	mutex_unlock(&hwmon->hwmon_lock);
> > > +
>
> mmhhh???... jokes apart this is so error prone that it will
> deadlock as soon as someone will think of editing this file :)

Why is it error prone and how will it deadlock? In fact this
prepare_to_wait/finish_wait pattern is widely used in the kernel (see
e.g. rpm_suspend) and is one of the few patterns guaranteed to not deadlock
(see also 6.2.5 "Advanced Sleeping" in LDD3 if needed). This is the same
code pattern we also implemented in i915 hwm_power_max_write.

In i915 first a scheme which held a mutex across GuC reset was
proposed. But that was then deemed to be risky and this complex scheme was
then implemented. Just to give some history.

Regarding editing the code, it's kernel code involving locking which needs
to be edited carefully, it's all confined to a single (or maybe a couple of
functions), but otherwise yes definitely not to mess around with :)

>
> It worried me already at the first part.
>
> Please, as Matt said, have a more linear locking here.

Afaiac I don't know of any other race-free way to do this other than what
was done in v2 (and in i915). So I want to discard the changes made in v3
and go back to the changes made in v2. If others can suggest other ways
that which they can guarantee are race-free and correct and can R-b that
code, that's fine.

But I can R-b the v2 code (with the only change of moving
xe_device_mem_access_get out of the lock). (Of course I am only talking
about R-b'ing the above scheme, other review comments may be valid).

Badal, also, if there are questions about this scheme, maybe we should move
this to a separate patch as was done in i915? We can just return -EAGAIN as
in 1b44019a93e2.

Thanks.
--
Ashutosh
Nilawar, Badal Aug. 18, 2023, 4:03 a.m. UTC | #5
On 16-08-2023 04:50, Dixit, Ashutosh wrote:
> On Thu, 29 Jun 2023 07:09:31 -0700, Andi Shyti wrote:
>>
> 
> Hi Badal/Andi/Matt,
> 
>>>> +static int hwm_power_max_write(struct hwm_drvdata *ddat, long value)
>>>> +{
>>>> +	struct xe_hwmon *hwmon = ddat->hwmon;
>>>> +	DEFINE_WAIT(wait);
>>>> +	int ret = 0;
>>>> +	u32 nval;
>>>> +
>>>> +	/* Block waiting for GuC reset to complete when needed */
>>>> +	for (;;) {
>>
>> with a do...while() you shouldn't need a for(;;)... your choice,
>> not going to beat on that.
>>
>>>> +		mutex_lock(&hwmon->hwmon_lock);
>>>> +
>>>> +		prepare_to_wait(&ddat->waitq, &wait, TASK_INTERRUPTIBLE);
>>>> +
>>>> +		if (!hwmon->ddat.reset_in_progress)
>>>> +			break;
>>>> +
>>>> +		if (signal_pending(current)) {
>>>> +			ret = -EINTR;
>>>> +			break;
>>
>> cough! cough! unlock! cough! cough!
> 
> Why? It's fine as is.
> 
>>
>>>> +		}
>>>> +
>>>> +		mutex_unlock(&hwmon->hwmon_lock);
>>>> +
>>>> +		schedule();
>>>> +	}
>>>> +	finish_wait(&ddat->waitq, &wait);
>>>> +	if (ret)
>>>> +		goto unlock;
>>>
>>> Anyway to not open code this? We similar in with
>>> xe_guc_submit_reset_wait, could we expose a global reset in progress in
>>> function which we can expose at the gt level?
> 
> I don't know of any way to not open code this which is guaranteed to not
> deadlock (not to say there are no other ways).
> 
>>>
>>>> +
>>>> +	xe_device_mem_access_get(gt_to_xe(ddat->gt));
>>>> +
>>>
>>> This certainly is an outer most thing, e.g. doing this under
>>> hwmon->hwmon_lock seems dangerous. Again the upper levels of the stack
>>> should do the xe_device_mem_access_get, which it does. Just pointing out
>>> doing xe_device_mem_access_get/put under a lock isn't a good idea.
> 
> Agree, this is the only change we should make to this code.
> 
>>>
>>> Also the the loop which acquires hwmon->hwmon_lock is confusing too.
> 
> Confusing but correct.
> 
>>>
>>>> +	/* Disable PL1 limit and verify, as limit cannot be disabled on all platforms */
>>>> +	if (value == PL1_DISABLE) {
>>>> +		process_hwmon_reg(ddat, pkg_rapl_limit, reg_rmw, &nval,
>>>> +				  PKG_PWR_LIM_1_EN, 0);
>>>> +		process_hwmon_reg(ddat, pkg_rapl_limit, reg_read, &nval,
>>>> +				  PKG_PWR_LIM_1_EN, 0);
>>>> +
>>>> +		if (nval & PKG_PWR_LIM_1_EN)
>>>> +			ret = -ENODEV;
>>>> +		goto exit;
>>
>> cough! cough! lock! cough! cough!
> 
> Why? It's fine as is.
> 
>>
>>>> +	}
>>>> +
>>>> +	/* Computation in 64-bits to avoid overflow. Round to nearest. */
>>>> +	nval = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
>>>> +	nval = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, nval);
>>>> +
>>>> +	process_hwmon_reg(ddat, pkg_rapl_limit, reg_rmw, &nval,
>>>> +			  PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, nval);
>>>> +exit:
>>>> +	xe_device_mem_access_put(gt_to_xe(ddat->gt));
>>>> +unlock:
>>>> +	mutex_unlock(&hwmon->hwmon_lock);
>>>> +
>>
>> mmhhh???... jokes apart this is so error prone that it will
>> deadlock as soon as someone will think of editing this file :)
> 
> Why is it error prone and how will it deadlock? In fact this
> prepare_to_wait/finish_wait pattern is widely used in the kernel (see
> e.g. rpm_suspend) and is one of the few patterns guaranteed to not deadlock
> (see also 6.2.5 "Advanced Sleeping" in LDD3 if needed). This is the same
> code pattern we also implemented in i915 hwm_power_max_write.
> 
> In i915 first a scheme which held a mutex across GuC reset was
> proposed. But that was then deemed to be risky and this complex scheme was
> then implemented. Just to give some history.
> 
> Regarding editing the code, it's kernel code involving locking which needs
> to be edited carefully, it's all confined to a single (or maybe a couple of
> functions), but otherwise yes definitely not to mess around with :)
> 
>>
>> It worried me already at the first part.
>>
>> Please, as Matt said, have a more linear locking here.
> 
> Afaiac I don't know of any other race-free way to do this other than what
> was done in v2 (and in i915). So I want to discard the changes made in v3
> and go back to the changes made in v2. If others can suggest other ways
> that which they can guarantee are race-free and correct and can R-b that
> code, that's fine.
> 
> But I can R-b the v2 code (with the only change of moving
> xe_device_mem_access_get out of the lock). (Of course I am only talking
> about R-b'ing the above scheme, other review comments may be valid).
> 
> Badal, also, if there are questions about this scheme, maybe we should move
> this to a separate patch as was done in i915? We can just return -EAGAIN as
> in 1b44019a93e2.

Thanks Ashutosh. I think for now I will drop the changes for "PL1 disable 
during GuC load" from this series and will handle it in a separate patch.

Regards,
Badal
> Thanks.
> --
> Ashutosh
Andi Shyti Aug. 18, 2023, 1:55 p.m. UTC | #6
Hi Ashutosh,

> > >
> > > Also the the loop which acquires hwmon->hwmon_lock is confusing too.
> 
> Confusing but correct.

Confusing is implicitly not correct.

It might be correct now, but in a few months someone else might miss
the point because it's confusing, misinterpret it, and send the
wrong code.

Reviewers going through the +/- diff of the commit will have a tough
time figuring it out.

Let's keep things easy and simple... there are easier paths for
locking here and we've been discussing them with Badal.

Andi
diff mbox series

Patch

diff --git a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
new file mode 100644
index 000000000000..ff3465195870
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
@@ -0,0 +1,22 @@ 
+What:		/sys/devices/.../hwmon/hwmon<i>/power1_max
+Date:		July 2023
+KernelVersion:	6.3
+Contact:	intel-gfx@lists.freedesktop.org
+Description:	RW. Card reactive sustained  (PL1/Tau) power limit in microwatts.
+
+		The power controller will throttle the operating frequency
+		if the power averaged over a window (typically seconds)
+		exceeds this limit. A read value of 0 means that the PL1
+		power limit is disabled, writing 0 disables the
+		limit. Writing values > 0 will enable the power limit.
+
+		Only supported for particular Intel xe graphics platforms.
+
+What:		/sys/devices/.../hwmon/hwmon<i>/power1_rated_max
+Date:		July 2023
+KernelVersion:	6.3
+Contact:	intel-gfx@lists.freedesktop.org
+Description:	RO. Card default power limit (default TDP setting).
+
+		Only supported for particular Intel xe graphics platforms.
+
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index d654f3311351..eb7210afbd2c 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -397,4 +397,8 @@ 
 #define XEHPC_BCS5_BCS6_INTR_MASK		XE_REG(0x190118)
 #define XEHPC_BCS7_BCS8_INTR_MASK		XE_REG(0x19011c)
 
+#define PVC_GT0_PACKAGE_RAPL_LIMIT		XE_REG(0x281008)
+#define PVC_GT0_PACKAGE_POWER_SKU_UNIT		XE_REG(0x281068)
+#define PVC_GT0_PACKAGE_POWER_SKU		XE_REG(0x281080)
+
 #endif
diff --git a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
new file mode 100644
index 000000000000..cb2d49b5c8a9
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
@@ -0,0 +1,34 @@ 
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_MCHBAR_REGS_H__
+#define _XE_MCHBAR_REGS_H_
+
+#include "regs/xe_reg_defs.h"
+
+/*
+ * MCHBAR mirror.
+ *
+ * This mirrors the MCHBAR MMIO space whose location is determined by
+ * device 0 function 0's pci config register 0x44 or 0x48 and matches it in
+ * every way.
+ */
+
+#define MCHBAR_MIRROR_BASE_SNB			0x140000
+
+#define PCU_CR_PACKAGE_POWER_SKU		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5930)
+#define   PKG_PKG_TDP				GENMASK_ULL(14, 0)
+#define   PKG_MIN_PWR				GENMASK_ULL(30, 16)
+#define   PKG_MAX_PWR				GENMASK_ULL(46, 32)
+
+#define PCU_CR_PACKAGE_POWER_SKU_UNIT		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5938)
+#define   PKG_PWR_UNIT				REG_GENMASK(3, 0)
+
+#define PCU_CR_PACKAGE_RAPL_LIMIT		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x59a0)
+#define   PKG_PWR_LIM_1				REG_GENMASK(14, 0)
+#define   PKG_PWR_LIM_1_EN			REG_BIT(15)
+
+#endif
+
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index 8f653fdf4ad5..a4fba29d5d5a 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -5,53 +5,394 @@ 
 
 #include <linux/hwmon.h>
 
+#include "regs/xe_mchbar_regs.h"
 #include "regs/xe_gt_regs.h"
 #include "xe_device.h"
 #include "xe_hwmon.h"
+#include "xe_mmio.h"
+#include "xe_gt.h"
+
+enum hwm_reg_name {
+	pkg_rapl_limit,
+	pkg_power_sku,
+	pkg_power_sku_unit,
+};
+
+enum hwm_reg_operation {
+	reg_read,
+	reg_write,
+	reg_rmw,
+};
+
+/*
+ * SF_* - scale factors for particular quantities according to hwmon spec.
+ * - power  - microwatts
+ */
+#define SF_POWER	1000000
 
 struct hwm_drvdata {
 	struct xe_hwmon *hwmon;
 	struct device *hwmon_dev;
+	struct xe_gt *gt;
 	char name[12];
+	bool reset_in_progress;
+	wait_queue_head_t waitq;
 };
 
 struct xe_hwmon {
 	struct hwm_drvdata ddat;
 	struct mutex hwmon_lock;
+	int scl_shift_power;
 };
 
+struct xe_reg hwm_get_reg(struct hwm_drvdata *ddat, enum hwm_reg_name reg_name)
+{
+	struct xe_device *xe = gt_to_xe(ddat->gt);
+
+	switch (reg_name) {
+	case pkg_rapl_limit:
+		if (xe->info.platform == XE_DG2)
+			return PCU_CR_PACKAGE_RAPL_LIMIT;
+		else if (xe->info.platform == XE_PVC)
+			return PVC_GT0_PACKAGE_RAPL_LIMIT;
+		break;
+	case pkg_power_sku:
+		if (xe->info.platform == XE_DG2)
+			return PCU_CR_PACKAGE_POWER_SKU;
+		else if (xe->info.platform == XE_PVC)
+			return PVC_GT0_PACKAGE_POWER_SKU;
+		break;
+	case pkg_power_sku_unit:
+		if (xe->info.platform == XE_DG2)
+			return PCU_CR_PACKAGE_POWER_SKU_UNIT;
+		else if (xe->info.platform == XE_PVC)
+			return PVC_GT0_PACKAGE_POWER_SKU_UNIT;
+		break;
+	default:
+		break;
+	}
+
+	return XE_REG(0);
+}
+
+int process_hwmon_reg(struct hwm_drvdata *ddat, enum hwm_reg_name reg_name,
+		      enum hwm_reg_operation operation, u32 *value,
+		      u32 clr, u32 set)
+{
+	struct xe_reg reg;
+	int ret = 0;
+
+	reg = hwm_get_reg(ddat, reg_name);
+
+	if (!reg.raw)
+		return -EOPNOTSUPP;
+
+	switch (operation) {
+	case reg_read:
+		*value = xe_mmio_read32(ddat->gt, reg);
+		break;
+	case reg_write:
+		xe_mmio_write32(ddat->gt, reg, *value);
+		break;
+	case reg_rmw:
+		*value = xe_mmio_rmw32(ddat->gt, reg, clr, set);
+		break;
+	default:
+		ret = -EOPNOTSUPP;
+	}
+
+	return ret;
+}
+
+int process_hwmon_reg_read64(struct hwm_drvdata *ddat, enum hwm_reg_name reg_name, u64 *value)
+{
+	struct xe_reg reg;
+
+	reg = hwm_get_reg(ddat, reg_name);
+
+	if (!reg.raw)
+		return -EOPNOTSUPP;
+
+	xe_device_mem_access_get(gt_to_xe(ddat->gt));
+
+	*value = xe_mmio_read64(ddat->gt, reg);
+
+	xe_device_mem_access_put(gt_to_xe(ddat->gt));
+
+	return 0;
+}
+
+#define PL1_DISABLE 0
+
+/*
+ * HW allows arbitrary PL1 limits to be set but silently clamps these values to
+ * "typical but not guaranteed" min/max values in rg.pkg_power_sku. Follow the
+ * same pattern for sysfs, allow arbitrary PL1 limits to be set but display
+ * clamped values when read.
+ */
+static int hwm_power_max_read(struct hwm_drvdata *ddat, long *value)
+{
+	struct xe_hwmon *hwmon = ddat->hwmon;
+	u32 reg_val;
+	u64 r, min, max;
+
+	xe_device_mem_access_get(gt_to_xe(ddat->gt));
+
+	process_hwmon_reg(ddat, pkg_rapl_limit, reg_read, &reg_val, 0, 0);
+	/* Check if PL1 limit is disabled */
+	if (!(reg_val & PKG_PWR_LIM_1_EN)) {
+		*value = PL1_DISABLE;
+		xe_device_mem_access_put(gt_to_xe(ddat->gt));
+		return 0;
+	}
+
+	reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
+	*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
+
+	process_hwmon_reg_read64(ddat, pkg_power_sku, &r);
+	min = REG_FIELD_GET(PKG_MIN_PWR, r);
+	min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
+	max = REG_FIELD_GET(PKG_MAX_PWR, r);
+	max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);
+
+	if (min && max)
+		*value = clamp_t(u64, *value, min, max);
+
+	xe_device_mem_access_put(gt_to_xe(ddat->gt));
+	return 0;
+}
+
+static int hwm_power_max_write(struct hwm_drvdata *ddat, long value)
+{
+	struct xe_hwmon *hwmon = ddat->hwmon;
+	DEFINE_WAIT(wait);
+	int ret = 0;
+	u32 nval;
+
+	/* Block waiting for GuC reset to complete when needed */
+	for (;;) {
+		mutex_lock(&hwmon->hwmon_lock);
+
+		prepare_to_wait(&ddat->waitq, &wait, TASK_INTERRUPTIBLE);
+
+		if (!hwmon->ddat.reset_in_progress)
+			break;
+
+		if (signal_pending(current)) {
+			ret = -EINTR;
+			break;
+		}
+
+		mutex_unlock(&hwmon->hwmon_lock);
+
+		schedule();
+	}
+	finish_wait(&ddat->waitq, &wait);
+	if (ret)
+		goto unlock;
+
+	xe_device_mem_access_get(gt_to_xe(ddat->gt));
+
+	/* Disable PL1 limit and verify, as limit cannot be disabled on all platforms */
+	if (value == PL1_DISABLE) {
+		process_hwmon_reg(ddat, pkg_rapl_limit, reg_rmw, &nval,
+				  PKG_PWR_LIM_1_EN, 0);
+		process_hwmon_reg(ddat, pkg_rapl_limit, reg_read, &nval,
+				  PKG_PWR_LIM_1_EN, 0);
+
+		if (nval & PKG_PWR_LIM_1_EN)
+			ret = -ENODEV;
+		goto exit;
+	}
+
+	/* Computation in 64-bits to avoid overflow. Round to nearest. */
+	nval = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
+	nval = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, nval);
+
+	process_hwmon_reg(ddat, pkg_rapl_limit, reg_rmw, &nval,
+			  PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, nval);
+exit:
+	xe_device_mem_access_put(gt_to_xe(ddat->gt));
+unlock:
+	mutex_unlock(&hwmon->hwmon_lock);
+
+	return 0;
+}
+
+static int hwm_power_rated_max_read(struct hwm_drvdata *ddat, long *value)
+{
+	struct xe_hwmon *hwmon = ddat->hwmon;
+	u32 reg_val;
+
+	process_hwmon_reg(ddat, pkg_power_sku, reg_read, &reg_val, 0, 0);
+	reg_val = REG_FIELD_GET(PKG_PKG_TDP, reg_val);
+	*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
+
+	return 0;
+}
+
 static const struct hwmon_channel_info *hwm_info[] = {
+	HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX),
 	NULL
 };
 
+static umode_t
+hwm_power_is_visible(struct hwm_drvdata *ddat, u32 attr, int chan)
+{
+	u32 reg_val;
+
+	switch (attr) {
+	case hwmon_power_max:
+		return process_hwmon_reg(ddat, pkg_rapl_limit,
+					 reg_read, &reg_val, 0, 0) ? 0 : 0664;
+	case hwmon_power_rated_max:
+		return process_hwmon_reg(ddat, pkg_power_sku,
+					 reg_read, &reg_val, 0, 0) ? 0 : 0444;
+	default:
+		return 0;
+	}
+}
+
+static int
+hwm_power_read(struct hwm_drvdata *ddat, u32 attr, int chan, long *val)
+{
+	switch (attr) {
+	case hwmon_power_max:
+		return hwm_power_max_read(ddat, val);
+	case hwmon_power_rated_max:
+		return hwm_power_rated_max_read(ddat, val);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int
+hwm_power_write(struct hwm_drvdata *ddat, u32 attr, int chan, long val)
+{
+	switch (attr) {
+	case hwmon_power_max:
+		return hwm_power_max_write(ddat, val);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+void xe_hwmon_power_max_disable(struct xe_device *xe, bool *old)
+{
+	struct xe_hwmon *hwmon = xe->hwmon;
+	struct hwm_drvdata *ddat = &hwmon->ddat;
+	u32 r;
+
+	xe_device_mem_access_get(gt_to_xe(ddat->gt));
+
+	if (!hwmon || process_hwmon_reg(ddat, pkg_rapl_limit,
+					reg_read, &r, 0, 0))
+		return;
+
+	mutex_lock(&hwmon->hwmon_lock);
+
+	hwmon->ddat.reset_in_progress = true;
+
+	process_hwmon_reg(ddat, pkg_rapl_limit, reg_rmw, &r,
+			  PKG_PWR_LIM_1_EN, 0);
+	*old = !!(r & PKG_PWR_LIM_1_EN);
+
+	mutex_unlock(&hwmon->hwmon_lock);
+
+	xe_device_mem_access_put(gt_to_xe(ddat->gt));
+}
+
+void xe_hwmon_power_max_restore(struct xe_device *xe, bool old)
+{
+	struct xe_hwmon *hwmon = xe->hwmon;
+	struct hwm_drvdata *ddat = &hwmon->ddat;
+	u32 r;
+
+	xe_device_mem_access_get(gt_to_xe(ddat->gt));
+
+	if (!hwmon || process_hwmon_reg(ddat, pkg_rapl_limit,
+					reg_read, &r, 0, 0))
+		return;
+
+	mutex_lock(&hwmon->hwmon_lock);
+
+	process_hwmon_reg(ddat, pkg_rapl_limit, reg_rmw, &r,
+			  PKG_PWR_LIM_1_EN, old ? PKG_PWR_LIM_1_EN : 0);
+
+	hwmon->ddat.reset_in_progress = false;
+	wake_up_all(&hwmon->ddat.waitq);
+
+	mutex_unlock(&hwmon->hwmon_lock);
+
+	xe_device_mem_access_put(gt_to_xe(ddat->gt));
+}
+
 static umode_t
 hwm_is_visible(const void *drvdata, enum hwmon_sensor_types type,
 	       u32 attr, int channel)
 {
+	struct hwm_drvdata *ddat = (struct hwm_drvdata *)drvdata;
+	int ret;
+
+	xe_device_mem_access_get(gt_to_xe(ddat->gt));
+
 	switch (type) {
+	case hwmon_power:
+		ret = hwm_power_is_visible(ddat, attr, channel);
+		break;
 	default:
-		return 0;
+		ret = 0;
 	}
+
+	xe_device_mem_access_put(gt_to_xe(ddat->gt));
+
+	return ret;
 }
 
 static int
 hwm_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
 	 int channel, long *val)
 {
+	struct hwm_drvdata *ddat = dev_get_drvdata(dev);
+	int ret;
+
+	xe_device_mem_access_get(gt_to_xe(ddat->gt));
+
 	switch (type) {
+	case hwmon_power:
+		ret = hwm_power_read(ddat, attr, channel, val);
+		break;
 	default:
-		return -EOPNOTSUPP;
+		ret = -EOPNOTSUPP;
+		break;
 	}
+
+	xe_device_mem_access_put(gt_to_xe(ddat->gt));
+
+	return ret;
 }
 
 static int
 hwm_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
 	  int channel, long val)
 {
+	struct hwm_drvdata *ddat = dev_get_drvdata(dev);
+	int ret;
+
+	xe_device_mem_access_get(gt_to_xe(ddat->gt));
+
 	switch (type) {
+	case hwmon_power:
+		ret = hwm_power_write(ddat, attr, channel, val);
+		break;
 	default:
-		return -EOPNOTSUPP;
+		ret = -EOPNOTSUPP;
+		break;
 	}
+
+	xe_device_mem_access_put(gt_to_xe(ddat->gt));
+
+	return ret;
 }
 
 static const struct hwmon_ops hwm_ops = {
@@ -66,8 +407,19 @@  static const struct hwmon_chip_info hwm_chip_info = {
 };
 
 static void
-hwm_get_preregistration_info(struct xe_device *xe)
+hwm_get_preregistration_info(struct hwm_drvdata *ddat)
 {
+	struct xe_hwmon *hwmon = ddat->hwmon;
+	u32 val_sku_unit = 0;
+	int ret;
+
+	/* On read failure scl_shift_power stays 0 (from kzalloc), i.e. no scaling shift */
+	ret = process_hwmon_reg(ddat, pkg_power_sku_unit, reg_read, &val_sku_unit, 0, 0);
+	/*
+	 * The contents of register pkg_power_sku_unit do not change,
+	 * so read it once and store the shift values.
+	 */
+	if (!ret)
+		hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
 }
 
 void xe_hwmon_register(struct xe_device *xe)
@@ -89,18 +441,24 @@  void xe_hwmon_register(struct xe_device *xe)
 	mutex_init(&hwmon->hwmon_lock);
 	ddat = &hwmon->ddat;
 
+	/* primary GT to access device level properties */
+	ddat->gt = xe->tiles[0].primary_gt;
+
 	ddat->hwmon = hwmon;
 	snprintf(ddat->name, sizeof(ddat->name), "xe");
 
-	hwm_get_preregistration_info(xe);
+	init_waitqueue_head(&ddat->waitq);
 
-	drm_dbg(&xe->drm, "Register HWMON interface\n");
+	hwm_get_preregistration_info(ddat);
 
-	/*  hwmon_dev points to device hwmon<i> */
+	drm_dbg(&xe->drm, "Register xe hwmon interface\n");
+
+	/* hwmon_dev points to device hwmon<i> */
 	hwmon_dev = devm_hwmon_device_register_with_info(dev, ddat->name,
 							 ddat,
 							 &hwm_chip_info,
 							 NULL);
+
 	if (IS_ERR(hwmon_dev)) {
 		drm_warn(&xe->drm, "Fail to register xe hwmon\n");
 		xe->hwmon = NULL;
diff --git a/drivers/gpu/drm/xe/xe_hwmon.h b/drivers/gpu/drm/xe/xe_hwmon.h
index a078eeb0a68b..a5dc693569c5 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.h
+++ b/drivers/gpu/drm/xe/xe_hwmon.h
@@ -14,9 +14,13 @@  struct xe_device;
 #if IS_REACHABLE(CONFIG_HWMON)
 void xe_hwmon_register(struct xe_device *xe);
 void xe_hwmon_unregister(struct xe_device *xe);
+void xe_hwmon_power_max_disable(struct xe_device *xe, bool *old);
+void xe_hwmon_power_max_restore(struct xe_device *xe, bool old);
 #else
 static inline void xe_hwmon_register(struct xe_device *xe) { };
 static inline void xe_hwmon_unregister(struct xe_device *xe) { };
+/* Write *old so CONFIG_HWMON=n callers never hand an uninitialized bool to restore() */
+static inline void xe_hwmon_power_max_disable(struct xe_device *xe, bool *old) { *old = false; };
+static inline void xe_hwmon_power_max_restore(struct xe_device *xe, bool old) { };
 #endif
 
 #endif /* __XE_HWMON_H__ */
diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c
index 70eabf567156..9df5a3a85dc3 100644
--- a/drivers/gpu/drm/xe/xe_uc.c
+++ b/drivers/gpu/drm/xe/xe_uc.c
@@ -13,6 +13,7 @@ 
 #include "xe_huc.h"
 #include "xe_uc_fw.h"
 #include "xe_wopcm.h"
+#include "xe_hwmon.h"
 
 static struct xe_gt *
 uc_to_gt(struct xe_uc *uc)
@@ -127,11 +128,15 @@  int xe_uc_init_hwconfig(struct xe_uc *uc)
 int xe_uc_init_hw(struct xe_uc *uc)
 {
 	int ret;
+	bool pl1en;
 
 	/* GuC submission not enabled, nothing to do */
 	if (!xe_device_guc_submission_enabled(uc_to_xe(uc)))
 		return 0;
 
+	/* Disable a potentially low PL1 power limit to allow freq to be raised */
+	xe_hwmon_power_max_disable(uc_to_xe(uc), &pl1en);
+
 	ret = xe_uc_sanitize_reset(uc);
 	if (ret)
 		return ret;
@@ -160,6 +165,7 @@  int xe_uc_init_hw(struct xe_uc *uc)
 	if (ret)
 		return ret;
 
+	xe_hwmon_power_max_restore(uc_to_xe(uc), pl1en);
 	/* We don't fail the driver load if HuC fails to auth, but let's warn */
 	ret = xe_huc_auth(&uc->huc);
 	XE_WARN_ON(ret);