@@ -7,6 +7,7 @@
#define _XE_DEVICE_TYPES_H_
#include <linux/pci.h>
+#include <linux/pci-acpi.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
@@ -42,6 +42,14 @@
#define POWER_SETUP_I1_SHIFT 6 /* 10.6 fixed point format */
#define POWER_SETUP_I1_DATA_MASK REG_GENMASK(15, 0)
+#define PCODE_D3_VRAM_SELF_REFRESH 0x71
+#define PCODE_D3_VRSR_SC_DISABLE 0x0
+#define PCODE_D3_VRSR_SC_ENABLE 0x1
+#define PCODE_D3_VRSR_SC_AUX_PL_AND_PERST_DELAY 0x2
+#define PCODE_D3_VRSR_PERST_SHIFT 16
+#define POWER_D3_VRSR_PSERST_MASK REG_GENMASK(31, 16)
+#define POWER_D3_VRSR_AUX_PL_MASK REG_GENMASK(15, 0)
+
#define PCODE_FREQUENCY_CONFIG 0x6e
/* Frequency Config Sub Commands (param1) */
#define PCODE_MBOX_FC_SC_READ_FUSED_P0 0x0
@@ -23,6 +23,7 @@
#include "xe_guc.h"
#include "xe_irq.h"
#include "xe_mmio.h"
+#include "xe_pcode_api.h"
#include "xe_pcode.h"
#include "xe_pxp.h"
#include "regs/xe_regs.h"
@@ -261,6 +262,95 @@ static bool xe_pm_vrsr_capable(struct xe_device *xe)
return val & VRAM_SR_SUPPORTED;
}
+/*
+ * Query pcode for the platform's D3Cold VRSR AUX power limit and PERST#
+ * assertion delay, then report both to the ACPI root port so firmware can
+ * provision them. Returns 0 on success, negative errno on failure.
+ */
+static int pci_acpi_aux_power_setup(struct xe_device *xe)
+{
+ struct xe_tile *root_tile = xe_device_get_root_tile(xe);
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ struct pci_dev *root_pdev;
+ u32 aux_pwr_limit;
+ u32 perst_delay;
+ u32 uval;
+ int ret;
+
+ root_pdev = pcie_find_root_port(pdev);
+ if (!root_pdev)
+ return -EINVAL;
+
+ ret = xe_pcode_read(root_tile, PCODE_MBOX(PCODE_D3_VRAM_SELF_REFRESH,
+ PCODE_D3_VRSR_SC_AUX_PL_AND_PERST_DELAY, 0),
+ &uval, NULL);
+ if (ret)
+ return ret;
+
+ /* NOTE(review): mask macro name "PSERST" looks like a typo for "PERST" */
+ aux_pwr_limit = REG_FIELD_GET(POWER_D3_VRSR_AUX_PL_MASK, uval);
+ perst_delay = REG_FIELD_GET(POWER_D3_VRSR_PSERST_MASK, uval);
+
+ /* %u: both fields are u32 extracted from the pcode mailbox value */
+ drm_dbg(&xe->drm, "AUX power limit =%u\n", aux_pwr_limit);
+ drm_dbg(&xe->drm, "PERST Assertion delay =%u\n", perst_delay);
+
+ ret = pci_acpi_request_d3cold_aux_power(root_pdev, aux_pwr_limit);
+ if (ret)
+ return ret;
+
+ return pci_acpi_add_perst_assertion_delay(root_pdev, perst_delay);
+}
+
+/*
+ * Probe and latch D3Cold VRAM-self-refresh support: requires platform
+ * support (info.has_vrsr), the capability fuse, and a successful AUX
+ * power / PERST# handshake with the ACPI root port.
+ */
+static void xe_pm_vrsr_init(struct xe_device *xe)
+{
+ bool ready;
+
+ /* Bail out unless the platform supports d3cold vrsr and is fused capable */
+ if (!xe->info.has_vrsr || !xe_pm_vrsr_capable(xe))
+ return;
+
+ /*
+ * If the VRSR initialization fails, the device will proceed with the regular
+ * D3 Cold flow
+ */
+ ready = !pci_acpi_aux_power_setup(xe);
+ if (ready)
+ xe->d3cold.vrsr_capable = true;
+
+ drm_info(&xe->drm, "VRSR capable %s\n", ready ? "Yes" : "No");
+}
+
+/**
+ * xe_pm_vrsr_enable - Enable VRAM self refresh
+ * @xe: The xe device.
+ * @enable: true: Enable, false: Disable
+ *
+ * This function enables the VRSR feature in D3Cold path.
+ *
+ * Return: It returns 0 on success and errno on failure.
+ */
+int xe_pm_vrsr_enable(struct xe_device *xe, bool enable)
+{
+ struct xe_tile *root_tile = xe_device_get_root_tile(xe);
+ u32 subcmd = enable ? PCODE_D3_VRSR_SC_ENABLE : PCODE_D3_VRSR_SC_DISABLE;
+ u32 uval = 0;
+
+ /* VRSR must have been latched as usable during init */
+ if (!xe->d3cold.vrsr_capable)
+ return -ENXIO;
+
+ return xe_pcode_write(root_tile,
+ PCODE_MBOX(PCODE_D3_VRAM_SELF_REFRESH, subcmd, 0),
+ uval);
+}
+
static void xe_pm_runtime_init(struct xe_device *xe)
{
struct device *dev = xe->drm.dev;
@@ -336,7 +426,7 @@ int xe_pm_init(struct xe_device *xe)
if (err)
return err;
- xe->d3cold.vrsr_capable = xe_pm_vrsr_capable(xe);
+ xe_pm_vrsr_init(xe);
}
xe_pm_runtime_init(xe);
@@ -35,4 +35,5 @@ bool xe_rpm_reclaim_safe(const struct xe_device *xe);
struct task_struct *xe_pm_read_callback_task(struct xe_device *xe);
int xe_pm_module_init(void);
+int xe_pm_vrsr_enable(struct xe_device *xe, bool enable);
#endif