@@ -1142,6 +1142,8 @@ bool amdgpu_device_supports_boco(struct drm_device *dev);
 bool amdgpu_device_supports_baco(struct drm_device *dev);
 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
				       struct amdgpu_device *peer_adev);
+int amdgpu_device_baco_enter(struct drm_device *dev);
+int amdgpu_device_baco_exit(struct drm_device *dev);
 
 /* atpx handler */
 #if defined(CONFIG_VGA_SWITCHEROO)
@@ -4256,3 +4256,64 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
 	}
 }
 
+int amdgpu_device_baco_enter(struct drm_device *dev)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+
+	if (!amdgpu_device_supports_baco(adev->ddev))
+		return -ENOTSUPP;
+
+	if (is_support_sw_smu(adev)) {
+		struct smu_context *smu = &adev->smu;
+		int ret;
+
+		ret = smu_baco_enter(smu);
+		if (ret)
+			return ret;
+
+		return 0;
+	} else {
+		void *pp_handle = adev->powerplay.pp_handle;
+		const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+		if (!pp_funcs || !pp_funcs->get_asic_baco_state || !pp_funcs->set_asic_baco_state)
+			return -ENOENT;
+
+		/* enter BACO state */
+		if (pp_funcs->set_asic_baco_state(pp_handle, 1))
+			return -EIO;
+
+		return 0;
+	}
+}
+
+int amdgpu_device_baco_exit(struct drm_device *dev)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+
+	if (!amdgpu_device_supports_baco(adev->ddev))
+		return -ENOTSUPP;
+
+	if (is_support_sw_smu(adev)) {
+		struct smu_context *smu = &adev->smu;
+		int ret;
+
+		ret = smu_baco_exit(smu);
+		if (ret)
+			return ret;
+
+		return 0;
+	} else {
+		void *pp_handle = adev->powerplay.pp_handle;
+		const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+		if (!pp_funcs || !pp_funcs->get_asic_baco_state || !pp_funcs->set_asic_baco_state)
+			return -ENOENT;
+
+		/* exit BACO state */
+		if (pp_funcs->set_asic_baco_state(pp_handle, 0))
+			return -EIO;
+
+		return 0;
+	}
+}
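
For context only, and not part of the diff above: a minimal sketch of how a runtime-PM path could drive the two new helpers. The callback names example_runtime_suspend/example_runtime_resume and the -ENODEV fallback are placeholders for illustration; in amdgpu the natural call sites would be the existing runtime suspend/resume callbacks in amdgpu_drv.c.

/* Hypothetical wiring, assuming the usual amdgpu_drv.c includes
 * ("amdgpu.h", <linux/pci.h>) and dev_pm_ops callbacks that receive
 * the PCI device's struct device.
 */
static int example_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	/* Use BACO when the ASIC supports it; otherwise fall back to
	 * the existing BOCO/ATPX power-down path (not shown here).
	 */
	if (amdgpu_device_supports_baco(drm_dev))
		return amdgpu_device_baco_enter(drm_dev);

	return -ENODEV; /* placeholder for the non-BACO path */
}

static int example_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (amdgpu_device_supports_baco(drm_dev))
		return amdgpu_device_baco_exit(drm_dev);

	return -ENODEV; /* placeholder for the non-BACO path */
}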