@@ -411,6 +411,42 @@ struct amdgpu_dm_trace_pipe_state {
unsigned int update_flags;
};
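+/* Snapshot of the DCN clock state (struct dc_clocks fields) captured for tracing */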
+struct amdgpu_dm_trace_dc_clocks_state {
+ int dispclk_khz;
+ int dppclk_khz;
+ int disp_dpp_voltage_level_khz;
+ int dcfclk_khz;
+ int socclk_khz;
+ int dcfclk_deep_sleep_khz;
+ int fclk_khz;
+ int phyclk_khz;
+ int dramclk_khz;
+ bool p_state_change_support;
+ int pwr_state;
+ bool prev_p_state_change_support;
+ int dtm_level;
+ int max_supported_dppclk_khz;
+ int max_supported_dispclk_khz;
+ int bw_dppclk_khz;
+ int bw_dispclk_khz;
+ bool safe_to_lower;
+};
+
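+/* Snapshot of the DCE bandwidth/clock state captured for tracing */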
+struct amdgpu_dm_trace_dce_clocks_state {
+ bool cpuc_state_change_enable;
+ bool cpup_state_change_enable;
+ bool stutter_mode_enable;
+ bool nbp_state_change_enable;
+ bool all_displays_in_sync;
+ int sclk_khz;
+ int sclk_deep_sleep_khz;
+ int yclk_khz;
+ int dispclk_khz;
+ int blackout_recovery_time_us;
+ int patched_disp_clk;
+ bool safe_to_lower;
+};
+
#define fill_out_trace_pipe_state(trace_pipe_state, pipe_ctx) \
do { \
trace_pipe_state.pipe_idx = (pipe_ctx)->pipe_idx; \
@@ -444,6 +480,44 @@ struct amdgpu_dm_trace_pipe_state {
trace_pipe_state.update_flags = (pipe_ctx)->update_flags.raw; \
} while (0)
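+/* Copy the traced fields from a struct dc_clocks into the DC clocks trace snapshot */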
+#define fill_out_trace_clock_state(trace_clock_state, clocks, safe_to_lower) \
+ do { \
+ trace_clock_state.dispclk_khz = (clocks)->dispclk_khz; \
+ trace_clock_state.dppclk_khz = (clocks)->dppclk_khz; \
+ trace_clock_state.disp_dpp_voltage_level_khz = (clocks)->disp_dpp_voltage_level_khz; \
+ trace_clock_state.dcfclk_khz = (clocks)->dcfclk_khz; \
+ trace_clock_state.socclk_khz = (clocks)->socclk_khz; \
+ trace_clock_state.dcfclk_deep_sleep_khz = (clocks)->dcfclk_deep_sleep_khz; \
+ trace_clock_state.fclk_khz = (clocks)->fclk_khz; \
+ trace_clock_state.phyclk_khz = (clocks)->phyclk_khz; \
+ trace_clock_state.dramclk_khz = (clocks)->dramclk_khz; \
+ trace_clock_state.p_state_change_support = (clocks)->p_state_change_support; \
+ trace_clock_state.pwr_state = (clocks)->pwr_state; \
+ trace_clock_state.prev_p_state_change_support = (clocks)->prev_p_state_change_support; \
+ trace_clock_state.dtm_level = (clocks)->dtm_level; \
+ trace_clock_state.max_supported_dppclk_khz = (clocks)->max_supported_dppclk_khz; \
+ trace_clock_state.max_supported_dispclk_khz = (clocks)->max_supported_dispclk_khz; \
+ trace_clock_state.bw_dppclk_khz = (clocks)->bw_dppclk_khz; \
+ trace_clock_state.bw_dispclk_khz = (clocks)->bw_dispclk_khz; \
+ trace_clock_state.safe_to_lower = (safe_to_lower); \
+ } while (0)
+
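+/* Copy the traced fields from the DCE bandwidth output into the DCE clocks trace snapshot */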
+#define fill_out_trace_dce_clock_state(trace_clock_state, clocks, patched_disp_clk, safe_to_lower) \
+ do { \
+ trace_clock_state.cpuc_state_change_enable = (clocks)->cpuc_state_change_enable; \
+ trace_clock_state.cpup_state_change_enable = (clocks)->cpup_state_change_enable; \
+ trace_clock_state.stutter_mode_enable = (clocks)->stutter_mode_enable; \
+ trace_clock_state.nbp_state_change_enable = (clocks)->nbp_state_change_enable; \
+ trace_clock_state.all_displays_in_sync = (clocks)->all_displays_in_sync; \
+ trace_clock_state.sclk_khz = (clocks)->sclk_khz; \
+ trace_clock_state.sclk_deep_sleep_khz = (clocks)->sclk_deep_sleep_khz; \
+ trace_clock_state.yclk_khz = (clocks)->yclk_khz; \
+ trace_clock_state.dispclk_khz = (clocks)->dispclk_khz; \
+ trace_clock_state.blackout_recovery_time_us = (clocks)->blackout_recovery_time_us; \
+ trace_clock_state.patched_disp_clk = (patched_disp_clk); \
+ trace_clock_state.safe_to_lower = (safe_to_lower); \
+ } while (0)
+
#endif /* _AMDGPU_DM_TRACE_STRUCTS_DEFINED_ */
TRACE_EVENT(amdgpu_dm_dc_pipe_state,
@@ -548,6 +622,130 @@ TRACE_EVENT(amdgpu_dm_dc_pipe_state,
)
);
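+/* Emitted by the DCN clock managers when update_clocks() runs */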
+TRACE_EVENT(amdgpu_dm_dc_clocks_state,
+ TP_PROTO(const struct amdgpu_dm_trace_dc_clocks_state *clk),
+ TP_ARGS(clk),
+
+ TP_STRUCT__entry(
+ __field(int, dispclk_khz)
+ __field(int, dppclk_khz)
+ __field(int, disp_dpp_voltage_level_khz)
+ __field(int, dcfclk_khz)
+ __field(int, socclk_khz)
+ __field(int, dcfclk_deep_sleep_khz)
+ __field(int, fclk_khz)
+ __field(int, phyclk_khz)
+ __field(int, dramclk_khz)
+ __field(bool, p_state_change_support)
+ __field(bool, prev_p_state_change_support)
+ __field(int, pwr_state)
+ __field(int, dtm_level)
+ __field(int, max_supported_dppclk_khz)
+ __field(int, max_supported_dispclk_khz)
+ __field(int, bw_dppclk_khz)
+ __field(int, bw_dispclk_khz)
+ __field(bool, safe_to_lower)
+ ),
+ TP_fast_assign(
+ __entry->dispclk_khz = clk->dispclk_khz;
+ __entry->dppclk_khz = clk->dppclk_khz;
+ __entry->disp_dpp_voltage_level_khz = clk->disp_dpp_voltage_level_khz;
+ __entry->dcfclk_khz = clk->dcfclk_khz;
+ __entry->socclk_khz = clk->socclk_khz;
+ __entry->dcfclk_deep_sleep_khz = clk->dcfclk_deep_sleep_khz;
+ __entry->fclk_khz = clk->fclk_khz;
+ __entry->phyclk_khz = clk->phyclk_khz;
+ __entry->dramclk_khz = clk->dramclk_khz;
+ __entry->p_state_change_support = clk->p_state_change_support;
+ __entry->prev_p_state_change_support = clk->prev_p_state_change_support;
+ __entry->pwr_state = clk->pwr_state;
+ __entry->dtm_level = clk->dtm_level;
+ __entry->max_supported_dppclk_khz = clk->max_supported_dppclk_khz;
+ __entry->max_supported_dispclk_khz = clk->max_supported_dispclk_khz;
+ __entry->bw_dppclk_khz = clk->bw_dppclk_khz;
+ __entry->bw_dispclk_khz = clk->bw_dispclk_khz;
+ __entry->safe_to_lower = clk->safe_to_lower;
+ ),
+ TP_printk("dispclk_khz=%d dppclk_khz=%d disp_dpp_voltage_level_khz=%d dcfclk_khz=%d socclk_khz=%d "
+ "dcfclk_deep_sleep_khz=%d fclk_khz=%d phyclk_khz=%d "
+ "dramclk_khz=%d p_state_change_support=%d "
+ "prev_p_state_change_support=%d pwr_state=%d prev_p_state_change_support=%d "
+ "dtm_level=%d max_supported_dppclk_khz=%d max_supported_dispclk_khz=%d "
+ "bw_dppclk_khz=%d bw_dispclk_khz=%d "
+ "safe_to_lower=%d ",
+ __entry->dispclk_khz,
+ __entry->dppclk_khz,
+ __entry->disp_dpp_voltage_level_khz,
+ __entry->dcfclk_khz,
+ __entry->socclk_khz,
+ __entry->dcfclk_deep_sleep_khz,
+ __entry->fclk_khz,
+ __entry->phyclk_khz,
+ __entry->dramclk_khz,
+ __entry->p_state_change_support,
+ __entry->prev_p_state_change_support,
+ __entry->pwr_state,
+ __entry->dtm_level,
+ __entry->max_supported_dppclk_khz,
+ __entry->max_supported_dispclk_khz,
+ __entry->bw_dppclk_khz,
+ __entry->bw_dispclk_khz,
+ __entry->safe_to_lower
+ )
+);
+
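+/* Emitted by the DCE clock managers when update_clocks() runs */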
+TRACE_EVENT(amdgpu_dm_dce_clocks_state,
+ TP_PROTO(const struct amdgpu_dm_trace_dce_clocks_state *clk),
+ TP_ARGS(clk),
+
+ TP_STRUCT__entry(
+ __field(bool, cpuc_state_change_enable)
+ __field(bool, cpup_state_change_enable)
+ __field(bool, stutter_mode_enable)
+ __field(bool, nbp_state_change_enable)
+ __field(bool, all_displays_in_sync)
+ __field(int, sclk_khz)
+ __field(int, sclk_deep_sleep_khz)
+ __field(int, yclk_khz)
+ __field(int, dispclk_khz)
+ __field(int, blackout_recovery_time_us)
+ __field(int, patched_disp_clk)
+ __field(bool, safe_to_lower)
+ ),
+ TP_fast_assign(
+ __entry->cpuc_state_change_enable = clk->cpuc_state_change_enable;
+ __entry->cpup_state_change_enable = clk->cpup_state_change_enable;
+ __entry->stutter_mode_enable = clk->stutter_mode_enable;
+ __entry->nbp_state_change_enable = clk->nbp_state_change_enable;
+ __entry->all_displays_in_sync = clk->all_displays_in_sync;
+ __entry->sclk_khz = clk->sclk_khz;
+ __entry->sclk_deep_sleep_khz = clk->sclk_deep_sleep_khz;
+ __entry->yclk_khz = clk->yclk_khz;
+ __entry->dispclk_khz = clk->dispclk_khz;
+ __entry->blackout_recovery_time_us = clk->blackout_recovery_time_us;
+ __entry->patched_disp_clk = clk->patched_disp_clk;
+ __entry->safe_to_lower = clk->safe_to_lower;
+ ),
+ TP_printk("cpuc_state_change_enable=%d cpup_state_change_enable=%d stutter_mode_enable=%d "
+ "nbp_state_change_enable=%d all_displays_in_sync=%d sclk_khz=%d sclk_deep_sleep_khz=%d "
+ "yclk_khz=%d dispclk_khz=%d blackout_recovery_time_us=%d patched_disp_clk=%d "
+ "safe_to_lower=%d",
+ __entry->cpuc_state_change_enable,
+ __entry->cpup_state_change_enable,
+ __entry->stutter_mode_enable,
+ __entry->nbp_state_change_enable,
+ __entry->all_displays_in_sync,
+ __entry->sclk_khz,
+ __entry->sclk_deep_sleep_khz,
+ __entry->yclk_khz,
+ __entry->dispclk_khz,
+ __entry->blackout_recovery_time_us,
+ __entry->patched_disp_clk,
+ __entry->safe_to_lower
+ )
+);
+
#endif /* _AMDGPU_DM_TRACE_H_ */
#undef TRACE_INCLUDE_PATH
@@ -195,8 +195,10 @@ static void dce112_update_clocks(struct clk_mgr *clk_mgr_base,
bool safe_to_lower)
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dce_bw_output *dce_clocks = &context->bw_ctx.bw.dce;
struct dm_pp_power_level_change_request level_change_req;
int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
+ struct amdgpu_dm_trace_dce_clocks_state trace;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
@@ -210,6 +212,9 @@ static void dce112_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
}
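+ /* Record the DCE clock state before dispclk is reprogrammed below */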
+ fill_out_trace_dce_clock_state(trace, dce_clocks, patched_disp_clk, safe_to_lower);
+ trace_amdgpu_dm_dce_clocks_state(&trace);
+
if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr_base->clks.dispclk_khz)) {
patched_disp_clk = dce112_set_clock(clk_mgr_base, patched_disp_clk);
clk_mgr_base->clks.dispclk_khz = patched_disp_clk;
@@ -196,6 +196,7 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_debug_options *debug = &dc->debug;
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct pp_smu_funcs_rv *pp_smu = NULL;
+ struct amdgpu_dm_trace_dc_clocks_state trace;
bool send_request_to_increase = false;
bool send_request_to_lower = false;
int display_count;
@@ -211,6 +212,9 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
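+ /* Record the requested DCN clocks before they are applied */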
+ fill_out_trace_clock_state(trace, new_clocks, safe_to_lower);
+ trace_amdgpu_dm_dc_clocks_state(&trace);
+
if (display_count == 0)
enter_display_off = true;
@@ -148,6 +148,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
+ struct amdgpu_dm_trace_dc_clocks_state trace;
struct dc *dc = clk_mgr_base->ctx->dc;
struct pp_smu_funcs_nv *pp_smu = NULL;
int display_count;
@@ -247,6 +248,9 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.disp_dpp_voltage_level_khz / 1000);
}
+ fill_out_trace_clock_state(trace, new_clocks, safe_to_lower);
+ trace_amdgpu_dm_dc_clocks_state(&trace);
+
if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
if (dpp_clock_lowered) {
// if clock is being lowered, increase DTO before lowering refclk
@@ -109,6 +109,7 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
+ struct amdgpu_dm_trace_dc_clocks_state trace;
struct dc *dc = clk_mgr_base->ctx->dc;
int display_count;
bool update_dppclk = false;
@@ -120,6 +121,9 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
if (dc->work_arounds.skip_clock_update)
return;
+ fill_out_trace_clock_state(trace, new_clocks, safe_to_lower);
+ trace_amdgpu_dm_dc_clocks_state(&trace);
+
/*
* if it is safe to lower, but we are already in the lower state, we don't have to do anything
* also if safe to lower is false, we just go in the higher state
@@ -230,6 +230,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
+ struct amdgpu_dm_trace_dc_clocks_state trace;
struct dc *dc = clk_mgr_base->ctx->dc;
int display_count;
bool update_dppclk = false;
@@ -264,6 +265,9 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
+ fill_out_trace_clock_state(trace, new_clocks, safe_to_lower);
+ trace_amdgpu_dm_dc_clocks_state(&trace);
+
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DCEFCLK, clk_mgr_base->clks.dcfclk_khz / 1000);
@@ -669,9 +669,11 @@ static void dce_update_clocks(struct clk_mgr *clk_mgr,
struct dc_state *context,
bool safe_to_lower)
{
+ struct dce_bw_output *dce_clocks = &context->bw_ctx.bw.dce;
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_power_level_change_request level_change_req;
int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
+ struct amdgpu_dm_trace_dce_clocks_state trace;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
@@ -685,6 +687,9 @@ static void dce_update_clocks(struct clk_mgr *clk_mgr,
clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
}
+ fill_out_trace_dce_clock_state(trace, dce_clocks, patched_disp_clk, safe_to_lower);
+ trace_amdgpu_dm_dce_clocks_state(&trace);
+
if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
patched_disp_clk = dce_set_clock(clk_mgr, patched_disp_clk);
clk_mgr->clks.dispclk_khz = patched_disp_clk;