[4/7] drm/i915/skl: New ddb allocation algorithm

Message ID 20160829123522.9532-5-mahesh1.kumar@intel.com (mailing list archive)
State New, archived

Commit Message

Kumar, Mahesh Aug. 29, 2016, 12:35 p.m. UTC
This patch implements a new DDB allocation algorithm, as suggested by
the HW team. The algorithm takes care of the scenario where a plane
with a lower relative pixel rate would otherwise be allocated less DDB
than it needs to work.
It also takes care of enabling the same watermark level for each
plane, for efficient power saving.

Signed-off-by: Kumar, Mahesh <mahesh1.kumar@intel.com>
---
 drivers/gpu/drm/i915/intel_pm.c | 129 +++++++++++++++++++++-------------------
 1 file changed, 67 insertions(+), 62 deletions(-)
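
Roughly, the idea is: compute the per-plane watermark requirements for every
level first, then find the highest level whose requirements all fit within the
pipe's DDB allocation, guarantee each plane at least that many blocks, and
distribute any leftover space in proportion to data rate. A minimal standalone
sketch of that idea follows; the plane counts, block values and names are
illustrative only, not the driver's actual structures.

#include <stdint.h>
#include <stdio.h>

#define NUM_PLANES 3
#define NUM_LEVELS 8

int main(void)
{
	/* Hypothetical per-plane block requirements for each watermark level;
	 * higher levels tolerate more latency and need more blocks. */
	uint16_t wm_blocks[NUM_LEVELS][NUM_PLANES] = {
		{  40,  20, 10 }, {  60,  30, 15 }, {  90,  45, 22 },
		{ 120,  60, 30 }, { 160,  80, 40 }, { 200, 100, 50 },
		{ 260, 130, 65 }, { 320, 160, 80 },
	};
	uint16_t minimum[NUM_PLANES]   = { 8, 8, 8 };	/* per-plane minimum DDB */
	uint32_t data_rate[NUM_PLANES] = { 100, 50, 25 };
	uint16_t alloc_size = 512;	/* DDB blocks available to this pipe */
	uint16_t total_level_ddb = 0;
	int level, i;

	/*
	 * 1. Walk the levels from highest to lowest and pick the highest one
	 *    whose per-plane requirements (bounded below by the per-plane
	 *    minimums) all fit in the pipe's DDB allocation together.
	 */
	for (level = NUM_LEVELS - 1; level >= 0; level--) {
		total_level_ddb = 0;
		for (i = 0; i < NUM_PLANES; i++) {
			uint16_t need = wm_blocks[level][i];

			total_level_ddb += need > minimum[i] ? need : minimum[i];
		}
		if (total_level_ddb <= alloc_size)
			break;
	}
	if (level < 0) {
		fprintf(stderr, "configuration exceeds DDB limits\n");
		return 1;
	}

	/*
	 * 2. Give every plane what it needs for that common level, then spread
	 *    the leftover blocks in proportion to each plane's data rate.
	 */
	uint16_t remaining = alloc_size - total_level_ddb;
	uint32_t total_rate = 0;

	for (i = 0; i < NUM_PLANES; i++)
		total_rate += data_rate[i];
	for (i = 0; i < NUM_PLANES; i++) {
		uint16_t need = wm_blocks[level][i];
		uint16_t blocks = (need > minimum[i] ? need : minimum[i]) +
			(uint16_t)((uint64_t)remaining * data_rate[i] / total_rate);

		printf("plane %d: common level %d, %d DDB blocks\n", i, level, blocks);
	}
	return 0;
}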

Comments

Maarten Lankhorst Aug. 31, 2016, 1:38 p.m. UTC | #1
Hey,

On 29-08-16 at 14:35, Kumar, Mahesh wrote:
> This patch implements a new DDB allocation algorithm, as suggested by
> the HW team. The algorithm takes care of the scenario where a plane
> with a lower relative pixel rate would otherwise be allocated less DDB
> than it needs to work.
> It also takes care of enabling the same watermark level for each
> plane, for efficient power saving.
Patches 2 and 3 look ok, I'm not a watermark expert though. :)

To be honest, Skylake watermarks are very complicated with all the interdependencies. I can understand this helps,
but if you do separate per-crtc atomic commits it doesn't help: an update for crtc1 may introduce a vblank wait on crtc2 and the other way around. This will drop the framerate to half.

Is there no way around it?

~Maarten
Kumar, Mahesh Aug. 31, 2016, 2:18 p.m. UTC | #2
Hi,
In this series I'm not equalizing watermarks among the planes of all
CRTCs; these patches equalize them only among the planes of a single CRTC,
from the DDB pool allocated for that CRTC, which doesn't require a
wait_for_vblank. So it won't affect the FPS.
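
To make that scope concrete: the loop the patch adds at the end of
skl_allocate_pipe_ddb() only walks the planes of the CRTC being allocated, so
the equalized maximum level is applied within that CRTC's own DDB slice. A
tiny standalone model of that step follows; the names and numbers are
illustrative, not the driver's actual structures.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_PLANES 3
#define NUM_LEVELS 8

struct plane_wm {
	uint16_t res_b[NUM_LEVELS];	/* blocks needed at each level */
	uint8_t  res_l[NUM_LEVELS];	/* lines needed at each level */
	bool     enabled[NUM_LEVELS];
};

/*
 * For every plane of this one pipe, enable each level up to the pipe-wide
 * max_level that is individually valid (fits the 31-line limit and needs a
 * non-zero number of blocks).  Nothing outside this pipe is read or written,
 * so no other CRTC is affected.
 */
static void enable_levels_for_pipe(struct plane_wm planes[NUM_PLANES], int max_level)
{
	for (int i = 0; i < NUM_PLANES; i++)
		for (int level = NUM_LEVELS - 1; level > 0; level--)
			planes[i].enabled[level] = level <= max_level &&
						   planes[i].res_l[level] <= 31 &&
						   planes[i].res_b[level] != 0;
}

int main(void)
{
	struct plane_wm planes[NUM_PLANES] = { 0 };

	/* A couple of sample values so the demo prints something meaningful. */
	planes[0].res_b[1] = 40; planes[0].res_l[1] = 4;
	planes[1].res_b[1] = 20; planes[1].res_l[1] = 2;

	enable_levels_for_pipe(planes, 1);	/* pretend level 1 is the common fit */

	for (int i = 0; i < NUM_PLANES; i++)
		printf("plane %d, level 1 enabled: %s\n", i,
		       planes[i].enabled[1] ? "yes" : "no");
	return 0;
}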

On Wednesday 31 August 2016 07:08 PM, Maarten Lankhorst wrote:
> Hey,
>
> On 29-08-16 at 14:35, Kumar, Mahesh wrote:
>> This patch implements a new DDB allocation algorithm, as suggested by
>> the HW team. The algorithm takes care of the scenario where a plane
>> with a lower relative pixel rate would otherwise be allocated less DDB
>> than it needs to work.
>> It also takes care of enabling the same watermark level for each
>> plane, for efficient power saving.
> Patches 2 and 3 look ok, I'm not a watermark expert though. :)
>
> To be honest, Skylake watermarks are very complicated with all the interdependencies. I can understand this helps,
> but if you do separate per-crtc atomic commits it doesn't help: an update for crtc1 may introduce a vblank wait on crtc2 and the other way around. This will drop the framerate to half.
>
> Is there no way around it?
>
> ~Maarten

Patch

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 5fa02cb..9f69050 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3331,6 +3331,7 @@  skl_ddb_min_alloc(const struct drm_plane_state *pstate,
 
 static int
 skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
+		      struct skl_pipe_wm *pipe_wm,
 		      struct skl_ddb_allocation *ddb /* out */)
 {
 	struct drm_atomic_state *state = cstate->base.state;
@@ -3346,8 +3347,11 @@  skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	uint16_t *minimum = cstate->wm.skl.minimum_blocks;
 	uint16_t *y_minimum = cstate->wm.skl.minimum_y_blocks;
 	unsigned int total_data_rate;
+	uint16_t total_min_blocks = 0;
+	uint16_t total_level_ddb = 0;
 	int num_active;
-	int id, i;
+	int max_level, level;
+	int id, i, ret = 0;
 
 	if (WARN_ON(!state))
 		return 0;
@@ -3363,6 +3367,7 @@  skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	alloc_size = skl_ddb_entry_size(alloc);
 	if (alloc_size == 0) {
 		memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
+		memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
 		return 0;
 	}
 
@@ -3396,19 +3401,42 @@  skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	}
 
 	for (i = 0; i < PLANE_CURSOR; i++) {
-		alloc_size -= minimum[i];
-		alloc_size -= y_minimum[i];
+		total_min_blocks += minimum[i];
+		total_min_blocks += y_minimum[i];
 	}
 
-	/*
-	 * 2. Distribute the remaining space in proportion to the amount of
-	 * data each plane needs to fetch from memory.
-	 *
-	 * FIXME: we may not allocate every single block here.
-	 */
+	for (level = ilk_wm_max_level(dev); level >= 0; level--) {
+		total_level_ddb = 0;
+		for (i = 0; i < PLANE_CURSOR; i++) {
+			/*
+			 * TODO: We should calculate watermark values for Y/UV
+			 * plane both in case of NV12 format and use both values
+			 * for ddb calculation, As NV12 is disabled as of now.
+			 * using only single plane value here.
+			 */
+			uint16_t min = minimum[i] + y_minimum[i];
+			uint16_t plane_level_ddb_wm =
+				max(pipe_wm->wm[level].plane_res_b[i], min);
+			total_level_ddb += plane_level_ddb_wm;
+		}
+
+		if (total_level_ddb <= alloc_size)
+			break;
+	}
+
+	if ((level < 0) || (total_min_blocks > alloc_size)) {
+		DRM_DEBUG_KMS("Requested display configuration exceeds system DDB limitations");
+		DRM_DEBUG_KMS("minimum required %d/%d\n", (level < 0) ?
+				total_level_ddb : total_min_blocks, alloc_size);
+		ret = -EINVAL;
+		goto exit;
+	}
+	max_level = level;
+	alloc_size -= total_level_ddb;
+
 	total_data_rate = skl_get_total_relative_data_rate(cstate);
 	if (total_data_rate == 0)
-		return 0;
+		goto exit;
 
 	start = alloc->start;
 	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
@@ -3423,7 +3451,8 @@  skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		 * promote the expression to 64 bits to avoid overflowing, the
 		 * result is < available as data_rate / total_data_rate < 1
 		 */
-		plane_blocks = minimum[id];
+		plane_blocks = max(pipe_wm->wm[max_level].plane_res_b[id],
+					minimum[id]);
 		plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
 					total_data_rate);
 
@@ -3437,6 +3466,8 @@  skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 
 		/*
 		 * allocation for y_plane part of planar format:
+		 * TODO: Once we start calculating watermark values for Y/UV
+		 * plane both consider it for initial allowed wm blocks.
 		 */
 		y_data_rate = cstate->wm.skl.plane_y_data_rate[id];
 
@@ -3450,9 +3481,22 @@  skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		}
 
 		start += y_plane_blocks;
+
+		/*
+		 * Now enable all levels in WM structure which can be enabled
+		 * using current DDB allocation
+		 */
+		for (i = ilk_wm_max_level(dev); i > 0; i--) {
+			if (i > max_level || pipe_wm->wm[i].plane_res_l[id] > 31
+					|| pipe_wm->wm[i].plane_res_b[id] == 0)
+				pipe_wm->wm[i].plane_en[id] = false;
+			else
+				pipe_wm->wm[i].plane_en[id] = true;
+		}
 	}
 
-	return 0;
+exit:
+	return ret;
 }
 
 static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
@@ -3538,7 +3582,6 @@  static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cst
 static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 				struct intel_crtc_state *cstate,
 				struct intel_plane_state *intel_pstate,
-				uint16_t ddb_allocation,
 				int level,
 				struct skl_pipe_wm *pipe_wm)
 {
@@ -3557,12 +3600,9 @@  static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 	struct skl_wm_level *result = &pipe_wm->wm[level];
 	uint16_t *out_blocks = &result->plane_res_b[id];
 	uint8_t *out_lines = &result->plane_res_l[id];
-	bool *enabled = &result->plane_en[id];
 
-	if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) {
-		*enabled = false;
+	if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible)
 		return 0;
-	}
 
 	width = drm_rect_width(&intel_pstate->base.src) >> 16;
 	height = drm_rect_height(&intel_pstate->base.src) >> 16;
@@ -3626,54 +3666,27 @@  static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 			res_blocks++;
 	}
 
-	if (res_blocks >= ddb_allocation || res_lines > 31) {
-		*enabled = false;
-
-		/*
-		 * If there are no valid level 0 watermarks, then we can't
-		 * support this display configuration.
-		 */
-		if (level) {
-			return 0;
-		} else {
-			DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
-			DRM_DEBUG_KMS("Plane %d.%d: blocks required = %u/%u, lines required = %u/31\n",
-				      to_intel_crtc(cstate->base.crtc)->pipe,
-				      skl_wm_plane_id(to_intel_plane(pstate->plane)),
-				      res_blocks, ddb_allocation, res_lines);
-
-			return -EINVAL;
-		}
-	}
-
 	*out_blocks = res_blocks;
 	*out_lines = res_lines;
-	*enabled = true;
 
 	return 0;
 }
 
 static int
 skl_compute_wm_level(const struct drm_i915_private *dev_priv,
-		     struct skl_ddb_allocation *ddb,
 		     struct intel_crtc_state *cstate,
 		     int level,
 		     struct skl_pipe_wm *pipe_wm)
 {
 	struct drm_atomic_state *state = cstate->base.state;
-	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
 	struct drm_plane *plane;
 	struct intel_plane *intel_plane;
 	struct intel_plane_state *intel_pstate;
-	uint16_t ddb_blocks;
-	enum pipe pipe = intel_crtc->pipe;
 	int ret;
 
 	for_each_intel_plane_mask(&dev_priv->drm,
 				  intel_plane,
 				  cstate->base.plane_mask) {
-		int i = skl_wm_plane_id(intel_plane);
-
 		plane = &intel_plane->base;
 		intel_pstate = NULL;
 		if (state)
@@ -3699,12 +3712,9 @@  skl_compute_wm_level(const struct drm_i915_private *dev_priv,
 
 		WARN_ON(!intel_pstate->base.fb);
 
-		ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
-
 		ret = skl_compute_plane_wm(dev_priv,
 					   cstate,
 					   intel_pstate,
-					   ddb_blocks,
 					   level,
 					   pipe_wm);
 		if (ret)
@@ -3761,11 +3771,15 @@  static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
 	memset(pipe_wm, 0, sizeof(*pipe_wm));
 
 	for (level = 0; level <= max_level; level++) {
-		ret = skl_compute_wm_level(dev_priv, ddb, cstate,
+		ret = skl_compute_wm_level(dev_priv, cstate,
 					   level, pipe_wm);
 		if (ret)
 			return ret;
 	}
+	ret = skl_allocate_pipe_ddb(cstate, pipe_wm, ddb);
+	if (ret)
+		return ret;
+
 	pipe_wm->linetime = skl_compute_linetime_wm(cstate);
 
 	skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
@@ -3953,13 +3967,12 @@  pipes_modified(struct drm_atomic_state *state)
 }
 
 static int
-skl_compute_ddb(struct drm_atomic_state *state)
+skl_include_affected_pipes(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	struct intel_crtc *intel_crtc;
-	struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
 	uint32_t realloc_pipes = pipes_modified(state);
 	int ret;
 
@@ -4012,10 +4025,6 @@  skl_compute_ddb(struct drm_atomic_state *state)
 		if (IS_ERR(cstate))
 			return PTR_ERR(cstate);
 
-		ret = skl_allocate_pipe_ddb(cstate, ddb);
-		if (ret)
-			return ret;
-
 		ret = drm_atomic_add_affected_planes(state, &intel_crtc->base);
 		if (ret)
 			return ret;
@@ -4069,19 +4078,15 @@  skl_compute_wm(struct drm_atomic_state *state)
 	/* Clear all dirty flags */
 	results->dirty_pipes = 0;
 
-	ret = skl_compute_ddb(state);
+	ret = skl_include_affected_pipes(state);
 	if (ret)
 		return ret;
 
 	/*
 	 * Calculate WM's for all pipes that are part of this transaction.
-	 * Note that the DDB allocation above may have added more CRTC's that
-	 * weren't otherwise being modified (and set bits in dirty_pipes) if
-	 * pipe allocations had to change.
-	 *
-	 * FIXME:  Now that we're doing this in the atomic check phase, we
-	 * should allow skl_update_pipe_wm() to return failure in cases where
-	 * no suitable watermark values can be found.
+	 * Note that affected pipe calculation above may have added more
+	 * CRTC's that weren't otherwise being modified (and set bits in
+	 * dirty_pipes) if pipe allocations had to change.
 	 */
 	for_each_crtc_in_state(state, crtc, cstate, i) {
 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);