@@ -8194,6 +8194,19 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
int clock_limit = dev_priv->max_dotclk_freq;
drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);
+
+ /* Adjust pipe_mode for bigjoiner: each pipe handles half the horizontal timings and source width */
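+ /*
+ * e.g. (illustrative numbers only): a 7680-wide mode becomes two
+ * 3840-wide pipe halves, each running at half the dotclock.
+ */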
+ if (pipe_config->bigjoiner) {
+ pipe_mode->crtc_clock /= 2;
+ pipe_mode->crtc_hdisplay /= 2;
+ pipe_mode->crtc_hblank_start /= 2;
+ pipe_mode->crtc_hblank_end /= 2;
+ pipe_mode->crtc_hsync_start /= 2;
+ pipe_mode->crtc_hsync_end /= 2;
+ pipe_mode->crtc_htotal /= 2;
+ pipe_config->pipe_src_w /= 2;
+ }
+
intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
if (INTEL_GEN(dev_priv) < 4) {
@@ -12917,6 +12930,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
if (mode_changed && crtc_state->hw.enable &&
dev_priv->display.crtc_compute_clock &&
+ !crtc_state->bigjoiner_slave &&
!drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
if (ret)
@@ -13462,6 +13476,15 @@ intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
{
const struct intel_crtc_state *from_crtc_state = crtc_state;
+ if (crtc_state->bigjoiner_slave) {
+ from_crtc_state = intel_atomic_get_new_crtc_state(state,
+ crtc_state->bigjoiner_linked_crtc);
+
+ /* No need to copy state if the master state is unchanged */
+ if (!from_crtc_state)
+ return;
+ }
+
intel_crtc_copy_color_blobs(crtc_state, from_crtc_state);
}
@@ -13497,6 +13520,47 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state
crtc_state->hw.ctm);
}
+static int
+copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
+ const struct intel_crtc_state *from_crtc_state)
+{
+ struct intel_crtc_state *saved_state;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
+ if (!saved_state)
+ return -ENOMEM;
+
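+ /*
+ * Carry over the slave CRTC's own uapi, scaler and DPLL state so
+ * they survive the wholesale copy from the master below.
+ */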
+ saved_state->uapi = crtc_state->uapi;
+ saved_state->scaler_state = crtc_state->scaler_state;
+ saved_state->shared_dpll = crtc_state->shared_dpll;
+ saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
+ saved_state->crc_enabled = crtc_state->crc_enabled;
+
+ intel_crtc_free_hw_state(crtc_state);
+ memcpy(crtc_state, saved_state, sizeof(*crtc_state));
+ kfree(saved_state);
+
+ /* Re-init hw state */
+ memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
+ crtc_state->hw.enable = from_crtc_state->hw.enable;
+ crtc_state->hw.active = from_crtc_state->hw.active;
+ crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
+ crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;
+
+ /* Fix up changed-flags, plane masks and bigjoiner links for the slave's copy */
+ crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
+ crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
+ crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
+ crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
+ crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
+ crtc_state->bigjoiner_slave = true;
+ crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe;
+ crtc_state->has_audio = false;
+
+ return 0;
+}
+
static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
struct intel_crtc_state *crtc_state)
@@ -15079,6 +15143,75 @@ static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
return false;
}
+static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
+ struct intel_crtc *slave, *master;
+
+ /* Slave is being enabled: is the master still claiming this CRTC? */
+ if (old_crtc_state->bigjoiner_slave) {
+ slave = crtc;
+ master = old_crtc_state->bigjoiner_linked_crtc;
+ master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
+ if (!master_crtc_state || !needs_modeset(master_crtc_state))
+ goto claimed;
+ }
+
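+ /* Nothing to link up if the new state doesn't use bigjoiner */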
+ if (!new_crtc_state->bigjoiner)
+ return 0;
+
+ if (1 + crtc->pipe >= INTEL_NUM_PIPES(dev_priv)) {
+ DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
+ "CRTC + 1 to be used, doesn't exist\n",
+ crtc->base.base.id, crtc->base.name);
+ return -EINVAL;
+ }
+
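+ /* Claim the adjacent pipe (crtc->pipe + 1) as the bigjoiner slave */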
+ slave = new_crtc_state->bigjoiner_linked_crtc =
+ intel_get_crtc_for_pipe(dev_priv, crtc->pipe + 1);
+ slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave);
+ master = crtc;
+ if (IS_ERR(slave_crtc_state))
+ return PTR_ERR(slave_crtc_state);
+
+ /* Master is being enabled: has the slave already been enabled on its own? */
+ if (slave_crtc_state->uapi.enable)
+ goto claimed;
+
+ DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
+ slave->base.base.id, slave->base.name);
+
+ return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);
+
+claimed:
+ DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
+ "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
+ slave->base.base.id, slave->base.name,
+ master->base.base.id, master->base.name);
+ return -EINVAL;
+}
+
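+/*
+ * Break an existing master/slave link in the new state so both CRTCs can
+ * be recomputed from scratch; the link may be re-established later by
+ * intel_atomic_check_bigjoiner().
+ */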
+static int kill_bigjoiner_slave(struct intel_atomic_state *state,
+ struct intel_crtc_state *master_crtc_state)
+{
+ struct intel_crtc_state *slave_crtc_state =
+ intel_atomic_get_crtc_state(&state->base,
+ master_crtc_state->bigjoiner_linked_crtc);
+
+ if (IS_ERR(slave_crtc_state))
+ return PTR_ERR(slave_crtc_state);
+
+ slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
+ slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
+ slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
+ intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
+ return 0;
+}
+
/**
* DOC: asynchronous flip implementation
*
@@ -15246,16 +15379,33 @@ static int intel_atomic_check(struct drm_device *dev,
continue;
}
+ /* Kill the old bigjoiner link; we may re-establish it afterwards */
+ if (old_crtc_state->bigjoiner && !old_crtc_state->bigjoiner_slave) {
+ ret = kill_bigjoiner_slave(state, new_crtc_state);
+ if (ret)
+ goto fail;
+ }
+
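+ /*
+ * Disabled CRTCs sync uapi to hw state here; a bigjoiner slave
+ * instead inherits its hw state from the master.
+ */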
+ if (!new_crtc_state->uapi.enable) {
+ if (!new_crtc_state->bigjoiner_slave) {
+ intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
+ any_ms = true;
+ }
+ continue;
+ }
+
ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
if (ret)
goto fail;
- if (!new_crtc_state->hw.enable)
- continue;
-
ret = intel_modeset_pipe_config(state, new_crtc_state);
if (ret)
goto fail;
+
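+ /* Now that the pipe config is known, establish the bigjoiner link if needed */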
+ ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
+ new_crtc_state);
+ if (ret)
+ goto fail;
}
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
@@ -1079,6 +1079,15 @@ struct intel_crtc_state {
/* enable pipe csc? */
bool csc_enable;
+ /* enable pipe big joiner? */
+ bool bigjoiner;
+
+ /* big joiner slave crtc? */
+ bool bigjoiner_slave;
+
+ /* linked crtc for bigjoiner, either slave or master */
+ struct intel_crtc *bigjoiner_linked_crtc;
+
/* Display Stream compression state */
struct {
bool compression_enable;
@@ -2403,12 +2403,12 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
pipe_config->lane_count,
adjusted_mode->crtc_clock,
adjusted_mode->crtc_hdisplay,
- false);
+ pipe_config->bigjoiner);
dsc_dp_slice_count =
intel_dp_dsc_get_slice_count(intel_dp,
adjusted_mode->crtc_clock,
adjusted_mode->crtc_hdisplay,
- false);
+ pipe_config->bigjoiner);
if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
drm_dbg_kms(&dev_priv->drm,
"Compressed BPP/Slice Count not supported\n");
@@ -2424,14 +2424,15 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
* is greater than the maximum Cdclock and if slice count is even
* then we need to use 2 VDSC instances.
*/
- if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
- if (pipe_config->dsc.slice_count > 1) {
- pipe_config->dsc.dsc_split = true;
- } else {
+ if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq ||
+ pipe_config->bigjoiner) {
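+ /* Bigjoiner likewise needs the stream split across 2 VDSC instances */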
+ if (pipe_config->dsc.slice_count < 2) {
drm_dbg_kms(&dev_priv->drm,
"Cannot split stream to use 2 VDSC instances\n");
return -EINVAL;
}
+
+ pipe_config->dsc.dsc_split = true;
}
ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
@@ -2502,6 +2503,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
intel_dp->common_rates[limits.max_clock],
limits.max_bpp, adjusted_mode->crtc_clock);
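+ /* A mode beyond a single pipe's dotclock or width limit needs both pipes */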
+ if ((adjusted_mode->crtc_clock > i915->max_dotclk_freq ||
+ adjusted_mode->crtc_hdisplay > 5120) &&
+ intel_dp_can_bigjoiner(intel_dp))
+ pipe_config->bigjoiner = true;
+
/*
* Optimize for slow and wide. This is the place to add alternative
* optimization policy.
@@ -2510,7 +2516,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
/* enable compression if the mode doesn't fit available BW */
drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
- if (ret || intel_dp->force_dsc_en) {
+ if (ret || intel_dp->force_dsc_en || pipe_config->bigjoiner) {
ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
conn_state, &limits);
if (ret < 0)