@@ -114,7 +114,7 @@ static bool i915_error_injected(struct drm_i915_private *dev_priv)
fmt, ##__VA_ARGS__)
-static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
+static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
{
enum intel_pch ret = PCH_NOP;
@@ -125,16 +125,16 @@ static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
* make an educated guess as to which PCH is really there.
*/
- if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev_priv)) {
ret = PCH_IBX;
DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
- } else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
+ } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
ret = PCH_CPT;
DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
- } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
ret = PCH_LPT;
DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
- } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
ret = PCH_SPT;
DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
}
@@ -178,12 +178,14 @@ static void intel_detect_pch(struct drm_device *dev)
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CPT;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
- WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+ WARN_ON(!(IS_GEN6(dev_priv) ||
+ IS_IVYBRIDGE(dev_priv)));
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
DRM_DEBUG_KMS("Found PantherPoint PCH\n");
- WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+ WARN_ON(!(IS_GEN6(dev_priv) ||
+ IS_IVYBRIDGE(dev_priv)));
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
@@ -217,7 +219,8 @@ static void intel_detect_pch(struct drm_device *dev)
PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
pch->subsystem_device ==
PCI_SUBDEVICE_ID_QEMU)) {
- dev_priv->pch_type = intel_virt_detect_pch(dev);
+ dev_priv->pch_type =
+ intel_virt_detect_pch(dev_priv);
} else
continue;
@@ -2657,7 +2657,7 @@ struct drm_i915_cmd_table {
#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046)
-#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
+#define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.is_ivybridge)
#define IS_IVB_GT1(dev_priv) (INTEL_DEVID(dev_priv) == 0x0156 || \
INTEL_DEVID(dev_priv) == 0x0152 || \
INTEL_DEVID(dev_priv) == 0x015a)
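
For reference (illustrative sketch, not part of this patch): with IS_IVYBRIDGE() now taking dev_priv, call sites that only hold a struct drm_device pointer convert it via the existing to_i915() helper, as the i915_gem_context.c and intel_sprite.c hunks below do. A minimal example of the two calling patterns, assuming the usual i915_drv.h definitions; the function name is hypothetical:

/* Illustrative only -- not part of the patch. Assumes i915_drv.h. */
static bool example_needs_ivb_workaround(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Preferred: pass the local dev_priv straight to the converted macro. */
	if (IS_IVYBRIDGE(dev_priv))
		return true;

	/* Where no local dev_priv exists, IS_IVYBRIDGE(to_i915(dev)) also works. */
	return false;
}
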
@@ -4360,7 +4360,7 @@ i915_gem_init_hw(struct drm_device *dev)
LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
if (HAS_PCH_NOP(dev_priv)) {
- if (IS_IVYBRIDGE(dev)) {
+ if (IS_IVYBRIDGE(dev_priv)) {
u32 temp = I915_READ(GEN7_MSG_CTL);
temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
I915_WRITE(GEN7_MSG_CTL, temp);
@@ -192,7 +192,7 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
* This is only applicable for Ivy Bridge devices since
* later platforms don't have L3 control bits in the PTE.
*/
- if (IS_IVYBRIDGE(dev)) {
+ if (IS_IVYBRIDGE(to_i915(dev))) {
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
/* Failure shouldn't ever happen this early */
if (WARN_ON(ret)) {
@@ -3726,7 +3726,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
/* enable normal train */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
- if (IS_IVYBRIDGE(dev)) {
+ if (IS_IVYBRIDGE(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_NONE_IVB;
temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
} else {
@@ -3751,7 +3751,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
udelay(1000);
/* IVB wants error correction enabled */
- if (IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev_priv))
I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
FDI_FE_ERRC_ENABLE);
}
@@ -4537,7 +4537,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
assert_pch_transcoder_disabled(dev_priv, pipe);
- if (IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev_priv))
ivybridge_update_fdi_bc_bifurcation(intel_crtc);
/* Write the TU size bits before fdi link training, so that error
@@ -4851,7 +4851,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
* as some pre-programmed values are broken,
* e.g. x201.
*/
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+ if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
PF_PIPE_SEL_IVB(pipe));
else
@@ -12248,7 +12248,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (fb->modifier[0] != old_fb->modifier[0])
/* vlv: DISPLAY_FLIP fails to change tiling */
engine = NULL;
- } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+ } else if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
engine = &dev_priv->engine[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
engine = i915_gem_active_get_engine(&obj->last_write,
@@ -12524,7 +12524,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
* cstate->update_wm was already set above, so this flag will
* take effect when we commit and program watermarks.
*/
- if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev) &&
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev_priv) &&
needs_scaling(to_intel_plane_state(plane_state)) &&
!needs_scaling(old_plane_state))
pipe_config->disable_lp_wm = true;
@@ -2191,14 +2191,15 @@ static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
wm[0] = 13;
}
-static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
+static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
+ uint16_t wm[5])
{
/* ILK cursor LP0 latency is 1300 ns */
- if (IS_GEN5(dev))
+ if (IS_GEN5(dev_priv))
wm[0] = 13;
/* WaDoubleCursorLP3Latency:ivb */
- if (IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev_priv))
wm[3] *= 2;
}
@@ -2294,7 +2295,7 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
sizeof(dev_priv->wm.pri_latency));
intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
- intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
+ intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
@@ -2522,7 +2523,7 @@ static void ilk_wm_merge(struct drm_device *dev,
int last_enabled_level = max_level;
/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
- if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
+ if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
config->num_pipes_active > 1)
last_enabled_level = 0;
@@ -4617,7 +4618,7 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
- else if (IS_IVYBRIDGE(dev))
+ else if (IS_IVYBRIDGE(dev_priv))
hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
@@ -1084,7 +1084,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
case 7:
case 8:
- if (IS_IVYBRIDGE(dev)) {
+ if (IS_IVYBRIDGE(to_i915(dev))) {
intel_plane->can_scale = true;
intel_plane->max_downscale = 2;
} else {