@@ -7774,8 +7774,9 @@ enum {
#define FORCEWAKE_ACK_MEDIA_GEN9 _MMIO(0x0D88)
#define FORCEWAKE_ACK_RENDER_GEN9 _MMIO(0x0D84)
#define FORCEWAKE_ACK_BLITTER_GEN9 _MMIO(0x130044)
-#define FORCEWAKE_KERNEL 0x1
-#define FORCEWAKE_USER 0x2
+#define FORCEWAKE_KERNEL BIT(0)
+#define FORCEWAKE_USER BIT(1)
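+/*
+ * The forcewake registers are masked: the high 16 bits of a write select
+ * which of the low 16 bits take effect (writes go through
+ * _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE()), so BIT(15) is the highest
+ * request bit available for the fallback toggle.
+ */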
+#define FORCEWAKE_KERNEL_FALLBACK BIT(15)
#define FORCEWAKE_MT_ACK _MMIO(0x130040)
#define ECOBUS _MMIO(0xa180)
#define FORCEWAKE_MT_ENABLE (1<<5)
@@ -69,17 +69,104 @@ fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
HRTIMER_MODE_REL);
}
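 
+/*
+ * Poll the domain's ack register until the bits selected by @ack read back
+ * as @value.  wait_for_atomic() is used because forcewake transitions can
+ * happen in atomic context.
+ */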
+static inline int
+__wait_for_ack(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d,
+ const u32 ack,
+ const u32 value)
+{
+ return wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) & ack) == value,
+ FORCEWAKE_ACK_TIMEOUT_MS);
+}
+
+static inline int
+wait_ack_clear(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d,
+ const u32 ack)
+{
+ return __wait_for_ack(i915, d, ack, 0);
+}
+
+static inline int
+wait_ack_set(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d,
+ const u32 ack)
+{
+ return __wait_for_ack(i915, d, ack, ack);
+}
+
static inline void
fw_domain_wait_ack_clear(const struct drm_i915_private *i915,
const struct intel_uncore_forcewake_domain *d)
{
- if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
- FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
+ if (wait_ack_clear(i915, d, FORCEWAKE_KERNEL))
DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
intel_uncore_forcewake_domain_to_str(d->id));
}
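 
+/* Which transition of the FORCEWAKE_KERNEL ack we are waiting for. */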
+enum ack_type {
+ ACK_CLEAR = 0,
+ ACK_SET
+};
+
+static int
+fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d,
+ const enum ack_type type)
+{
+ const u32 ack_bit = FORCEWAKE_KERNEL;
+ const u32 value = type == ACK_SET ? ack_bit : 0;
+ unsigned int pass;
+ bool ack_detected;
+
+ /*
+ * The driver's forcewake request can collide with the hardware's own
+ * forcewake requests, and the collision can cause the hardware to not
+ * deliver the driver's ack message.
+ *
+ * Toggle a fallback bit to kick the GPU state machine, in the hope
+ * that the original ack will be delivered along with the fallback ack.
+ *
+ * This workaround is described in HSDES #1604254524.
+ */
+
+ pass = 1;
+ do {
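+ /* Let any previous fallback request drain before raising the bit again */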
+ wait_ack_clear(i915, d, FORCEWAKE_KERNEL_FALLBACK);
+
+ __raw_i915_write32(i915, d->reg_set,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL_FALLBACK));
+ /* Give the GT some time to relax before the polling frenzy; back off a little more on each pass */
+ udelay(10 * pass);
+ wait_ack_set(i915, d, FORCEWAKE_KERNEL_FALLBACK);
+
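+ /* Sample the original ack while the fallback request is still asserted */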
+ ack_detected = (__raw_i915_read32(i915, d->reg_ack) & ack_bit) == value;
+
+ __raw_i915_write32(i915, d->reg_set,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL_FALLBACK));
+ } while (!ack_detected && pass++ < 10);
+
+ DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
+ intel_uncore_forcewake_domain_to_str(d->id),
+ type == ACK_SET ? "set" : "clear",
+ __raw_i915_read32(i915, d->reg_ack),
+ pass);
+
+ return ack_detected ? 0 : -ETIMEDOUT;
+}
+
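+/*
+ * Fast path first: only when the plain wait times out do we toggle the
+ * fallback bit, and if even that fails we fall through to the plain wait
+ * once more so its DRM_ERROR reports the timeout.
+ */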
+static inline void
+fw_domain_wait_ack_clear_fallback(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d)
+{
+ if (likely(!wait_ack_clear(i915, d, FORCEWAKE_KERNEL)))
+ return;
+
+ if (fw_domain_wait_ack_with_fallback(i915, d, ACK_CLEAR))
+ fw_domain_wait_ack_clear(i915, d);
+}
+
static inline void
fw_domain_get(struct drm_i915_private *i915,
const struct intel_uncore_forcewake_domain *d)
@@ -88,17 +175,26 @@ fw_domain_get(struct drm_i915_private *i915,
}
 
static inline void
-fw_domain_wait_ack(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack_set(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d)
{
- if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
- FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
+ if (wait_ack_set(i915, d, FORCEWAKE_KERNEL))
DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
intel_uncore_forcewake_domain_to_str(d->id));
}
 
static inline void
+fw_domain_wait_ack_set_fallback(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d)
+{
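+ /* Same strategy as the clear variant: fast path, fallback, then report */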
+ if (likely(!wait_ack_set(i915, d, FORCEWAKE_KERNEL)))
+ return;
+
+ if (fw_domain_wait_ack_with_fallback(i915, d, ACK_SET))
+ fw_domain_wait_ack_set(i915, d);
+}
+
+static inline void
fw_domain_put(const struct drm_i915_private *i915,
const struct intel_uncore_forcewake_domain *d)
{
@@ -119,7 +215,27 @@ fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
}
 
for_each_fw_domain_masked(d, fw_domains, i915, tmp)
- fw_domain_wait_ack(i915, d);
+ fw_domain_wait_ack_set(i915, d);
+
+ i915->uncore.fw_domains_active |= fw_domains;
+}
+
+static void
+fw_domains_get_with_fallback(struct drm_i915_private *i915,
+ enum forcewake_domains fw_domains)
+{
+ struct intel_uncore_forcewake_domain *d;
+ unsigned int tmp;
+
+ GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
+
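+ /*
+ * Mirror fw_domains_get(): raise every wake request first, then wait
+ * for the acks, so the domains power up in parallel.
+ */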
+ for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
+ fw_domain_wait_ack_clear_fallback(i915, d);
+ fw_domain_get(i915, d);
+ }
+
+ for_each_fw_domain_masked(d, fw_domains, i915, tmp)
+ fw_domain_wait_ack_set_fallback(i915, d);
 
 i915->uncore.fw_domains_active |= fw_domains;
 }
@@ -1142,7 +1258,8 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
}
 
if (INTEL_GEN(dev_priv) >= 9) {
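+ /*
+ * Gen9 wake requests can collide with the hardware's own requests
+ * (HSDES #1604254524); use the variant that retries via
+ * FORCEWAKE_KERNEL_FALLBACK.
+ */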
- dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
+ dev_priv->uncore.funcs.force_wake_get =
+ fw_domains_get_with_fallback;
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER_GEN9,