@@ -1898,6 +1898,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
@@ -1302,3 +1302,40 @@ int i915_reg_read_ioctl(struct drm_device *dev,
return 0;
}
+
+int i915_get_reset_stats_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ struct drm_i915_reset_stats *args = data;
+ struct i915_ctx_hang_stats *hs;
+ int ret;
+
+ if (args->ctx_id == 0 && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ ring = &dev_priv->ring[RCS];
+
+ hs = i915_gem_context_get_hang_stats(ring, file, args->ctx_id);
+ if (IS_ERR_OR_NULL(hs)) {
+ mutex_unlock(&dev->struct_mutex);
+ return hs == NULL ? -ENODEV : PTR_ERR(hs);
+ }
+
+ if (capable(CAP_SYS_ADMIN))
+ args->reset_count = i915_reset_count(&dev_priv->gpu_error);
+ else
+ args->reset_count = 0;
+
+ args->batch_active = hs->batch_active;
+ args->batch_pending = hs->batch_pending;
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
@@ -1896,6 +1896,8 @@ extern int intel_enable_rc6(const struct drm_device *dev);
extern bool i915_semaphore_is_enabled(struct drm_device *dev);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
/* overlay */
#ifdef CONFIG_DEBUG_FS
@@ -198,6 +198,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_GEM_SET_CACHING 0x2f
#define DRM_I915_GEM_GET_CACHING 0x30
#define DRM_I915_REG_READ 0x31
+#define DRM_I915_GET_RESET_STATS 0x32
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -247,6 +248,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
+#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -980,4 +982,19 @@ struct drm_i915_reg_read {
__u64 offset;
__u64 val; /* Return value */
};
+
+struct drm_i915_reset_stats {
+ __u32 ctx_id;
+ __u32 flags;
+
+ /* For all contexts */
+ __u32 reset_count;
+
+ /* For this context */
+ __u32 batch_active;
+ __u32 batch_pending;
+
+ __u32 pad;
+};
+
#endif /* _UAPI_I915_DRM_H_ */
This ioctl returns reset stats for the specified context. The returned
struct contains context loss counters.

reset_count:   all resets across all contexts
batch_active:  active batches lost on resets
batch_pending: pending batches lost on resets

v2: get rid of state tracking completely and deliver only counts. Idea
    from Chris Wilson.

v3: fix commit message

v4: default context handled inside i915_gem_context_get_hang_stats

v5: reset_count only for privileged process

v6: ctx=0 needs CAP_SYS_ADMIN for batch_* counters (Chris Wilson)

Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
Cc: Ian Romanick <idr@freedesktop.org>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_dma.c |  1 +
 drivers/gpu/drm/i915/i915_drv.c | 37 +++++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/i915_drv.h |  2 ++
 include/uapi/drm/i915_drm.h     | 17 +++++++++++++++++
 4 files changed, 57 insertions(+)
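For reference, a minimal userspace sketch of how the new ioctl could be
consumed (not part of this patch; assumes libdrm's drmIoctl() wrapper and
the uapi header added above, with an illustrative helper name):

	/*
	 * Hypothetical example: query reset stats for a context.
	 * Pass ctx_id == 0 for the default context (needs CAP_SYS_ADMIN).
	 */
	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <xf86drm.h>
	#include <i915_drm.h>

	static int query_reset_stats(int fd, __u32 ctx_id)
	{
		struct drm_i915_reset_stats stats;

		memset(&stats, 0, sizeof(stats));
		stats.ctx_id = ctx_id;

		if (drmIoctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats))
			return -errno;

		/* reset_count is reported as 0 for unprivileged callers */
		printf("resets=%u, active lost=%u, pending lost=%u\n",
		       stats.reset_count, stats.batch_active,
		       stats.batch_pending);

		return 0;
	}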