@@ -1846,6 +1846,134 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
+
+/*
+ * The second test catches a dword access that begins below a range but
+ * overlaps into it; if registers were guaranteed to be dword aligned it
+ * would be redundant.
+ */
+#define REG_IN_RANGE(reg, begin, end) \
+	(((reg) >= (begin) && (reg) <= (end)) || \
+	 (((reg) + 3) >= (begin) && ((reg) + 3) <= (end)))
+
+/*
+ * This is here to avoid hanging the GPU from userspace.
+ * The ranges are defined in the "Register Address Map" section of the
+ * hardware documentation.
+ */
+static inline int
+reg_in_rsvd_range(struct drm_device *dev, u32 reg)
+{
+ if (INTEL_INFO(dev)->gen < 6) {
+ if (REG_IN_RANGE(reg, 0x1000, 0x1fff) ||
+ ((IS_BROADWATER(dev) || IS_CRESTLINE(dev)) ?
+ REG_IN_RANGE(reg, 0x4000, 0x4fff) : 0) ||
+ REG_IN_RANGE(reg, 0x9000, 0x9fff) ||
+ REG_IN_RANGE(reg, 0xb000, 0xffff) ||
+ REG_IN_RANGE(reg, 0x14000, 0x2ffff) ||
+ REG_IN_RANGE(reg, 0x40000, 0x5ffff))
+ return 1;
+
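+		/* Everything above the documented pre-gen6 register map is reserved. */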
+ if (reg > 0x73fff)
+ return 1;
+ } else {
+ if (REG_IN_RANGE(reg, 0x1000, 0x1fff) ||
+ REG_IN_RANGE(reg, 0x8800, 0x88ff) ||
+ REG_IN_RANGE(reg, 0x9000, 0x9fff) ||
+ REG_IN_RANGE(reg, 0xb000, 0xffff) ||
+ REG_IN_RANGE(reg, 0x15000, 0x21fff) ||
+ REG_IN_RANGE(reg, 0x23000, 0x23fff) ||
+ REG_IN_RANGE(reg, 0x25000, 0x2ffff) ||
+ REG_IN_RANGE(reg, 0x40000, 0xfffff) ||
+ REG_IN_RANGE(reg, 0x108000, 0x13ffff))
+ return 1;
+
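+		/* Likewise, everything above the documented gen6 map is reserved. */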
+ if (reg > 0x17ffff)
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Per-register blacklists; nothing is blacklisted yet. */
+#define REG_IN_READ_BLACKLIST(reg) 0
+#define REG_IN_WRITE_BLACKLIST(reg) 0
+
+static inline int
+reg_read_allowed(struct drm_device *dev, u32 offset)
+{
+ if (offset & 0x3)
+ return 0;
+
+ if (reg_in_rsvd_range(dev, offset))
+ return 0;
+
+ if (REG_IN_READ_BLACKLIST(offset))
+ return 0;
+
+ return 1;
+}
+
+static inline int
+reg_write_allowed(struct drm_device *dev, u32 offset)
+{
+ if (offset & 0x3)
+ return 0;
+
+ if (reg_in_rsvd_range(dev, offset))
+ return 0;
+
+ if (REG_IN_WRITE_BLACKLIST(offset))
+ return 0;
+
+ return 1;
+}
+
+static int
+i915_read_register_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+	struct drm_intel_read_reg *args = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (!reg_read_allowed(dev, args->offset)) {
+ args->value = 0xffffffff;
+ return -EINVAL;
+ }
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+ args->value = (u32)i915_gt_read(dev_priv, args->offset);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+static int
+i915_write_register_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+	struct drm_intel_write_reg *args = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (!reg_write_allowed(dev, args->offset))
+ return -EINVAL;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	DRM_INFO("Userspace register write: 0x%x <- 0x%x\n",
+		 args->offset, (u32)args->value);
+	/* Remember that userspace has been writing registers behind our back. */
+	dev_priv->user_tainted = true;
+
+ i915_gt_write(dev_priv, args->offset, (u32)args->value);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
/**
* Tells the intel_ips driver that the i915 driver is now loaded, if
* IPS got loaded first.
@@ -2276,6 +2404,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_READ_REGISTER, i915_read_register_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_WRITE_REGISTER, i915_write_register_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
@@ -703,6 +703,8 @@ typedef struct drm_i915_private {
struct intel_fbdev *fbdev;
struct drm_property *broadcast_rgb_property;
+
+	/* set when userspace writes GT registers directly via ioctl */
+	bool user_tainted;
} drm_i915_private_t;
struct drm_i915_gem_object {
@@ -198,6 +198,8 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
#define DRM_I915_OVERLAY_ATTRS 0x28
#define DRM_I915_GEM_EXECBUFFER2 0x29
+#define DRM_I915_READ_REGISTER 0x2a
+#define DRM_I915_WRITE_REGISTER 0x2b
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -239,6 +241,8 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_IOCTL_I915_OVERLAY_ATTRS, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
+#define DRM_IOCTL_I915_READ_REGISTER DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_READ_REGISTER, struct drm_intel_read_reg)
+#define DRM_IOCTL_I915_WRITE_REGISTER DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_WRITE_REGISTER, struct drm_intel_write_reg)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -844,4 +848,27 @@ struct drm_intel_overlay_attrs {
__u32 gamma5;
};
+struct drm_intel_read_reg {
+	/* register offset to read */
+	__u32 offset;
+
+	/* register size; reserved for future use */
+	__u8 size;
+
+	/* returned value; high 4 bytes reserved for future use */
+	__u64 value;
+};
+
+struct drm_intel_write_reg {
+	/* register offset to write */
+	__u32 offset;
+
+	/* register size; reserved for future use */
+	__u8 size;
+
+	/* value to write; high 4 bytes reserved for future use */
+	__u64 value;
+};
+
#endif /* _I915_DRM_H_ */
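
For illustration only, here is a minimal userspace sketch of how the two new
ioctls might be driven. It assumes libdrm's drmOpen()/drmIoctl()/drmClose()
wrappers and a copy of this patched header; the offset 0x2030 is a purely
hypothetical stand-in, not a recommendation:

	#include <stdio.h>
	#include <xf86drm.h>
	#include "i915_drm.h"

	int main(void)
	{
		/* hypothetical register offset, for demonstration only */
		struct drm_intel_read_reg rd = { .offset = 0x2030 };
		struct drm_intel_write_reg wr = { 0 };
		int fd, ret;

		fd = drmOpen("i915", NULL);
		if (fd < 0)
			return 1;

		/* unprivileged dword read through the new ioctl */
		ret = drmIoctl(fd, DRM_IOCTL_I915_READ_REGISTER, &rd);
		if (ret == 0)
			printf("reg 0x%x = 0x%llx\n", rd.offset,
			       (unsigned long long)rd.value);

		/* write the value back; needs DRM auth and root */
		wr.offset = rd.offset;
		wr.value = rd.value;
		if (ret == 0)
			ret = drmIoctl(fd, DRM_IOCTL_I915_WRITE_REGISTER, &wr);

		drmClose(fd);
		return ret ? 1 : 0;
	}

Note the asymmetry in the ioctl table above: reads are exposed as plain
DRM_UNLOCKED, while writes additionally require DRM_AUTH and DRM_ROOT_ONLY
and set dev_priv->user_tainted.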