@@ -29,8 +29,30 @@
#include "i915_drm.h"
#include "intel_drv.h"
+static int
+i915_context_gen_id(struct drm_i915_private *dev_priv,
+ struct drm_i915_gem_context *ctx)
+{
+ int ret, id;
+again:
+ if (idr_pre_get(&dev_priv->i915_ctx_idr, GFP_KERNEL) == 0) {
+ return -ENOMEM;
+ }
+
+ spin_lock(&dev_priv->i915_ctx_lock);
+ /* Don't start at 0 to make finding non-context users easier */
+ ret = idr_get_new_above(&dev_priv->i915_ctx_idr, ctx, 1, &id);
+ if (ret == -EAGAIN) {
+ spin_unlock(&dev_priv->i915_ctx_lock);
+ goto again;
+ }
+ spin_unlock(&dev_priv->i915_ctx_lock);
+ /* idr_get_new_above can still fail (e.g. -ENOSPC); id is valid only on 0 */
+ return ret ? ret : id;
+}
+
static struct drm_i915_gem_context *
-i915_gem_lookup_ctx_id(struct drm_device *dev,
+i915_context_lookup_id(struct drm_device *dev,
uint32_t id)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -38,23 +60,169 @@ i915_gem_lookup_ctx_id(struct drm_device *dev,
}
static void
-i915_gem_del_ctx_id(struct drm_device *dev,
+i915_context_del_id(struct drm_device *dev, /* NOTE(review): idr_remove below runs without i915_ctx_lock, unlike i915_context_gen_id -- verify */
 struct drm_i915_gem_context *ctx)
 {
 struct drm_i915_private *dev_priv = dev->dev_private;
 idr_remove(&dev_priv->i915_ctx_idr, ctx->ctx_id);
 }
-int
-i915_gem_alloc_hw_context(struct drm_device *dev,
+/**
+ * i915_context_alloc_backing_obj - Allocate and pin space in the global GTT
+ * for use by the HW to save, and restore context information. Returns the
+ * pinned GEM object, or NULL on any failure. */
+static struct drm_gem_object *
+i915_context_alloc_backing_obj(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ int ret;
+ size_t size = dev_priv->ctx_size;
+ if (!size) {
+ /* XXX size is HW specific, this needs to be fixed!! */
+ size = 4096;
+ dev_priv->ctx_size = size;
+ }
+
+ obj = i915_gem_alloc_object(dev, size);
+
+ if (!obj) {
+ DRM_DEBUG("Failed to allocate context\n");
+ return NULL;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ ret = i915_gem_object_pin(obj, 0x1000, false); /* 4KiB alignment; third arg presumably "mappable" -- TODO confirm */
+ if (ret) {
+ DRM_ERROR("Failed to pin context: %d\n", ret);
+ goto err_unref;
+ }
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, 1); /* assumes 1 == write access -- verify */
+ if (ret) {
+ DRM_ERROR("failed to set domain on context: %d\n", ret);
+ goto err_unpin;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ return &obj->base;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&dev->struct_mutex);
+ return NULL;
+}
+/**
+ * i915_context_hw_init - Emit commands that make @ctx valid on @ring.
+ * This may need to be hardware dependent in the future, but for now just
+ * creates a CCID for the context, saves state, and then
+ * restores that state.
+ * @dev: drm device
+ * @ctx: context whose backing object will receive the saved HW state.
+ * @ring: ring to submit commands to. Render ring only for now.
+ */
+static void i915_context_hw_init(struct drm_device *dev,
+ struct drm_i915_gem_context *ctx,
+ struct intel_ring_buffer *ring)
+{
+ if (ring->context_switch == NULL) /* ring has no context support: nothing to do */
+ return;
+
+ mutex_lock(&dev->struct_mutex);
+ if (intel_ring_begin(ring, 2)) {
+ mutex_unlock(&dev->struct_mutex);
+ return; /* NOTE(review): ring_begin failure is silently swallowed */
+ }
+ intel_ring_emit(ring, MI_FLUSH);
+ intel_ring_emit(ring, MI_NOOP | (1 << 22) | ctx->ctx_id); /* NOOP with ID-write bit 22; assumes ctx_id fits the field -- verify */
+ intel_ring_advance(ring);
+ ring->context_switch(ring, ctx, I915_CONTEXT_SAVE_ONLY);
+ ring->context_switch(ring, ctx, I915_CONTEXT_NORMAL_SWITCH);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static void i915_context_hw_fini(struct drm_device *dev,
+ struct drm_i915_gem_context *ctx,
+ struct intel_ring_buffer *ring)
+{
+ /* XXX We can prevent restoring contexts, but not saving them
+ * so if we're going to take away our backing context object
+ * of the last context, we have to switch now.
+ */
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ if (ring->last_context == ctx && ctx != dev_priv->default_context) {
+ mutex_lock(&dev->struct_mutex);
+ ring->context_switch(ring, dev_priv->default_context,
+ I915_CONTEXT_NORMAL_SWITCH);
+ mutex_unlock(&dev->struct_mutex);
+ }
+}
+
+static int
+i915_alloc_hw_context(struct drm_device *dev,
 uint64_t aperture_size,
 struct drm_i915_gem_context **ctx_out)
 {
- return -ENOMEM;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_context *ctx;
+ int ret = 0;
+
+ ctx = kzalloc(sizeof(struct drm_i915_gem_context), GFP_KERNEL);
+ if (ctx == NULL)
+ return -ENOMEM;
+
+ ret = i915_context_gen_id(dev_priv, ctx);
+ if (ret < 0)
+ goto out;
+ ctx->ctx_id = ret; /* test the signed return, not ctx_id, which may be unsigned */
+ ret = 0;
+
+ ctx->ctx_obj = i915_context_alloc_backing_obj(dev);
+ if (ctx->ctx_obj == NULL) {
+ ret = -ENOMEM;
+ goto id_out;
+ }
+
+ if (!aperture_size)
+ aperture_size = 256 << 20;
+ ctx->aperture_size_mb = aperture_size >> 20;
+
+ i915_context_hw_init(dev, ctx, &dev_priv->ring[RCS]);
+
+ mutex_init(&ctx->slot_mtx);
+ INIT_LIST_HEAD(&ctx->context_list);
+ INIT_LIST_HEAD(&ctx->file_list);
+
+ *ctx_out = ctx;
+ return ret;
+
+id_out:
+ i915_context_del_id(dev, ctx);
+out:
+ kfree(ctx);
+ return ret;
+}
+
+/**
+ * i915_destroy_hw_context - Tear down everything associated with @ctx (HW
+ * state, backing object, IDR id) without freeing the context struct itself;
+ * the caller owns and frees @ctx. Always returns 0. */
+static int
+i915_destroy_hw_context(struct drm_device *dev,
+ struct drm_i915_gem_context *ctx)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ i915_context_hw_fini(dev, ctx, (&dev_priv->ring[RCS]));
+ i915_gem_free_object(ctx->ctx_obj); /* NOTE(review): obj was pinned at alloc and struct_mutex is not held here -- verify */
+ i915_context_del_id(dev, ctx);
+ return 0;
 }
int
-i915_gem_ctx_create_ioctl(struct drm_device *dev, void *data,
+i915_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_ctx_create *args = data;
@@ -63,19 +231,22 @@ i915_gem_ctx_create_ioctl(struct drm_device *dev, void *data,
int slots = args->slot_count;
int i, ret = 0;
- /* TODO: sanitize aperture_size*/
- ret = i915_gem_alloc_hw_context(dev, args->aperture_size, &ctx);
+ if(args->aperture_size & (args->aperture_size - 1))
+ return -EINVAL;
+
+ ret = i915_alloc_hw_context(dev, args->aperture_size, &ctx);
if (ret) {
- goto out;
+ DRM_DEBUG_DRIVER("Couldn't allocate a HW context\n");
+ return -ENOMEM;
}
ctx->bufs = drm_malloc_ab(slots + 1, sizeof(struct drm_gem_object*));
if (ctx->bufs == NULL) {
+ i915_destroy_hw_context(dev, ctx);
kfree(ctx);
- drm_free_large(ctx->bufs);
- ret = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
+
ctx->slot_count = slots;
for(i = 0; i < slots + 1; i++) {
ctx->bufs[i] = NULL;
@@ -84,30 +255,86 @@ i915_gem_ctx_create_ioctl(struct drm_device *dev, void *data,
list_add(&ctx->file_list, &file_priv->context.context_list);
args->ctx_id = ctx->ctx_id;
-out:
+
return ret;
}
+static int
+do_context_destroy(struct drm_device *dev, struct drm_i915_gem_context *ctx) /* full teardown + free; may sleep (hw_fini takes struct_mutex) */
+{
+ list_del(&ctx->file_list); /* safe for default_context: its file_list is self-linked via INIT_LIST_HEAD */
+ drm_free_large(ctx->bufs); /* bufs may be NULL for the default context -- assumes drm_free_large(NULL) is a no-op; verify */
+ i915_destroy_hw_context(dev, ctx);
+ kfree(ctx);
+ return 0;
+}
+
int
-i915_gem_ctx_destroy_ioctl(struct drm_device *dev, void *data,
+i915_context_destroy_ioctl(struct drm_device *dev, void *data,
 struct drm_file *file)
 {
 struct drm_i915_gem_ctx_destroy *args = data;
- struct drm_i915_gem_context *ctx, *ctx_temp;
- struct list_head *pos, *n;
- struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_gem_context *ctx;
- ctx = i915_gem_lookup_ctx_id(dev, args->ctx_id);
+ ctx = i915_context_lookup_id(dev, args->ctx_id);
 if (ctx == NULL) {
 return -EINVAL;
 }
+ return do_context_destroy(dev, ctx); /* FIXME(review): ctx ownership is never checked -- any fd can destroy any context id */
+}
- list_for_each_safe(pos, n, &file_priv->context.context_list) {
- ctx_temp = list_entry(pos, struct drm_i915_gem_context, context_list);
+void i915_context_close(struct drm_device *dev, struct drm_file *file) /* file-close hook: destroy every context still owned by @file */
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_gem_context *ctx, *next;
+ list_for_each_entry_safe(ctx, next, &file_priv->context.context_list,
+ file_list) {
+ do_context_destroy(dev, ctx); /* unlinks ctx from this list and frees it, hence _safe iteration */
 }
- i915_gem_del_ctx_id(dev, ctx);
- kfree(ctx);
+}
- /* TODO: ring switch may be needed */
+void i915_context_init(struct drm_device *dev) /* driver-load: set up the ctx IDR and create the default context */
+{
+ int ret;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ if (!HAS_HW_CONTEXTS(dev)) {
+ DRM_DEBUG_DRIVER("Context support disabled, device does not support\n");
+ dev_priv->ctx_disable = 1;
+ return;
+ }
+
+ idr_init(&dev_priv->i915_ctx_idr);
+ spin_lock_init(&dev_priv->i915_ctx_lock);
+ ret = i915_alloc_hw_context(dev, 0, &dev_priv->default_context);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Context support disabled [%d]\n", ret);
+ dev_priv->ctx_disable = 1;
+ idr_destroy(&dev_priv->i915_ctx_idr);
+ } else {
+ DRM_DEBUG_DRIVER("Context support enabled\n"); /* no args: format has no specifiers */
+ }
+}
+
+static
+int i915_context_idr_cleanup(int id, void *p, void *data) /* idr_for_each() callback: reap contexts that outlived their file */
+{
+ struct drm_device *dev = (struct drm_device *)data;
+ struct drm_i915_gem_context *ctx = (struct drm_i915_gem_context *)p;
+ DRM_DEBUG_DRIVER("Destroying abandoned context %d\n", ctx->ctx_id); /* NOTE(review): %d vs possibly-unsigned ctx_id -- verify type */
+ do_context_destroy(dev, ctx);
 return 0;
 }
+
+void i915_context_fini(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ if (dev_priv->ctx_disable)
+ return;
+
+ i915_destroy_hw_context(dev, dev_priv->default_context);
+ /* Unload time: no concurrent IDR users remain, so do not hold the
+ * spinlock here -- the cleanup path sleeps (mutex_lock, object free). */
+ idr_for_each(&dev_priv->i915_ctx_idr, i915_context_idr_cleanup, dev);
+ idr_remove_all(&dev_priv->i915_ctx_idr);
+ idr_destroy(&dev_priv->i915_ctx_idr);
+}
@@ -2035,6 +2035,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
ips_ping_for_i915_load();
+ i915_context_init(dev);
return 0;
out_gem_unload:
@@ -2060,6 +2061,8 @@ int i915_driver_unload(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
+ i915_context_fini(dev);
+
spin_lock(&mchdev_lock);
i915_mch_dev = NULL;
spin_unlock(&mchdev_lock);
@@ -2159,6 +2162,8 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
spin_lock_init(&file_priv->mm.lock);
INIT_LIST_HEAD(&file_priv->mm.request_list);
+ mutex_init(&file_priv->context.mtx);
+ INIT_LIST_HEAD(&file_priv->context.context_list);
return 0;
}
@@ -2197,6 +2202,7 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
i915_gem_release(dev, file_priv);
+ i915_context_close(dev, file_priv);
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
@@ -2249,8 +2255,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_CTX_CREATE, i915_gem_ctx_create_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_CTX_DESTROY, i915_gem_ctx_destroy_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CTX_CREATE, i915_context_create_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CTX_DESTROY, i915_context_destroy_ioctl, DRM_UNLOCKED),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
@@ -282,7 +282,7 @@ typedef struct drm_i915_private {
/** Device specific context info */
uint32_t ctx_size;
- struct drm_i915_gem_context *default_ctx;
+ struct drm_i915_gem_context *default_context;
struct spinlock i915_ctx_lock;
struct idr i915_ctx_idr;
bool ctx_disable;
@@ -1120,9 +1120,9 @@ int i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int i915_gem_ctx_create_ioctl(struct drm_device *dev, void *data,
+int i915_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-int i915_gem_ctx_destroy_ioctl(struct drm_device *dev, void *data,
+int i915_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
@@ -1316,6 +1316,11 @@ extern void intel_display_print_error_state(struct seq_file *m,
struct intel_display_error_state *error);
#endif
+/* context */
+extern void i915_context_init(struct drm_device *dev);
+extern void i915_context_fini(struct drm_device *dev);
+extern void i915_context_close(struct drm_device *dev, struct drm_file *file);
+
#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
#define BEGIN_LP_RING(n) \
@@ -750,6 +750,17 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
return 0;
}
+static void
+render_ring_ctx_switch(struct intel_ring_buffer *ring,
+ struct drm_i915_gem_context *ctx,
+ uint32_t flags) /* XXX(review): @flags (SAVE_ONLY vs NORMAL) currently ignored */
+{
+ if (ring->last_context == ctx)
+ return;
+
+ ring->last_context = ctx; /* placeholder: tracks the switch but emits no HW commands yet */
+}
+
static void cleanup_status_page(struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
@@ -1015,7 +1026,8 @@ static const struct intel_ring_buffer render_ring = {
.irq_get = render_ring_get_irq,
.irq_put = render_ring_put_irq,
.dispatch_execbuffer = render_ring_dispatch_execbuffer,
- .cleanup = render_ring_cleanup,
+ .cleanup = render_ring_cleanup,
+ .context_switch = render_ring_ctx_switch,
};
/* ring buffer for bit-stream decoder */
@@ -35,6 +35,9 @@ struct intel_hw_status_page {
#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1((ring)->mmio_base))
+#define I915_CONTEXT_NORMAL_SWITCH 0
+#define I915_CONTEXT_SAVE_ONLY 1
+
struct intel_ring_buffer {
const char *name;
enum intel_ring_id {
@@ -76,6 +79,10 @@ struct intel_ring_buffer {
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
u32 offset, u32 length);
void (*cleanup)(struct intel_ring_buffer *ring);
+ struct drm_i915_gem_context *last_context;
+ void (*context_switch)(struct intel_ring_buffer *ring,
+ struct drm_i915_gem_context *ctx,
+ uint32_t flags);
/**
* List of objects currently involved in rendering from the