@@ -3958,6 +3958,11 @@ int num_ioctls;</synopsis>
!Idrivers/gpu/drm/i915/i915_cmd_parser.c
</sect2>
<sect2>
+ <title>Batchbuffer Pools</title>
+!Pdrivers/gpu/drm/i915/i915_gem_batch_pool.c batch pool
+!Idrivers/gpu/drm/i915/i915_gem_batch_pool.c
+ </sect2>
+ <sect2>
<title>Logical Rings, Logical Ring Contexts and Execlists</title>
!Pdrivers/gpu/drm/i915/intel_lrc.c Logical Rings, Logical Ring Contexts and Execlists
!Idrivers/gpu/drm/i915/intel_lrc.c
@@ -19,6 +19,7 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
# GEM code
i915-y += i915_cmd_parser.o \
+ i915_gem_batch_pool.o \
i915_gem_context.o \
i915_gem_render_state.o \
i915_gem_debug.o \
@@ -1126,6 +1126,12 @@ struct intel_l3_parity {
int which_slice;
};
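+/*
+ * A pool of batch buffers: buffers currently handed out via
+ * i915_gem_batch_pool_get() live on active_list, while buffers
+ * available for reuse live on inactive_list.
+ */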
+struct i915_gem_batch_pool {
+ struct drm_device *dev;
+ struct list_head active_list;
+ struct list_head inactive_list;
+};
+
struct i915_gem_mm {
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
@@ -1797,6 +1803,8 @@ struct drm_i915_gem_object {
/** Used in execbuf to temporarily hold a ref */
struct list_head obj_exec_link;
+ struct list_head batch_pool_list;
+
/**
* This is set if the object is on the active lists (has pending
	 * rendering and so a non-zero seqno), and is not set if it is on
@@ -2758,6 +2766,15 @@ void i915_destroy_error_state(struct drm_device *dev);
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
+/* i915_gem_batch_pool.c */
+void i915_gem_batch_pool_init(struct drm_device *dev,
+ struct i915_gem_batch_pool *pool);
+void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
+struct drm_i915_gem_object *
+i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);
+void i915_gem_batch_pool_put(struct i915_gem_batch_pool *pool,
+ struct drm_i915_gem_object *obj);
+
/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(void);
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
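
The intended pairing is init/fini around the pool's lifetime and get/put
around each borrowed buffer. A minimal usage sketch of the API declared
above (the caller context, batch_len, and error handling are illustrative,
not part of this patch):

	struct i915_gem_batch_pool pool;
	struct drm_i915_gem_object *shadow;

	/* once, e.g. at driver or ring initialization */
	i915_gem_batch_pool_init(dev, &pool);

	/* per submission, with struct_mutex held: borrow a buffer
	 * of at least batch_len bytes */
	shadow = i915_gem_batch_pool_get(&pool, batch_len);
	if (IS_ERR(shadow))
		return PTR_ERR(shadow);

	/* ... copy and parse the user batch into shadow ... */

	/* make the buffer eligible for reuse by later _get() calls */
	i915_gem_batch_pool_put(&pool, shadow);

	/* at teardown, with struct_mutex held and all buffers returned */
	i915_gem_batch_pool_fini(&pool);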
@@ -4337,6 +4337,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->ring_list);
INIT_LIST_HEAD(&obj->obj_exec_link);
INIT_LIST_HEAD(&obj->vma_list);
+ INIT_LIST_HEAD(&obj->batch_pool_list);
obj->ops = ops;
new file mode 100644
@@ -0,0 +1,153 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+
+/**
+ * DOC: batch pool
+ *
+ * In order to submit batch buffers as 'secure', the software command parser
+ * must ensure that a batch buffer cannot be modified after parsing. It does
+ * this by copying the user provided batch buffer contents to a kernel owned
+ * buffer from which the hardware will actually execute, and by carefully
+ * managing the address space bindings for such buffers.
+ *
+ * The batch pool framework provides a mechanism for the driver to manage a
+ * set of scratch buffers to use for this purpose. The framework can be
+ * extended to support other use cases should they arise.
+ */
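+
+/*
+ * Buffer lifecycle in brief: i915_gem_batch_pool_get() hands out a
+ * buffer and moves it to the pool's active_list;
+ * i915_gem_batch_pool_put() marks it I915_MADV_DONTNEED (so the
+ * shrinker may reclaim its backing storage under memory pressure) and
+ * moves it back to the inactive_list, where a later _get() of a
+ * similar size can reuse it.
+ */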
+
+/**
+ * i915_gem_batch_pool_init() - initialize a batch buffer pool
+ * @dev: the drm device
+ * @pool: the batch buffer pool
+ */
+void i915_gem_batch_pool_init(struct drm_device *dev,
+ struct i915_gem_batch_pool *pool)
+{
+ pool->dev = dev;
+ INIT_LIST_HEAD(&pool->active_list);
+ INIT_LIST_HEAD(&pool->inactive_list);
+}
+
+/**
+ * i915_gem_batch_pool_fini() - clean up a batch buffer pool
+ * @pool: the pool to clean up
+ *
+ * Note: Callers must hold the struct_mutex. Callers must also ensure
+ * that all buffers have been returned to the pool.
+ */
+void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
+{
+ WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
+ WARN_ON(!list_empty(&pool->active_list));
+
+ while (!list_empty(&pool->inactive_list)) {
+ struct drm_i915_gem_object *obj =
+ list_first_entry(&pool->inactive_list,
+ struct drm_i915_gem_object,
+ batch_pool_list);
+
+ list_del_init(&obj->batch_pool_list);
+ drm_gem_object_unreference(&obj->base);
+ }
+}
+
+/**
+ * i915_gem_batch_pool_get() - select a buffer from the pool
+ * @pool: the batch buffer pool
+ * @size: the minimum desired size of the returned buffer
+ *
+ * Finds or allocates a batch buffer in the pool with at least the requested
+ * size. The buffer will not be used to satisfy further
+ * i915_gem_batch_pool_get() requests until the corresponding
+ * i915_gem_batch_pool_put() call. The caller is responsible for any domain or
+ * active/inactive management for the returned buffer.
+ *
+ * Note: Callers must hold the struct_mutex.
+ *
+ * Return: the selected batch buffer object
+ */
+struct drm_i915_gem_object *
+i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
+ size_t size)
+{
+ struct drm_i915_gem_object *obj = NULL;
+ struct drm_i915_gem_object *tmp;
+ bool was_purged;
+
+ WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
+
+ do {
+ was_purged = false;
+
+ list_for_each_entry(tmp, &pool->inactive_list, batch_pool_list) {
+ /*
+ * Select a buffer that is at least as big as needed
+ * but not 'too much' bigger. A better way to do this
+ * might be to bucket the pool objects based on size.
+ */
+ if (tmp->base.size >= size &&
+ tmp->base.size <= (2 * size)) {
+ obj = tmp;
+ break;
+ }
+ }
+
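+		/*
+		 * If the shrinker purged the chosen buffer's backing
+		 * storage while it idled on the inactive list, drop it
+		 * from the pool and rescan for another candidate.
+		 */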
+ if (obj && obj->madv == __I915_MADV_PURGED) {
+ was_purged = true;
+ list_del(&obj->batch_pool_list);
+ drm_gem_object_unreference(&obj->base);
+ obj = NULL;
+ }
+ } while (was_purged);
+
+ if (!obj) {
+ obj = i915_gem_alloc_object(pool->dev, size);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ list_add_tail(&obj->batch_pool_list, &pool->inactive_list);
+ }
+
+ obj->madv = I915_MADV_WILLNEED;
+ list_move_tail(&obj->batch_pool_list, &pool->active_list);
+
+ return obj;
+}
+
+/**
+ * i915_gem_batch_pool_put() - return a buffer to the pool
+ * @pool: the batch buffer pool
+ * @obj: the batch buffer object
+ *
+ * Note: Callers must hold the struct_mutex.
+ */
+void i915_gem_batch_pool_put(struct i915_gem_batch_pool *pool,
+ struct drm_i915_gem_object *obj)
+{
+ WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
+ obj->madv = I915_MADV_DONTNEED;
+ list_move_tail(&obj->batch_pool_list, &pool->inactive_list);
+}