@@ -223,6 +223,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_GEM_GET_CACHING 0x30
#define DRM_I915_REG_READ 0x31
#define DRM_I915_GET_RESET_STATS 0x32
+#define DRM_I915_GEM_CREATE2 0x34
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -253,6 +254,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
+#define DRM_IOCTL_I915_GEM_CREATE2 DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE2, struct drm_i915_gem_create2)
#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
@@ -434,6 +436,111 @@ struct drm_i915_gem_create {
__u32 pad;
};
+struct drm_i915_gem_create2 {
+	/**
+	 * Requested size for the object.
+	 *
+	 * The (page-aligned) allocated size for the object will be returned.
+	 */
+	__u64 size;
+
+	/**
+	 * Requested offset for the object.
+	 *
+	 * Can be used for "soft-pinning" the object into the per-process
+	 * GTT of the target context upon creation. Only possible if using
+	 * contexts and per-process GTTs.
+	 *
+	 * The address must be page-aligned, and have the valid bit set.
+	 */
+	__u64 offset;
+#define I915_CREATE_OFFSET_VALID (1<<0)
+
+	/**
+	 * Target context of the object.
+	 *
+	 * The context of the object can be used for setting the initial offset
+	 * of the object in the per-process GTT.
+	 */
+	__u32 context;
+
+	/**
+	 * Requested placement (which memory domain)
+	 *
+	 * You can request that the object be created from special memory
+	 * rather than regular system pages. Such irregular objects may
+	 * have certain restrictions (for example, CPU access to a stolen
+	 * object is not allowed).
+	 */
+	__u32 placement;
+#define I915_CREATE_PLACEMENT_SYSTEM 0
+#define I915_CREATE_PLACEMENT_STOLEN 1 /* Cannot use CPU mmaps or pread/pwrite */
+	/**
+	 * Requested domain (which cache domain)
+	 *
+	 * You can request that the object be created from memory in a
+	 * certain cache domain (such as RENDER, CPU or GTT). In some cases,
+	 * this then may allocate from a pool of such pages to avoid any
+	 * migration overhead, but it is always equivalent to performing
+	 * an explicit set-domain(read=DOMAIN, write=DOMAIN) on the
+	 * constructed object.
+	 *
+	 * Set to 0 to leave the initial domain unspecified; it then defaults
+	 * to the domain set by the constructor.
+	 *
+	 * See DRM_IOCTL_I915_GEM_SET_DOMAIN
+	 */
+	__u32 domain;
+
+	/**
+	 * Requested cache level.
+	 *
+	 * See DRM_IOCTL_I915_GEM_SET_CACHING
+	 */
+	__u32 caching;
+
+	/**
+	 * Requested tiling mode.
+	 *
+	 * See DRM_IOCTL_I915_GEM_SET_TILING
+	 */
+	__u32 tiling_mode;
+	/**
+	 * Requested stride for tiling.
+	 *
+	 * See DRM_IOCTL_I915_GEM_SET_TILING
+	 */
+	__u32 stride;
+
+	/**
+	 * Requested madvise priority.
+	 *
+	 * See DRM_IOCTL_I915_GEM_MADVISE
+	 */
+	__u32 madvise;
+
+	/**
+	 * Additional miscellaneous flags
+	 *
+	 * Reserved for future use, must be zero.
+	 */
+	__u32 flags;
+
+	/**
+	 * Padding for 64-bit struct alignment.
+	 *
+	 * Reserved for future use, must be zero.
+	 */
+	__u32 pad;
+
+	/**
+	 * Returned handle for the object.
+	 *
+	 * Object handles are nonzero.
+	 */
+	__u32 handle;
+};
+
struct drm_i915_gem_pread {
/** Handle for the object being read. */
__u32 handle;
@@ -69,6 +69,15 @@ drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
 				      tiling_mode, pitch, flags);
 }
 
+/* Allocate a buffer object described by a filled-in parameter block.
+ * Thin public entry point: dispatches to the backend's
+ * bo_alloc_from_params implementation, which may update the in/out
+ * fields of @params (stride, tiling_mode). */
+drm_intel_bo *
+drm_intel_bo_alloc_from_params(drm_intel_bufmgr *bufmgr,
+			       drm_intel_bo_alloc_params *params)
+{
+	return bufmgr->bo_alloc_from_params(bufmgr, params);
+}
+
void drm_intel_bo_reference(drm_intel_bo *bo)
{
bo->bufmgr->bo_reference(bo);
@@ -105,6 +105,32 @@ typedef struct _drm_intel_aub_annotation {
 	uint32_t ending_offset;
 } drm_intel_aub_annotation;
 
+/*
+ * Parameter block for drm_intel_bo_alloc_from_params().
+ *
+ * width/height/cpp describe the requested surface; stride and
+ * tiling_mode are in/out and are updated with the values actually
+ * chosen once tiling restrictions have been applied.
+ *
+ * NOTE(review): offset is declared uint32_t here, but the create2
+ * ioctl's soft-pin offset is 64-bit -- confirm a 32-bit field cannot
+ * truncate valid GTT addresses before widening or shipping this ABI.
+ */
+typedef struct _drm_intel_bo_alloc_params {
+	uint32_t width;
+	uint32_t height;
+	uint32_t cpp;
+	uint32_t tiling_mode;
+	unsigned long stride;
+	unsigned long flags;
+	uint32_t offset;
+	uint32_t context;
+	uint32_t cache_domain;
+	uint32_t placement;
+	uint32_t caching;
+	uint32_t madvise;
+	char *name;
+} drm_intel_bo_alloc_params;
+
#define BO_ALLOC_FOR_RENDER (1<<0)
drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
@@ -119,6 +135,10 @@ drm_intel_bo *drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr,
uint32_t *tiling_mode,
unsigned long *pitch,
unsigned long flags);
+drm_intel_bo *
+drm_intel_bo_alloc_from_params(drm_intel_bufmgr *bufmgr,
+ drm_intel_bo_alloc_params *params);
+
void drm_intel_bo_reference(drm_intel_bo *bo);
void drm_intel_bo_unreference(drm_intel_bo *bo);
int drm_intel_bo_map(drm_intel_bo *bo, int write_enable);
@@ -775,6 +775,89 @@ retry:
}
static drm_intel_bo *
+drm_intel_gem_bo_alloc_create2(drm_intel_bufmgr *bufmgr,
+			       drm_intel_bo_alloc_params *params,
+			       unsigned long size)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
+	drm_intel_bo_gem *bo_gem;
+	struct drm_i915_gem_create2 create;
+	int ret;
+
+	bo_gem = calloc(1, sizeof(*bo_gem));
+	if (!bo_gem)
+		return NULL;
+
+	bo_gem->bo.size = size;
+	/* Clear the whole request: the reserved flags/pad fields - and
+	 * offset/context when no soft-pin offset is supplied - must be
+	 * zero, and VG_CLEAR() is a no-op outside Valgrind builds. */
+	memset(&create, 0, sizeof(create));
+	create.size = size;
+	if (params->offset & I915_CREATE_OFFSET_VALID) {
+		create.offset = params->offset;
+		create.context = params->context;
+	}
+	if (params->tiling_mode == I915_TILING_NONE)
+		create.stride = 0;
+	else
+		create.stride = params->stride;
+	create.placement = params->placement;
+	create.domain = params->cache_domain;
+	create.caching = params->caching;
+	create.tiling_mode = params->tiling_mode;
+	create.madvise = params->madvise;
+
+	ret = drmIoctl(bufmgr_gem->fd,
+		       DRM_IOCTL_I915_GEM_CREATE2,
+		       &create);
+	bo_gem->gem_handle = create.handle;
+	bo_gem->bo.handle = bo_gem->gem_handle;
+	if (ret != 0) {
+		free(bo_gem);
+		return NULL;
+	}
+	bo_gem->bo.bufmgr = bufmgr;
+
+	/* We have to call the set_tiling ioctl, as create2 ioctl
+	 * doesn't return the swizzle mode
+	 * TODO: Is this required? Can the functionality be put in
+	 * create2 ioctl?
+	 */
+	bo_gem->tiling_mode = create.tiling_mode;
+	bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+	bo_gem->stride = create.stride;
+
+	if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
+						 create.tiling_mode,
+						 create.stride)) {
+		drm_intel_gem_bo_free(&bo_gem->bo);
+		return NULL;
+	}
+
+	DRMINITLISTHEAD(&bo_gem->name_list);
+	DRMINITLISTHEAD(&bo_gem->vma_list);
+
+	bo_gem->name = params->name;
+	atomic_set(&bo_gem->refcount, 1);
+	bo_gem->validate_index = -1;
+	bo_gem->reloc_tree_fences = 0;
+	bo_gem->used_as_reloc_target = false;
+	bo_gem->has_error = false;
+	/* set reusable to false, as caching is not employed currently*/
+	bo_gem->reusable = false;
+	bo_gem->aub_annotations = NULL;
+	bo_gem->aub_annotation_count = 0;
+
+	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
+
+	DBG("bo_create: buf %d (%s) %ldb\n",
+	    bo_gem->gem_handle, bo_gem->name, size);
+
+	return &bo_gem->bo;
+}
+
+static drm_intel_bo *
drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long size,
@@ -847,6 +930,57 @@ drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
tiling, stride);
}
+static drm_intel_bo *
+drm_intel_gem_bo_alloc_from_params(drm_intel_bufmgr *bufmgr,
+				   drm_intel_bo_alloc_params *params)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+	unsigned long size, stride;
+	uint32_t tiling_prev, tiling;
+	/* Iterate until the tile-pitch/size helpers stop changing tiling. */
+	tiling = tiling_prev = params->tiling_mode;
+	do {
+		unsigned long aligned_y, height_alignment;
+		uint32_t x = params->width;
+		uint32_t y = params->height;
+		uint32_t cpp = params->cpp;
+
+		tiling_prev = tiling;
+
+		/* If we're tiled, our allocations are in 8 or 32-row blocks,
+		 * so failure to align our height means that we won't allocate
+		 * enough pages.
+		 *
+		 * If we're untiled, we still have to align to 2 rows high
+		 * because the data port accesses 2x2 blocks even if the
+		 * bottom row isn't to be rendered, so failure to align means
+		 * we could walk off the end of the GTT and fault. This is
+		 * documented on 965, and may be the case on older chipsets
+		 * too so we try to be careful.
+		 */
+		aligned_y = y;
+		height_alignment = 2;
+
+		if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
+			height_alignment = 16;
+		else if (tiling == I915_TILING_X
+			 || (IS_915(bufmgr_gem->pci_device)
+			     && tiling == I915_TILING_Y))
+			height_alignment = 8;
+		else if (tiling == I915_TILING_Y)
+			height_alignment = 32;
+		aligned_y = ALIGN(y, height_alignment);
+
+		stride = x * cpp;
+		stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, &tiling);
+		size = stride * aligned_y;
+		size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, &tiling);
+	} while (tiling_prev != tiling);
+	/* Report the stride/tiling actually chosen back through params. */
+	params->stride = stride;
+	params->tiling_mode = tiling;
+	return drm_intel_gem_bo_alloc_create2(bufmgr, params, size);
+}
/**
* Returns a drm_intel_bo wrapping the given buffer object handle.
*
@@ -3330,6 +3464,8 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
bufmgr_gem->bufmgr.bo_alloc_for_render =
drm_intel_gem_bo_alloc_for_render;
bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
+ bufmgr_gem->bufmgr.bo_alloc_from_params =
+ drm_intel_gem_bo_alloc_from_params;
bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
@@ -83,6 +83,21 @@ struct _drm_intel_bufmgr {
 				      unsigned long *pitch,
 				      unsigned long flags);
+	/**
+	 * Allocate a buffer object from params.
+	 * Params include fields such as tiling mode, caching, domain,
+	 * placement, madvise, gtt offset etc.
+	 * The params are then used to populate the fields of create2 ioctl,
+	 * which creates buffer obj, based on these parameters.
+	 *
+	 * Note the tiling format in params may be rejected; callers should
+	 * check the 'tiling_mode' field on return, as well as the stride
+	 * value, which may have been rounded up to accommodate for tiling
+	 * restrictions.
+	 */
+	drm_intel_bo *(*bo_alloc_from_params) (drm_intel_bufmgr *bufmgr,
+				      drm_intel_bo_alloc_params *params);
+
 	/** Takes a reference on a buffer object */
 	void (*bo_reference) (drm_intel_bo *bo);