--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -279,6 +279,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_BSD 10
#define I915_PARAM_HAS_BLT 11
#define I915_PARAM_HAS_RELAXED_FENCING 12
+#define I915_PARAM_HAS_CREATE_CACHED 14
typedef struct drm_i915_getparam {
int param;
@@ -360,6 +361,8 @@ struct drm_i915_gem_init {
__u64 gtt_end;
};
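+/* drm_i915_gem_create.flags bit: request a CPU-cacheable caching mode
+ * for the new object (introduced by this patch). */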
+#define DRM_I915_GEM_CREATE_CACHED 1
+
struct drm_i915_gem_create {
/**
* Requested size for the object.
@@ -373,7 +376,7 @@ struct drm_i915_gem_create {
* Object handles are nonzero.
*/
__u32 handle;
- __u32 pad;
+ __u32 flags;
};
struct drm_i915_gem_pread {
--- a/intel/intel_bufmgr.h
+++ b/intel/intel_bufmgr.h
@@ -84,6 +84,7 @@ struct _drm_intel_bo {
};
#define BO_ALLOC_FOR_RENDER (1<<0)
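+/* Force the allocation to use a CPU-cacheable caching mode. */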
+#define BO_ALLOC_FORCE_CACHED (1<<1)
drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment);
@@ -97,6 +98,8 @@ drm_intel_bo *drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr,
uint32_t *tiling_mode,
unsigned long *pitch,
unsigned long flags);
+drm_intel_bo *drm_intel_gem_bo_alloc_cached(drm_intel_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment);
void drm_intel_bo_reference(drm_intel_bo *bo);
void drm_intel_bo_unreference(drm_intel_bo *bo);
int drm_intel_bo_map(drm_intel_bo *bo, int write_enable);
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -103,6 +103,7 @@ typedef struct _drm_intel_bufmgr_gem {
unsigned int has_blt : 1;
unsigned int has_relaxed_fencing : 1;
unsigned int bo_reuse : 1;
+ unsigned int has_create_cached : 1;
char fenced_relocs;
} drm_intel_bufmgr_gem;
@@ -584,7 +585,7 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
unsigned long stride)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
- drm_intel_bo_gem *bo_gem;
+ drm_intel_bo_gem *bo_gem = NULL;
unsigned int page_size = getpagesize();
int ret;
struct drm_intel_gem_bo_bucket *bucket;
@@ -592,6 +593,15 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
unsigned long bo_size;
int for_render = 0;
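+ /* A forced-cached object must come from a fresh kernel allocation:
+  * buffers recycled from the BO cache were created without the cached
+  * flag, so skip the cache lookup (and its locking) entirely. */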
+ if (flags & BO_ALLOC_FORCE_CACHED) {
+ DBG("bo_alloc_internal: alloc bo %s cached\n", name);
+ alloc_from_cache = 0;
+ bo_size = size;
+ if (bo_size < page_size)
+ bo_size = page_size;
+ goto kernel_alloc;
+ }
+
if (flags & BO_ALLOC_FOR_RENDER)
for_render = 1;
@@ -658,6 +668,7 @@ retry:
}
pthread_mutex_unlock(&bufmgr_gem->lock);
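+/* Forced-cached allocations jump here, past the cache lookup and the
+ * bufmgr lock/unlock above. */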
+kernel_alloc:
if (!alloc_from_cache) {
struct drm_i915_gem_create create;
@@ -668,6 +679,8 @@ retry:
bo_gem->bo.size = bo_size;
memset(&create, 0, sizeof(create));
create.size = bo_size;
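+ /* Ask the kernel for the CPU-cached caching mode. */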
+ if (flags & BO_ALLOC_FORCE_CACHED)
+ create.flags = DRM_I915_GEM_CREATE_CACHED;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_CREATE,
@@ -729,6 +742,25 @@ drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
I915_TILING_NONE, 0);
}
+drm_intel_bo *
+drm_intel_gem_bo_alloc_cached(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned long size,
+ unsigned int alignment)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
+
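+ /* Use the new kernel interface only when GETPARAM advertised it;
+  * otherwise fall back to a normal (uncached) allocation. */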
+ if (bufmgr_gem->has_create_cached)
+ return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
+ BO_ALLOC_FORCE_CACHED,
+ I915_TILING_NONE, 0);
+
+ DBG("bo_alloc_cached: kernel does not support cached allocation, "
+ "falling back to a normal allocation\n");
+ return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
+ I915_TILING_NONE, 0);
+}
+
static drm_intel_bo *
drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
int x, int y, int cpp, uint32_t *tiling_mode,
@@ -2146,6 +2178,10 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
bufmgr_gem->has_relaxed_fencing = ret == 0;
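+ /* Unpatched kernels reject this param, so has_create_cached ends up
+ * zero and drm_intel_gem_bo_alloc_cached() falls back gracefully. */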
+ gp.param = I915_PARAM_HAS_CREATE_CACHED;
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ bufmgr_gem->has_create_cached = ret == 0;
+
if (bufmgr_gem->gen < 4) {
gp.param = I915_PARAM_NUM_FENCES_AVAIL;
gp.value = &bufmgr_gem->available_fences;
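
For reference, a minimal consumer of the new entry point could look like the
sketch below. This is an illustration only, assuming a libdrm and kernel with
this patch applied; the device path, buffer name, batch size, and allocation
size are placeholder values, and error handling is trimmed to the essentials.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

#include <intel_bufmgr.h>

int main(void)
{
	/* Placeholder device node for this sketch. */
	int fd = open("/dev/dri/card0", O_RDWR);
	if (fd < 0)
		return 1;

	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	if (bufmgr == NULL)
		return 1;

	/* Falls back to a normal allocation on kernels without
	 * I915_PARAM_HAS_CREATE_CACHED, so no separate probe is needed. */
	drm_intel_bo *bo = drm_intel_gem_bo_alloc_cached(bufmgr, "cached bo",
							 64 * 1024, 4096);
	if (bo == NULL)
		return 1;

	/* CPU writes through a mapping are where the cached mode pays off. */
	if (drm_intel_bo_map(bo, 1) == 0) {
		memset(bo->virtual, 0, bo->size);
		drm_intel_bo_unmap(bo);
	}

	drm_intel_bo_unreference(bo);
	drm_intel_bufmgr_destroy(bufmgr);
	close(fd);
	return 0;
}

Because drm_intel_gem_bo_alloc_cached() degrades to the ordinary allocation
path on older kernels, callers can use it unconditionally rather than
branching on has_create_cached themselves.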