@@ -106,6 +106,7 @@ typedef struct _drm_intel_aub_annotation {
} drm_intel_aub_annotation;
#define BO_ALLOC_FOR_RENDER (1<<0)
+#define BO_ALLOC_CPU_MAP_NOT_NEEDED (1<<1)
drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment);
@@ -658,6 +658,10 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
/* Round the allocated size up to a power of two number of pages. */
bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
+ /* Avoid this object's allocation from regular BO cache */
+ if(flags & BO_ALLOC_CPU_MAP_NOT_NEEDED)
+ bucket = NULL;
+
/* If we don't have caching at this size, don't actually round the
* allocation up.
*/
@@ -730,6 +734,11 @@ retry:
VG_CLEAR(create);
create.size = bo_size;
+ if(flags & BO_ALLOC_CPU_MAP_NOT_NEEDED)
+ create.flags = I915_CPU_MAP_NOT_NEEDED;
+ else
+ create.flags =0;
+
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_CREATE,
&create);
@@ -762,10 +771,14 @@ retry:
bo_gem->reloc_tree_fences = 0;
bo_gem->used_as_reloc_target = false;
bo_gem->has_error = false;
- bo_gem->reusable = true;
bo_gem->aub_annotations = NULL;
bo_gem->aub_annotation_count = 0;
+ if(flags & BO_ALLOC_CPU_MAP_NOT_NEEDED)
+ bo_gem->reusable = false;
+ else
+ bo_gem->reusable = true;
+
drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
DBG("bo_create: buf %d (%s) %ldb\n",