--- a/libdrm/intel/intel_bufmgr_gem.c
+++ b/libdrm/intel/intel_bufmgr_gem.c
@@ -206,6 +206,9 @@ drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
static void
drm_intel_gem_bo_unreference(drm_intel_bo *bo);
+static void
+drm_intel_gem_bo_free(drm_intel_bo *bo);
+
static struct drm_intel_gem_bo_bucket *
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
unsigned long size)
@@ -330,6 +333,52 @@ drm_intel_gem_bo_busy(drm_intel_bo *bo)
return (ret == 0 && busy.busy);
}
+static int
+drm_intel_gem_bo_madvise(drm_intel_bufmgr_gem *bufmgr_gem,
+ drm_intel_bo_gem *bo_gem,
+ int state)
+{
+ struct drm_i915_gem_madvise madv;
+
+ madv.handle = bo_gem->gem_handle;
+ madv.madv = state;
+ madv.retained = 1;
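+ /* retained is updated by the kernel; it stays at 1 if the ioctl is unsupported */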
+ ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
+
+ return madv.retained;
+}
+
+/* drop the oldest entries that have been purged by the kernel */
+static void
+drm_intel_gem_bo_cache_purge(drm_intel_bufmgr *bufmgr)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+ int i;
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+
+ for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
+ struct drm_intel_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
+ drm_intel_bo_gem *bo_gem;
+
+ while (!DRMLISTEMPTY(&bucket->head)) {
+ bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.next, head);
+
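+ /* entries are oldest first; stop at the first buffer the kernel still retains */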
+ if (drm_intel_gem_bo_madvise(bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
+ break;
+
+ DRMLISTDEL(&bo_gem->head);
+ bucket->num_entries--;
+
+ drm_intel_gem_bo_free(&bo_gem->bo);
+ }
+ }
+
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+}
+
static drm_intel_bo *
drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment,
@@ -406,6 +453,10 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name,
return NULL;
}
bo_gem->bo.bufmgr = bufmgr;
+ } else {
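+ /* reusing a cached buffer: tell the kernel we need its pages again */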
+ if (!drm_intel_gem_bo_madvise(bufmgr_gem, bo_gem, I915_MADV_WILLNEED))
+ drm_intel_gem_bo_cache_purge(bufmgr);
}
bo_gem->name = name;
@@ -615,6 +665,8 @@ drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo)
DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
bucket->num_entries++;
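+ /* allow the kernel to discard the backing pages while the buffer idles in the cache */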
+ drm_intel_gem_bo_madvise(bufmgr_gem, bo_gem, I915_MADV_DONTNEED);
drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
} else {
drm_intel_gem_bo_free(bo);
--- a/shared-core/i915_drm.h
+++ b/shared-core/i915_drm.h
@@ -206,6 +206,7 @@ typedef struct drm_i915_sarea {
#define DRM_I915_GEM_GET_APERTURE 0x23
#define DRM_I915_GEM_MMAP_GTT 0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
+#define DRM_I915_GEM_MADVISE 0x26
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -244,6 +245,7 @@ typedef struct drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
+#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
/* Asynchronous page flipping:
*/
@@ -727,4 +729,18 @@ struct drm_i915_get_pipe_from_crtc_id {
uint32_t pipe;
};
+#define I915_MADV_WILLNEED 0 /* backing pages are needed, do not discard */
+#define I915_MADV_DONTNEED 1 /* backing pages may be discarded under memory pressure */
+
+struct drm_i915_gem_madvise {
+ /** Handle of the buffer to change the backing store advice for. */
+ uint32_t handle;
+
+ /** Advice: either I915_MADV_WILLNEED or I915_MADV_DONTNEED. */
+ uint32_t madv;
+
+ /** Whether or not the backing store still exists (written by the kernel). */
+ uint32_t retained;
+};
+
#endif /* _I915_DRM_H_ */
Set the DONTNEED flag on cached buffers so that the kernel is free to
discard them when under memory pressure.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 libdrm/intel/intel_bufmgr_gem.c |   55 +++++++++++++++++++++++++++++++++++++++
 shared-core/i915_drm.h          |   16 ++++++++++++
 2 files changed, 71 insertions(+), 0 deletions(-)
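
For reviewers, a rough sketch of how the new ioctl is driven from
userspace follows. It is illustrative only and not part of the patch:
the fd/handle plumbing and the gem_madvise helper name are invented
for the example.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#include "i915_drm.h"

/* Returns non-zero if the buffer's backing pages are still resident.
 * retained is preset to 1 so that a kernel without the ioctl behaves
 * as if the pages were kept. */
static int
gem_madvise(int fd, uint32_t handle, uint32_t state)
{
	struct drm_i915_gem_madvise madv;

	memset(&madv, 0, sizeof(madv));
	madv.handle = handle;
	madv.madv = state;
	madv.retained = 1;
	ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

	return madv.retained;
}

A cache would mark a buffer disposable when it goes idle with
gem_madvise(fd, handle, I915_MADV_DONTNEED) and reclaim it with
I915_MADV_WILLNEED before reuse; a zero return on reclaim means the
kernel purged the pages and the contents must be treated as lost.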