From patchwork Fri Jan 7 16:07:36 2011
From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org
Subject: [PATCH] Mapping snooped user pages into the GTT
Date: Fri, 7 Jan 2011 16:07:36 +0000
Message-Id: <1294416456-5410-1-git-send-email-chris@chris-wilson.co.uk>
X-Patchwork-Id: 464411

diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ea1c4b0..adb886a 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -425,7 +425,8 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 void
 drm_gem_object_release(struct drm_gem_object *obj)
 {
-	fput(obj->filp);
+	if (obj->filp)
+		fput(obj->filp);
 }
 EXPORT_SYMBOL(drm_gem_object_release);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 07a351f..4b901c5 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -13,6 +13,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
 	  i915_gem_gtt.o \
 	  i915_gem_io.o \
 	  i915_gem_tiling.o \
+	  i915_gem_vmap.o \
 	  i915_trace_points.o \
 	  intel_display.o \
 	  intel_crt.o \
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 8def614..52efa11 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -783,7 +783,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		value = INTEL_INFO(dev)->gen >= 4;
 		break;
 	case I915_PARAM_HAS_2D_IO:
-		/* depends on GEM */
+	case I915_PARAM_HAS_VMAP:
 		value = dev_priv->has_gem;
 		break;
 	default:
@@ -2256,6 +2256,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD_2D, i915_gem_pread_2d_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE_2D, i915_gem_pwrite_2d_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_VMAP, i915_gem_vmap_ioctl, DRM_UNLOCKED),
 };

 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
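Userspace should feature-test before relying on the new ioctl; I915_PARAM_HAS_VMAP reports dev_priv->has_gem, just as I915_PARAM_HAS_2D_IO does. A minimal probe sketch, assuming a libdrm-style drmIoctl() wrapper and an fd already open on the i915 device (the helper name is illustrative):

	#include <xf86drm.h>
	#include "i915_drm.h"

	static int i915_has_vmap(int fd)
	{
		drm_i915_getparam_t gp;
		int value = 0;

		gp.param = I915_PARAM_HAS_VMAP;
		gp.value = &value;
		if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return 0; /* old kernel: parameter unknown */
		return value;
	}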
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 64033cc..6899bde 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -695,6 +695,11 @@ typedef struct drm_i915_private {
 	struct intel_fbdev *fbdev;
 } drm_i915_private_t;

+struct drm_i915_gem_object_ops {
+	int (*get_pages)(struct drm_i915_gem_object *, gfp_t, u32 *offset);
+	void (*put_pages)(struct drm_i915_gem_object *);
+};
+
 struct drm_i915_gem_object {
 	struct drm_gem_object base;

@@ -782,6 +787,7 @@ struct drm_i915_gem_object {
 	unsigned int fenced_gpu_access:1;

 	struct page **pages;
+	int num_pages;

 	/**
 	 * DMAR support
@@ -1097,6 +1103,7 @@ void i915_gem_flush_ring(struct drm_device *dev,
 			 uint32_t flush_domains);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
+void i915_gem_object_init(struct drm_i915_gem_object *obj);
 void i915_gem_free_object(struct drm_gem_object *obj);
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
 				     uint32_t alignment,
@@ -1113,6 +1120,11 @@ void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 				    struct intel_ring_buffer *ring,
 				    u32 seqno);

+/* i915_gem_vmap.c */
+int
+i915_gem_vmap_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file);
+
 /**
  * Returns true if seq1 is later than seq2.
  */
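The new drm_i915_gem_object_ops hangs the backing-store implementation off base.driver_private, so bind/unbind never need to know whether an object's pages come from shmem or from pinned user memory. A backend supplies just the two hooks; a hypothetical skeleton (the foo_* names are illustrative, not part of the patch):

	static int
	foo_get_pages(struct drm_i915_gem_object *obj, gfp_t gfp, u32 *offset)
	{
		/* Allocate obj->pages[], set obj->num_pages and pin the pages. */
		*offset = 0;	/* intra-page offset of the object's first byte */
		return 0;
	}

	static void
	foo_put_pages(struct drm_i915_gem_object *obj)
	{
		/* Undo whatever foo_get_pages() pinned and free obj->pages. */
	}

	static const struct drm_i915_gem_object_ops foo_ops = {
		.get_pages = foo_get_pages,
		.put_pages = foo_put_pages,
	};

	/* At creation time, before the first bind:
	 *	obj->base.driver_private = (void *)&foo_ops;
	 */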
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 439ad78..d529de4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -513,6 +513,12 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		goto unlock;
 	}

+	if (obj->agp_type == AGP_USER_CACHED_MEMORY) {
+		/* XXX worth handling? */
+		ret = -EINVAL;
+		goto out;
+	}
+
 	/* Bounds check source. */
 	if (args->offset > obj->base.size ||
 	    args->size > obj->base.size - args->offset) {
@@ -954,6 +960,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		goto unlock;
 	}

+	if (obj->agp_type == AGP_USER_CACHED_MEMORY) {
+		/* XXX worth handling? */
+		ret = -EINVAL;
+		goto out;
+	}
+
 	/* Bounds check destination. */
 	if (args->offset > obj->base.size ||
 	    args->size > obj->base.size - args->offset) {
@@ -1125,6 +1137,11 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 	if (obj == NULL)
 		return -ENOENT;

+	if (to_intel_bo(obj)->agp_type == AGP_USER_CACHED_MEMORY) {
+		drm_gem_object_unreference_unlocked(obj);
+		return -EINVAL;
+	}
+
 	if (obj->size > dev_priv->mm.gtt_mappable_end) {
 		drm_gem_object_unreference_unlocked(obj);
 		return -E2BIG;
@@ -1484,25 +1501,26 @@ unlock:

 static int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
-			      gfp_t gfpmask)
+			      gfp_t gfpmask,
+			      u32 *offset)
 {
-	int page_count, i;
 	struct address_space *mapping;
 	struct inode *inode;
 	struct page *page;
+	int i;

 	/* Get the list of pages out of our struct file.  They'll be pinned
 	 * at this point until we release them.
 	 */
-	page_count = obj->base.size / PAGE_SIZE;
+	obj->num_pages = obj->base.size / PAGE_SIZE;
 	BUG_ON(obj->pages != NULL);
-	obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
+	obj->pages = drm_malloc_ab(obj->num_pages, sizeof(struct page *));
 	if (obj->pages == NULL)
 		return -ENOMEM;

 	inode = obj->base.filp->f_path.dentry->d_inode;
 	mapping = inode->i_mapping;
-	for (i = 0; i < page_count; i++) {
+	for (i = 0; i < obj->num_pages; i++) {
 		page = read_cache_page_gfp(mapping, i,
 					   GFP_HIGHUSER |
 					   __GFP_COLD |
@@ -1517,6 +1535,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
 	if (obj->tiling_mode != I915_TILING_NONE)
 		i915_gem_object_do_bit_17_swizzle(obj);

+	*offset = 0;
 	return 0;

 err_pages:
@@ -1531,7 +1550,6 @@ err_pages:
 static void
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 {
-	int page_count = obj->base.size / PAGE_SIZE;
 	int i;

 	BUG_ON(obj->madv == __I915_MADV_PURGED);
@@ -1542,7 +1560,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 	if (obj->madv == I915_MADV_DONTNEED)
 		obj->dirty = 0;

-	for (i = 0; i < page_count; i++) {
+	for (i = 0; i < obj->num_pages; i++) {
 		if (obj->dirty)
 			set_page_dirty(obj->pages[i]);

@@ -1643,6 +1661,9 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 {
 	struct inode *inode;

+	if (obj->base.filp == NULL)
+		return;
+
 	/* Our goal here is to return as much of the memory as
 	 * is possible back to the system as we are called from OOM.
 	 * To do this we must instruct the shmfs to drop all of its
@@ -2090,6 +2111,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 int
 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 {
+	const struct drm_i915_gem_object_ops *ops = obj->base.driver_private;
 	int ret = 0;

 	if (obj->gtt_space == NULL)
@@ -2127,7 +2149,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 		return ret;

 	i915_gem_gtt_unbind_object(obj);
-	i915_gem_object_put_pages_gtt(obj);
+	ops->put_pages(obj);

 	list_del_init(&obj->gtt_list);
 	list_del_init(&obj->mm_list);
@@ -2667,11 +2689,13 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 			    unsigned alignment,
 			    bool map_and_fenceable)
 {
+	const struct drm_i915_gem_object_ops *ops = obj->base.driver_private;
 	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_mm_node *free_space;
 	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
+	u32 offset;
 	bool mappable, fenceable;
 	int ret;

@@ -2737,7 +2761,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 		goto search_free;
 	}

-	ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
+	ret = ops->get_pages(obj, gfpmask, &offset);
 	if (ret) {
 		drm_mm_put_block(obj->gtt_space);
 		obj->gtt_space = NULL;
@@ -2765,7 +2789,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,

 	ret = i915_gem_gtt_bind_object(obj);
 	if (ret) {
-		i915_gem_object_put_pages_gtt(obj);
+		ops->put_pages(obj);
 		drm_mm_put_block(obj->gtt_space);
 		obj->gtt_space = NULL;

@@ -2787,11 +2811,11 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
 	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

-	obj->gtt_offset = obj->gtt_space->start;
+	obj->gtt_offset = obj->gtt_space->start + offset;

 	fenceable =
 		obj->gtt_space->size == fence_size &&
-		(obj->gtt_space->start & (fence_alignment -1)) == 0;
+		(obj->gtt_offset & (fence_alignment -1)) == 0;

 	mappable =
 		obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
@@ -2809,7 +2833,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
 	 * to GPU, and we can ignore the cache flush because it'll happen
 	 * again at bind time.
 	 */
-	if (obj->pages == NULL)
+	if (obj->pages == NULL || obj->agp_type == AGP_USER_CACHED_MEMORY)
 		return;

 	trace_i915_gem_object_clflush(obj);
@@ -3464,6 +3488,31 @@ unlock:
 	return ret;
 }

+void
+i915_gem_object_init(struct drm_i915_gem_object *obj)
+{
+	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+
+	obj->agp_type = AGP_USER_MEMORY;
+
+	obj->fence_reg = I915_FENCE_REG_NONE;
+	INIT_LIST_HEAD(&obj->mm_list);
+	INIT_LIST_HEAD(&obj->gtt_list);
+	INIT_LIST_HEAD(&obj->ring_list);
+	INIT_LIST_HEAD(&obj->exec_list);
+	INIT_LIST_HEAD(&obj->gpu_write_list);
+	obj->madv = I915_MADV_WILLNEED;
+
+	/* Avoid an unnecessary call to unbind on the first bind. */
+	obj->map_and_fenceable = true;
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
+	.get_pages = i915_gem_object_get_pages_gtt,
+	.put_pages = i915_gem_object_put_pages_gtt,
+};
+
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size)
 {
@@ -3479,22 +3528,10 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 		return NULL;
 	}

-	i915_gem_info_add_obj(dev_priv, size);
+	obj->base.driver_private = (void *)&i915_gem_object_ops;

-	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
-
-	obj->agp_type = AGP_USER_MEMORY;
-	obj->base.driver_private = NULL;
-	obj->fence_reg = I915_FENCE_REG_NONE;
-	INIT_LIST_HEAD(&obj->mm_list);
-	INIT_LIST_HEAD(&obj->gtt_list);
-	INIT_LIST_HEAD(&obj->ring_list);
-	INIT_LIST_HEAD(&obj->exec_list);
-	INIT_LIST_HEAD(&obj->gpu_write_list);
-	obj->madv = I915_MADV_WILLNEED;
-	/* Avoid an unnecessary call to unbind on the first bind. */
-	obj->map_and_fenceable = true;
+	i915_gem_object_init(obj);
+	i915_gem_info_add_obj(dev_priv, size);

 	return obj;
 }
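The clflush hunk above encodes the coherency rule for snooped objects: pages mapped with AGP_USER_CACHED_MEMORY are kept coherent with the CPU caches by the chipset, so only unsnooped objects that actually have pages need a CPU cache flush. The same predicate, written out as a hypothetical helper (not part of the patch):

	static bool
	i915_gem_object_needs_clflush(const struct drm_i915_gem_object *obj)
	{
		/* No pages yet: the flush will happen again at bind time. */
		if (obj->pages == NULL)
			return false;
		/* Snooped: the chipset snoops the CPU caches for us. */
		return obj->agp_type != AGP_USER_CACHED_MEMORY;
	}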
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 92161bb..429f529 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -64,7 +64,7 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)

 	if (dev_priv->mm.gtt->needs_dmar) {
 		ret = intel_gtt_map_memory(obj->pages,
-					   obj->base.size >> PAGE_SHIFT,
+					   obj->num_pages,
 					   &obj->sg_list,
 					   &obj->num_sg);
 		if (ret != 0)
@@ -76,7 +76,7 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
 					    obj->agp_type);
 	} else
 		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
-				       obj->base.size >> PAGE_SHIFT,
+				       obj->num_pages,
 				       obj->pages,
 				       obj->agp_type);

diff --git a/drivers/gpu/drm/i915/i915_gem_io.c b/drivers/gpu/drm/i915/i915_gem_io.c
index 6c1def1..e83b8e3 100644
--- a/drivers/gpu/drm/i915/i915_gem_io.c
+++ b/drivers/gpu/drm/i915/i915_gem_io.c
@@ -549,6 +549,11 @@ i915_gem_pwrite_2d_ioctl(struct drm_device *dev,
 		goto unlock;
 	}

+	if (obj->agp_type == AGP_USER_CACHED_MEMORY) {
+		ret = -EINVAL;
+		goto unref;
+	}
+
 	/* Bounds check destination. */
 	offset = args->dst_x * args->cpp + args->dst_y * args->dst_stride;
 	size = args->dst_stride * (args->height-1) + args->width * args->cpp;
@@ -961,6 +966,11 @@ i915_gem_pread_2d_ioctl(struct drm_device *dev,
 		goto unlock;
 	}

+	if (obj->agp_type == AGP_USER_CACHED_MEMORY) {
+		ret = -EINVAL;
+		goto unref;
+	}
+
 	/* Bounds check source. */
 	offset = args->src_x * args->cpp + args->src_y * args->src_stride;
 	size = args->src_stride * (args->height-1) + args->width * args->cpp;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 22a32b9..3a9c88e 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -294,6 +294,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 	if (obj == NULL)
 		return -ENOENT;

+	if (obj->agp_type == AGP_USER_CACHED_MEMORY) {
+		drm_gem_object_unreference_unlocked(&obj->base);
+		return -EINVAL;
+	}
+
 	if (!i915_tiling_ok(dev,
 			    args->stride, obj->base.size, args->tiling_mode)) {
 		drm_gem_object_unreference_unlocked(&obj->base);
diff --git a/drivers/gpu/drm/i915/i915_gem_vmap.c b/drivers/gpu/drm/i915/i915_gem_vmap.c
new file mode 100644
index 0000000..780a514
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_vmap.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+#include
+
+struct i915_gem_vmap_object {
+	struct drm_i915_gem_object gem;
+	uintptr_t user_ptr;
+	int read_only;
+};
+
+static struct i915_gem_vmap_object *to_vmap_object(struct drm_i915_gem_object *obj)
+{
+	return container_of(obj, struct i915_gem_vmap_object, gem);
+}
+
+static int
+i915_gem_vmap_get_pages(struct drm_i915_gem_object *obj, gfp_t gfp, u32 *offset)
+{
+	struct i915_gem_vmap_object *vmap = to_vmap_object(obj);
+	loff_t first_data_page, last_data_page;
+	int pinned_pages, i;
+
+	if (!access_ok(vmap->read_only ? VERIFY_READ : VERIFY_WRITE,
+		       (char __user *)vmap->user_ptr,
+		       vmap->gem.base.size))
+		return -EFAULT;
+
+	first_data_page = vmap->user_ptr / PAGE_SIZE;
+	last_data_page = (vmap->user_ptr + vmap->gem.base.size - 1) / PAGE_SIZE;
+	vmap->gem.num_pages = last_data_page - first_data_page + 1;
+
+	vmap->gem.pages = drm_malloc_ab(vmap->gem.num_pages,
+					sizeof(struct page *));
+	if (vmap->gem.pages == NULL)
+		return -ENOMEM;
+
+	pinned_pages = get_user_pages_fast(vmap->user_ptr,
+					   vmap->gem.num_pages,
+					   !vmap->read_only,
+					   vmap->gem.pages);
+	if (pinned_pages < vmap->gem.num_pages) {
+		for (i = 0; i < pinned_pages; i++)
+			page_cache_release(vmap->gem.pages[i]);
+		drm_free_large(vmap->gem.pages);
+		return -EFAULT;
+	}
+
+	*offset = vmap->user_ptr & ~PAGE_MASK;
+	return 0;
+}
+
+static void
+i915_gem_vmap_put_pages(struct drm_i915_gem_object *obj)
+{
+	int i;
+
+	for (i = 0; i < obj->num_pages; i++) {
+		if (obj->dirty)
+			set_page_dirty(obj->pages[i]);
+
+		mark_page_accessed(obj->pages[i]);
+		page_cache_release(obj->pages[i]);
+	}
+
+	obj->dirty = 0;
+	drm_free_large(obj->pages);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_vmap_ops = {
+	.get_pages = i915_gem_vmap_get_pages,
+	.put_pages = i915_gem_vmap_put_pages,
+};
+
+/**
+ * Creates a new mm object that wraps some user memory.
+ */
+int
+i915_gem_vmap_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file)
+{
+	struct drm_i915_gem_vmap *args = data;
+	struct i915_gem_vmap_object *obj;
+	int ret;
+	u32 handle;
+
+	/* Allocate the new object */
+	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	if (obj == NULL)
+		return -ENOMEM;
+
+	obj->gem.base.driver_private = (void *)&i915_gem_vmap_ops;
+
+	obj->gem.base.dev = dev;
+	obj->gem.base.size = args->user_size;
+
+	kref_init(&obj->gem.base.refcount);
+	atomic_set(&obj->gem.base.handle_count, 0);
+
+	i915_gem_object_init(&obj->gem);
+	obj->gem.agp_type = AGP_USER_CACHED_MEMORY;
+
+	obj->user_ptr = args->user_ptr;
+	obj->read_only = args->flags & I915_VMAP_READ_ONLY;
+
+	ret = drm_gem_handle_create(file, &obj->gem.base, &handle);
+	if (ret) {
+		drm_gem_object_release(&obj->gem.base);
+		kfree(obj);
+		return ret;
+	}
+
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference(&obj->gem.base);
+
+	args->handle = handle;
+	return 0;
+}
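Since the user pointer need not be page aligned, i915_gem_vmap_get_pages pins whole pages and reports the intra-page offset, which the later bind adds to the GTT node start (obj->gtt_offset = obj->gtt_space->start + offset). A worked example of that arithmetic with illustrative values:

	#include <stdio.h>

	int main(void)
	{
		/* Illustrative: a 12KiB range starting 0x234 bytes into a 4KiB page. */
		unsigned long user_ptr = 0x401234, size = 0x3000, page = 4096;
		unsigned long first = user_ptr / page;               /* 0x401 */
		unsigned long last = (user_ptr + size - 1) / page;   /* 0x404 */
		unsigned long num_pages = last - first + 1;          /* 4 pages pinned */
		unsigned long offset = user_ptr & (page - 1);        /* 0x234 */

		printf("pages=%lu offset=%#lx\n", num_pages, offset);
		return 0;
	}

So the object occupies four GTT pages, but its first byte sits 0x234 bytes into the first one.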
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index d3c93be..3b8b7f9 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -200,6 +200,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_I915_GEM_EXECBUFFER2	0x29
 #define DRM_I915_GEM_PREAD_2D	0x2a
 #define DRM_I915_GEM_PWRITE_2D	0x2b
+#define DRM_I915_GEM_VMAP	0x2c

 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -243,6 +244,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
 #define DRM_IOCTL_I915_GEM_PREAD_2D	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD_2D, struct drm_i915_gem_pread_2d)
 #define DRM_IOCTL_I915_GEM_PWRITE_2D	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE_2D, struct drm_i915_gem_pwrite_2d)
+#define DRM_IOCTL_I915_GEM_VMAP		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_VMAP, struct drm_i915_gem_vmap)

 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
@@ -295,6 +297,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_COHERENT_RINGS	 13
 #define I915_PARAM_HAS_EXEC_CONSTANTS	 14
 #define I915_PARAM_HAS_2D_IO		 15
+#define I915_PARAM_HAS_VMAP		 16

 typedef struct drm_i915_getparam {
 	int param;
@@ -392,6 +395,19 @@ struct drm_i915_gem_create {
 	__u32 pad;
 };

+struct drm_i915_gem_vmap {
+	__u64 user_ptr;
+	__u32 user_size;
+	__u32 flags;
+#define I915_VMAP_READ_ONLY 0x1
+	/**
+	 * Returned handle for the object.
+	 *
+	 * Object handles are nonzero.
+	 */
+	__u32 handle;
+};
+
 struct drm_i915_gem_pread {
 	/** Handle for the object being read. */
 	__u32 handle;
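Taken together, wrapping an existing allocation from userspace looks like the sketch below (helper name illustrative; drmIoctl() and the device fd as in the probe above; the buffer need not be page aligned, though whole pages stay pinned for the object's lifetime, and the resulting snooped object refuses set_tiling, pread/pwrite and CPU mmap):

	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>
	#include "i915_drm.h"

	static uint32_t wrap_user_memory(int fd, void *ptr, uint32_t size,
					 int gpu_read_only)
	{
		struct drm_i915_gem_vmap vmap;

		memset(&vmap, 0, sizeof(vmap));
		vmap.user_ptr = (uintptr_t)ptr;
		vmap.user_size = size;
		vmap.flags = gpu_read_only ? I915_VMAP_READ_ONLY : 0;

		if (drmIoctl(fd, DRM_IOCTL_I915_GEM_VMAP, &vmap))
			return 0; /* handles are nonzero, so 0 signals failure */

		/* vmap.handle is a GEM handle whose storage is ptr's pages. */
		return vmap.handle;
	}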