@@ -927,40 +927,16 @@ void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine)
return NULL;
}
-static u32 *vmap_batch(struct drm_i915_gem_object *obj,
+static u32 *map_batch(struct drm_i915_gem_object *obj,
unsigned start, unsigned len)
{
- int i;
- void *addr = NULL;
- struct sg_page_iter sg_iter;
- int first_page = start >> PAGE_SHIFT;
- int last_page = (len + start + 4095) >> PAGE_SHIFT;
- int npages = last_page - first_page;
- struct page **pages;
-
- pages = drm_malloc_ab(npages, sizeof(*pages));
- if (pages == NULL) {
- DRM_DEBUG_DRIVER("Failed to get space for pages\n");
- goto finish;
- }
-
- i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first_page) {
- pages[i++] = sg_page_iter_page(&sg_iter);
- if (i == npages)
- break;
- }
+ unsigned long first, npages;
- addr = vmap(pages, i, 0, PAGE_KERNEL);
- if (addr == NULL) {
- DRM_DEBUG_DRIVER("Failed to vmap pages\n");
- goto finish;
- }
+ /* Convert the byte range [start, start + len) to a range of pages */
+ first = start >> PAGE_SHIFT;
+ npages = DIV_ROUND_UP(start + len, PAGE_SIZE) - first;
-finish:
- if (pages)
- drm_free_large(pages);
- return (u32*)addr;
+ return i915_gem_object_map_range(obj, first, npages);
}
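As a worked example of the byte-to-page conversion in map_batch() above, here is a sketch assuming 4 KiB pages (PAGE_SHIFT == 12); the byte values are illustrative only:

	unsigned start = 0x1f00, len = 0x200;	/* bytes 0x1f00..0x20ff */
	unsigned long first, npages;

	first = start >> PAGE_SHIFT;				/* = 1         */
	npages = DIV_ROUND_UP(start + len, PAGE_SIZE) - first;	/* = 3 - 1 = 2 */

	/* pages 1 and 2 together cover every byte of [start, start + len) */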
/* Returns a vmap'd pointer to dest_obj, which the caller must unmap */
@@ -987,7 +963,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
return ERR_PTR(ret);
}
- src_base = vmap_batch(src_obj, batch_start_offset, batch_len);
+ src_base = map_batch(src_obj, batch_start_offset, batch_len);
if (!src_base) {
DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
ret = -ENOMEM;
@@ -1000,7 +976,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
goto unmap_src;
}
- dst = vmap_batch(dest_obj, 0, batch_len);
+ dst = map_batch(dest_obj, 0, batch_len);
if (!dst) {
DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
ret = -ENOMEM;
@@ -1014,7 +990,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
memcpy(dst, src, batch_len);
unmap_src:
- vunmap(src_base);
+ i915_gem_addr_unmap(src_base);
unpin_src:
i915_gem_object_unpin_pages(src_obj);
@@ -1262,7 +1238,7 @@ int i915_parse_cmds(struct intel_engine_cs *engine,
ret = -EINVAL;
}
- vunmap(batch_base);
+ i915_gem_addr_unmap(batch_base);
return ret;
}
@@ -3005,6 +3005,32 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
}
/**
+ * i915_gem_object_map_range - map some or all of a GEM object into kernel space
+ * @obj: the GEM object to be mapped
+ * @first: offset in pages of the start of the range to be mapped
+ * @npages: length in pages of the range to be mapped. For convenience, a
+ * length of zero is taken to mean "the remainder of the object"
+ *
+ * Map a given range of a GEM object into kernel virtual space. The caller must
+ * make sure the associated pages are gathered and pinned before calling this
+ * function, and is responsible for unmapping the returned address when it is no
+ * longer required, by calling i915_gem_addr_unmap().
+ *
+ * Returns the address at which the object has been mapped, or NULL on failure.
+ */
+void *__must_check i915_gem_object_map_range(const struct drm_i915_gem_object *obj,
+ unsigned long first,
+ unsigned long npages);
+
+static inline void i915_gem_addr_unmap(void *mapped_addr)
+{
+ if (is_vmalloc_addr(mapped_addr))
+ vunmap(mapped_addr);
+ else
+ kunmap(kmap_to_page(mapped_addr));
+}
+
+/**
* i915_gem_object_pin_map - return a contiguous mapping of the entire object
* @obj - the object to map into kernel address space
*
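A minimal usage sketch for the pair of helpers declared above, following the kernel-doc contract (pages gathered and pinned beforehand, address unmapped when no longer needed); the surrounding caller and its error handling are hypothetical:

	u32 *vaddr;

	i915_gem_object_pin_pages(obj);	/* backing pages must stay resident */

	vaddr = i915_gem_object_map_range(obj, first, npages);
	if (vaddr == NULL) {
		i915_gem_object_unpin_pages(obj);
		return -ENOMEM;
	}

	/* ... read or write through vaddr ... */

	/* works whether map_range kmapped one page or vmapped several */
	i915_gem_addr_unmap(vaddr);
	i915_gem_object_unpin_pages(obj);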
@@ -2233,10 +2233,7 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
list_del(&obj->global_list);
if (obj->mapping) {
- if (is_vmalloc_addr(obj->mapping))
- vunmap(obj->mapping);
- else
- kunmap(kmap_to_page(obj->mapping));
+ i915_gem_addr_unmap(obj->mapping);
obj->mapping = NULL;
}
@@ -2408,6 +2405,55 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
return 0;
}
+void *i915_gem_object_map_range(const struct drm_i915_gem_object *obj,
+ unsigned long first,
+ unsigned long npages)
+{
+ unsigned long max_pages = obj->base.size >> PAGE_SHIFT;
+ struct scatterlist *sg = obj->pages->sgl;
+ struct sg_page_iter sg_iter;
+ struct page **pages;
+ unsigned long i = 0;
+ void *addr = NULL;
+
+ /* Minimal range check */
+ if (first + npages > max_pages) {
+ DRM_DEBUG_DRIVER("Invalid page range\n");
+ return NULL;
+ }
+
+ /* npages==0 is shorthand for "the rest of the object" */
+ if (npages == 0)
+ npages = max_pages - first;
+
+ /* A single page at the start of the object can always be kmapped */
+ if (npages == 1 && first == 0)
+ return kmap(sg_page(sg));
+
+ pages = drm_malloc_gfp(npages, sizeof(*pages), GFP_TEMPORARY);
+ if (pages == NULL) {
+ DRM_DEBUG_DRIVER("Failed to get space for pages\n");
+ return NULL;
+ }
+
+ for_each_sg_page(sg, &sg_iter, obj->pages->nents, first) {
+ pages[i] = sg_page_iter_page(&sg_iter);
+ if (++i == npages) {
+ addr = vmap(pages, npages, 0, PAGE_KERNEL);
+ break;
+ }
+ }
+
+ /* We should have got here via the 'break' above */
+ WARN_ON(i != npages);
+ if (addr == NULL)
+ DRM_DEBUG_DRIVER("Failed to vmap pages\n");
+
+ drm_free_large(pages);
+
+ return addr;
+}
+
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
{
int ret;
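To make the npages == 0 shorthand concrete, a hedged sketch (obj is illustrative, pages assumed pinned) of mapping an entire object with no explicit size, exactly as i915_gem_object_pin_map() does in the next hunk:

	void *whole;

	/* equivalent to i915_gem_object_map_range(obj, 0, obj->base.size >> PAGE_SHIFT) */
	whole = i915_gem_object_map_range(obj, 0, 0);
	if (whole == NULL)
		return -ENOMEM;

	/* a one-page object comes back kmapped, anything larger vmapped;
	 * i915_gem_addr_unmap() tells the two apart via is_vmalloc_addr()
	 */
	i915_gem_addr_unmap(whole);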
@@ -2421,27 +2467,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
i915_gem_object_pin_pages(obj);
if (obj->mapping == NULL) {
- struct page **pages;
-
- pages = NULL;
- if (obj->base.size == PAGE_SIZE)
- obj->mapping = kmap(sg_page(obj->pages->sgl));
- else
- pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT,
- sizeof(*pages),
- GFP_TEMPORARY);
- if (pages != NULL) {
- struct sg_page_iter sg_iter;
- int n;
-
- n = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter,
- obj->pages->nents, 0)
- pages[n++] = sg_page_iter_page(&sg_iter);
-
- obj->mapping = vmap(pages, n, 0, PAGE_KERNEL);
- drm_free_large(pages);
- }
+ obj->mapping = i915_gem_object_map_range(obj, 0, 0);
if (obj->mapping == NULL) {
i915_gem_object_unpin_pages(obj);
return ERR_PTR(-ENOMEM);