@@ -257,6 +257,54 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
obj->tiling_mode != I915_TILING_NONE;
}
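+/*
+ * i915_gem_get_user_pages - pin an array of user pages for GEM I/O
+ *
+ * Must be called with dev->struct_mutex held; the mutex is dropped and
+ * reacquired if the fast path cannot pin every page. On return,
+ * *num_pages holds the number of pages actually pinned and *pages_out
+ * the page array; the caller must release each pinned page with
+ * page_cache_release() and free the array with drm_free_large(), even
+ * when -EFAULT is returned for a partial pin.
+ */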
+static int
+i915_gem_get_user_pages(struct drm_device *dev,
+ unsigned long addr,
+ bool write,
+ int *num_pages,
+ struct page ***pages_out)
+{
+	struct page **pages;
+	int pinned, ret;
+	int n = *num_pages;
+
+	/* Keep the caller's cleanup path safe on early error returns. */
+	*num_pages = 0;
+	*pages_out = NULL;
+
+	if (n < 1 || n > ULONG_MAX / sizeof(*pages))
+		return -EINVAL;
+
+	/*
+	 * Try a cheap kmalloc first; __GFP_NOWARN and __GFP_NORETRY avoid
+	 * stalls and failure warnings when the array is large, in which
+	 * case we fall back to drm_malloc_ab() and its vmalloc path.
+	 */
+	pages = kmalloc(n * sizeof(*pages),
+			GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+	if (pages == NULL) {
+		pages = drm_malloc_ab(n, sizeof(*pages));
+		if (pages == NULL)
+			return -ENOMEM;
+	}
+
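+	/* Fast path: pin what we can without blocking or taking mmap_sem. */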
+	pinned = __get_user_pages_fast(addr, n, write, pages);
+	if (pinned < n) {
+		struct mm_struct *mm = current->mm;
+
+		/*
+		 * Slow path: get_user_pages() may fault in the remaining
+		 * pages, and servicing those faults may in turn need
+		 * struct_mutex, so drop it before taking mmap_sem.
+		 */
+		mutex_unlock(&dev->struct_mutex);
+		down_read(&mm->mmap_sem);
+		ret = get_user_pages(current, mm,
+				     addr + ((unsigned long)pinned << PAGE_SHIFT),
+				     n - pinned,
+				     write, 0,
+				     pages + pinned,
+				     NULL);
+		up_read(&mm->mmap_sem);
+		mutex_lock(&dev->struct_mutex);
+		if (ret > 0)
+			pinned += ret;
+	}
+
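+	/*
+	 * A partial pin is reported as -EFAULT; the caller still sees the
+	 * pinned count via *num_pages and must release those pages.
+	 */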
+ ret = 0;
+ if (pinned < n)
+ ret = -EFAULT;
+
+ *num_pages = pinned;
+ *pages_out = pages;
+ return ret;
+}
+
static inline void
slow_shmem_copy(struct page *dst_page,
int dst_offset,
@@ -398,11 +446,11 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
struct drm_file *file)
{
struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
- struct mm_struct *mm = current->mm;
struct page **user_pages;
ssize_t remain;
- loff_t offset, pinned_pages, i;
- loff_t first_data_page, last_data_page, num_pages;
+ loff_t offset;
+ loff_t first_data_page, last_data_page;
+ int num_pages, i;
int shmem_page_offset;
int data_page_index, data_page_offset;
int page_length;
@@ -420,20 +468,10 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
num_pages = last_data_page - first_data_page + 1;
- user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
- if (user_pages == NULL)
- return -ENOMEM;
-
- mutex_unlock(&dev->struct_mutex);
- down_read(&mm->mmap_sem);
- pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
- num_pages, 1, 0, user_pages, NULL);
- up_read(&mm->mmap_sem);
- mutex_lock(&dev->struct_mutex);
- if (pinned_pages < num_pages) {
- ret = -EFAULT;
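+	/* write=true: pread copies object contents into the user pages. */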
+ ret = i915_gem_get_user_pages(dev, data_ptr, true,
+ &num_pages, &user_pages);
+ if (ret)
goto out;
- }
ret = i915_gem_object_set_cpu_read_domain_range(obj,
args->offset,
@@ -494,7 +532,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
}
out:
- for (i = 0; i < pinned_pages; i++) {
+ for (i = 0; i < num_pages; i++) {
SetPageDirty(user_pages[i]);
mark_page_accessed(user_pages[i]);
page_cache_release(user_pages[i]);
@@ -679,10 +717,9 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev,
drm_i915_private_t *dev_priv = dev->dev_private;
ssize_t remain;
loff_t gtt_page_base, offset;
- loff_t first_data_page, last_data_page, num_pages;
- loff_t pinned_pages, i;
+ loff_t first_data_page, last_data_page;
+ int num_pages, i;
struct page **user_pages;
- struct mm_struct *mm = current->mm;
int gtt_page_offset, data_page_offset, data_page_index, page_length;
int ret;
uint64_t data_ptr = args->data_ptr;
@@ -697,28 +734,18 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev,
last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
num_pages = last_data_page - first_data_page + 1;
- user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
- if (user_pages == NULL)
- return -ENOMEM;
-
- mutex_unlock(&dev->struct_mutex);
- down_read(&mm->mmap_sem);
- pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
- num_pages, 0, 0, user_pages, NULL);
- up_read(&mm->mmap_sem);
- mutex_lock(&dev->struct_mutex);
- if (pinned_pages < num_pages) {
- ret = -EFAULT;
- goto out_unpin_pages;
- }
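+	/* write=false: pwrite only reads from the pinned user pages. */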
+ ret = i915_gem_get_user_pages(dev, data_ptr, false,
+ &num_pages, &user_pages);
+ if (ret)
+ goto out;
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
- goto out_unpin_pages;
+ goto out;
ret = i915_gem_object_put_fence(obj);
if (ret)
- goto out_unpin_pages;
+ goto out;
offset = obj->gtt_offset + args->offset;
@@ -753,8 +780,8 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev,
data_ptr += page_length;
}
-out_unpin_pages:
- for (i = 0; i < pinned_pages; i++)
+out:
+ for (i = 0; i < num_pages; i++)
page_cache_release(user_pages[i]);
drm_free_large(user_pages);
@@ -803,11 +830,11 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
if (IS_ERR(page))
return PTR_ERR(page);
- vaddr = kmap_atomic(page, KM_USER0);
+ vaddr = kmap_atomic(page);
ret = __copy_from_user_inatomic(vaddr + page_offset,
user_data,
page_length);
- kunmap_atomic(vaddr, KM_USER0);
+ kunmap_atomic(vaddr);
set_page_dirty(page);
mark_page_accessed(page);
@@ -842,11 +869,10 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
struct drm_file *file)
{
struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
- struct mm_struct *mm = current->mm;
struct page **user_pages;
ssize_t remain;
- loff_t offset, pinned_pages, i;
- loff_t first_data_page, last_data_page, num_pages;
+ loff_t first_data_page, last_data_page, offset;
+ int num_pages, i;
int shmem_page_offset;
int data_page_index, data_page_offset;
int page_length;
@@ -864,20 +890,10 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
num_pages = last_data_page - first_data_page + 1;
- user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
- if (user_pages == NULL)
- return -ENOMEM;
-
- mutex_unlock(&dev->struct_mutex);
- down_read(&mm->mmap_sem);
- pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
- num_pages, 0, 0, user_pages, NULL);
- up_read(&mm->mmap_sem);
- mutex_lock(&dev->struct_mutex);
- if (pinned_pages < num_pages) {
- ret = -EFAULT;
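+	/* write=false: pwrite only reads from the pinned user pages. */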
+ ret = i915_gem_get_user_pages(dev, data_ptr, false,
+ &num_pages, &user_pages);
+ if (ret)
goto out;
- }
ret = i915_gem_object_set_to_cpu_domain(obj, 1);
if (ret)
@@ -940,7 +956,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
}
out:
- for (i = 0; i < pinned_pages; i++)
+ for (i = 0; i < num_pages; i++)
page_cache_release(user_pages[i]);
drm_free_large(user_pages);