@@ -40,11 +40,13 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 					     enum dma_data_direction dir)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
-	struct sg_table *st;
-	struct scatterlist *src, *dst;
-	int ret, i;
+	struct drm_device *dev = obj->base.dev;
+	struct i915_sg_create_state *state;
+	struct sgt_iter sgt_iter;
+	struct page *page;
+	int ret;
 
-	ret = i915_mutex_lock_interruptible(obj->base.dev);
+	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		goto err;
 
@@ -55,40 +57,33 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 	i915_gem_object_pin_pages(obj);
 
 	/* Copy sg so that we make an independent mapping */
-	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
-	if (st == NULL) {
-		ret = -ENOMEM;
+	state = i915_sg_create(obj->pages->nents);
+	if (IS_ERR(state)) {
+		ret = PTR_ERR(state);
 		goto err_unpin;
 	}
 
-	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
-	if (ret)
-		goto err_free;
-
-	src = obj->pages->sgl;
-	dst = st->sgl;
-	for (i = 0; i < obj->pages->nents; i++) {
-		sg_set_page(dst, sg_page(src), src->length, 0);
-		dst = sg_next(dst);
-		src = sg_next(src);
-	}
+	for_each_sgt_page(page, sgt_iter, obj->pages)
+		i915_sg_add_page(state, page);
 
-	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
+	i915_sg_complete(state);
+
+	if (!dma_map_sg(attachment->dev,
+			state->st->sgl, state->st->nents, dir)) {
 		ret =-ENOMEM;
 		goto err_free_sg;
 	}
 
-	mutex_unlock(&obj->base.dev->struct_mutex);
-	return st;
+	mutex_unlock(&dev->struct_mutex);
+
+	return state->st;
 
 err_free_sg:
-	sg_free_table(st);
-err_free:
-	kfree(st);
+	i915_sg_abort(state);
 err_unpin:
 	i915_gem_object_unpin_pages(obj);
 err_unlock:
-	mutex_unlock(&obj->base.dev->struct_mutex);
+	mutex_unlock(&dev->struct_mutex);
 err:
 	return ERR_PTR(ret);
 }
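
For context when reading the hunk above: the new flow builds the copied sg_table through a small builder object instead of open-coding the scatterlist walk. The i915_sg_create()/i915_sg_add_page()/i915_sg_complete()/i915_sg_abort() helpers and struct i915_sg_create_state are introduced elsewhere, so the sketch below is only a guess at their shape. It assumes the sg_table sits at the start of the state allocation, so that the usual sg_free_table()/kfree() teardown on the returned table in the unmap path also releases the builder state; every field and detail beyond the names used above is an assumption, not the actual implementation.

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Hypothetical layout: the sg_table must stay the first member so that
 * kfree() on the pointer handed out via state->st frees the whole state. */
struct i915_sg_create_state {
	struct sg_table table;		/* table under construction */
	struct sg_table *st;		/* always &table; what callers return */
	struct scatterlist *cur;	/* next entry to fill */
};

static struct i915_sg_create_state *i915_sg_create(unsigned int nents)
{
	struct i915_sg_create_state *state;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(&state->table, nents, GFP_KERNEL)) {
		kfree(state);
		return ERR_PTR(-ENOMEM);
	}

	state->st = &state->table;
	state->cur = state->table.sgl;
	return state;
}

static void i915_sg_add_page(struct i915_sg_create_state *state,
			     struct page *page)
{
	/* for_each_sgt_page() hands out individual pages, one per entry. */
	sg_set_page(state->cur, page, PAGE_SIZE, 0);
	state->cur = sg_next(state->cur);
}

static void i915_sg_complete(struct i915_sg_create_state *state)
{
	/* Nothing to finalise in this sketch: sg_alloc_table() already
	 * terminated the chain.  A real implementation might coalesce
	 * contiguous pages or trim unused entries here. */
}

static void i915_sg_abort(struct i915_sg_create_state *state)
{
	sg_free_table(&state->table);
	kfree(state);
}

Whatever the real definitions look like, the effect on the mapping function is the point: the err_free_sg/err_free pair collapses into a single i915_sg_abort() call, and the copy loop no longer has to track src/dst scatterlist cursors by hand.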