@@ -864,6 +864,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
*/
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
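+ /* Tear down any cached kernel maps before the object is swapped out. */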
+ vmw_resource_swap_notify(bo);
(void) ttm_bo_wait(bo, false, false);
}
@@ -323,3 +323,54 @@ void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
+
+
+/**
+ * vmw_dma_buffer_unmap - Tear down a cached buffer object map.
+ *
+ * @vbo: The buffer object whose map we are tearing down.
+ *
+ * This function tears down a cached map set up using
+ * vmw_dma_buffer_map_and_cache().
+ */
+void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo)
+{
+ if (vbo->map.bo == NULL)
+ return;
+
+ ttm_bo_kunmap(&vbo->map);
+}
+
+
+/**
+ * vmw_dma_buffer_map_and_cache - Map a buffer object and cache the map
+ *
+ * @vbo: The buffer object to map
+ * Return: A kernel virtual address or NULL if mapping failed.
+ *
+ * This function maps a buffer object into the kernel address space, or
+ * returns the virtual kernel address of an already existing map. The virtual
+ * address remains valid as long as the buffer object is pinned or reserved.
+ * The cached map is torn down on any of
+ * 1) Buffer object move
+ * 2) Buffer object swapout
+ * 3) Buffer object destruction
+ *
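+ * A typical calling sequence, sketched here for illustration only
+ * (src and len are placeholders; the fbdev dirty-flush path in this
+ * patch follows the same pattern):
+ *
+ *   ttm_bo_reserve(&vbo->base, false, false, NULL);
+ *   virtual = vmw_dma_buffer_map_and_cache(vbo);
+ *   if (virtual)
+ *           memcpy(virtual, src, len);
+ *   ttm_bo_unreserve(&vbo->base);
+ *
+ * No explicit unmap is needed; the cached map is reused on the next
+ * call and torn down by the events listed above.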
+ */
+void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo)
+{
+ struct ttm_buffer_object *bo = &vbo->base;
+ bool not_used;
+ void *virtual;
+ int ret;
+
+ virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
+ if (virtual)
+ return virtual;
+
+ ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
+ if (ret)
+ DRM_ERROR("Buffer object map failed: %d.\n", ret);
+
+ return ttm_kmap_obj_virtual(&vbo->map, &not_used);
+}
@@ -92,6 +92,8 @@ struct vmw_dma_buffer {
s32 pin_count;
/* Not ref-counted. Protected by binding_mutex */
struct vmw_resource *dx_query_ctx;
+ /* Protected by reservation */
+ struct ttm_bo_kmap_obj map;
};
/**
@@ -673,6 +675,7 @@ extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
+extern void vmw_resource_swap_notify(struct ttm_buffer_object *bo);
extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence);
@@ -701,6 +704,8 @@ extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
SVGAGuestPtr *ptr);
extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
+extern void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo);
+extern void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo);
/**
* Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -43,8 +43,6 @@ struct vmw_fb_par {
struct mutex bo_mutex;
struct vmw_dma_buffer *vmw_bo;
- struct ttm_bo_kmap_obj map;
- void *bo_ptr;
unsigned bo_size;
struct drm_framebuffer *set_fb;
struct drm_display_mode *set_mode;
@@ -174,11 +172,13 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
struct vmw_private *vmw_priv = par->vmw_priv;
struct fb_info *info = vmw_priv->fb_info;
unsigned long irq_flags;
- s32 dst_x1, dst_x2, dst_y1, dst_y2, w, h;
+ s32 dst_x1, dst_x2, dst_y1, dst_y2, w = 0, h = 0;
u32 cpp, max_x, max_y;
struct drm_clip_rect clip;
struct drm_framebuffer *cur_fb;
u8 *src_ptr, *dst_ptr;
+ struct vmw_dma_buffer *vbo = par->vmw_bo;
+ void *virtual;
if (vmw_priv->suspended)
return;
@@ -188,10 +188,16 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
if (!cur_fb)
goto out_unlock;
+ (void) ttm_read_lock(&vmw_priv->reservation_sem, false);
+ (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
+ virtual = vmw_dma_buffer_map_and_cache(vbo);
+ if (!virtual)
+ goto out_unreserve;
+
spin_lock_irqsave(&par->dirty.lock, irq_flags);
if (!par->dirty.active) {
spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
- goto out_unlock;
+ goto out_unreserve;
}
/*
@@ -221,7 +227,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
if (w && h) {
- dst_ptr = (u8 *)par->bo_ptr +
+ dst_ptr = (u8 *)virtual +
(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
src_ptr = (u8 *)par->vmalloc +
((dst_y1 + par->fb_y) * info->fix.line_length +
@@ -237,7 +243,12 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
clip.x2 = dst_x2;
clip.y1 = dst_y1;
clip.y2 = dst_y2;
+ }
+out_unreserve:
+ ttm_bo_unreserve(&vbo->base);
+ ttm_read_unlock(&vmw_priv->reservation_sem);
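+
+ /*
+ * Issue the dirty callback only after dropping our reservation and
+ * the read lock, since the callback may need to take them itself.
+ */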
+ if (w && h) {
WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
&clip, 1));
vmw_fifo_flush(vmw_priv, false);
@@ -504,18 +515,8 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
par->set_fb = NULL;
}
- if (par->vmw_bo && detach_bo) {
- struct vmw_private *vmw_priv = par->vmw_priv;
-
- if (par->bo_ptr) {
- ttm_bo_kunmap(&par->map);
- par->bo_ptr = NULL;
- }
- if (unref_bo)
- vmw_dmabuf_unreference(&par->vmw_bo);
- else if (vmw_priv->active_display_unit != vmw_du_legacy)
- vmw_dmabuf_unpin(par->vmw_priv, par->vmw_bo, false);
- }
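+ /*
+ * Unmapping is now handled on buffer object move, swapout and
+ * destruction, and the fbdev map no longer requires a persistent
+ * pin, so only drop our reference here.
+ */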
+ if (par->vmw_bo && detach_bo && unref_bo)
+ vmw_dmabuf_unreference(&par->vmw_bo);
return 0;
}
@@ -636,38 +637,6 @@ static int vmw_fb_set_par(struct fb_info *info)
if (ret)
goto out_unlock;
- if (!par->bo_ptr) {
- struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(set.fb);
-
- /*
- * Pin before mapping. Since we don't know in what placement
- * to pin, call into KMS to do it for us. LDU doesn't require
- * additional pinning because set_config() would've pinned
- * it already
- */
- if (vmw_priv->active_display_unit != vmw_du_legacy) {
- ret = vfb->pin(vfb);
- if (ret) {
- DRM_ERROR("Could not pin the fbdev "
- "framebuffer.\n");
- goto out_unlock;
- }
- }
-
- ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
- par->vmw_bo->base.num_pages, &par->map);
- if (ret) {
- if (vmw_priv->active_display_unit != vmw_du_legacy)
- vfb->unpin(vfb);
-
- DRM_ERROR("Could not map the fbdev framebuffer.\n");
- goto out_unlock;
- }
-
- par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
- }
-
-
vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
par->set_fb->width, par->set_fb->height);
@@ -354,6 +354,7 @@ void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+ vmw_dma_buffer_unmap(vmw_bo);
kfree(vmw_bo);
}
@@ -361,6 +362,7 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
+ vmw_dma_buffer_unmap(&vmw_user_bo->dma);
ttm_prime_object_kfree(vmw_user_bo, prime);
}
@@ -1239,6 +1241,12 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
dma_buf = container_of(bo, struct vmw_dma_buffer, base);
+ /*
+ * Kill any cached kernel maps before move. An optimization would
+ * be to do this only if the source or destination memory type is
+ * VRAM.
+ */
+ vmw_dma_buffer_unmap(dma_buf);
+
if (mem->mem_type != VMW_PL_MOB) {
struct vmw_resource *res, *n;
struct ttm_validate_buffer val_buf;
@@ -1262,6 +1270,21 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
}
+/**
+ * vmw_resource_swap_notify - swapout notify callback.
+ *
+ * @bo: The buffer object to be swapped out.
+ */
+void vmw_resource_swap_notify(struct ttm_buffer_object *bo)
+{
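+ /*
+ * Only buffer objects created by vmwgfx embed a struct
+ * vmw_dma_buffer; recognize them by their destroy callback
+ * before touching the cached map.
+ */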
+ if (bo->destroy != vmw_dmabuf_bo_free &&
+ bo->destroy != vmw_user_dmabuf_destroy)
+ return;
+
+ /* Kill any cached kernel maps before swapout */
+ vmw_dma_buffer_unmap(vmw_dma_buffer(bo));
+}
+
/**
* vmw_query_readback_all - Read back cached query states