From patchwork Mon Aug 15 08:14:30 2022
From: oushixiong
To: Dave Airlie
Subject: [PATCH] drm/ast: add dmabuf/prime buffer sharing support
Date: Mon, 15 Aug 2022 16:14:30 +0800
Message-Id: <20220815081430.2919066-1-oushixiong@kylinos.cn>
Cc: oushixiong, Thomas Zimmermann, David Airlie, dri-devel@lists.freedesktop.org,
    Sumit Semwal, linaro-mm-sig@lists.linaro.org, kernel test robot,
    Christian König, linux-media@vger.kernel.org

This patch adds the AST-specific code for the DRM PRIME feature. Wiring up the
PRIME import/export hooks resolves the Xorg conflict that occurs when an AST
GPU and an AMD GPU are present in the same system, so that both devices can
drive a display.
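As a rough illustration of the sharing path this enables (a sketch only, not
part of the diff; the device paths, the exporting driver and the GEM handle
below are assumptions), userspace on such a dual-GPU system exports a buffer
from one device and imports it on the AST device along these lines:

#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <xf86drm.h>

/* Sketch: share a GEM buffer from an exporting GPU (e.g. amdgpu) with ast.
 * Error handling and fd cleanup are trimmed for brevity. */
static int share_buffer_with_ast(uint32_t exporter_handle)
{
	int amd_fd = open("/dev/dri/card0", O_RDWR);	/* assumed exporter node */
	int ast_fd = open("/dev/dri/card1", O_RDWR);	/* assumed ast node */
	int prime_fd = -1;
	uint32_t ast_handle = 0;

	if (amd_fd < 0 || ast_fd < 0)
		return -1;

	/* Export the exporter's GEM handle as a dma-buf fd (PRIME_HANDLE_TO_FD). */
	if (drmPrimeHandleToFD(amd_fd, exporter_handle, DRM_CLOEXEC, &prime_fd))
		return -1;

	/* Import it on the ast device (PRIME_FD_TO_HANDLE); this is the path served
	 * by the .prime_fd_to_handle and .gem_prime_import_sg_table hooks added here. */
	if (drmPrimeFDToHandle(ast_fd, prime_fd, &ast_handle))
		return -1;

	close(prime_fd);
	return 0;
}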
Signed-off-by: oushixiong
Reported-by: kernel test robot
---
 drivers/gpu/drm/ast/ast_drv.c  |  22 ++++++
 drivers/gpu/drm/ast/ast_mode.c | 125 ++++++++++++++++++++++++++++++++-
 2 files changed, 146 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 7465c4f0156a..6c1f75174368 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -28,6 +28,7 @@
 #include 
 #include 
+#include 
 #include 
 #include 
@@ -50,6 +51,23 @@ module_param_named(modeset, ast_modeset, int, 0400);
 
 DEFINE_DRM_GEM_FOPS(ast_fops);
 
+static struct drm_gem_object *ast_gem_prime_import_sg_table(struct drm_device *dev,
+							     struct dma_buf_attachment *attach,
+							     struct sg_table *sg)
+{
+	struct drm_gem_vram_object *gbo;
+	struct dma_resv *resv = attach->dmabuf->resv;
+
+	ww_mutex_lock(&resv->lock, NULL);
+	gbo = drm_gem_vram_create(dev, attach->dmabuf->size, 0);
+	ww_mutex_unlock(&resv->lock);
+
+	if (IS_ERR(gbo))
+		return NULL;
+
+	return &gbo->bo.base;
+}
+
 static const struct drm_driver ast_driver = {
 	.driver_features = DRIVER_ATOMIC |
 			   DRIVER_GEM |
@@ -63,6 +81,10 @@ static const struct drm_driver ast_driver = {
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
 
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_import_sg_table = ast_gem_prime_import_sg_table,
+
 	DRM_GEM_VRAM_DRIVER
 };
 
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 45b56b39ad47..ebe732705e34 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -48,6 +48,8 @@
 #include "ast_drv.h"
 #include "ast_tables.h"
 
+MODULE_IMPORT_NS(DMA_BUF);
+
 static inline void ast_load_palette_index(struct ast_private *ast,
 				     u8 index, u8 red, u8 green,
 				     u8 blue)
@@ -1535,8 +1537,129 @@ static const struct drm_mode_config_helper_funcs ast_mode_config_helper_funcs =
 	.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
 };
 
+static int ast_handle_damage(struct drm_framebuffer *fb, int x, int y,
+			     int width, int height)
+{
+	struct drm_gem_vram_object *dst_bo = NULL;
+	void *dst = NULL;
+	int ret = 0, i;
+	unsigned long offset = 0;
+	bool unmap = false;
+	unsigned int bytesPerPixel;
+	struct iosys_map map;
+	struct iosys_map dmabuf_map;
+
+	bytesPerPixel = fb->format->cpp[0];
+
+	if (!fb->obj[0]->import_attach)
+		return -EINVAL;
+
+	if (!fb->obj[0]->import_attach->dmabuf->vmap_ptr.vaddr) {
+		ret = dma_buf_vmap(fb->obj[0]->import_attach->dmabuf, &dmabuf_map);
+		if (ret)
+			return 0;
+	} else
+		dmabuf_map.vaddr = fb->obj[0]->import_attach->dmabuf->vmap_ptr.vaddr;
+
+	dst_bo = drm_gem_vram_of_gem(fb->obj[0]);
+
+	ret = drm_gem_vram_pin(dst_bo, 0);
+	if (ret) {
+		DRM_ERROR("ast_bo_pin failed\n");
+		goto error;
+	}
+
+	if (!dst_bo->map.vaddr) {
+		ret = drm_gem_vram_vmap(dst_bo, &map);
+		if (ret) {
+			DRM_ERROR("failed to vmap fbcon\n");
+			drm_gem_vram_unpin(dst_bo);
+			goto error;
+		}
+		unmap = true;
+	}
+	dst = dst_bo->map.vaddr;
+
+	for (i = y; i < y + height; i++) {
+		offset = i * fb->pitches[0] + (x * bytesPerPixel);
+		memcpy_toio(dst + offset, dmabuf_map.vaddr + offset,
+			    width * bytesPerPixel);
+	}
+
+	if (unmap)
+		drm_gem_vram_vunmap(dst_bo, &map);
+
+	drm_gem_vram_unpin(dst_bo);
+error:
+	return 0;
+}
+
+
+static int ast_user_framebuffer_dirty(struct drm_framebuffer *fb,
+				      struct drm_file *file,
+				      unsigned int flags,
+				      unsigned int color,
+				      struct drm_clip_rect *clips,
+				      unsigned int num_clips)
+{
+	int i, ret = 0;
+
+	drm_modeset_lock_all(fb->dev);
+	if (fb->obj[0]->import_attach) {
+		ret = dma_buf_begin_cpu_access(fb->obj[0]->import_attach->dmabuf,
+					       DMA_FROM_DEVICE);
+		if (ret)
+			goto unlock;
+	}
+
+	for (i = 0; i < num_clips; i++) {
+		ret = ast_handle_damage(fb, clips[i].x1, clips[i].y1,
+					clips[i].x2 - clips[i].x1, clips[i].y2 - clips[i].y1);
+		if (ret)
+			break;
+	}
+
+	if (fb->obj[0]->import_attach) {
+		dma_buf_end_cpu_access(fb->obj[0]->import_attach->dmabuf,
+				       DMA_FROM_DEVICE);
+	}
+
+unlock:
+	drm_modeset_unlock_all(fb->dev);
+
+	return ret;
+}
+
+static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct iosys_map dmabuf_map;
+
+	if (fb->obj[0]->import_attach) {
+		dmabuf_map.vaddr = fb->obj[0]->import_attach->dmabuf->vmap_ptr.vaddr;
+		if (dmabuf_map.vaddr)
+			dma_buf_vunmap(fb->obj[0]->import_attach->dmabuf,
+				       &dmabuf_map);
+	}
+
+	drm_gem_fb_destroy(fb);
+}
+
+static const struct drm_framebuffer_funcs ast_gem_fb_funcs_dirtyfb = {
+	.destroy = ast_user_framebuffer_destroy,
+	.create_handle = drm_gem_fb_create_handle,
+	.dirty = ast_user_framebuffer_dirty,
+};
+
+static struct drm_framebuffer *
+ast_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file,
+			     const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	return drm_gem_fb_create_with_funcs(dev, file, mode_cmd,
+					    &ast_gem_fb_funcs_dirtyfb);
+}
+
 static const struct drm_mode_config_funcs ast_mode_config_funcs = {
-	.fb_create = drm_gem_fb_create,
+	.fb_create = ast_gem_fb_create_with_dirty,
 	.mode_valid = drm_vram_helper_mode_valid,
 	.atomic_check = drm_atomic_helper_check,
 	.atomic_commit = drm_atomic_helper_commit,
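For context beyond the diff: the new .dirty callback is reached through the
DIRTYFB ioctl, which is how an X server or compositor asks the driver to copy
damaged regions of the imported dma-buf into VRAM. A minimal userspace sketch
(the file descriptor, framebuffer id and clip rectangle are assumptions for
illustration only):

#include <stdint.h>
#include <xf86drmMode.h>

/* Sketch: report a damaged region on a framebuffer backed by an imported
 * dma-buf; the kernel routes this to ast_user_framebuffer_dirty(), which
 * copies the dirty rows into VRAM via ast_handle_damage(). */
static void flush_damage(int ast_fd, uint32_t fb_id)
{
	drmModeClip clip = {
		.x1 = 0, .y1 = 0,	/* assumed damage rectangle */
		.x2 = 1024, .y2 = 768,
	};

	drmModeDirtyFB(ast_fd, fb_id, &clip, 1);
}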