From patchwork Thu Sep 11 15:36:20 2014 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lionel Landwerlin X-Patchwork-Id: 4889121 Return-Path: X-Original-To: patchwork-dri-devel@patchwork.kernel.org Delivered-To: patchwork-parsemail@patchwork2.web.kernel.org Received: from mail.kernel.org (mail.kernel.org [198.145.19.201]) by patchwork2.web.kernel.org (Postfix) with ESMTP id 18A54C0338 for ; Thu, 11 Sep 2014 15:37:31 +0000 (UTC) Received: from mail.kernel.org (localhost [127.0.0.1]) by mail.kernel.org (Postfix) with ESMTP id 5378820256 for ; Thu, 11 Sep 2014 15:37:26 +0000 (UTC) Received: from gabe.freedesktop.org (gabe.freedesktop.org [131.252.210.177]) by mail.kernel.org (Postfix) with ESMTP id 597BC2016C for ; Thu, 11 Sep 2014 15:37:21 +0000 (UTC) Received: from gabe.freedesktop.org (localhost [127.0.0.1]) by gabe.freedesktop.org (Postfix) with ESMTP id E51436E030; Thu, 11 Sep 2014 08:37:19 -0700 (PDT) X-Original-To: dri-devel@lists.freedesktop.org Delivered-To: dri-devel@lists.freedesktop.org Received: from mga09.intel.com (mga09.intel.com [134.134.136.24]) by gabe.freedesktop.org (Postfix) with ESMTP id B969F6E116 for ; Thu, 11 Sep 2014 08:37:18 -0700 (PDT) Received: from orsmga001.jf.intel.com ([10.7.209.18]) by orsmga102.jf.intel.com with ESMTP; 11 Sep 2014 08:30:35 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.04,506,1406617200"; d="scan'208";a="571818441" Received: from unknown (HELO ivy.amr.corp.intel.com) ([10.255.12.173]) by orsmga001.jf.intel.com with ESMTP; 11 Sep 2014 08:36:24 -0700 From: Lionel Landwerlin To: dri-devel@lists.freedesktop.org Subject: [PATCH] intel: make bufmgr_gem shareable from different API Date: Thu, 11 Sep 2014 16:36:20 +0100 Message-Id: <1410449780-21729-2-git-send-email-lionel.g.landwerlin@intel.com> X-Mailer: git-send-email 2.0.1 In-Reply-To: <1410449780-21729-1-git-send-email-lionel.g.landwerlin@intel.com> References: 
<1410449780-21729-1-git-send-email-lionel.g.landwerlin@intel.com> X-BeenThere: dri-devel@lists.freedesktop.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: Direct Rendering Infrastructure - Development List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , MIME-Version: 1.0 Errors-To: dri-devel-bounces@lists.freedesktop.org Sender: "dri-devel" X-Spam-Status: No, score=-6.7 required=5.0 tests=BAYES_00, RCVD_IN_DNSWL_MED, RP_MATCHES_RCVD, UNPARSEABLE_RELAY autolearn=unavailable version=3.3.1 X-Spam-Checker-Version: SpamAssassin 3.3.1 (2010-03-16) on mail.kernel.org X-Virus-Scanned: ClamAV using ClamSMTP When using Mesa and LibVA in the same process, one would like to be able to bind buffers from the output of the decoder to a GL texture through an EGLImage. LibVA can reuse buffers allocated by Gbm through a file descriptor. It will then wrap it into a drm_intel_bo with drm_intel_bo_gem_create_from_prime(). The problem at the moment is that both libraries get a different drm_intel_bufmgr object when they call drm_intel_bufmgr_gem_init() even though they're using the same drm file descriptor. As a result, instead of manipulating the same buffer object for a given file descriptor, they get 2 different drm_intel_bo objects and 2 different refcounts, leading one of the libraries to get errors from the kernel on invalid BO when one of the 2 libraries is done with a shared buffer. This patch modifies drm_intel_bufmgr_gem_init() so, given a file descriptor, it will look for an already existing drm_intel_bufmgr using the same file descriptor and return that object. 
Signed-off-by: Lionel Landwerlin --- intel/intel_bufmgr_gem.c | 82 +++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 70 insertions(+), 12 deletions(-) diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c index 0e1cb0d..ce43bc6 100644 --- a/intel/intel_bufmgr_gem.c +++ b/intel/intel_bufmgr_gem.c @@ -94,6 +94,8 @@ struct drm_intel_gem_bo_bucket { typedef struct _drm_intel_bufmgr_gem { drm_intel_bufmgr bufmgr; + atomic_t refcount; + int fd; int max_relocs; @@ -111,6 +113,8 @@ typedef struct _drm_intel_bufmgr_gem { int num_buckets; time_t time; + drmMMListHead managers; + drmMMListHead named; drmMMListHead vma_cache; int vma_count, vma_open, vma_max; @@ -3186,6 +3190,65 @@ drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo, bo_gem->aub_annotation_count = count; } +static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER; +static drmMMListHead bufmgr_list = { NULL, NULL }; + +static drm_intel_bufmgr_gem * +drm_intel_bufmgr_gem_find_or_create_for_fd(int fd, int *found) +{ + drm_intel_bufmgr_gem *bufmgr_gem; + + assert(pthread_mutex_lock(&bufmgr_list_mutex) == 0); + + if (bufmgr_list.next == NULL) { + DRMINITLISTHEAD(&bufmgr_list); + } else { + DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) { + if (bufmgr_gem->fd == fd) { + atomic_inc(&bufmgr_gem->refcount); + *found = 1; + goto exit; + } + } + } + + bufmgr_gem = calloc(1, sizeof(*bufmgr_gem)); + if (bufmgr_gem == NULL) + goto exit; + + bufmgr_gem->fd = fd; + atomic_set(&bufmgr_gem->refcount, 1); + + DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list); + + assert(pthread_mutex_init(&bufmgr_gem->lock, NULL) == 0); + + pthread_mutex_lock(&bufmgr_gem->lock); + + *found = 0; + +exit: + pthread_mutex_unlock(&bufmgr_list_mutex); + + return bufmgr_gem; +} + +static void +drm_intel_bufmgr_gem_unref (drm_intel_bufmgr *bufmgr) +{ + drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr; + + if (atomic_dec_and_test(&bufmgr_gem->refcount)) { + 
assert(pthread_mutex_lock(&bufmgr_list_mutex) == 0); + + DRMLISTDEL(&bufmgr_gem->managers); + + pthread_mutex_unlock(&bufmgr_list_mutex); + + drm_intel_bufmgr_gem_destroy(bufmgr); + } +} + /** * Initializes the GEM buffer manager, which uses the kernel to allocate, map, * and manage map buffer objections. @@ -3201,16 +3264,9 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size) int ret, tmp; bool exec2 = false; - bufmgr_gem = calloc(1, sizeof(*bufmgr_gem)); - if (bufmgr_gem == NULL) - return NULL; - - bufmgr_gem->fd = fd; - - if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) { - free(bufmgr_gem); - return NULL; - } + bufmgr_gem = drm_intel_bufmgr_gem_find_or_create_for_fd(fd, &ret); + if (bufmgr_gem && ret) + return &bufmgr_gem->bufmgr; ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_APERTURE, @@ -3245,7 +3301,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size) else if (IS_GEN8(bufmgr_gem->pci_device)) bufmgr_gem->gen = 8; else { - free(bufmgr_gem); + drm_intel_bufmgr_gem_unref(&bufmgr_gem->bufmgr); return NULL; } @@ -3357,7 +3413,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size) bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec; bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy; bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise; - bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy; + bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref; bufmgr_gem->bufmgr.debug = 0; bufmgr_gem->bufmgr.check_aperture_space = drm_intel_gem_check_aperture_space; @@ -3373,5 +3429,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size) DRMINITLISTHEAD(&bufmgr_gem->vma_cache); bufmgr_gem->vma_max = -1; /* unlimited by default */ + pthread_mutex_unlock(&bufmgr_gem->lock); + return &bufmgr_gem->bufmgr; }