From patchwork Fri Aug 9 22:26:10 2019
X-Patchwork-Submitter: Matthew Auld
X-Patchwork-Id: 11087761
From: Matthew Auld
To: intel-gfx@lists.freedesktop.org
Cc: Abdiel Janulgue, dri-devel@lists.freedesktop.org
Subject: [PATCH v3 04/37] drm/i915/region: support contiguous allocations
Date: Fri, 9 Aug 2019 23:26:10 +0100
Message-Id: <20190809222643.23142-5-matthew.auld@intel.com>
In-Reply-To: <20190809222643.23142-1-matthew.auld@intel.com>
References: <20190809222643.23142-1-matthew.auld@intel.com>

Some objects may need to be allocated as a contiguous block; looking
ahead, the various kernel io_mapping interfaces seem to expect it.
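For illustration, a rough usage sketch (not part of this patch; the 2M
size is an arbitrary example, everything else is the interface as
extended below):

	/*
	 * Sketch: ask a region for a physically contiguous object.
	 * Unknown flag bits are now rejected with -EINVAL.
	 */
	obj = i915_gem_object_create_region(mem, SZ_2M,
					    I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);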
Signed-off-by: Matthew Auld
Cc: Joonas Lahtinen
Cc: Abdiel Janulgue
---
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |   4 +
 drivers/gpu/drm/i915/gem/i915_gem_region.c    |  10 +-
 drivers/gpu/drm/i915/gem/i915_gem_region.h    |   3 +-
 .../drm/i915/selftests/intel_memory_region.c  | 152 +++++++++++++++++-
 drivers/gpu/drm/i915/selftests/mock_region.c  |   5 +-
 5 files changed, 166 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 5e2fa37e9bc0..eb92243d473b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -116,6 +116,10 @@ struct drm_i915_gem_object {
 
 	I915_SELFTEST_DECLARE(struct list_head st_link);
 
+	unsigned long flags;
+#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
+#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS)
+
 	/*
 	 * Is the object to be mapped as read-only to the GPU
 	 * Only honoured if hardware has relevant pte bit
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index be126e70c90f..d9cd722b5dbf 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -42,6 +42,9 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 		return -ENOMEM;
 	}
 
+	if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
+		flags = I915_ALLOC_CONTIGUOUS;
+
 	ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
 	if (ret)
 		goto err_free_sg;
@@ -98,10 +101,12 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 }
 
 void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
-					struct intel_memory_region *mem)
+					struct intel_memory_region *mem,
+					unsigned long flags)
 {
 	INIT_LIST_HEAD(&obj->mm.blocks);
 	obj->mm.region = mem;
+	obj->flags = flags;
 
 	mutex_lock(&mem->obj_lock);
 	list_add(&obj->mm.region_link, &mem->objects);
@@ -125,6 +130,9 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
 	if (!mem)
 		return ERR_PTR(-ENODEV);
 
+	if (flags & ~I915_BO_ALLOC_FLAGS)
+		return ERR_PTR(-EINVAL);
+
 	size = round_up(size, mem->min_page_size);
 	GEM_BUG_ON(!size);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.h b/drivers/gpu/drm/i915/gem/i915_gem_region.h
index ebddc86d78f7..f2ff6f8bff74 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h
@@ -17,7 +17,8 @@ void i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
 				     struct sg_table *pages);
 
 void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
-					struct intel_memory_region *mem);
+					struct intel_memory_region *mem,
+					unsigned long flags);
 void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);
 
 struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 2f13e4c1d999..70b467d4e811 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -81,17 +81,17 @@ static int igt_mock_fill(void *arg)
 
 static void igt_mark_evictable(struct drm_i915_gem_object *obj)
 {
-	i915_gem_object_unpin_pages(obj);
+	if (i915_gem_object_has_pinned_pages(obj))
+		i915_gem_object_unpin_pages(obj);
 	obj->mm.madv = I915_MADV_DONTNEED;
 	list_move(&obj->mm.region_link, &obj->mm.region->purgeable);
 }
 
-static int igt_mock_shrink(void *arg)
+static int igt_frag_region(struct intel_memory_region *mem,
+			   struct list_head *objects)
 {
-	struct intel_memory_region *mem = arg;
 	struct drm_i915_gem_object *obj;
 	unsigned long n_objects;
-	LIST_HEAD(objects);
 	resource_size_t target;
 	resource_size_t total;
 	int err = 0;
@@ -109,7 +109,7 @@ static int igt_mock_shrink(void *arg)
 			goto err_close_objects;
 		}
 
-		list_add(&obj->st_link, &objects);
+		list_add(&obj->st_link, objects);
 
 		err = i915_gem_object_pin_pages(obj);
 		if (err)
@@ -123,6 +123,39 @@ static int igt_mock_shrink(void *arg)
 		igt_mark_evictable(obj);
 	}
 
+	return 0;
+
+err_close_objects:
+	close_objects(objects);
+	return err;
+}
+
+static void igt_defrag_region(struct list_head *objects)
+{
+	struct drm_i915_gem_object *obj;
+
+	list_for_each_entry(obj, objects, st_link) {
+		if (obj->mm.madv == I915_MADV_WILLNEED)
+			igt_mark_evictable(obj);
+	}
+}
+
+static int igt_mock_shrink(void *arg)
+{
+	struct intel_memory_region *mem = arg;
+	struct drm_i915_gem_object *obj;
+	LIST_HEAD(objects);
+	resource_size_t target;
+	resource_size_t total;
+	int err;
+
+	err = igt_frag_region(mem, &objects);
+	if (err)
+		return err;
+
+	total = resource_size(&mem->region);
+	target = mem->mm.chunk_size;
+
 	while (target <= total / 2) {
 		obj = i915_gem_object_create_region(mem, target, 0);
 		if (IS_ERR(obj)) {
@@ -154,11 +187,120 @@ static int igt_mock_shrink(void *arg)
 	return err;
 }
 
+static int igt_mock_continuous(void *arg)
+{
+	struct intel_memory_region *mem = arg;
+	struct drm_i915_gem_object *obj;
+	LIST_HEAD(objects);
+	resource_size_t target;
+	resource_size_t total;
+	int err;
+
+	err = igt_frag_region(mem, &objects);
+	if (err)
+		return err;
+
+	total = resource_size(&mem->region);
+	target = total / 2;
+
+	/*
+	 * Sanity check that we can allocate all of the available fragmented
+	 * space.
+	 */
+	obj = i915_gem_object_create_region(mem, target, 0);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto err_close_objects;
+	}
+
+	list_add(&obj->st_link, &objects);
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err) {
+		pr_err("failed to allocate available space\n");
+		goto err_close_objects;
+	}
+
+	igt_mark_evictable(obj);
+
+	/* Try the smallest possible size -- should succeed */
+	obj = i915_gem_object_create_region(mem, mem->mm.chunk_size,
+					    I915_BO_ALLOC_CONTIGUOUS);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto err_close_objects;
+	}
+
+	list_add(&obj->st_link, &objects);
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err) {
+		pr_err("failed to allocate smallest possible size\n");
+		goto err_close_objects;
+	}
+
+	igt_mark_evictable(obj);
+
+	if (obj->mm.pages->nents != 1) {
+		pr_err("[1]object spans multiple sg entries\n");
+		err = -EINVAL;
+		goto err_close_objects;
+	}
+
+	/*
+	 * Even though there is enough free space for the allocation, we
+	 * shouldn't be able to allocate it, given that it is fragmented, and
+	 * non-contiguous.
+	 */
+	obj = i915_gem_object_create_region(mem, target, I915_BO_ALLOC_CONTIGUOUS);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto err_close_objects;
+	}
+
+	list_add(&obj->st_link, &objects);
+
+	err = i915_gem_object_pin_pages(obj);
+	if (!err) {
+		pr_err("expected allocation to fail\n");
+		err = -EINVAL;
+		goto err_close_objects;
+	}
+
+	igt_defrag_region(&objects);
+
+	/* Should now succeed */
+	obj = i915_gem_object_create_region(mem, target, I915_BO_ALLOC_CONTIGUOUS);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto err_close_objects;
+	}
+
+	list_add(&obj->st_link, &objects);
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err) {
+		pr_err("failed to allocate from defragged area\n");
+		goto err_close_objects;
+	}
+
+	if (obj->mm.pages->nents != 1) {
+		pr_err("object spans multiple sg entries\n");
+		err = -EINVAL;
+	}
+
+err_close_objects:
+	close_objects(&objects);
+
+	return err;
+}
+
 int intel_memory_region_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_mock_fill),
 		SUBTEST(igt_mock_shrink),
+		SUBTEST(igt_mock_continuous),
 	};
 	struct intel_memory_region *mem;
 	struct drm_i915_private *i915;
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index cc97250dca62..d73f37712c44 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -23,6 +23,9 @@ mock_object_create(struct intel_memory_region *mem,
 	struct drm_i915_gem_object *obj;
 	unsigned int cache_level;
 
+	if (flags & I915_BO_ALLOC_CONTIGUOUS)
+		size = roundup_pow_of_two(size);
+
 	if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
 		return ERR_PTR(-E2BIG);
 
@@ -38,7 +41,7 @@ mock_object_create(struct intel_memory_region *mem,
 	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
 	i915_gem_object_set_cache_coherency(obj, cache_level);
 
-	i915_gem_object_init_memory_region(obj, mem);
+	i915_gem_object_init_memory_region(obj, mem, flags);
 
 	return obj;
 }
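As an aside on the io_mapping expectation mentioned in the commit
message: io_mapping_create_wc() takes a single (base, size) physical
range, which is only well-defined when the object is backed by one
contiguous block. The sketch below is purely hypothetical --
map_contiguous_object() and region_base are made-up names, and
i915_buddy_block_offset() is assumed from the buddy allocator
introduced earlier in this series:

	/*
	 * Hypothetical sketch: a contiguous object is backed by exactly
	 * one buddy block, so its physical range can be handed straight
	 * to the generic io_mapping interface.
	 */
	static struct io_mapping *
	map_contiguous_object(struct drm_i915_gem_object *obj,
			      resource_size_t region_base)
	{
		struct i915_buddy_block *block;

		block = list_first_entry(&obj->mm.blocks,
					 typeof(*block), link);

		return io_mapping_create_wc(region_base +
					    i915_buddy_block_offset(block),
					    obj->base.size);
	}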