@@ -47,8 +47,17 @@
#define STRIDE (WIDTH*4)
#define HEIGHT 512
#define SIZE (HEIGHT*STRIDE)
-
-static bool uses_full_ppgtt(int fd)
+#define K (1024ull)
+#define M (1024ull * K)
+#define G (1024ull * M)
+#define LOCAL_EXEC_OBJECT_SUPPORTS_48BADDRESS (1<<3)
+/*
+ * 0 - No PPGTT
+ * 1 - Aliasing PPGTT
+ * 2 - Full PPGTT (32b)
+ * 3 - Full PPGTT (48b)
+ */
+static bool __uses_full_ppgtt(int fd, int min)
{
struct drm_i915_getparam gp;
int val = 0;
@@ -61,7 +70,17 @@ static bool uses_full_ppgtt(int fd)
return 0;
errno = 0;
- return val > 1;
+ return val >= min;
+}
+
+static bool uses_full_ppgtt(int fd)
+{
+ return __uses_full_ppgtt(fd, 2);
+}
+
+static bool uses_48b_full_ppgtt(int fd)
+{
+ return __uses_full_ppgtt(fd, 3);
}
static drm_intel_bo *create_bo(drm_intel_bufmgr *bufmgr,
@@ -216,7 +235,7 @@ static void surfaces_check(dri_bo **bo, int count, uint32_t expected)
}
}
-static uint64_t exec_and_get_offset(int fd, uint32_t batch)
+static uint64_t exec_and_get_offset(int fd, uint32_t batch, bool needs_32b_addr)
{
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 exec[1];
@@ -226,6 +245,7 @@ static uint64_t exec_and_get_offset(int fd, uint32_t batch)
memset(exec, 0, sizeof(exec));
exec[0].handle = batch;
+ exec[0].flags = (needs_32b_addr) ? 0 : LOCAL_EXEC_OBJECT_SUPPORTS_48BADDRESS;
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = (uintptr_t)exec;
@@ -252,7 +272,7 @@ static void flink_and_close(void)
fd2 = drm_open_any();
flinked_bo = gem_open(fd2, name);
- offset = exec_and_get_offset(fd2, flinked_bo);
+ offset = exec_and_get_offset(fd2, flinked_bo, false);
gem_sync(fd2, flinked_bo);
gem_close(fd2, flinked_bo);
@@ -260,7 +280,7 @@ static void flink_and_close(void)
* same size should get the same offset
*/
new_bo = gem_create(fd2, 4096);
- offset_new = exec_and_get_offset(fd2, new_bo);
+ offset_new = exec_and_get_offset(fd2, new_bo, false);
gem_close(fd2, new_bo);
igt_assert_eq(offset, offset_new);
@@ -270,6 +290,124 @@ static void flink_and_close(void)
close(fd2);
}
+static bool is_32b(uint64_t offset)
+{
+ return (offset < (1ULL << 32));
+}
+
+static void reusebo_and_compare_offsets(int fd,
+ uint64_t buf_size)
+{
+ uint32_t bo;
+ uint64_t offset, offset2;
+
+ bo = gem_create(fd, buf_size);
+ /* support 48b addresses */
+ offset = exec_and_get_offset(fd, bo, false);
+ gem_sync(fd, bo);
+
+ /* require 32b address */
+ offset2 = exec_and_get_offset(fd, bo, true);
+ gem_sync(fd, bo);
+
+ igt_assert(is_32b(offset2));
+ igt_assert_neq(offset, offset2);
+ gem_close(fd, bo);
+}
+
+static void flinkbo_and_compare_offsets(int fd, int fd2,
+ uint64_t buf_size)
+{
+ uint32_t bo, flinked_bo, name;
+ uint64_t offset, offset2;
+
+ bo = gem_create(fd, buf_size);
+ name = gem_flink(fd, bo);
+
+ /* support 48b addresses */
+ offset = exec_and_get_offset(fd, bo, false);
+ gem_sync(fd, bo);
+
+ /* require 32b address */
+ flinked_bo = gem_open(fd2, name);
+ offset2 = exec_and_get_offset(fd2, flinked_bo, true);
+ gem_sync(fd2, flinked_bo);
+
+ igt_assert(is_32b(offset2));
+ igt_assert_neq(offset, offset2);
+ gem_close(fd2, flinked_bo);
+ gem_close(fd, bo);
+}
+
+static void createbo_and_compare_offsets(int fd, int fd2,
+ uint64_t buf_size,
+ bool needs_32b, bool needs_32b2)
+{
+ uint32_t bo, bo2;
+ uint64_t offset, offset2;
+
+ bo = gem_create(fd, buf_size);
+ offset = exec_and_get_offset(fd, bo, needs_32b);
+ gem_sync(fd, bo);
+
+ bo2 = gem_create(fd2, buf_size);
+ offset2 = exec_and_get_offset(fd2, bo2, needs_32b2);
+ gem_sync(fd2, bo2);
+
+ if (needs_32b == needs_32b2)
+ igt_assert_eq(offset, offset2);
+ else
+ igt_assert_neq(offset, offset2);
+
+
+ /* lower PDPs of each PPGTT are reserved for the objects
+ * requiring this workaround
+ */
+ if (needs_32b)
+ igt_assert(is_32b(offset));
+
+ if (needs_32b2)
+ igt_assert(is_32b(offset2));
+
+ gem_close(fd, bo);
+ gem_close(fd2, bo2);
+}
+
+static void wa_32b_offset_test(void)
+{
+ int fd, fd2, gb;
+
+ fd = drm_open_any();
+ igt_require(uses_48b_full_ppgtt(fd));
+
+ intel_require_memory(2, 3*G, CHECK_RAM);
+
+ fd2 = drm_open_any();
+
+ for (gb = 1; gb < 4; gb++) {
+ /* same bo test */
+ reusebo_and_compare_offsets(fd, gb*G);
+
+
+ /* allow full addr range */
+ createbo_and_compare_offsets(fd, fd2, gb*G, false, false);
+
+ /* limit 32b addr range */
+ createbo_and_compare_offsets(fd, fd2, gb*G, true, true);
+
+ /* mixed */
+ createbo_and_compare_offsets(fd, fd2, gb*G, false, true);
+ createbo_and_compare_offsets(fd, fd2, gb*G, true, false);
+
+ /* flink obj */
+ flinkbo_and_compare_offsets(fd, fd2, gb*G);
+ }
+
+ close(fd);
+ close(fd2);
+}
+
+
#define N_CHILD 8
int main(int argc, char **argv)
{
@@ -302,5 +440,8 @@ int main(int argc, char **argv)
igt_subtest("flink-and-close-vma-leak")
flink_and_close();
+ igt_subtest("wa-32b-offset-test")
+ wa_32b_offset_test();
+
igt_exit();
}
Test the EXEC_OBJECT_SUPPORTS_48BADDRESS flag, which allows an object to
be placed in the address space above the 32-bit boundary. The driver will
try to use the lower PDPs of each PPGTT for the objects requiring
Wa32bitGeneralStateOffset or Wa32bitInstructionBaseOffset.

v2: Add flink cases (suggested by Daniel Vetter).
v3: 48-bit support flag moved to exec_object.

Signed-off-by: Michel Thierry <michel.thierry@intel.com>
---
 tests/gem_ppgtt.c | 153 +++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 147 insertions(+), 6 deletions(-)
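For anyone who wants to poke at the new bit outside the IGT helpers, here is a
minimal standalone sketch (not part of the patch) of how an object opts in to
the full 48-bit range on execbuf. It assumes the uapi bit lands as (1 << 3),
mirrored by the same LOCAL_ define used in the test, that a batch consisting of
only MI_BATCH_BUFFER_END is sufficient, and that /dev/dri/card0 is the right
node; error handling is kept to a bare minimum.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

#define LOCAL_EXEC_OBJECT_SUPPORTS_48BADDRESS (1 << 3) /* assumed uapi value */
#define MI_BATCH_BUFFER_END (0xA << 23)

int main(void)
{
	const uint32_t batch[2] = { MI_BATCH_BUFFER_END, 0 };
	struct drm_i915_gem_create create = { .size = 4096 };
	struct drm_i915_gem_pwrite pwrite;
	struct drm_i915_gem_exec_object2 obj;
	struct drm_i915_gem_execbuffer2 execbuf;
	int fd = open("/dev/dri/card0", O_RDWR); /* assumed device node */

	if (fd < 0 || ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create))
		return 1;

	/* upload a batch containing only MI_BATCH_BUFFER_END */
	memset(&pwrite, 0, sizeof(pwrite));
	pwrite.handle = create.handle;
	pwrite.size = sizeof(batch);
	pwrite.data_ptr = (uintptr_t)batch;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite))
		return 1;

	memset(&obj, 0, sizeof(obj));
	obj.handle = create.handle;
	/* opt in: the kernel may place this object above the 4GB boundary */
	obj.flags = LOCAL_EXEC_OBJECT_SUPPORTS_48BADDRESS;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)&obj;
	execbuf.buffer_count = 1;
	execbuf.batch_len = sizeof(batch);
	if (ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf))
		return 1;

	/* the kernel writes the presumed GPU offset back into the exec object */
	printf("presumed offset: 0x%llx\n", (unsigned long long)obj.offset);
	return 0;
}

Without the flag the write-back offset is expected to stay below 1ULL << 32,
which is exactly what the new subtest asserts via is_32b().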