
[i-g-t] i915/gem_mmap_offset: Partial mmap and munmap

Message ID: 20240412004255.288046-1-andi.shyti@linux.intel.com (mailing list archive)
State: New, archived
Series: [i-g-t] i915/gem_mmap_offset: Partial mmap and munmap

Commit Message

Andi Shyti April 12, 2024, 12:42 a.m. UTC
From: Chris Wilson <chris.p.wilson@linux.intel.com>

Based on a test case developed by Lionel Landwerlin, this exercises
the creation of partial mmaps in two ways: directly, with an mmap()
that covers only a portion of the object, and indirectly, by mapping
the whole object and then munmap()ing part of it to achieve the same.

Signed-off-by: Chris Wilson <chris.p.wilson@linux.intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
---
 tests/intel/gem_mmap_offset.c | 84 +++++++++++++++++++++++++++++++++++
 1 file changed, 84 insertions(+)
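
For reference, the two approaches the test exercises can be sketched
against the raw i915 uapi as follows. This is a minimal standalone
illustration, not part of the patch; it assumes an i915 render node at
/dev/dri/renderD128 and that the I915_MMAP_OFFSET_WB mapping type is
supported on the platform, and it omits error handling for brevity.

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <drm/i915_drm.h>

#define OBJ_SIZE (2u << 20)	/* 2MiB object, as in the test */
#define PAGE_SZ  (4u << 10)	/* 4KiB page */

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);
	struct drm_i915_gem_create create = { .size = OBJ_SIZE };
	struct drm_i915_gem_mmap_offset mmo = { .flags = I915_MMAP_OFFSET_WB };
	struct drm_gem_close gclose = {};
	uint8_t *part, *whole;

	/* create a 2MiB GEM object and look up its fake mmap offset */
	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
	mmo.handle = create.handle;
	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mmo);

	/* (a) direct partial mmap: map only the last page of the object */
	part = mmap(NULL, PAGE_SZ, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, mmo.offset + OBJ_SIZE - PAGE_SZ);
	memset(part, 0xcc, PAGE_SZ);
	munmap(part, PAGE_SZ);

	/* (b) partial munmap: map the whole object, drop all but the last
	 * page, and keep using the surviving page */
	whole = mmap(NULL, OBJ_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
		     fd, mmo.offset);
	munmap(whole, OBJ_SIZE - PAGE_SZ);
	memset(whole + OBJ_SIZE - PAGE_SZ, 0xdd, PAGE_SZ);
	munmap(whole + OBJ_SIZE - PAGE_SZ, PAGE_SZ);

	gclose.handle = create.handle;
	ioctl(fd, DRM_IOCTL_GEM_CLOSE, &gclose);
	close(fd);

	return 0;
}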

Comments

Nirmoy Das April 17, 2024, 1:10 p.m. UTC | #1
On 4/12/2024 2:42 AM, Andi Shyti wrote:
> From: Chris Wilson <chris.p.wilson@linux.intel.com>
>
> Based on a test case developed by Lionel Landwerlin, this exercises
> the creation of partial mmaps in two ways: directly, with an mmap()
> that covers only a portion of the object, and indirectly, by mapping
> the whole object and then munmap()ing part of it to achieve the same.
>
> Signed-off-by: Chris Wilson <chris.p.wilson@linux.intel.com>
> Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
> ---
>   tests/intel/gem_mmap_offset.c | 84 +++++++++++++++++++++++++++++++++++
>   1 file changed, 84 insertions(+)
>
> diff --git a/tests/intel/gem_mmap_offset.c b/tests/intel/gem_mmap_offset.c
> index 95d2158ca88f..0ba2f9591f85 100644
> --- a/tests/intel/gem_mmap_offset.c
> +++ b/tests/intel/gem_mmap_offset.c
> @@ -56,6 +56,8 @@
>    * SUBTEST: isolation
>    * SUBTEST: oob-read
>    * SUBTEST: open-flood
> + * SUBTEST: partial-mmap
> + * SUBTEST: partial-unmap
>    * SUBTEST: perf
>    * SUBTEST: pf-nonblock
>    * SUBTEST: ptrace
> @@ -874,6 +876,83 @@ static void blt_coherency(int i915)
>   	igt_assert_f(compare_ok, "Problem with coherency, flush is too late\n");
>   }
>   
> +static void partial_mmap(int i915)
> +{
> +	uint32_t handle;
> +
> +	handle = gem_create(i915, SZ_2M);
> +
> +	for_each_mmap_offset_type(i915, t) {
> +		struct drm_i915_gem_mmap_offset arg = {
> +			.handle = handle,
> +			.flags = t->type,
> +		};
> +		uint32_t *ptr;
> +
> +		if (mmap_offset_ioctl(i915, &arg))
> +			continue;
> +
> +		ptr = mmap(0, SZ_4K, PROT_WRITE, MAP_SHARED, i915, arg.offset);
> +		if (ptr == MAP_FAILED)
> +			continue;
> +
> +		memset(ptr, 0xcc, SZ_4K);
> +		munmap(ptr, SZ_4K);
> +
> +		ptr = mmap(0, SZ_4K, PROT_READ, MAP_SHARED, i915, arg.offset + SZ_2M - SZ_4K);
> +		igt_assert(ptr != MAP_FAILED);
> +
> +		for (uint32_t i = 0; i < SZ_4K / sizeof(uint32_t); i++)
> +			igt_assert_eq_u32(ptr[i], 0);
> +
> +		munmap(ptr, SZ_4K);
> +	}
> +
> +	gem_close(i915, handle);
> +}
> +
> +static void partial_unmap(int i915)
> +{
> +	uint32_t handle;
> +
> +	handle = gem_create(i915, SZ_2M);
> +
> +	for_each_mmap_offset_type(i915, t) {
> +		uint8_t *ptr_a, *ptr_b;
> +
> +		/* mmap the same GEM BO twice */
> +		ptr_a = __mmap_offset(i915, handle, 0, SZ_2M,
> +				PROT_READ | PROT_WRITE,
> +				t->type);
> +		if (!ptr_a)
> +			continue;
> +
> +		ptr_b = __mmap_offset(i915, handle, 0, SZ_2M,
> +				PROT_READ | PROT_WRITE,
> +				t->type);
> +		if (!ptr_b)
> +			continue;
> +
> +		/* unmap all of the first mapping except the last 4k */
> +		munmap(ptr_a, SZ_2M - SZ_4K);
> +
> +		/* memset that remaining 4k with 0xcc */
> +		memset(ptr_a + SZ_2M - SZ_4K, 0xcc, SZ_4K);
> +
> +		/* memset the first page of the 2MB mapping with 0xdd */
> +		memset(ptr_b, 0xdd, SZ_4K);
> +
> +		for (uint32_t i = 0; i < SZ_4K; i++)
> +			igt_assert_eq_u32(ptr_a[SZ_2M - SZ_4K + i], 0xcc);
> +
> +		munmap(ptr_a + SZ_2M - SZ_4K, SZ_4K);
> +		memset(ptr_b, 0, SZ_2M);

Do we need this extra memset()? (A sketch without it follows after the quoted patch.) Otherwise

Reviewed-by: Nirmoy Das <nirmoy.das@intel.com>


> +		munmap(ptr_b, SZ_2M);
> +	}
> +
> +	gem_close(i915, handle);
> +}
> +
>   static int mmap_gtt_version(int i915)
>   {
>   	int gtt_version = -1;
> @@ -931,6 +1010,11 @@ igt_main
>   	igt_subtest_f("open-flood")
>   		open_flood(i915, 20);
>   
> +	igt_subtest_f("partial-mmap")
> +		partial_mmap(i915);
> +	igt_subtest_f("partial-unmap")
> +		partial_unmap(i915);
> +
>   	igt_subtest_with_dynamic("clear") {
>   		for_each_memory_region(r, i915) {
>   			igt_dynamic_f("%s", r->name)
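
To make the question above concrete: with the extra memset() dropped,
the tail of the partial_unmap() loop would simply be (a sketch based on
the hunk above, assuming the zeroing is only scrubbing the pages before
teardown and is not needed for the assertion):

		for (uint32_t i = 0; i < SZ_4K; i++)
			igt_assert_eq_u32(ptr_a[SZ_2M - SZ_4K + i], 0xcc);

		/* tear down both views; no re-zeroing before unmapping */
		munmap(ptr_a + SZ_2M - SZ_4K, SZ_4K);
		munmap(ptr_b, SZ_2M);
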
Kamil Konieczny April 17, 2024, 4:11 p.m. UTC | #2
Hi Andi,
On 2024-04-12 at 02:42:55 +0200, Andi Shyti wrote:
> From: Chris Wilson <chris.p.wilson@linux.intel.com>
> 
> Based on a test case developed by Lionel Landwerlin, this exercises
> the creation of partial mmaps in two ways: directly, with an mmap()
> that covers only a portion of the object, and indirectly, by mapping
> the whole object and then munmap()ing part of it to achieve the same.
> 
> Signed-off-by: Chris Wilson <chris.p.wilson@linux.intel.com>
> Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
> ---
>  tests/intel/gem_mmap_offset.c | 84 +++++++++++++++++++++++++++++++++++
>  1 file changed, 84 insertions(+)
> 
> diff --git a/tests/intel/gem_mmap_offset.c b/tests/intel/gem_mmap_offset.c
> index 95d2158ca88f..0ba2f9591f85 100644
> --- a/tests/intel/gem_mmap_offset.c
> +++ b/tests/intel/gem_mmap_offset.c
> @@ -56,6 +56,8 @@
>   * SUBTEST: isolation
>   * SUBTEST: oob-read
>   * SUBTEST: open-flood
> + * SUBTEST: partial-mmap
> + * SUBTEST: partial-unmap
>   * SUBTEST: perf
>   * SUBTEST: pf-nonblock
>   * SUBTEST: ptrace
> @@ -874,6 +876,83 @@ static void blt_coherency(int i915)
>  	igt_assert_f(compare_ok, "Problem with coherency, flush is too late\n");
>  }
>  
> +static void partial_mmap(int i915)
> +{
> +	uint32_t handle;
> +
> +	handle = gem_create(i915, SZ_2M);
> +
> +	for_each_mmap_offset_type(i915, t) {
> +		struct drm_i915_gem_mmap_offset arg = {
> +			.handle = handle,
> +			.flags = t->type,
> +		};
> +		uint32_t *ptr;
> +
> +		if (mmap_offset_ioctl(i915, &arg))
> +			continue;
> +
> +		ptr = mmap(0, SZ_4K, PROT_WRITE, MAP_SHARED, i915, arg.offset);
> +		if (ptr == MAP_FAILED)
> +			continue;
> +
> +		memset(ptr, 0xcc, SZ_4K);
> +		munmap(ptr, SZ_4K);
> +
> +		ptr = mmap(0, SZ_4K, PROT_READ, MAP_SHARED, i915, arg.offset + SZ_2M - SZ_4K);
> +		igt_assert(ptr != MAP_FAILED);
> +
> +		for (uint32_t i = 0; i < SZ_4K / sizeof(uint32_t); i++)
> +			igt_assert_eq_u32(ptr[i], 0);
> +
> +		munmap(ptr, SZ_4K);
> +	}
> +
> +	gem_close(i915, handle);
> +}
> +
> +static void partial_unmap(int i915)
> +{
> +	uint32_t handle;
> +
> +	handle = gem_create(i915, SZ_2M);
> +
> +	for_each_mmap_offset_type(i915, t) {
> +		uint8_t *ptr_a, *ptr_b;
> +
> +		/* mmap the same GEM BO twice */
> +		ptr_a = __mmap_offset(i915, handle, 0, SZ_2M,
> +				PROT_READ | PROT_WRITE,
> +				t->type);
> +		if (!ptr_a)
> +			continue;
> +
> +		ptr_b = __mmap_offset(i915, handle, 0, SZ_2M,
> +				PROT_READ | PROT_WRITE,
> +				t->type);
> +		if (!ptr_b)
> +			continue;

Nit here: before the continue, ptr_a should be unmapped (a sketch of the fix follows after the quoted patch).

Regards,
Kamil

> +
> +		/* unmap all of the first mapping except the last 4k */
> +		munmap(ptr_a, SZ_2M - SZ_4K);
> +
> +		/* memset that remaining 4k with 0xcc */
> +		memset(ptr_a + SZ_2M - SZ_4K, 0xcc, SZ_4K);
> +
> +		/* memset the first page of the 2MB mapping with 0xdd */
> +		memset(ptr_b, 0xdd, SZ_4K);
> +
> +		for (uint32_t i = 0; i < SZ_4K; i++)
> +			igt_assert_eq_u32(ptr_a[SZ_2M - SZ_4K + i], 0xcc);
> +
> +		munmap(ptr_a + SZ_2M - SZ_4K, SZ_4K);
> +		memset(ptr_b, 0, SZ_2M);
> +		munmap(ptr_b, SZ_2M);
> +	}
> +
> +	gem_close(i915, handle);
> +}
> +
>  static int mmap_gtt_version(int i915)
>  {
>  	int gtt_version = -1;
> @@ -931,6 +1010,11 @@ igt_main
>  	igt_subtest_f("open-flood")
>  		open_flood(i915, 20);
>  
> +	igt_subtest_f("partial-mmap")
> +		partial_mmap(i915);
> +	igt_subtest_f("partial-unmap")
> +		partial_unmap(i915);
> +
>  	igt_subtest_with_dynamic("clear") {
>  		for_each_memory_region(r, i915) {
>  			igt_dynamic_f("%s", r->name)
> -- 
> 2.43.0
>
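
To make the nit above concrete: the failure path for the second mapping
in partial_unmap() could release the first view before skipping to the
next type (a sketch of the suggested change, not part of the posted
patch):

		ptr_b = __mmap_offset(i915, handle, 0, SZ_2M,
				      PROT_READ | PROT_WRITE,
				      t->type);
		if (!ptr_b) {
			/* don't leak the first view when the second fails */
			munmap(ptr_a, SZ_2M);
			continue;
		}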

Patch

diff --git a/tests/intel/gem_mmap_offset.c b/tests/intel/gem_mmap_offset.c
index 95d2158ca88f..0ba2f9591f85 100644
--- a/tests/intel/gem_mmap_offset.c
+++ b/tests/intel/gem_mmap_offset.c
@@ -56,6 +56,8 @@ 
  * SUBTEST: isolation
  * SUBTEST: oob-read
  * SUBTEST: open-flood
+ * SUBTEST: partial-mmap
+ * SUBTEST: partial-unmap
  * SUBTEST: perf
  * SUBTEST: pf-nonblock
  * SUBTEST: ptrace
@@ -874,6 +876,83 @@  static void blt_coherency(int i915)
 	igt_assert_f(compare_ok, "Problem with coherency, flush is too late\n");
 }
 
+static void partial_mmap(int i915)
+{
+	uint32_t handle;
+
+	handle = gem_create(i915, SZ_2M);
+
+	for_each_mmap_offset_type(i915, t) {
+		struct drm_i915_gem_mmap_offset arg = {
+			.handle = handle,
+			.flags = t->type,
+		};
+		uint32_t *ptr;
+
+		if (mmap_offset_ioctl(i915, &arg))
+			continue;
+
+		ptr = mmap(0, SZ_4K, PROT_WRITE, MAP_SHARED, i915, arg.offset);
+		if (ptr == MAP_FAILED)
+			continue;
+
+		memset(ptr, 0xcc, SZ_4K);
+		munmap(ptr, SZ_4K);
+
+		ptr = mmap(0, SZ_4K, PROT_READ, MAP_SHARED, i915, arg.offset + SZ_2M - SZ_4K);
+		igt_assert(ptr != MAP_FAILED);
+
+		for (uint32_t i = 0; i < SZ_4K / sizeof(uint32_t); i++)
+			igt_assert_eq_u32(ptr[i], 0);
+
+		munmap(ptr, SZ_4K);
+	}
+
+	gem_close(i915, handle);
+}
+
+static void partial_unmap(int i915)
+{
+	uint32_t handle;
+
+	handle = gem_create(i915, SZ_2M);
+
+	for_each_mmap_offset_type(i915, t) {
+		uint8_t *ptr_a, *ptr_b;
+
+		/* mmap the same GEM BO twice */
+		ptr_a = __mmap_offset(i915, handle, 0, SZ_2M,
+				PROT_READ | PROT_WRITE,
+				t->type);
+		if (!ptr_a)
+			continue;
+
+		ptr_b = __mmap_offset(i915, handle, 0, SZ_2M,
+				PROT_READ | PROT_WRITE,
+				t->type);
+		if (!ptr_b)
+			continue;
+
+		/* unmap all of the first mapping except the last 4k */
+		munmap(ptr_a, SZ_2M - SZ_4K);
+
+		/* memset that remaining 4k with 0xcc */
+		memset(ptr_a + SZ_2M - SZ_4K, 0xcc, SZ_4K);
+
+		/* memset the first page of the 2MB mapping with 0xdd */
+		memset(ptr_b, 0xdd, SZ_4K);
+
+		for (uint32_t i = 0; i < SZ_4K; i++)
+			igt_assert_eq_u32(ptr_a[SZ_2M - SZ_4K + i], 0xcc);
+
+		munmap(ptr_a + SZ_2M - SZ_4K, SZ_4K);
+		memset(ptr_b, 0, SZ_2M);
+		munmap(ptr_b, SZ_2M);
+	}
+
+	gem_close(i915, handle);
+}
+
 static int mmap_gtt_version(int i915)
 {
 	int gtt_version = -1;
@@ -931,6 +1010,11 @@  igt_main
 	igt_subtest_f("open-flood")
 		open_flood(i915, 20);
 
+	igt_subtest_f("partial-mmap")
+		partial_mmap(i915);
+	igt_subtest_f("partial-unmap")
+		partial_unmap(i915);
+
 	igt_subtest_with_dynamic("clear") {
 		for_each_memory_region(r, i915) {
 			igt_dynamic_f("%s", r->name)