
kmsan: fix memcpy tests

Message ID 20221205132558.63484-1-glider@google.com
State New
Series kmsan: fix memcpy tests

Commit Message

Alexander Potapenko Dec. 5, 2022, 1:25 p.m. UTC
Recent Clang changes may cause it to delete calls to memcpy() if the
source is an uninitialized volatile local.
This happens because passing a pointer to a volatile local into memcpy()
discards the volatile qualifier, giving the compiler a free hand to
optimize the memcpy() call away.
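
To illustrate, a minimal standalone sketch of the problematic pattern
(hypothetical code, not part of the patch):

	#include <string.h>

	int example(void)
	{
		volatile int uninit_src;	/* deliberately uninitialized */
		int dst;

		/*
		 * memcpy() takes plain void pointers, so the casts silently
		 * drop the volatile qualifier. The compiler is then free to
		 * treat the source as an ordinary local and elide the call.
		 */
		memcpy((void *)&dst, (void *)&uninit_src, sizeof(uninit_src));
		return dst;
	}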

To outsmart the compiler, we call __msan_memcpy() instead of memcpy()
in test_memcpy_aligned_to_aligned(), test_memcpy_aligned_to_unaligned()
and test_memcpy_aligned_to_unaligned2(), because it's the behavior of
__msan_memcpy() we are testing here anyway.

Signed-off-by: Alexander Potapenko <glider@google.com>
---
 mm/kmsan/kmsan_test.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

Comments

Marco Elver Dec. 5, 2022, 2:11 p.m. UTC | #1
On Mon, 5 Dec 2022 at 14:26, Alexander Potapenko <glider@google.com> wrote:
>
> Recent Clang changes may cause it to delete calls to memcpy() if the
> source is an uninitialized volatile local.
> This happens because passing a pointer to a volatile local into memcpy()
> discards the volatile qualifier, giving the compiler a free hand to
> optimize the memcpy() call away.
>
> To outsmart the compiler, we call __msan_memcpy() instead of memcpy()
> in test_memcpy_aligned_to_aligned(), test_memcpy_aligned_to_unaligned()
> and test_memcpy_aligned_to_unaligned2(), because it's the behavior of
> __msan_memcpy() we are testing here anyway.
>
> Signed-off-by: Alexander Potapenko <glider@google.com>

It might be nice to retain the memcpy() calls somehow, as that tests
end-to-end that the compiler does the right thing here, i.e. replacing
the memcpy() calls with instrumented versions.

Does OPTIMIZER_HIDE_VAR() help? This should prevent the compiler from
seeing it's uninitialized.
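
For reference, OPTIMIZER_HIDE_VAR() is defined in include/linux/compiler.h,
roughly as:

	#define OPTIMIZER_HIDE_VAR(var)					\
		__asm__ ("" : "=r" (var) : "0" (var))

The empty asm takes the variable as both input and output, so the compiler
must assume its value may have changed and can no longer reason about it.
Applied to the source pointer (or to a copy of the value), that should be
enough to keep the memcpy() calls alive.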

> ---
>  mm/kmsan/kmsan_test.c | 16 +++++++++++++---
>  1 file changed, 13 insertions(+), 3 deletions(-)
>
> diff --git a/mm/kmsan/kmsan_test.c b/mm/kmsan/kmsan_test.c
> index 9a29ea2dbfb9b..8e4f206a900ae 100644
> --- a/mm/kmsan/kmsan_test.c
> +++ b/mm/kmsan/kmsan_test.c
> @@ -406,6 +406,16 @@ static void test_printk(struct kunit *test)
>         KUNIT_EXPECT_TRUE(test, report_matches(&expect));
>  }
>
> +/*
> + * The test_memcpy_xxx tests below should be calling memcpy() to copy an
> + * uninitialized value from a volatile int. But such calls discard the volatile
> + * qualifier, so Clang may optimize them away, breaking the tests.
> + * Because the KMSAN instrumentation pass would just replace memcpy() with
> + * __msan_memcpy(), do that explicitly to trick the optimizer into preserving
> + * the calls.
> + */
> +void *__msan_memcpy(void *, const void *, size_t);
> +
>  /*
>   * Test case: ensure that memcpy() correctly copies uninitialized values between
>   * aligned `src` and `dst`.
> @@ -419,7 +429,7 @@ static void test_memcpy_aligned_to_aligned(struct kunit *test)
>         kunit_info(
>                 test,
>                 "memcpy()ing aligned uninit src to aligned dst (UMR report)\n");
> -       memcpy((void *)&dst, (void *)&uninit_src, sizeof(uninit_src));
> +       __msan_memcpy((void *)&dst, (void *)&uninit_src, sizeof(uninit_src));
>         kmsan_check_memory((void *)&dst, sizeof(dst));
>         KUNIT_EXPECT_TRUE(test, report_matches(&expect));
>  }
> @@ -441,7 +451,7 @@ static void test_memcpy_aligned_to_unaligned(struct kunit *test)
>         kunit_info(
>                 test,
>                 "memcpy()ing aligned uninit src to unaligned dst (UMR report)\n");
> -       memcpy((void *)&dst[1], (void *)&uninit_src, sizeof(uninit_src));
> +       __msan_memcpy((void *)&dst[1], (void *)&uninit_src, sizeof(uninit_src));
>         kmsan_check_memory((void *)dst, 4);
>         KUNIT_EXPECT_TRUE(test, report_matches(&expect));
>  }
> @@ -464,7 +474,7 @@ static void test_memcpy_aligned_to_unaligned2(struct kunit *test)
>         kunit_info(
>                 test,
>                 "memcpy()ing aligned uninit src to unaligned dst - part 2 (UMR report)\n");
> -       memcpy((void *)&dst[1], (void *)&uninit_src, sizeof(uninit_src));
> +       __msan_memcpy((void *)&dst[1], (void *)&uninit_src, sizeof(uninit_src));
>         kmsan_check_memory((void *)&dst[4], sizeof(uninit_src));
>         KUNIT_EXPECT_TRUE(test, report_matches(&expect));
>  }
> --
> 2.39.0.rc0.267.gcb52ba06e7-goog
>
Alexander Potapenko Dec. 5, 2022, 2:49 p.m. UTC | #2
On Mon, Dec 5, 2022 at 3:12 PM Marco Elver <elver@google.com> wrote:

> On Mon, 5 Dec 2022 at 14:26, Alexander Potapenko <glider@google.com>
> wrote:
> >
> > Recent Clang changes may cause it to delete calls to memcpy() if the
> > source is an uninitialized volatile local.
> > This happens because passing a pointer to a volatile local into memcpy()
> > discards the volatile qualifier, giving the compiler a free hand to
> > optimize the memcpy() call away.
> >
> > To outsmart the compiler, we call __msan_memcpy() instead of memcpy()
> > in test_memcpy_aligned_to_aligned(), test_memcpy_aligned_to_unaligned()
> > and test_memcpy_aligned_to_unaligned2(), because it's the behavior of
> > __msan_memcpy() we are testing here anyway.
> >
> > Signed-off-by: Alexander Potapenko <glider@google.com>
>
> It might be nice to retain the memcpy() calls somehow, as that tests
> end-to-end that the compiler does the right thing here, i.e. replacing
> the memcpy() calls with instrumented versions.
>
> Does OPTIMIZER_HIDE_VAR() help? This should prevent the compiler from
> seeing it's uninitialized.

It indeed does, thanks!
Let me send a v2.
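
For the record, one possible shape for such a test (a rough sketch of the
idea, assuming OPTIMIZER_HIDE_VAR() on the source pointer; the actual v2
may differ):

	static void test_memcpy_aligned_to_aligned(struct kunit *test)
	{
		EXPECTATION_UNINIT_VALUE_FN(expect,
					    "test_memcpy_aligned_to_aligned");
		volatile int uninit_src;
		volatile int *src = &uninit_src;
		int dst = 0;

		kunit_info(
			test,
			"memcpy()ing aligned uninit src to aligned dst (UMR report)\n");
		/* Hide the pointer so the compiler cannot prove the copy is dead. */
		OPTIMIZER_HIDE_VAR(src);
		memcpy((void *)&dst, (void *)src, sizeof(uninit_src));
		kmsan_check_memory((void *)&dst, sizeof(dst));
		KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	}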

Patch

diff --git a/mm/kmsan/kmsan_test.c b/mm/kmsan/kmsan_test.c
index 9a29ea2dbfb9b..8e4f206a900ae 100644
--- a/mm/kmsan/kmsan_test.c
+++ b/mm/kmsan/kmsan_test.c
@@ -406,6 +406,16 @@ static void test_printk(struct kunit *test)
 	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
 }
 
+/*
+ * The test_memcpy_xxx tests below should be calling memcpy() to copy an
+ * uninitialized value from a volatile int. But such calls discard the volatile
+ * qualifier, so Clang may optimize them away, breaking the tests.
+ * Because the KMSAN instrumentation pass would just replace memcpy() with
+ * __msan_memcpy(), do that explicitly to trick the optimizer into preserving
+ * the calls.
+ */
+void *__msan_memcpy(void *, const void *, size_t);
+
 /*
  * Test case: ensure that memcpy() correctly copies uninitialized values between
  * aligned `src` and `dst`.
@@ -419,7 +429,7 @@ static void test_memcpy_aligned_to_aligned(struct kunit *test)
 	kunit_info(
 		test,
 		"memcpy()ing aligned uninit src to aligned dst (UMR report)\n");
-	memcpy((void *)&dst, (void *)&uninit_src, sizeof(uninit_src));
+	__msan_memcpy((void *)&dst, (void *)&uninit_src, sizeof(uninit_src));
 	kmsan_check_memory((void *)&dst, sizeof(dst));
 	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
 }
@@ -441,7 +451,7 @@ static void test_memcpy_aligned_to_unaligned(struct kunit *test)
 	kunit_info(
 		test,
 		"memcpy()ing aligned uninit src to unaligned dst (UMR report)\n");
-	memcpy((void *)&dst[1], (void *)&uninit_src, sizeof(uninit_src));
+	__msan_memcpy((void *)&dst[1], (void *)&uninit_src, sizeof(uninit_src));
 	kmsan_check_memory((void *)dst, 4);
 	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
 }
@@ -464,7 +474,7 @@ static void test_memcpy_aligned_to_unaligned2(struct kunit *test)
 	kunit_info(
 		test,
 		"memcpy()ing aligned uninit src to unaligned dst - part 2 (UMR report)\n");
-	memcpy((void *)&dst[1], (void *)&uninit_src, sizeof(uninit_src));
+	__msan_memcpy((void *)&dst[1], (void *)&uninit_src, sizeof(uninit_src));
 	kmsan_check_memory((void *)&dst[4], sizeof(uninit_src));
 	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
 }