--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -6,6 +6,7 @@
*/
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
+#include <linux/kmsan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
@@ -174,6 +175,7 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
ASM_CALL_CONSTRAINT \
: "0" (ptr), "i" (sizeof(*(ptr)))); \
(x) = (__force __typeof__(*(ptr))) __val_gu; \
+ kmsan_unpoison_shadow(&(x), sizeof(*(ptr))); \
__builtin_expect(__ret_gu, 0); \
})
@@ -248,6 +250,7 @@ extern void __put_user_8(void);
__chk_user_ptr(ptr); \
might_fault(); \
__pu_val = x; \
+ kmsan_check_memory(&(__pu_val), sizeof(*(ptr))); \
switch (sizeof(*(ptr))) { \
case 1: \
__put_user_x(1, __pu_val, ptr, __ret_pu); \
@@ -270,7 +273,9 @@ extern void __put_user_8(void);
#define __put_user_size(x, ptr, size, label) \
do { \
+ __typeof__(*(ptr)) __pus_val = x; \
__chk_user_ptr(ptr); \
+ kmsan_check_memory(&(__pus_val), size); \
switch (size) { \
case 1: \
__put_user_goto(x, ptr, "b", "b", "iq", label); \
@@ -295,7 +300,9 @@ do { \
*/
#define __put_user_size_ex(x, ptr, size) \
do { \
+ __typeof__(*(ptr)) __puse_val = x; \
__chk_user_ptr(ptr); \
+ kmsan_check_memory(&(__puse_val), size); \
switch (size) { \
case 1: \
__put_user_asm_ex(x, ptr, "b", "b", "iq"); \
@@ -363,6 +370,7 @@ do { \
default: \
(x) = __get_user_bad(); \
} \
+ kmsan_unpoison_shadow(&(x), size); \
} while (0)
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
@@ -413,6 +421,7 @@ do { \
default: \
(x) = __get_user_bad(); \
} \
+ kmsan_unpoison_shadow(&(x), size); \
} while (0)
#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
@@ -433,6 +442,7 @@ do { \
__typeof__(ptr) __pu_ptr = (ptr); \
__typeof__(size) __pu_size = (size); \
__uaccess_begin(); \
+ kmsan_check_memory(&(__pu_val), __pu_size); \
__put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_label); \
__pu_err = 0; \
__pu_label: \
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -4,6 +4,7 @@
/* Keep includes the same across arches. */
#include <linux/mm.h>
+#include <linux/kmsan-checks.h>
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
@@ -99,6 +100,7 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
#ifndef copy_to_user_page
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
+ kmsan_check_memory(src, len); \
memcpy(dst, src, len); \
flush_icache_user_range(vma, page, vaddr, len); \
} while (0)
@@ -106,7 +108,10 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
#ifndef copy_from_user_page
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
+ do { \
+ memcpy(dst, src, len); \
+ kmsan_unpoison_shadow(dst, len); \
+ } while (0)
#endif
#endif /* __ASM_CACHEFLUSH_H */
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -142,7 +142,11 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
- return unlikely(raw_copy_to_user(ptr, x, size)) ? -EFAULT : 0;
+ int n;
+
+ n = raw_copy_to_user(ptr, x, size);
+ kmsan_copy_to_user(ptr, x, size, n);
+ return unlikely(n) ? -EFAULT : 0;
}
#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k)
@@ -203,7 +207,11 @@ extern int __put_user_bad(void) __attribute__((noreturn));
#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
- return unlikely(raw_copy_from_user(x, ptr, size)) ? -EFAULT : 0;
+ int res;
+
+ res = raw_copy_from_user(x, ptr, size);
+ kmsan_unpoison_shadow(x, size - res);
+ return unlikely(res) ? -EFAULT : 0;
}
#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -5,6 +5,7 @@
#include <linux/instrumented.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
+#include <linux/kmsan-checks.h>
#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
@@ -58,18 +59,26 @@
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
+ unsigned long res;
+
instrument_copy_from_user(to, from, n);
check_object_size(to, n, false);
- return raw_copy_from_user(to, from, n);
+ res = raw_copy_from_user(to, from, n);
+ kmsan_unpoison_shadow(to, n - res);
+ return res;
}
static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
+ unsigned long res;
+
might_fault();
instrument_copy_from_user(to, from, n);
check_object_size(to, n, false);
- return raw_copy_from_user(to, from, n);
+ res = raw_copy_from_user(to, from, n);
+ kmsan_unpoison_shadow(to, n - res);
+ return res;
}
/**
@@ -88,18 +97,26 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
+ unsigned long res;
+
instrument_copy_to_user(to, from, n);
check_object_size(from, n, true);
- return raw_copy_to_user(to, from, n);
+ res = raw_copy_to_user(to, from, n);
+ kmsan_copy_to_user((const void *)to, from, n, res);
+ return res;
}
static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
+ unsigned long res;
+
might_fault();
instrument_copy_to_user(to, from, n);
check_object_size(from, n, true);
- return raw_copy_to_user(to, from, n);
+ res = raw_copy_to_user(to, from, n);
+ kmsan_copy_to_user((const void *)to, from, n, res);
+ return res;
}
#ifdef INLINE_COPY_FROM_USER
@@ -107,10 +124,12 @@ static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
+
might_fault();
if (likely(access_ok(from, n))) {
instrument_copy_from_user(to, from, n);
res = raw_copy_from_user(to, from, n);
+ kmsan_unpoison_shadow(to, n - res);
}
if (unlikely(res))
memset(to + (n - res), 0, res);
@@ -125,12 +144,15 @@ _copy_from_user(void *, const void __user *, unsigned long);
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
+ unsigned long res = n;
+
might_fault();
if (access_ok(to, n)) {
instrument_copy_to_user(to, from, n);
- n = raw_copy_to_user(to, from, n);
+ res = raw_copy_to_user(to, from, n);
+ kmsan_copy_to_user(to, from, n, res);
}
- return n;
+ return res;
}
#else
extern __must_check unsigned long
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -138,20 +138,26 @@
static int copyout(void __user *to, const void *from, size_t n)
{
+ int res = n;
+
if (access_ok(to, n)) {
instrument_copy_to_user(to, from, n);
- n = raw_copy_to_user(to, from, n);
+ res = raw_copy_to_user(to, from, n);
+ kmsan_copy_to_user(to, from, n, res);
}
- return n;
+ return res;
}
static int copyin(void *to, const void __user *from, size_t n)
{
+ size_t res = n;
+
if (access_ok(from, n)) {
instrument_copy_from_user(to, from, n);
- n = raw_copy_from_user(to, from, n);
+ res = raw_copy_from_user(to, from, n);
+ kmsan_unpoison_shadow(to, n - res);
}
- return n;
+ return res;
}
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
#include <linux/instrumented.h>
+#include <linux/kmsan-checks.h>
#include <linux/uaccess.h>
/* out-of-line parts */
@@ -13,6 +14,7 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n
if (likely(access_ok(from, n))) {
instrument_copy_from_user(to, from, n);
res = raw_copy_from_user(to, from, n);
+ kmsan_unpoison_shadow(to, n - res);
}
if (unlikely(res))
memset(to + (n - res), 0, res);
@@ -24,12 +26,14 @@ EXPORT_SYMBOL(_copy_from_user);
#ifndef INLINE_COPY_TO_USER
unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
{
+ unsigned long res = n;
might_fault();
if (likely(access_ok(to, n))) {
instrument_copy_to_user(to, from, n);
- n = raw_copy_to_user(to, from, n);
+ res = raw_copy_to_user(to, from, n);
+ kmsan_copy_to_user(to, from, n, res);
}
- return n;
+ return res;
}
EXPORT_SYMBOL(_copy_to_user);
#endif
Memory that is copied from userspace must be unpoisoned. Before copying
memory to userspace, check it and report an error if it contains
uninitialized bits.

Signed-off-by: Alexander Potapenko <glider@google.com>
To: Alexander Potapenko <glider@google.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Vegard Nossum <vegard.nossum@oracle.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Andrey Konovalov <andreyknvl@google.com>
Cc: linux-mm@kvack.org

---
v3:
 - fixed compilation errors reported by kbuild test bot

v4:
 - minor variable fixes as requested by Andrey Konovalov
 - simplified code around copy_to_user() hooks

v5:
 - fixed use of uninitialized value spotted by kbuild test robot <lkp@intel.com>

Change-Id: I38428b9c7d1909b8441dcec1749b080494a7af99
---
 arch/x86/include/asm/uaccess.h   | 10 ++++++++++
 include/asm-generic/cacheflush.h |  7 ++++++-
 include/asm-generic/uaccess.h    | 12 +++++++++--
 include/linux/uaccess.h          | 34 ++++++++++++++++++++++++++------
 lib/iov_iter.c                   | 14 +++++++++----
 lib/usercopy.c                   |  8 ++++++--
 6 files changed, 70 insertions(+), 15 deletions(-)
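For reference when reviewing the hunks above: the patch relies on three entry
points from <linux/kmsan-checks.h>. Their definitions belong to the KMSAN core
series, not to this patch, so the prototypes below are only a sketch of the
assumed interface, reconstructed from the call sites in the diff (no-op stubs
are expected for CONFIG_KMSAN=n builds):

/*
 * Sketch of the assumed <linux/kmsan-checks.h> interface (illustrative
 * only; the authoritative prototypes live in the KMSAN core patches).
 */

/* Mark @size bytes at @address as initialized, e.g. after a successful
 * copy from userspace. */
void kmsan_unpoison_shadow(const void *address, size_t size);

/* Report a KMSAN error if any of the @size bytes at @address are
 * uninitialized (poisoned). */
void kmsan_check_memory(const void *address, size_t size);

/*
 * Hook for copies to userspace: @to_copy bytes from the kernel buffer
 * @from were copied to @to, of which @left bytes were not copied
 * (raw_copy_to_user()'s return value).
 */
void kmsan_copy_to_user(const void *to, const void *from, size_t to_copy,
			size_t left);

The convention followed throughout the diff is that a copy from userspace
unpoisons only the bytes that were actually copied (n - res), while a copy to
userspace passes both the requested length and the number of uncopied bytes
returned by raw_copy_to_user(), so the hook knows which part of the kernel
buffer actually reached userspace.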