@@ -4392,7 +4392,9 @@ module_init(slab_proc_init);
#ifdef CONFIG_HARDENED_USERCOPY
/*
- * Rejects objects that are incorrectly sized.
+ * Rejects incorrectly sized objects and objects that are to be copied
+ * to/from userspace but do not fall entirely within the containing slab
+ * cache's usercopy region.
*
- * Returns NULL if check passes, otherwise const char * to name of cache
- * to indicate an error.
+ * Returns 0 if the check passes, otherwise the return value of
+ * report_usercopy() to indicate an error.
@@ -4412,11 +4414,29 @@ int __check_heap_object(const void *ptr, unsigned long n, struct page *page,
/* Find offset within object. */
offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
- /* Allow address range falling entirely within object size. */
- if (offset <= cachep->object_size && n <= cachep->object_size - offset)
- return 0;
+ /* Make sure object falls entirely within cache's usercopy region. */
+ if (offset < cachep->useroffset ||
+ offset - cachep->useroffset > cachep->usersize ||
+ n > cachep->useroffset - offset + cachep->usersize) {
+ /*
+ * If the copy is still within the allocated object, produce
+ * a warning instead of rejecting the copy. This is intended
+ * to be a temporary method to find any missing usercopy
+ * whitelists.
+ */
+ if (offset <= cachep->object_size &&
+ n <= cachep->object_size - offset) {
+ WARN_ONCE(1, "unexpected usercopy %s with bad or missing whitelist for SLAB object '%s' (offset %lu, size %lu)",
+ to_user ? "exposure" : "overwrite",
+ cachep->name, offset, n);
+ return 0;
+ }
- return report_usercopy("SLAB object", cachep->name, to_user, offset, n);
+ return report_usercopy("SLAB object", cachep->name, to_user,
+ offset, n);
+ }
+
+ return 0;
}
#endif /* CONFIG_HARDENED_USERCOPY */
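
For clarity, here is a small userspace sketch of the three-part bounds check added above. The struct name, field subset, and test values are invented for illustration; it models only the arithmetic, not the kernel types:

#include <stdio.h>

/* Stand-in for the struct kmem_cache fields the check reads. */
struct cache_stub {
	unsigned long useroffset;	/* start of usercopy region */
	unsigned long usersize;		/* length of usercopy region */
};

/*
 * Mirrors the region check above: the copied range [offset, offset + n)
 * must fall entirely inside [useroffset, useroffset + usersize).  The
 * middle test also keeps the unsigned subtraction in the last test from
 * wrapping when offset lies far past the region.
 */
static int region_check(const struct cache_stub *c,
			unsigned long offset, unsigned long n)
{
	if (offset < c->useroffset ||
	    offset - c->useroffset > c->usersize ||
	    n > c->useroffset - offset + c->usersize)
		return -1;	/* not entirely within the region */
	return 0;
}

int main(void)
{
	struct cache_stub c = { .useroffset = 16, .usersize = 32 };

	printf("%d\n", region_check(&c, 16, 32));	/* 0: exact fit       */
	printf("%d\n", region_check(&c, 24, 32));	/* -1: overruns end   */
	printf("%d\n", region_check(&c, 8, 8));		/* -1: starts too low */
	return 0;
}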
@@ -3813,7 +3813,9 @@ EXPORT_SYMBOL(__kmalloc_node);
#ifdef CONFIG_HARDENED_USERCOPY
/*
- * Rejects objects that are incorrectly sized.
+ * Rejects incorrectly sized objects and objects that are to be copied
+ * to/from userspace but do not fall entirely within the containing slab
+ * cache's usercopy region.
*
- * Returns NULL if check passes, otherwise const char * to name of cache
- * to indicate an error.
+ * Returns 0 if the check passes, otherwise the return value of
+ * report_usercopy() to indicate an error.
@@ -3823,11 +3825,9 @@ int __check_heap_object(const void *ptr, unsigned long n, struct page *page,
{
struct kmem_cache *s;
unsigned long offset;
- size_t object_size;
/* Find object and usable object size. */
s = page->slab_cache;
- object_size = slab_ksize(s);
/* Reject impossible pointers. */
if (ptr < page_address(page))
@@ -3845,11 +3845,31 @@ int __check_heap_object(const void *ptr, unsigned long n, struct page *page,
offset -= s->red_left_pad;
}
- /* Allow address range falling entirely within object size. */
- if (offset <= object_size && n <= object_size - offset)
- return 0;
+ /* Make sure object falls entirely within cache's usercopy region. */
+ if (offset < s->useroffset ||
+ offset - s->useroffset > s->usersize ||
+ n > s->useroffset - offset + s->usersize) {
+ size_t object_size;
- return report_usercopy("SLUB object", s->name, to_user, offset, n);
+ /*
+ * If the copy is still within the allocated object, produce
+ * a warning instead of rejecting the copy. This is intended
+ * to be a temporary method to find any missing usercopy
+ * whitelists.
+ */
+ object_size = slab_ksize(s);
+ if (offset <= object_size && n <= object_size - offset) {
+ WARN_ONCE(1, "unexpected usercopy %s with bad or missing whitelist for SLUB object '%s' (offset %lu, size %lu)",
+ to_user ? "exposure" : "overwrite",
+ s->name, offset, n);
+ return 0;
+ }
+
+ return report_usercopy("SLUB object", s->name, to_user,
+ offset, n);
+ }
+
+ return 0;
}
#endif /* CONFIG_HARDENED_USERCOPY */
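
One detail worth noting in the fallback test above: it is written as offset <= object_size && n <= object_size - offset rather than the naive offset + n <= object_size, so a huge n cannot wrap the addition past object_size. A standalone sketch with made-up values showing why the naive form is unsafe:

#include <stdio.h>

int main(void)
{
	unsigned long object_size = 64;
	unsigned long offset = 32;
	unsigned long n = ~0UL - 8;	/* absurdly large copy length */

	/* Naive form: offset + n wraps around and appears to "pass". */
	if (offset + n <= object_size)
		printf("naive check: pass (overflow!)\n");
	else
		printf("naive check: reject\n");

	/* Overflow-safe form used in the patch. */
	if (offset <= object_size && n <= object_size - offset)
		printf("safe check: pass\n");
	else
		printf("safe check: reject\n");
	return 0;
}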
@@ -58,6 +58,18 @@ static noinline int check_stack_object(const void *obj, unsigned long len)
return GOOD_STACK;
}
+/*
+ * If this function is reached, then CONFIG_HARDENED_USERCOPY has found an
+ * unexpected state during a copy_from_user() or copy_to_user() call.
+ * There are several checks being performed on the buffer by the
+ * __check_object_size() function. Normal stack buffer usage should never
+ * trip the checks, and kernel text addressing will always trip the check.
+ * For cache objects, it is checking that only the whitelisted range of
+ * bytes for a given cache is being accessed (via the cache's usersize and
+ * useroffset fields). To adjust a cache whitelist, use the usercopy-aware
+ * kmem_cache_create_usercopy() function to create the cache (and
+ * carefully audit the whitelist range).
+ */
int report_usercopy(const char *name, const char *detail, bool to_user,
unsigned long offset, unsigned long len)
{
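
Per the comment above, the remedy for a legitimate warning here is to recreate the cache with kmem_cache_create_usercopy() and whitelist only the field(s) that actually cross the user boundary. A hedged sketch of what that looks like: struct foo_record, its fields, and the cache name are invented, and the exact parameter types of kmem_cache_create_usercopy() have varied between kernel versions:

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>

/* Hypothetical object: only 'payload' ever crosses the user boundary. */
struct foo_record {
	spinlock_t lock;		/* kernel-internal, never copied */
	struct list_head node;		/* kernel-internal, never copied */
	char payload[256];		/* data copied to/from userspace */
};

static struct kmem_cache *foo_cachep;

static int __init foo_cache_init(void)
{
	foo_cachep = kmem_cache_create_usercopy("foo_record",
			sizeof(struct foo_record), 0, 0,
			offsetof(struct foo_record, payload),	   /* useroffset */
			sizeof(((struct foo_record *)0)->payload), /* usersize */
			NULL);
	return foo_cachep ? 0 : -ENOMEM;
}

With such a cache, a copy confined to payload passes silently, while a copy touching lock or node falls outside the whitelisted region and trips the checks above.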