[v2,4/4] mm: apply __must_check to vmap_pages_range_noflush()

Message ID 20230413131223.4135168-4-glider@google.com
State New
Series [v2,1/4] mm: kmsan: handle alloc failures in kmsan_vmap_pages_range_noflush()

Commit Message

Alexander Potapenko April 13, 2023, 1:12 p.m. UTC
To prevent errors when vmap_pages_range_noflush() or
__vmap_pages_range_noflush() silently fail (see the link below for an
example), annotate them with __must_check so that the callers do not
unconditionally assume the mapping succeeded.
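
To illustrate the pattern the annotation enforces, here is a
hypothetical caller sketch (not part of this patch) that checks the
result instead of dropping it:

	/* Hypothetical caller, for illustration only. */
	static int map_buffer(unsigned long addr, unsigned long end,
			      pgprot_t prot, struct page **pages,
			      unsigned int page_shift)
	{
		int ret;

		ret = vmap_pages_range_noflush(addr, end, prot, pages,
					       page_shift);
		if (ret)
			return ret;	/* propagate instead of assuming success */
		/* Callers of the _noflush variant flush the cache separately. */
		flush_cache_vmap(addr, end);
		return 0;
	}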

Reported-by: Dipanjan Das <mail.dipanjan.das@gmail.com>
Link: https://lore.kernel.org/linux-mm/CANX2M5ZRrRA64k0hOif02TjmY9kbbO2aCBPyq79es34RXZ=cAw@mail.gmail.com/
Signed-off-by: Alexander Potapenko <glider@google.com>
---
 mm/internal.h | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

Comments

Marco Elver April 18, 2023, 10:10 a.m. UTC | #1
On Thu, 13 Apr 2023 at 15:12, Alexander Potapenko <glider@google.com> wrote:
>
> To prevent errors when vmap_pages_range_noflush() or
> __vmap_pages_range_noflush() silently fail (see the link below for an
> example), annotate them with __must_check so that the callers do not
> unconditionally assume the mapping succeeded.
>
> Reported-by: Dipanjan Das <mail.dipanjan.das@gmail.com>
> Link: https://lore.kernel.org/linux-mm/CANX2M5ZRrRA64k0hOif02TjmY9kbbO2aCBPyq79es34RXZ=cAw@mail.gmail.com/
> Signed-off-by: Alexander Potapenko <glider@google.com>

Reviewed-by: Marco Elver <elver@google.com>


Patch

diff --git a/mm/internal.h b/mm/internal.h
index 7920a8b7982ec..a646cf7c41e8a 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -833,20 +833,20 @@ size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
  * mm/vmalloc.c
  */
 #ifdef CONFIG_MMU
-int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
-                pgprot_t prot, struct page **pages, unsigned int page_shift);
+int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+		pgprot_t prot, struct page **pages, unsigned int page_shift);
 #else
 static inline
-int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
-                pgprot_t prot, struct page **pages, unsigned int page_shift)
+int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+		pgprot_t prot, struct page **pages, unsigned int page_shift)
 {
 	return -EINVAL;
 }
 #endif
 
-int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
-			       pgprot_t prot, struct page **pages,
-			       unsigned int page_shift);
+int __must_check __vmap_pages_range_noflush(
+	unsigned long addr, unsigned long end, pgprot_t prot,
+	struct page **pages, unsigned int page_shift);
 
 void vunmap_range_noflush(unsigned long start, unsigned long end);
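
Note: in the kernel, __must_check expands to the compiler's
warn_unused_result attribute (defined in
include/linux/compiler_attributes.h), so a call site that drops the
return value now produces a build-time warning. A minimal sketch of the
mechanism, with an approximate GCC diagnostic shown in the comment:

	#define __must_check __attribute__((__warn_unused_result__))

	__must_check int vmap_pages_range_noflush(unsigned long addr,
			unsigned long end, pgprot_t prot,
			struct page **pages, unsigned int page_shift);

	void caller(unsigned long addr, unsigned long end, pgprot_t prot,
		    struct page **pages, unsigned int page_shift)
	{
		vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
		/* warning: ignoring return value of
		 * 'vmap_pages_range_noflush' declared with attribute
		 * 'warn_unused_result' [-Wunused-result] */
	}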