author    | Alexander Potapenko <[email protected]> | 2023-04-13 15:12:23 +0200
committer | Andrew Morton <[email protected]> | 2023-04-18 16:30:10 -0700
commit    | d905ae2b0f7eaf8fb37febfe4833ccf3f8c1c27a (patch)
tree      | a631e3fab4646d3e6a0e45051e6b6ae9b9306dd7
parent    | bb1508c24c9c361e6344308c8de2cb81d7f228ba (diff)
mm: apply __must_check to vmap_pages_range_noflush()
To prevent errors when vmap_pages_range_noflush() or
__vmap_pages_range_noflush() silently fail (see the link below for an
example), annotate them with __must_check so that the callers do not
unconditionally assume the mapping succeeded.
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Alexander Potapenko <[email protected]>
Reported-by: Dipanjan Das <[email protected]>
Link: https://lore.kernel.org/linux-mm/CANX2M5ZRrRA64k0hOif02TjmY9kbbO2aCBPyq79es34RXZ=cAw@mail.gmail.com/
Reviewed-by: Marco Elver <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Uladzislau Rezki (Sony) <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
-rw-r--r-- | mm/internal.h | 10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 92ddd3a05b74..6483db57a31f 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -885,7 +885,7 @@ size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
  */
 #ifdef CONFIG_MMU
 void __init vmalloc_init(void);
-int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
 		pgprot_t prot, struct page **pages, unsigned int page_shift);
 #else
 static inline void vmalloc_init(void)
@@ -893,16 +893,16 @@ static inline void vmalloc_init(void)
 }
 
 static inline
-int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
 		pgprot_t prot, struct page **pages, unsigned int page_shift)
 {
 	return -EINVAL;
 }
 #endif
 
-int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
-			       pgprot_t prot, struct page **pages,
-			       unsigned int page_shift);
+int __must_check __vmap_pages_range_noflush(unsigned long addr,
+			       unsigned long end, pgprot_t prot,
+			       struct page **pages, unsigned int page_shift);
 void vunmap_range_noflush(unsigned long start, unsigned long end);
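
For context, __must_check maps to the compiler's warn_unused_result attribute, so a caller that drops the return value of these functions now gets a -Wunused-result warning at build time. Below is a minimal sketch of the calling pattern the annotation enforces; the wrapper function, its name, and its error handling are illustrative assumptions and are not part of this patch:

/*
 * Illustrative sketch only (not from this patch): with __must_check on
 * vmap_pages_range_noflush(), silently dropping the return value triggers
 * a -Wunused-result warning, so the caller has to propagate failures.
 */
static int example_map_pages(unsigned long addr, unsigned long end,
			     struct page **pages, unsigned int page_shift)
{
	int err;

	err = vmap_pages_range_noflush(addr, end, PAGE_KERNEL, pages,
				       page_shift);
	if (err)
		return err;		/* do not assume the mapping succeeded */

	flush_cache_vmap(addr, end);	/* flush only after a successful map */
	return 0;
}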