Message ID | 20190103003129.186555-1-shakeelb@google.com (mailing list archive)
---|---
State | New, archived
Series | memcg: localize memcg_kmem_enabled() check
Hi Shakeel,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on linus/master]
[also build test ERROR on v4.20 next-20190102]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Shakeel-Butt/memcg-localize-memcg_kmem_enabled-check/20190103-120255
config: x86_64-randconfig-x013-201900 (attached as .config)
compiler: gcc-7 (Debian 7.3.0-1) 7.3.0
reproduce:
        # save the attached .config to linux build tree
        make ARCH=x86_64

All errors (new ones prefixed by >>):

   mm/page_alloc.c: In function 'free_pages_prepare':
>> mm/page_alloc.c:1059:3: error: implicit declaration of function '__memcg_kmem_uncharge'; did you mean 'memcg_kmem_uncharge'? [-Werror=implicit-function-declaration]
      __memcg_kmem_uncharge(page, order);
      ^~~~~~~~~~~~~~~~~~~~~
      memcg_kmem_uncharge
   In file included from include/asm-generic/bug.h:5:0,
                    from arch/x86/include/asm/bug.h:83,
                    from include/linux/bug.h:5,
                    from include/linux/mmdebug.h:5,
                    from include/linux/mm.h:9,
                    from mm/page_alloc.c:18:
   mm/page_alloc.c: In function '__alloc_pages_nodemask':
>> mm/page_alloc.c:4553:15: error: implicit declaration of function '__memcg_kmem_charge'; did you mean 'memcg_kmem_charge'? [-Werror=implicit-function-declaration]
       unlikely(__memcg_kmem_charge(page, gfp_mask, order) != 0)) {
                ^
   include/linux/compiler.h:77:42: note: in definition of macro 'unlikely'
    # define unlikely(x) __builtin_expect(!!(x), 0)
                                             ^
   cc1: some warnings being treated as errors

vim +1059 mm/page_alloc.c

  1024	
  1025	static __always_inline bool free_pages_prepare(struct page *page,
  1026					unsigned int order, bool check_free)
  1027	{
  1028		int bad = 0;
  1029	
  1030		VM_BUG_ON_PAGE(PageTail(page), page);
  1031	
  1032		trace_mm_page_free(page, order);
  1033	
  1034		/*
  1035		 * Check tail pages before head page information is cleared to
  1036		 * avoid checking PageCompound for order-0 pages.
  1037		 */
  1038		if (unlikely(order)) {
  1039			bool compound = PageCompound(page);
  1040			int i;
  1041	
  1042			VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
  1043	
  1044			if (compound)
  1045				ClearPageDoubleMap(page);
  1046			for (i = 1; i < (1 << order); i++) {
  1047				if (compound)
  1048					bad += free_tail_pages_check(page, page + i);
  1049				if (unlikely(free_pages_check(page + i))) {
  1050					bad++;
  1051					continue;
  1052				}
  1053				(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
  1054			}
  1055		}
  1056		if (PageMappingFlags(page))
  1057			page->mapping = NULL;
  1058		if (memcg_kmem_enabled() && PageKmemcg(page))
> 1059			__memcg_kmem_uncharge(page, order);
  1060		if (check_free)
  1061			bad += free_pages_check(page);
  1062		if (bad)
  1063			return false;
  1064	
  1065		page_cpupid_reset_last(page);
  1066		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
  1067		reset_page_owner(page, order);
  1068	
  1069		if (!PageHighMem(page)) {
  1070			debug_check_no_locks_freed(page_address(page),
  1071						   PAGE_SIZE << order);
  1072			debug_check_no_obj_freed(page_address(page),
  1073						 PAGE_SIZE << order);
  1074		}
  1075		arch_free_page(page, order);
  1076		kernel_poison_pages(page, 1 << order, 0);
  1077		kernel_map_pages(page, 1 << order, 0);
  1078		kasan_free_nondeferred_pages(page, order);
  1079	
  1080		return true;
  1081	}
  1082	

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
Hi Shakeel,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on linus/master]
[also build test WARNING on v4.20 next-20190102]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Shakeel-Butt/memcg-localize-memcg_kmem_enabled-check/20190103-120255
config: x86_64-randconfig-x011-201900 (attached as .config)
compiler: gcc-7 (Debian 7.3.0-1) 7.3.0
reproduce:
        # save the attached .config to linux build tree
        make ARCH=x86_64

All warnings (new ones prefixed by >>):

   mm/page_alloc.c: In function 'free_pages_prepare':
   mm/page_alloc.c:1059:3: error: implicit declaration of function '__memcg_kmem_uncharge'; did you mean 'memcg_kmem_uncharge'? [-Werror=implicit-function-declaration]
      __memcg_kmem_uncharge(page, order);
      ^~~~~~~~~~~~~~~~~~~~~
      memcg_kmem_uncharge
   In file included from include/asm-generic/bug.h:5:0,
                    from arch/x86/include/asm/bug.h:83,
                    from include/linux/bug.h:5,
                    from include/linux/mmdebug.h:5,
                    from include/linux/mm.h:9,
                    from mm/page_alloc.c:18:
   mm/page_alloc.c: In function '__alloc_pages_nodemask':
   mm/page_alloc.c:4553:15: error: implicit declaration of function '__memcg_kmem_charge'; did you mean 'memcg_kmem_charge'? [-Werror=implicit-function-declaration]
       unlikely(__memcg_kmem_charge(page, gfp_mask, order) != 0)) {
                ^
   include/linux/compiler.h:58:30: note: in definition of macro '__trace_if'
     if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
                                  ^~~~
>> mm/page_alloc.c:4552:2: note: in expansion of macro 'if'
     if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
     ^~
   include/linux/compiler.h:48:24: note: in expansion of macro '__branch_check__'
    #  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
                           ^~~~~~~~~~~~~~~~
>> mm/page_alloc.c:4553:6: note: in expansion of macro 'unlikely'
         unlikely(__memcg_kmem_charge(page, gfp_mask, order) != 0)) {
         ^~~~~~~~
   cc1: some warnings being treated as errors

vim +/if +4552 mm/page_alloc.c

9cd755587 Mel Gorman       2017-02-24  4493  
9cd755587 Mel Gorman       2017-02-24  4494  /*
9cd755587 Mel Gorman       2017-02-24  4495   * This is the 'heart' of the zoned buddy allocator.
9cd755587 Mel Gorman       2017-02-24  4496   */
9cd755587 Mel Gorman       2017-02-24  4497  struct page *
04ec6264f Vlastimil Babka  2017-07-06  4498  __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
04ec6264f Vlastimil Babka  2017-07-06  4499  						nodemask_t *nodemask)
9cd755587 Mel Gorman       2017-02-24  4500  {
9cd755587 Mel Gorman       2017-02-24  4501  	struct page *page;
9cd755587 Mel Gorman       2017-02-24  4502  	unsigned int alloc_flags = ALLOC_WMARK_LOW;
f19360f01 Tetsuo Handa     2017-09-08  4503  	gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
9cd755587 Mel Gorman       2017-02-24  4504  	struct alloc_context ac = { };
9cd755587 Mel Gorman       2017-02-24  4505  
c63ae43ba Michal Hocko     2018-11-16  4506  	/*
c63ae43ba Michal Hocko     2018-11-16  4507  	 * There are several places where we assume that the order value is sane
c63ae43ba Michal Hocko     2018-11-16  4508  	 * so bail out early if the request is out of bound.
c63ae43ba Michal Hocko     2018-11-16  4509  	 */
c63ae43ba Michal Hocko     2018-11-16  4510  	if (unlikely(order >= MAX_ORDER)) {
c63ae43ba Michal Hocko     2018-11-16  4511  		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
c63ae43ba Michal Hocko     2018-11-16  4512  		return NULL;
c63ae43ba Michal Hocko     2018-11-16  4513  	}
c63ae43ba Michal Hocko     2018-11-16  4514  
9cd755587 Mel Gorman       2017-02-24  4515  	gfp_mask &= gfp_allowed_mask;
f19360f01 Tetsuo Handa     2017-09-08  4516  	alloc_mask = gfp_mask;
04ec6264f Vlastimil Babka  2017-07-06  4517  	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
9cd755587 Mel Gorman       2017-02-24  4518  		return NULL;
9cd755587 Mel Gorman       2017-02-24  4519  
a380b40ab Huaisheng Ye     2018-06-07  4520  	finalise_ac(gfp_mask, &ac);
5bb1b1697 Mel Gorman       2016-05-19  4521  
6bb154504 Mel Gorman       2018-12-28  4522  	/*
6bb154504 Mel Gorman       2018-12-28  4523  	 * Forbid the first pass from falling back to types that fragment
6bb154504 Mel Gorman       2018-12-28  4524  	 * memory until all local zones are considered.
6bb154504 Mel Gorman       2018-12-28  4525  	 */
0a79cdad5 Mel Gorman       2018-12-28  4526  	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);
6bb154504 Mel Gorman       2018-12-28  4527  
5117f45d1 Mel Gorman       2009-06-16  4528  	/* First allocation attempt */
a9263751e Vlastimil Babka  2015-02-11  4529  	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
4fcb09711 Mel Gorman       2016-05-19  4530  	if (likely(page))
4fcb09711 Mel Gorman       2016-05-19  4531  		goto out;
4fcb09711 Mel Gorman       2016-05-19  4532  
21caf2fc1 Ming Lei         2013-02-22  4533  	/*
7dea19f9e Michal Hocko     2017-05-03  4534  	 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
7dea19f9e Michal Hocko     2017-05-03  4535  	 * resp. GFP_NOIO which has to be inherited for all allocation requests
7dea19f9e Michal Hocko     2017-05-03  4536  	 * from a particular context which has been marked by
7dea19f9e Michal Hocko     2017-05-03  4537  	 * memalloc_no{fs,io}_{save,restore}.
21caf2fc1 Ming Lei         2013-02-22  4538  	 */
7dea19f9e Michal Hocko     2017-05-03  4539  	alloc_mask = current_gfp_context(gfp_mask);
c9ab0c4fb Mel Gorman       2015-11-06  4540  	ac.spread_dirty_pages = false;
91fbdc0f8 Andrew Morton    2015-02-11  4541  
4741526b8 Mel Gorman       2016-05-19  4542  	/*
4741526b8 Mel Gorman       2016-05-19  4543  	 * Restore the original nodemask if it was potentially replaced with
4741526b8 Mel Gorman       2016-05-19  4544  	 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
4741526b8 Mel Gorman       2016-05-19  4545  	 */
e47483bca Vlastimil Babka  2017-01-24  4546  	if (unlikely(ac.nodemask != nodemask))
4741526b8 Mel Gorman       2016-05-19  4547  		ac.nodemask = nodemask;
16096c25b Vlastimil Babka  2017-01-24  4548  
a9263751e Vlastimil Babka  2015-02-11  4549  	page = __alloc_pages_slowpath(alloc_mask, order, &ac);
11e33f6a5 Mel Gorman       2009-06-16  4550  
4fcb09711 Mel Gorman       2016-05-19  4551  out:
c4159a75b Vladimir Davydov 2016-08-08 @4552  	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
3d5b7b20b Shakeel Butt     2019-01-02 @4553  	    unlikely(__memcg_kmem_charge(page, gfp_mask, order) != 0)) {
4949148ad Vladimir Davydov 2016-07-26  4554  		__free_pages(page, order);
4949148ad Vladimir Davydov 2016-07-26  4555  		page = NULL;
4949148ad Vladimir Davydov 2016-07-26  4556  	}
4949148ad Vladimir Davydov 2016-07-26  4557  
4fcb09711 Mel Gorman       2016-05-19  4558  	trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
4fcb09711 Mel Gorman       2016-05-19  4559  
11e33f6a5 Mel Gorman       2009-06-16  4560  	return page;
^1da177e4 Linus Torvalds   2005-04-16  4561  }
d239171e4 Mel Gorman       2009-06-16  4562  EXPORT_SYMBOL(__alloc_pages_nodemask);
^1da177e4 Linus Torvalds   2005-04-16  4563  

:::::: The code at line 4552 was first introduced by commit
:::::: c4159a75b64c0e67caededf4d7372c1b58a5f42a mm: memcontrol: only mark charged pages with PageKmemcg

:::::: TO: Vladimir Davydov <vdavydov@virtuozzo.com>
:::::: CC: Linus Torvalds <torvalds@linux-foundation.org>

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
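[Editor's note] Both robot reports point at the same root cause: the patch declares __memcg_kmem_charge()/__memcg_kmem_uncharge() only inside the #ifdef CONFIG_MEMCG_KMEM block of include/linux/memcontrol.h, while the call sites in mm/page_alloc.c are compiled unconditionally (memcg_kmem_enabled() is a runtime static-branch check, not a preprocessor guard). On a randconfig with CONFIG_MEMCG_KMEM disabled, the compiler therefore sees no declaration at all and emits the implicit-declaration errors above. A minimal sketch of no-op stubs for the !CONFIG_MEMCG_KMEM branch that would cure the build: this is an illustration of what the config is missing, not the fix actually applied in a later revision.

	/* Hypothetical stubs for the !CONFIG_MEMCG_KMEM branch of
	 * include/linux/memcontrol.h, shown only to illustrate what the
	 * failing randconfig build lacks.
	 */
	#else /* !CONFIG_MEMCG_KMEM */

	/* With kmem accounting compiled out, charging always "succeeds"... */
	static inline int __memcg_kmem_charge(struct page *page, gfp_t gfp,
					      int order)
	{
		return 0;
	}

	/* ...and uncharging is a no-op, so the unconditional call sites in
	 * free_pages_prepare() and __alloc_pages_nodemask() still compile. */
	static inline void __memcg_kmem_uncharge(struct page *page, int order)
	{
	}

	static inline int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp,
						    int order,
						    struct mem_cgroup *memcg)
	{
		return 0;
	}

	#endif /* CONFIG_MEMCG_KMEM */

With such stubs in place the code builds for every config, and when CONFIG_MEMCG_KMEM is enabled the static branch still elides the work at runtime.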
diff --git a/fs/pipe.c b/fs/pipe.c
index bdc5d3c0977d..51d5fd8840ab 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -140,8 +140,7 @@ static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
 	struct page *page = buf->page;
 
 	if (page_count(page) == 1) {
-		if (memcg_kmem_enabled())
-			memcg_kmem_uncharge(page, 0);
+		memcg_kmem_uncharge(page, 0);
 		__SetPageLocked(page);
 		return 0;
 	}
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 83ae11cbd12c..e264d5c28781 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1273,12 +1273,12 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
 
 struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
 void memcg_kmem_put_cache(struct kmem_cache *cachep);
-int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
-			    struct mem_cgroup *memcg);
 
 #ifdef CONFIG_MEMCG_KMEM
-int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
-void memcg_kmem_uncharge(struct page *page, int order);
+int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
+void __memcg_kmem_uncharge(struct page *page, int order);
+int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
+			      struct mem_cgroup *memcg);
 
 extern struct static_key_false memcg_kmem_enabled_key;
 extern struct workqueue_struct *memcg_kmem_cache_wq;
@@ -1300,6 +1300,26 @@ static inline bool memcg_kmem_enabled(void)
 	return static_branch_unlikely(&memcg_kmem_enabled_key);
 }
 
+static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
+{
+	if (memcg_kmem_enabled())
+		return __memcg_kmem_charge(page, gfp, order);
+	return 0;
+}
+
+static inline void memcg_kmem_uncharge(struct page *page, int order)
+{
+	if (memcg_kmem_enabled())
+		__memcg_kmem_uncharge(page, order);
+}
+
+static inline int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp,
+					  int order, struct mem_cgroup *memcg)
+{
+	if (memcg_kmem_enabled())
+		return __memcg_kmem_charge_memcg(page, gfp, order, memcg);
+	return 0;
+}
 /*
  * helper for accessing a memcg's index. It will be used as an index in the
  * child cache array in kmem_cache, and also to derive its name. This function
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 4afd5971f2d4..e8ca09920d71 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2557,7 +2557,7 @@ void memcg_kmem_put_cache(struct kmem_cache *cachep)
 }
 
 /**
- * memcg_kmem_charge_memcg: charge a kmem page
+ * __memcg_kmem_charge_memcg: charge a kmem page
  * @page: page to charge
 * @gfp: reclaim mode
 * @order: allocation order
@@ -2565,7 +2565,7 @@ void memcg_kmem_put_cache(struct kmem_cache *cachep)
  *
  * Returns 0 on success, an error code on failure.
  */
-int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
+int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
 			    struct mem_cgroup *memcg)
 {
 	unsigned int nr_pages = 1 << order;
@@ -2588,24 +2588,24 @@ int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
 }
 
 /**
- * memcg_kmem_charge: charge a kmem page to the current memory cgroup
+ * __memcg_kmem_charge: charge a kmem page to the current memory cgroup
  * @page: page to charge
  * @gfp: reclaim mode
  * @order: allocation order
  *
  * Returns 0 on success, an error code on failure.
  */
-int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
+int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
 {
 	struct mem_cgroup *memcg;
 	int ret = 0;
 
-	if (mem_cgroup_disabled() || memcg_kmem_bypass())
+	if (memcg_kmem_bypass())
 		return 0;
 
 	memcg = get_mem_cgroup_from_current();
 	if (!mem_cgroup_is_root(memcg)) {
-		ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
+		ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);
 		if (!ret)
 			__SetPageKmemcg(page);
 	}
@@ -2613,11 +2613,11 @@ int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
 	return ret;
 }
 
 /**
- * memcg_kmem_uncharge: uncharge a kmem page
+ * __memcg_kmem_uncharge: uncharge a kmem page
  * @page: page to uncharge
  * @order: allocation order
  */
-void memcg_kmem_uncharge(struct page *page, int order)
+void __memcg_kmem_uncharge(struct page *page, int order)
 {
 	struct mem_cgroup *memcg = page->mem_cgroup;
 	unsigned int nr_pages = 1 << order;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0634fbdef078..d65c337d2257 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1053,7 +1053,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 	if (PageMappingFlags(page))
 		page->mapping = NULL;
 	if (memcg_kmem_enabled() && PageKmemcg(page))
-		memcg_kmem_uncharge(page, order);
+		__memcg_kmem_uncharge(page, order);
 	if (check_free)
 		bad += free_pages_check(page);
 	if (bad)
@@ -4667,7 +4667,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 
 out:
 	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
-	    unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) {
+	    unlikely(__memcg_kmem_charge(page, gfp_mask, order) != 0)) {
 		__free_pages(page, order);
 		page = NULL;
 	}
diff --git a/mm/slab.h b/mm/slab.h
index 4190c24ef0e9..cde51d7f631f 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -276,8 +276,6 @@ static __always_inline int memcg_charge_slab(struct page *page,
 						    gfp_t gfp, int order,
 						    struct kmem_cache *s)
 {
-	if (!memcg_kmem_enabled())
-		return 0;
 	if (is_root_cache(s))
 		return 0;
 	return memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
@@ -286,8 +284,6 @@ static __always_inline int memcg_charge_slab(struct page *page,
 
 static __always_inline void memcg_uncharge_slab(struct page *page, int order,
 						struct kmem_cache *s)
 {
-	if (!memcg_kmem_enabled())
-		return;
 	memcg_kmem_uncharge(page, order);
 }
Move the memcg_kmem_enabled() checks into the memcg kmem charge/uncharge
functions, so that users don't have to check that condition explicitly.

This is purely a code-cleanup patch without any functional change. Only the
order of checks in memcg_charge_slab() can potentially change, but it remains
functionally the same. This should not matter, as memcg_charge_slab() is not
in the hot path.

Signed-off-by: Shakeel Butt <shakeelb@google.com>
---
 fs/pipe.c                  |  3 +--
 include/linux/memcontrol.h | 28 ++++++++++++++++++++++++----
 mm/memcontrol.c            | 16 ++++++++--------
 mm/page_alloc.c            |  4 ++--
 mm/slab.h                  |  4 ----
 5 files changed, 35 insertions(+), 20 deletions(-)
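[Editor's note] For reviewers skimming the diffstat, the caller-visible effect of the cleanup reduces to the following before/after pattern, sketched here from the fs/pipe.c hunk above:

	/* Before the patch: each caller open-codes the static-branch check. */
	if (page_count(page) == 1) {
		if (memcg_kmem_enabled())
			memcg_kmem_uncharge(page, 0);
		__SetPageLocked(page);
		return 0;
	}

	/* After: the check lives in an inline wrapper, so the call site
	 * shrinks to one line while static_branch_unlikely() still keeps
	 * the disabled case to a patched-out jump.
	 */
	if (page_count(page) == 1) {
		memcg_kmem_uncharge(page, 0);
		__SetPageLocked(page);
		return 0;
	}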