Message ID | 20210430195232.30491-28-michel@lespinasse.org
---|---
State | New, archived
Series | [01/29] mm: export dump_mm
Hi Michel,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on tip/x86/mm]
[also build test ERROR on arm64/for-next/core linus/master v5.12]
[cannot apply to hnaz-linux-mm/master next-20210430]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Michel-Lespinasse/Speculative-page-faults-anon-vmas-only/20210501-035602
base:   https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git a500fc918f7b8dc3dff2e6c74f3e73e856c18248
config: nios2-randconfig-r014-20210501 (attached as .config)
compiler: nios2-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/4c848aa85ff1e5b3a01dde75e9facbe9cb7b8120
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Michel-Lespinasse/Speculative-page-faults-anon-vmas-only/20210501-035602
        git checkout 4c848aa85ff1e5b3a01dde75e9facbe9cb7b8120
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross W=1 ARCH=nios2

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All error/warnings (new ones prefixed by >>):

   In file included from include/linux/mmap_lock.h:10,
                    from include/linux/mm.h:18,
                    from include/linux/pid_namespace.h:7,
                    from include/linux/ptrace.h:10,
                    from arch/nios2/kernel/asm-offsets.c:9:
   include/linux/vmstat.h: In function '__inc_zone_page_state':
>> include/linux/vmstat.h:362:19: error: implicit declaration of function 'page_zone' [-Werror=implicit-function-declaration]
     362 |  __inc_zone_state(page_zone(page), item);
         |                   ^~~~~~~~~
>> include/linux/vmstat.h:362:19: warning: passing argument 1 of '__inc_zone_state' makes pointer from integer without a cast [-Wint-conversion]
     362 |  __inc_zone_state(page_zone(page), item);
         |                   ^~~~~~~~~~~~~~~
         |                   |
         |                   int
   include/linux/vmstat.h:335:50: note: expected 'struct zone *' but argument is of type 'int'
     335 | static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
         |                                     ~~~~~~~~~~~~~^~~~
   include/linux/vmstat.h: In function '__inc_node_page_state':
>> include/linux/vmstat.h:368:19: error: implicit declaration of function 'page_pgdat'; did you mean 'page_private'? [-Werror=implicit-function-declaration]
     368 |  __inc_node_state(page_pgdat(page), item);
         |                   ^~~~~~~~~~
         |                   page_private
>> include/linux/vmstat.h:368:19: warning: passing argument 1 of '__inc_node_state' makes pointer from integer without a cast [-Wint-conversion]
     368 |  __inc_node_state(page_pgdat(page), item);
         |                   ^~~~~~~~~~~~~~~~
         |                   |
         |                   int
   include/linux/vmstat.h:341:57: note: expected 'struct pglist_data *' but argument is of type 'int'
     341 | static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
         |                                     ~~~~~~~~~~~~~~~~~~~~^~~~~
   include/linux/vmstat.h: In function '__dec_zone_page_state':
>> include/linux/vmstat.h:375:19: warning: passing argument 1 of '__dec_zone_state' makes pointer from integer without a cast [-Wint-conversion]
     375 |  __dec_zone_state(page_zone(page), item);
         |                   ^~~~~~~~~~~~~~~
         |                   |
         |                   int
   include/linux/vmstat.h:347:50: note: expected 'struct zone *' but argument is of type 'int'
     347 | static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
         |                                     ~~~~~~~~~~~~~^~~~
   include/linux/vmstat.h: In function '__dec_node_page_state':
>> include/linux/vmstat.h:381:19: warning: passing argument 1 of '__dec_node_state' makes pointer from integer without a cast [-Wint-conversion]
     381 |  __dec_node_state(page_pgdat(page), item);
         |                   ^~~~~~~~~~~~~~~~
         |                   |
         |                   int
   include/linux/vmstat.h:353:57: note: expected 'struct pglist_data *' but argument is of type 'int'
     353 | static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
         |                                     ~~~~~~~~~~~~~~~~~~~~^~~~~
   include/linux/vmstat.h: In function '__mod_lruvec_page_state':
>> include/linux/vmstat.h:510:24: warning: passing argument 1 of '__mod_node_page_state' makes pointer from integer without a cast [-Wint-conversion]
     510 |  __mod_node_page_state(page_pgdat(page), idx, val);
         |                        ^~~~~~~~~~~~~~~~
         |                        |
         |                        int
   include/linux/vmstat.h:318:62: note: expected 'struct pglist_data *' but argument is of type 'int'
     318 | static inline void __mod_node_page_state(struct pglist_data *pgdat,
         |                                          ~~~~~~~~~~~~~~~~~~~~^~~~~
   include/linux/vmstat.h: In function 'mod_lruvec_page_state':
   include/linux/vmstat.h:516:22: warning: passing argument 1 of '__mod_node_page_state' makes pointer from integer without a cast [-Wint-conversion]
     516 |  mod_node_page_state(page_pgdat(page), idx, val);
         |                      ^~~~~~~~~~~~~~~~
         |                      |
         |                      int
   include/linux/vmstat.h:318:62: note: expected 'struct pglist_data *' but argument is of type 'int'
     318 | static inline void __mod_node_page_state(struct pglist_data *pgdat,
         |                                          ~~~~~~~~~~~~~~~~~~~~^~~~~
   In file included from include/linux/pid_namespace.h:7,
                    from include/linux/ptrace.h:10,
                    from arch/nios2/kernel/asm-offsets.c:9:
   include/linux/mm.h: At top level:
>> include/linux/mm.h:1483:28: error: conflicting types for 'page_zone'
    1483 | static inline struct zone *page_zone(const struct page *page)
         |                            ^~~~~~~~~
   In file included from include/linux/mmap_lock.h:10,
                    from include/linux/mm.h:18,
                    from include/linux/pid_namespace.h:7,
                    from include/linux/ptrace.h:10,
                    from arch/nios2/kernel/asm-offsets.c:9:
   include/linux/vmstat.h:362:19: note: previous implicit declaration of 'page_zone' was here
     362 |  __inc_zone_state(page_zone(page), item);
         |                   ^~~~~~~~~
   In file included from include/linux/pid_namespace.h:7,
                    from include/linux/ptrace.h:10,
                    from arch/nios2/kernel/asm-offsets.c:9:
>> include/linux/mm.h:1488:26: error: conflicting types for 'page_pgdat'
    1488 | static inline pg_data_t *page_pgdat(const struct page *page)
         |                          ^~~~~~~~~~
   In file included from include/linux/mmap_lock.h:10,
                    from include/linux/mm.h:18,
                    from include/linux/pid_namespace.h:7,
                    from include/linux/ptrace.h:10,
                    from arch/nios2/kernel/asm-offsets.c:9:
   include/linux/vmstat.h:368:19: note: previous implicit declaration of 'page_pgdat' was here
     368 |  __inc_node_state(page_pgdat(page), item);
         |                   ^~~~~~~~~~
   cc1: some warnings being treated as errors
   make[2]: *** [scripts/Makefile.build:116: arch/nios2/kernel/asm-offsets.s] Error 1
   make[2]: Target '__build' not remade because of errors.
   make[1]: *** [Makefile:1233: prepare0] Error 2
   make[1]: Target 'prepare' not remade because of errors.
   make: *** [Makefile:215: __sub-make] Error 2
   make: Target 'prepare' not remade because of errors.

vim +/page_zone +362 include/linux/vmstat.h

75ef7184053989 Mel Gorman                2016-07-28  334  
7f4599e9cd6bca Christoph Lameter         2006-07-10 @335  static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
7f4599e9cd6bca Christoph Lameter         2006-07-10  336  {
7f4599e9cd6bca Christoph Lameter         2006-07-10  337  	atomic_long_inc(&zone->vm_stat[item]);
75ef7184053989 Mel Gorman                2016-07-28  338  	atomic_long_inc(&vm_zone_stat[item]);
75ef7184053989 Mel Gorman                2016-07-28  339  }
75ef7184053989 Mel Gorman                2016-07-28  340  
75ef7184053989 Mel Gorman                2016-07-28  341  static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
75ef7184053989 Mel Gorman                2016-07-28  342  {
75ef7184053989 Mel Gorman                2016-07-28  343  	atomic_long_inc(&pgdat->vm_stat[item]);
75ef7184053989 Mel Gorman                2016-07-28  344  	atomic_long_inc(&vm_node_stat[item]);
7f4599e9cd6bca Christoph Lameter         2006-07-10  345  }
7f4599e9cd6bca Christoph Lameter         2006-07-10  346  
c878538598d1e7 Christoph Lameter         2007-02-10  347  static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
c878538598d1e7 Christoph Lameter         2007-02-10  348  {
c878538598d1e7 Christoph Lameter         2007-02-10  349  	atomic_long_dec(&zone->vm_stat[item]);
75ef7184053989 Mel Gorman                2016-07-28  350  	atomic_long_dec(&vm_zone_stat[item]);
75ef7184053989 Mel Gorman                2016-07-28  351  }
75ef7184053989 Mel Gorman                2016-07-28  352  
75ef7184053989 Mel Gorman                2016-07-28  353  static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
75ef7184053989 Mel Gorman                2016-07-28  354  {
75ef7184053989 Mel Gorman                2016-07-28  355  	atomic_long_dec(&pgdat->vm_stat[item]);
75ef7184053989 Mel Gorman                2016-07-28  356  	atomic_long_dec(&vm_node_stat[item]);
c878538598d1e7 Christoph Lameter         2007-02-10  357  }
c878538598d1e7 Christoph Lameter         2007-02-10  358  
6a3ed2123a78de Johannes Weiner           2014-04-03  359  static inline void __inc_zone_page_state(struct page *page,
6a3ed2123a78de Johannes Weiner           2014-04-03  360  			enum zone_stat_item item)
6a3ed2123a78de Johannes Weiner           2014-04-03  361  {
6a3ed2123a78de Johannes Weiner           2014-04-03 @362  	__inc_zone_state(page_zone(page), item);
6a3ed2123a78de Johannes Weiner           2014-04-03  363  }
6a3ed2123a78de Johannes Weiner           2014-04-03  364  
75ef7184053989 Mel Gorman                2016-07-28  365  static inline void __inc_node_page_state(struct page *page,
75ef7184053989 Mel Gorman                2016-07-28  366  			enum node_stat_item item)
75ef7184053989 Mel Gorman                2016-07-28  367  {
75ef7184053989 Mel Gorman                2016-07-28 @368  	__inc_node_state(page_pgdat(page), item);
75ef7184053989 Mel Gorman                2016-07-28  369  }
75ef7184053989 Mel Gorman                2016-07-28  370  
75ef7184053989 Mel Gorman                2016-07-28  371  
2244b95a7bcf8d Christoph Lameter         2006-06-30  372  static inline void __dec_zone_page_state(struct page *page,
2244b95a7bcf8d Christoph Lameter         2006-06-30  373  			enum zone_stat_item item)
2244b95a7bcf8d Christoph Lameter         2006-06-30  374  {
57ce36feb4d128 Uwe Kleine-König          2008-02-25 @375  	__dec_zone_state(page_zone(page), item);
2244b95a7bcf8d Christoph Lameter         2006-06-30  376  }
2244b95a7bcf8d Christoph Lameter         2006-06-30  377  
75ef7184053989 Mel Gorman                2016-07-28  378  static inline void __dec_node_page_state(struct page *page,
75ef7184053989 Mel Gorman                2016-07-28  379  			enum node_stat_item item)
75ef7184053989 Mel Gorman                2016-07-28  380  {
75ef7184053989 Mel Gorman                2016-07-28 @381  	__dec_node_state(page_pgdat(page), item);
75ef7184053989 Mel Gorman                2016-07-28  382  }
75ef7184053989 Mel Gorman                2016-07-28  383  
75ef7184053989 Mel Gorman                2016-07-28  384  
2244b95a7bcf8d Christoph Lameter         2006-06-30  385  /*
2244b95a7bcf8d Christoph Lameter         2006-06-30  386   * We only use atomic operations to update counters. So there is no need to
2244b95a7bcf8d Christoph Lameter         2006-06-30  387   * disable interrupts.
2244b95a7bcf8d Christoph Lameter         2006-06-30  388   */
2244b95a7bcf8d Christoph Lameter         2006-06-30  389  #define inc_zone_page_state __inc_zone_page_state
2244b95a7bcf8d Christoph Lameter         2006-06-30  390  #define dec_zone_page_state __dec_zone_page_state
2244b95a7bcf8d Christoph Lameter         2006-06-30  391  #define mod_zone_page_state __mod_zone_page_state
2244b95a7bcf8d Christoph Lameter         2006-06-30  392  
75ef7184053989 Mel Gorman                2016-07-28  393  #define inc_node_page_state __inc_node_page_state
75ef7184053989 Mel Gorman                2016-07-28  394  #define dec_node_page_state __dec_node_page_state
75ef7184053989 Mel Gorman                2016-07-28  395  #define mod_node_page_state __mod_node_page_state
75ef7184053989 Mel Gorman                2016-07-28  396  
6a3ed2123a78de Johannes Weiner           2014-04-03  397  #define inc_zone_state __inc_zone_state
75ef7184053989 Mel Gorman                2016-07-28  398  #define inc_node_state __inc_node_state
6a3ed2123a78de Johannes Weiner           2014-04-03  399  #define dec_zone_state __dec_zone_state
6a3ed2123a78de Johannes Weiner           2014-04-03  400  
b44129b30652c8 Mel Gorman                2011-01-13  401  #define set_pgdat_percpu_threshold(pgdat, callback) { }
88f5acf88ae6a9 Mel Gorman                2011-01-13  402  
a6cccdc36c966e KOSAKI Motohiro           2011-05-24  403  static inline void refresh_zone_stat_thresholds(void) { }
2bb921e5266565 Christoph Lameter         2013-09-11  404  static inline void cpu_vm_stats_fold(int cpu) { }
0eb77e98803219 Christoph Lameter         2016-01-14  405  static inline void quiet_vmstat(void) { }
a6cccdc36c966e KOSAKI Motohiro           2011-05-24  406  
5a883813845a2b Minchan Kim               2012-10-08  407  static inline void drain_zonestat(struct zone *zone,
5a883813845a2b Minchan Kim               2012-10-08  408  			struct per_cpu_pageset *pset) { }
fa25c503dfa203 KOSAKI Motohiro           2011-05-24  409  #endif		/* CONFIG_SMP */
fa25c503dfa203 KOSAKI Motohiro           2011-05-24  410  
d1ce749a0db122 Bartlomiej Zolnierkiewicz 2012-10-08  411  static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
d1ce749a0db122 Bartlomiej Zolnierkiewicz 2012-10-08  412  					     int migratetype)
d1ce749a0db122 Bartlomiej Zolnierkiewicz 2012-10-08  413  {
d1ce749a0db122 Bartlomiej Zolnierkiewicz 2012-10-08  414  	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
d1ce749a0db122 Bartlomiej Zolnierkiewicz 2012-10-08  415  	if (is_migrate_cma(migratetype))
d1ce749a0db122 Bartlomiej Zolnierkiewicz 2012-10-08  416  		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
d1ce749a0db122 Bartlomiej Zolnierkiewicz 2012-10-08  417  }
d1ce749a0db122 Bartlomiej Zolnierkiewicz 2012-10-08  418  
fa25c503dfa203 KOSAKI Motohiro           2011-05-24  419  extern const char * const vmstat_text[];
2244b95a7bcf8d Christoph Lameter         2006-06-30  420  
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  421  static inline const char *zone_stat_name(enum zone_stat_item item)
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  422  {
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  423  	return vmstat_text[item];
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  424  }
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  425  
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  426  #ifdef CONFIG_NUMA
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  427  static inline const char *numa_stat_name(enum numa_stat_item item)
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  428  {
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  429  	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  430  			   item];
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  431  }
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  432  #endif /* CONFIG_NUMA */
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  433  
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  434  static inline const char *node_stat_name(enum node_stat_item item)
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  435  {
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  436  	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  437  			   NR_VM_NUMA_STAT_ITEMS +
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  438  			   item];
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  439  }
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  440  
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  441  static inline const char *lru_list_name(enum lru_list lru)
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  442  {
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  443  	return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  444  }
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  445  
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  446  static inline const char *writeback_stat_name(enum writeback_stat_item item)
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  447  {
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  448  	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  449  			   NR_VM_NUMA_STAT_ITEMS +
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  450  			   NR_VM_NODE_STAT_ITEMS +
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  451  			   item];
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  452  }
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  453  
ebc5d83d044381 Konstantin Khlebnikov     2019-12-04  454  #if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  455  static inline const char *vm_event_name(enum vm_event_item item)
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  456  {
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  457  	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  458  			   NR_VM_NUMA_STAT_ITEMS +
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  459  			   NR_VM_NODE_STAT_ITEMS +
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  460  			   NR_VM_WRITEBACK_STAT_ITEMS +
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  461  			   item];
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  462  }
ebc5d83d044381 Konstantin Khlebnikov     2019-12-04  463  #endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
9d7ea9a297e644 Konstantin Khlebnikov     2019-12-04  464  
c47d5032ed3002 Shakeel Butt              2020-12-14  465  #ifdef CONFIG_MEMCG
c47d5032ed3002 Shakeel Butt              2020-12-14  466  
c47d5032ed3002 Shakeel Butt              2020-12-14  467  void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
c47d5032ed3002 Shakeel Butt              2020-12-14  468  			int val);
c47d5032ed3002 Shakeel Butt              2020-12-14  469  
c47d5032ed3002 Shakeel Butt              2020-12-14  470  static inline void mod_lruvec_state(struct lruvec *lruvec,
c47d5032ed3002 Shakeel Butt              2020-12-14  471  				    enum node_stat_item idx, int val)
c47d5032ed3002 Shakeel Butt              2020-12-14  472  {
c47d5032ed3002 Shakeel Butt              2020-12-14  473  	unsigned long flags;
c47d5032ed3002 Shakeel Butt              2020-12-14  474  
c47d5032ed3002 Shakeel Butt              2020-12-14  475  	local_irq_save(flags);
c47d5032ed3002 Shakeel Butt              2020-12-14  476  	__mod_lruvec_state(lruvec, idx, val);
c47d5032ed3002 Shakeel Butt              2020-12-14  477  	local_irq_restore(flags);
c47d5032ed3002 Shakeel Butt              2020-12-14  478  }
c47d5032ed3002 Shakeel Butt              2020-12-14  479  
c47d5032ed3002 Shakeel Butt              2020-12-14  480  void __mod_lruvec_page_state(struct page *page,
c47d5032ed3002 Shakeel Butt              2020-12-14  481  			     enum node_stat_item idx, int val);
c47d5032ed3002 Shakeel Butt              2020-12-14  482  
c47d5032ed3002 Shakeel Butt              2020-12-14  483  static inline void mod_lruvec_page_state(struct page *page,
c47d5032ed3002 Shakeel Butt              2020-12-14  484  					 enum node_stat_item idx, int val)
c47d5032ed3002 Shakeel Butt              2020-12-14  485  {
c47d5032ed3002 Shakeel Butt              2020-12-14  486  	unsigned long flags;
c47d5032ed3002 Shakeel Butt              2020-12-14  487  
c47d5032ed3002 Shakeel Butt              2020-12-14  488  	local_irq_save(flags);
c47d5032ed3002 Shakeel Butt              2020-12-14  489  	__mod_lruvec_page_state(page, idx, val);
c47d5032ed3002 Shakeel Butt              2020-12-14  490  	local_irq_restore(flags);
c47d5032ed3002 Shakeel Butt              2020-12-14  491  }
c47d5032ed3002 Shakeel Butt              2020-12-14  492  
c47d5032ed3002 Shakeel Butt              2020-12-14  493  #else
c47d5032ed3002 Shakeel Butt              2020-12-14  494  
c47d5032ed3002 Shakeel Butt              2020-12-14  495  static inline void __mod_lruvec_state(struct lruvec *lruvec,
c47d5032ed3002 Shakeel Butt              2020-12-14  496  				      enum node_stat_item idx, int val)
c47d5032ed3002 Shakeel Butt              2020-12-14  497  {
c47d5032ed3002 Shakeel Butt              2020-12-14  498  	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
c47d5032ed3002 Shakeel Butt              2020-12-14  499  }
c47d5032ed3002 Shakeel Butt              2020-12-14  500  
c47d5032ed3002 Shakeel Butt              2020-12-14  501  static inline void mod_lruvec_state(struct lruvec *lruvec,
c47d5032ed3002 Shakeel Butt              2020-12-14  502  				    enum node_stat_item idx, int val)
c47d5032ed3002 Shakeel Butt              2020-12-14  503  {
c47d5032ed3002 Shakeel Butt              2020-12-14  504  	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
c47d5032ed3002 Shakeel Butt              2020-12-14  505  }
c47d5032ed3002 Shakeel Butt              2020-12-14  506  
c47d5032ed3002 Shakeel Butt              2020-12-14  507  static inline void __mod_lruvec_page_state(struct page *page,
c47d5032ed3002 Shakeel Butt              2020-12-14  508  					   enum node_stat_item idx, int val)
c47d5032ed3002 Shakeel Butt              2020-12-14  509  {
c47d5032ed3002 Shakeel Butt              2020-12-14 @510  	__mod_node_page_state(page_pgdat(page), idx, val);
c47d5032ed3002 Shakeel Butt              2020-12-14  511  }
c47d5032ed3002 Shakeel Butt              2020-12-14  512  

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index b5c21585e35f..7d8c99023a82 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1331,21 +1331,31 @@ void do_user_addr_fault(struct pt_regs *regs,
 	count_vm_event(SPF_ATTEMPT);
 	seq = mmap_seq_read_start(mm);
-	if (seq & 1)
+	if (seq & 1) {
+		count_vm_spf_event(SPF_ABORT_ODD);
 		goto spf_abort;
+	}
 	rcu_read_lock();
 	vma = find_vma(mm, address);
-	if (!vma || vma->vm_start > address || !vma_is_anonymous(vma)) {
+	if (!vma || vma->vm_start > address) {
 		rcu_read_unlock();
+		count_vm_spf_event(SPF_ABORT_UNMAPPED);
+		goto spf_abort;
+	}
+	if (!vma_is_anonymous(vma)) {
+		rcu_read_unlock();
+		count_vm_spf_event(SPF_ABORT_NO_SPECULATE);
 		goto spf_abort;
 	}
 	pvma = *vma;
 	rcu_read_unlock();
-	if (!mmap_seq_read_check(mm, seq))
+	if (!mmap_seq_read_check(mm, seq, SPF_ABORT_VMA_COPY))
 		goto spf_abort;
 	vma = &pvma;
-	if (unlikely(access_error(error_code, vma)))
+	if (unlikely(access_error(error_code, vma))) {
+		count_vm_spf_event(SPF_ABORT_ACCESS_ERROR);
 		goto spf_abort;
+	}
 	fault = do_handle_mm_fault(vma, address,
 				   flags | FAULT_FLAG_SPECULATIVE, seq, regs);
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 8f4eca2d0f43..98f24a9910a9 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -7,6 +7,7 @@
 #include <linux/rwsem.h>
 #include <linux/tracepoint-defs.h>
 #include <linux/types.h>
+#include <linux/vmstat.h>
 
 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
 #define MMAP_LOCK_SEQ_INITIALIZER(name) \
@@ -104,12 +105,26 @@ static inline unsigned long mmap_seq_read_start(struct mm_struct *mm)
 	return seq;
 }
 
-static inline bool mmap_seq_read_check(struct mm_struct *mm, unsigned long seq)
+static inline bool __mmap_seq_read_check(struct mm_struct *mm,
+					 unsigned long seq)
 {
 	smp_rmb();
 	return seq == READ_ONCE(mm->mmap_seq);
 }
-#endif
+
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT_STATS
+static inline bool mmap_seq_read_check(struct mm_struct *mm, unsigned long seq,
+				       enum vm_event_item fail_event)
+{
+	if (__mmap_seq_read_check(mm, seq))
+		return true;
+	count_vm_event(fail_event);
+	return false;
+}
+#else
+#define mmap_seq_read_check(mm, seq, fail) __mmap_seq_read_check(mm, seq)
+#endif /* CONFIG_SPECULATIVE_PAGE_FAULT_STATS */
+#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
 
 static inline void mmap_write_lock(struct mm_struct *mm)
 {
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index cc4f8d14e43f..42e57db1623b 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -124,6 +124,29 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
 		SPF_ATTEMPT, SPF_ABORT,
 #endif
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT_STATS
+		SPF_ABORT_ODD,
+		SPF_ABORT_UNMAPPED,
+		SPF_ABORT_NO_SPECULATE,
+		SPF_ABORT_VMA_COPY,
+		SPF_ABORT_ACCESS_ERROR,
+		SPF_ABORT_PUD,
+		SPF_ABORT_PMD,
+		SPF_ABORT_ANON_VMA,
+		SPF_ABORT_PTE_MAP_LOCK_SEQ1,
+		SPF_ABORT_PTE_MAP_LOCK_PMD,
+		SPF_ABORT_PTE_MAP_LOCK_PTL,
+		SPF_ABORT_PTE_MAP_LOCK_SEQ2,
+		SPF_ABORT_USERFAULTFD,
+		SPF_ABORT_FAULT,
+		SPF_ABORT_NON_SWAP_ENTRY,
+		SPF_ABORT_SWAP_NOPAGE,
+		SPF_ATTEMPT_ANON,
+		SPF_ATTEMPT_SWAP,
+		SPF_ATTEMPT_NUMA,
+		SPF_ATTEMPT_PTE,
+		SPF_ATTEMPT_WP,
+#endif
 		NR_VM_EVENT_ITEMS
 };
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 506d625163a1..34e05604a93f 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -109,6 +109,12 @@ static inline void vm_events_fold_cpu(int cpu)
 
 #endif /* CONFIG_VM_EVENT_COUNTERS */
 
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT_STATS
+#define count_vm_spf_event(x) count_vm_event(x)
+#else
+#define count_vm_spf_event(x) do {} while (0)
+#endif
+
 #ifdef CONFIG_NUMA_BALANCING
 #define count_vm_numa_event(x)     count_vm_event(x)
 #define count_vm_numa_events(x, y) count_vm_events(x, y)
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index 1e73717802f8..6be8ca7950ee 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -150,3 +150,10 @@ config PTDUMP_DEBUGFS
 	  kernel.
 
 	  If in doubt, say N.
+
+config SPECULATIVE_PAGE_FAULT_STATS
+	bool "Additional statistics for speculative page faults"
+	depends on SPECULATIVE_PAGE_FAULT
+	help
+	  Additional statistics for speculative page faults.
+	  If in doubt, say N.
diff --git a/mm/memory.c b/mm/memory.c
index cf1a1c0196f0..838482b7ffc5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2603,7 +2603,8 @@ bool __pte_map_lock(struct vm_fault *vmf)
 	}
 
 	speculative_page_walk_begin();
-	if (!mmap_seq_read_check(vmf->vma->vm_mm, vmf->seq))
+	if (!mmap_seq_read_check(vmf->vma->vm_mm, vmf->seq,
+				 SPF_ABORT_PTE_MAP_LOCK_SEQ1))
 		goto fail;
 	/*
 	 * The mmap sequence count check guarantees that the page
@@ -2616,8 +2617,10 @@ bool __pte_map_lock(struct vm_fault *vmf)
 	 * is not a huge collapse operation in progress in our back.
 	 */
 	pmdval = READ_ONCE(*vmf->pmd);
-	if (!pmd_same(pmdval, vmf->orig_pmd))
+	if (!pmd_same(pmdval, vmf->orig_pmd)) {
+		count_vm_spf_event(SPF_ABORT_PTE_MAP_LOCK_PMD);
 		goto fail;
+	}
 #endif
 	ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
 	if (!pte)
@@ -2634,9 +2637,12 @@ bool __pte_map_lock(struct vm_fault *vmf)
 	 * We also don't want to retry until spin_trylock() succeeds,
 	 * because of the starvation potential against a stream of lockers.
 	 */
-	if (unlikely(!spin_trylock(ptl)))
+	if (unlikely(!spin_trylock(ptl))) {
+		count_vm_spf_event(SPF_ABORT_PTE_MAP_LOCK_PTL);
 		goto fail;
-	if (!mmap_seq_read_check(vmf->vma->vm_mm, vmf->seq))
+	}
+	if (!mmap_seq_read_check(vmf->vma->vm_mm, vmf->seq,
+				 SPF_ABORT_PTE_MAP_LOCK_SEQ2))
 		goto unlock_fail;
 	speculative_page_walk_end();
 	vmf->pte = pte;
@@ -2908,6 +2914,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 
 	if (unlikely(!vma->anon_vma)) {
 		if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
+			count_vm_spf_event(SPF_ABORT_ANON_VMA);
 			ret = VM_FAULT_RETRY;
 			goto out;
 		}
@@ -3170,10 +3177,15 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 
+	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
+		count_vm_spf_event(SPF_ATTEMPT_WP);
+
 	if (userfaultfd_pte_wp(vma, *vmf->pte)) {
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
-		if (vmf->flags & FAULT_FLAG_SPECULATIVE)
+		if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
+			count_vm_spf_event(SPF_ABORT_USERFAULTFD);
 			return VM_FAULT_RETRY;
+		}
 		return handle_userfault(vmf, VM_UFFD_WP);
 	}
 
@@ -3357,6 +3369,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	vm_fault_t ret = 0;
 	void *shadow = NULL;
 
+	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
+		count_vm_spf_event(SPF_ATTEMPT_SWAP);
+
 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
 	if (sizeof(pte_t) > sizeof(unsigned long)) {
 		/*
@@ -3383,6 +3398,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	entry = pte_to_swp_entry(vmf->orig_pte);
 	if (unlikely(non_swap_entry(entry))) {
 		if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
+			count_vm_spf_event(SPF_ABORT_NON_SWAP_ENTRY);
 			ret = VM_FAULT_RETRY;
 		} else if (is_migration_entry(entry)) {
 			migration_entry_wait(vma->vm_mm, vmf->pmd,
@@ -3409,6 +3425,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 
 		if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
 			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
+			count_vm_spf_event(SPF_ABORT_SWAP_NOPAGE);
 			return VM_FAULT_RETRY;
 		}
 
@@ -3615,6 +3632,9 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	vm_fault_t ret = 0;
 	pte_t entry;
 
+	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
+		count_vm_spf_event(SPF_ATTEMPT_ANON);
+
 	/* File mapping without ->vm_ops ? */
 	if (vma->vm_flags & VM_SHARED)
 		return VM_FAULT_SIGBUS;
@@ -3644,8 +3664,10 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	} else {
 		/* Allocate our own private page. */
 		if (unlikely(!vma->anon_vma)) {
-			if (vmf->flags & FAULT_FLAG_SPECULATIVE)
+			if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
+				count_vm_spf_event(SPF_ABORT_ANON_VMA);
 				return VM_FAULT_RETRY;
+			}
 			if (__anon_vma_prepare(vma))
 				goto oom;
 		}
@@ -3687,8 +3709,10 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 		if (page)
 			put_page(page);
-		if (vmf->flags & FAULT_FLAG_SPECULATIVE)
+		if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
+			count_vm_spf_event(SPF_ABORT_USERFAULTFD);
 			return VM_FAULT_RETRY;
+		}
 		return handle_userfault(vmf, VM_UFFD_MISSING);
 	}
 
@@ -4221,6 +4245,9 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 	bool was_writable = pte_savedwrite(vmf->orig_pte);
 	int flags = 0;
 
+	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
+		count_vm_spf_event(SPF_ATTEMPT_NUMA);
+
 	/*
 	 * The "pte" at this point cannot be used safely without
 	 * validation through pte_unmap_same(). It's of NUMA type but
@@ -4393,6 +4420,9 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
 	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
 		return do_numa_page(vmf);
 
+	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
+		count_vm_spf_event(SPF_ATTEMPT_PTE);
+
 	if (!pte_spinlock(vmf))
 		return VM_FAULT_RETRY;
 	entry = vmf->orig_pte;
@@ -4460,20 +4490,26 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 	speculative_page_walk_begin();
 	pgd = pgd_offset(mm, address);
 	pgdval = READ_ONCE(*pgd);
-	if (pgd_none(pgdval) || unlikely(pgd_bad(pgdval)))
+	if (pgd_none(pgdval) || unlikely(pgd_bad(pgdval))) {
+		count_vm_spf_event(SPF_ABORT_PUD);
 		goto spf_fail;
+	}
 
 	p4d = p4d_offset(pgd, address);
 	p4dval = READ_ONCE(*p4d);
-	if (p4d_none(p4dval) || unlikely(p4d_bad(p4dval)))
+	if (p4d_none(p4dval) || unlikely(p4d_bad(p4dval))) {
+		count_vm_spf_event(SPF_ABORT_PUD);
 		goto spf_fail;
+	}
 
 	vmf.pud = pud_offset(p4d, address);
 	pudval = READ_ONCE(*vmf.pud);
 	if (pud_none(pudval) || unlikely(pud_bad(pudval)) ||
 	    unlikely(pud_trans_huge(pudval)) ||
-	    unlikely(pud_devmap(pudval)))
+	    unlikely(pud_devmap(pudval))) {
+		count_vm_spf_event(SPF_ABORT_PUD);
 		goto spf_fail;
+	}
 
 	vmf.pmd = pmd_offset(vmf.pud, address);
 	vmf.orig_pmd = READ_ONCE(*vmf.pmd);
@@ -4491,8 +4527,10 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 	if (unlikely(pmd_none(vmf.orig_pmd) ||
 		     is_swap_pmd(vmf.orig_pmd) ||
 		     pmd_trans_huge(vmf.orig_pmd) ||
-		     pmd_devmap(vmf.orig_pmd)))
+		     pmd_devmap(vmf.orig_pmd))) {
+		count_vm_spf_event(SPF_ABORT_PMD);
 		goto spf_fail;
+	}
 
 	/*
 	 * The above does not allocate/instantiate page-tables because
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 9ae1c27a549e..dbaefae62da3 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1369,6 +1369,29 @@ const char * const vmstat_text[] = {
 	"spf_attempt",
 	"spf_abort",
 #endif
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT_STATS
+	"SPF_ABORT_ODD",
+	"SPF_ABORT_UNMAPPED",
+	"SPF_ABORT_NO_SPECULATE",
+	"SPF_ABORT_VMA_COPY",
+	"SPF_ABORT_ACCESS_ERROR",
+	"SPF_ABORT_PUD",
+	"SPF_ABORT_PMD",
+	"SPF_ABORT_ANON_VMA",
+	"SPF_ABORT_PTE_MAP_LOCK_SEQ1",
+	"SPF_ABORT_PTE_MAP_LOCK_PMD",
+	"SPF_ABORT_PTE_MAP_LOCK_PTL",
+	"SPF_ABORT_PTE_MAP_LOCK_SEQ2",
+	"SPF_ABORT_USERFAULTFD",
+	"SPF_ABORT_FAULT",
+	"SPF_ABORT_NON_SWAP_ENTRY",
+	"SPF_ABORT_SWAP_NOPAGE",
+	"SPF_ATTEMPT_ANON",
+	"SPF_ATTEMPT_SWAP",
+	"SPF_ATTEMPT_NUMA",
+	"SPF_ATTEMPT_PTE",
+	"SPF_ATTEMPT_WP",
+#endif
 #endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
 };
 #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */
Add a new CONFIG_SPECULATIVE_PAGE_FAULT_STATS config option, and dump
extra statistics about executed spf cases and abort reasons when the
option is set.

Signed-off-by: Michel Lespinasse <michel@lespinasse.org>
---
 arch/x86/mm/fault.c           | 18 ++++++++---
 include/linux/mmap_lock.h     | 19 +++++++++--
 include/linux/vm_event_item.h | 23 ++++++++++++++
 include/linux/vmstat.h        |  6 ++++
 mm/Kconfig.debug              |  7 ++++
 mm/memory.c                   | 60 ++++++++++++++++++++++++++++-------
 mm/vmstat.c                   | 23 ++++++++++++++
 7 files changed, 139 insertions(+), 17 deletions(-)