
mm: migrate: fix getting incorrect page mapping during page migration

Message ID e60b17a88afc38cb32f84c3e30837ec70b343d2b.1702641709.git.baolin.wang@linux.alibaba.com (mailing list archive)
State New
Headers show
Series mm: migrate: fix getting incorrect page mapping during page migration

Commit Message

Baolin Wang Dec. 15, 2023, 12:07 p.m. UTC
When running stress-ng testing, we found the kernel crash below after a few hours:

Unable to handle kernel NULL pointer dereference at virtual address 0000000000000000
pc : dentry_name+0xd8/0x224
lr : pointer+0x22c/0x370
sp : ffff800025f134c0
......
Call trace:
  dentry_name+0xd8/0x224
  pointer+0x22c/0x370
  vsnprintf+0x1ec/0x730
  vscnprintf+0x2c/0x60
  vprintk_store+0x70/0x234
  vprintk_emit+0xe0/0x24c
  vprintk_default+0x3c/0x44
  vprintk_func+0x84/0x2d0
  printk+0x64/0x88
  __dump_page+0x52c/0x530
  dump_page+0x14/0x20
  set_migratetype_isolate+0x110/0x224
  start_isolate_page_range+0xc4/0x20c
  offline_pages+0x124/0x474
  memory_block_offline+0x44/0xf4
  memory_subsys_offline+0x3c/0x70
  device_offline+0xf0/0x120
  ......

After analyzing the vmcore, I found this issue is caused by page migration.
The scenario is that one thread is doing page migration, and we use the
target page's ->mapping field to save the 'anon_vma' pointer between page
unmap and page move; at this point the target page is locked and its
refcount is 1.

Meanwhile, another stress-ng thread is performing memory hotplug and attempts
to offline the target page that is being migrated. It finds that the refcount
of this target page is 1, which prevents the offline operation, so it proceeds
to dump the page. However, page_mapping() of the target page may return an
incorrect file mapping and crash the system in dump_mapping(), since the
target page->mapping only holds the 'anon_vma' pointer without the
PAGE_MAPPING_ANON flag set.
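
To see why the bare pointer slips through: folio_mapping(), which page_mapping()
wraps, distinguishes anonymous/movable folios from file folios only by the low
tag bits in ->mapping. A simplified paraphrase of that check (not the exact
upstream helper, which also handles slab and swapcache folios):

/*
 * Simplified paraphrase of folio_mapping()'s flag check, for illustration.
 */
static struct address_space *mapping_or_null(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;

	/*
	 * Anonymous, movable and KSM folios tag the low bits of ->mapping
	 * (PAGE_MAPPING_ANON / PAGE_MAPPING_MOVABLE), so they are filtered
	 * out here.
	 */
	if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
		return NULL;

	/*
	 * A raw anon_vma pointer stored during migration has none of those
	 * bits set, so it falls through and is returned as if it were a
	 * struct address_space *, which dump_mapping() then dereferences.
	 */
	return mapping;
}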

There are several ways to fix this issue:
(1) Setting the PAGE_MAPPING_ANON flag in the target page's ->mapping when
saving 'anon_vma', but this can confuse PageAnon() for PFN walkers, since the
target page has not built its mappings yet.
(2) Taking the page lock before calling page_mapping() in __dump_page() to
avoid crashing the system; however, some PFN walkers still call page_mapping()
without holding the page lock, such as compaction.
(3) Using the target page->private field to save the 'anon_vma' pointer and
the 2 bits of page state, just as page->mapping records an anonymous page,
which removes the page_mapping() impact on PFN walkers and also seems the
simplest approach.

So I chose option 3 to fix this issue; it can also fix other potential
issues for PFN walkers, such as compaction.
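
The packing in option 3 works because 'anon_vma' objects are allocated with at
least 4-byte alignment, so the two low bits of the pointer are always zero and
can carry the PAGE_WAS_MAPPED/PAGE_WAS_MLOCKED state. A minimal sketch of the
round trip (the helper names and the BUILD_BUG_ON are illustrative additions,
not part of the patch below, and PAGE_OLD_STATES is the mask the patch adds):

/* Sketch of the low-bit tagging the patch relies on; not the patch itself. */
static inline void *migration_pack(struct anon_vma *anon_vma, int old_page_state)
{
	/* The two low bits of the pointer must be free for this to be safe. */
	BUILD_BUG_ON(__alignof__(struct anon_vma) < 4);

	return (void *)((unsigned long)anon_vma |
			(old_page_state & PAGE_OLD_STATES));
}

static inline void migration_unpack(void *private, struct anon_vma **anon_vmap,
				    int *old_page_state)
{
	unsigned long val = (unsigned long)private;

	/* Both values are recovered by masking against PAGE_OLD_STATES. */
	*anon_vmap = (struct anon_vma *)(val & ~PAGE_OLD_STATES);
	*old_page_state = val & PAGE_OLD_STATES;
}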

Fixes: 64c8902ed441 ("migrate_pages: split unmap_and_move() to _unmap() and _move()")
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
 mm/migrate.c | 27 ++++++++++-----------------
 1 file changed, 10 insertions(+), 17 deletions(-)

Comments

Matthew Wilcox Dec. 15, 2023, 2:51 p.m. UTC | #1
On Fri, Dec 15, 2023 at 08:07:52PM +0800, Baolin Wang wrote:
> When running stress-ng testing, we found below kernel crash after a few hours:
> 
> Unable to handle kernel NULL pointer dereference at virtual address 0000000000000000
> pc : dentry_name+0xd8/0x224
> lr : pointer+0x22c/0x370
> sp : ffff800025f134c0
> ......
> Call trace:
>   dentry_name+0xd8/0x224
>   pointer+0x22c/0x370
>   vsnprintf+0x1ec/0x730
>   vscnprintf+0x2c/0x60
>   vprintk_store+0x70/0x234
>   vprintk_emit+0xe0/0x24c
>   vprintk_default+0x3c/0x44
>   vprintk_func+0x84/0x2d0
>   printk+0x64/0x88
>   __dump_page+0x52c/0x530
>   dump_page+0x14/0x20

[...]

> There are several ways to fix this issue:
> (1) Setting the PAGE_MAPPING_ANON flag for target page's ->mapping when saving
> 'anon_vma', but this can confuse PageAnon() for PFN walkers, since the target
> page has not built mappings yet.
> (2) Getting the page lock to call page_mapping() in __dump_page() to avoid crashing
> the system, however, there are still some PFN walkers that call page_mapping()
> without holding the page lock, such as compaction.
> (3) Using target page->private field to save the 'anon_vma' pointer and 2 bits
> page state, just as page->mapping records an anonymous page, which can remove
> the page_mapping() impact for PFN walkers and also seems a simple way.
> 
> So I choose option 3 to fix this issue, and this can also fix other potential
> issues for PFN walkers, such as compaction.

I'm not saying no to this fix, but dump_mapping() is supposed to be
resilient against this.  Is the issue that 'dentry' is NULL, or is it
some field within dentry that is NULL?  eg, would this fix your
case?

+++ b/fs/inode.c
@@ -588,7 +588,7 @@ void dump_mapping(const struct address_space *mapping)
        }

        dentry_ptr = container_of(dentry_first, struct dentry, d_u.d_alias);
-       if (get_kernel_nofault(dentry, dentry_ptr)) {
+       if (get_kernel_nofault(dentry, dentry_ptr) || !dentry) {
                pr_warn("aops:%ps ino:%lx invalid dentry:%px\n",
                                a_ops, ino, dentry_ptr);
                return;

Just to be clear, I think we should fix both the dumping and the migration
code.
Matthew Wilcox Dec. 15, 2023, 3:01 p.m. UTC | #2
On Fri, Dec 15, 2023 at 02:51:43PM +0000, Matthew Wilcox wrote:
> I'm not saying no to this fix, but dump_mapping() is supposed to be
> resilient against this.  Is the issue that 'dentry' is NULL, or is it
> some field within dentry that is NULL?  eg, would this fix your
> case?

Uh, dentry is an on-stack dentry.  So obviously it's a pointer within it
that's NULL.  Maybe this, having stared at the implementation of %pd?

+++ b/fs/inode.c
@@ -588,7 +588,8 @@ void dump_mapping(const struct address_space *mapping)
        }

        dentry_ptr = container_of(dentry_first, struct dentry, d_u.d_alias);
-       if (get_kernel_nofault(dentry, dentry_ptr)) {
+       if (get_kernel_nofault(dentry, dentry_ptr) ||
+           !dentry->d_parent || !dentry->d_name) {
                pr_warn("aops:%ps ino:%lx invalid dentry:%px\n",
                                a_ops, ino, dentry_ptr);
                return;
Baolin Wang Dec. 18, 2023, 3:34 a.m. UTC | #3
On 12/15/2023 11:01 PM, Matthew Wilcox wrote:
> On Fri, Dec 15, 2023 at 02:51:43PM +0000, Matthew Wilcox wrote:
>> I'm not saying no to this fix, but dump_mapping() is supposed to be
>> resilient against this.  Is the issue that 'dentry' is NULL, or is it
>> some field within dentry that is NULL?  eg, would this fix your
>> case?
> 
> Uh, dentry is an on-stack dentry.  So obviously it's a pointer within it
> that's NULL.  Maybe this, having stared at the implementation of %pd?

Yes, I think the below check can avoid the crash in dentry_name() used by '%pd'.
Maybe we should also initialize the on-stack dentry with:
struct dentry dentry = { };

I can send a patch with your suggestion. Thanks.

> +++ b/fs/inode.c
> @@ -588,7 +588,8 @@ void dump_mapping(const struct address_space *mapping)
>          }
> 
>          dentry_ptr = container_of(dentry_first, struct dentry, d_u.d_alias);
> -       if (get_kernel_nofault(dentry, dentry_ptr)) {
> +       if (get_kernel_nofault(dentry, dentry_ptr) ||
> +           !dentry->d_parent || !dentry->d_name) {
>                  pr_warn("aops:%ps ino:%lx invalid dentry:%px\n",
>                                  a_ops, ino, dentry_ptr);
>                  return;
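
Combining the two suggestions in this sub-thread (the extra NULL checks plus
the zero-initialized on-stack dentry), the relevant part of dump_mapping()
might end up looking roughly like the sketch below. Since d_name is an embedded
struct qstr rather than a pointer, the field checked here is d_name.name; this
is an assumption about a follow-up fs/inode.c patch, not the diff quoted above:

	/* Sketch only: hardened copy of the dentry before printing with %pd. */
	struct dentry dentry = { };	/* zero-init, so %pd never sees stack garbage */

	dentry_ptr = container_of(dentry_first, struct dentry, d_u.d_alias);
	if (get_kernel_nofault(dentry, dentry_ptr) ||
	    !dentry.d_parent || !dentry.d_name.name) {
		pr_warn("aops:%ps ino:%lx invalid dentry:%px\n",
				a_ops, ino, dentry_ptr);
		return;
	}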
Huang, Ying Dec. 18, 2023, 5:19 a.m. UTC | #4
Baolin Wang <baolin.wang@linux.alibaba.com> writes:

> When running stress-ng testing, we found below kernel crash after a few hours:
>
> Unable to handle kernel NULL pointer dereference at virtual address 0000000000000000
> pc : dentry_name+0xd8/0x224
> lr : pointer+0x22c/0x370
> sp : ffff800025f134c0
> ......
> Call trace:
>   dentry_name+0xd8/0x224
>   pointer+0x22c/0x370
>   vsnprintf+0x1ec/0x730
>   vscnprintf+0x2c/0x60
>   vprintk_store+0x70/0x234
>   vprintk_emit+0xe0/0x24c
>   vprintk_default+0x3c/0x44
>   vprintk_func+0x84/0x2d0
>   printk+0x64/0x88
>   __dump_page+0x52c/0x530
>   dump_page+0x14/0x20
>   set_migratetype_isolate+0x110/0x224
>   start_isolate_page_range+0xc4/0x20c
>   offline_pages+0x124/0x474
>   memory_block_offline+0x44/0xf4
>   memory_subsys_offline+0x3c/0x70
>   device_offline+0xf0/0x120
>   ......
>
> After analyzing the vmcore, I found this issue is caused by page migration.
> The scenario is that, one thread is doing page migration, and we will use the
> target page's ->mapping field to save 'anon_vma' pointer between page unmap and
> page move, and now the target page is locked and refcount is 1.
>
> Currently, there is another stress-ng thread performing memory hotplug,
> attempting to offline the target page that is being migrated. It discovers that
> the refcount of this target page is 1, preventing the offline operation, thus
> proceeding to dump the page. However, page_mapping() of the target page may
> return an incorrect file mapping to crash the system in dump_mapping(), since
> the target page->mapping only saves 'anon_vma' pointer without setting
> PAGE_MAPPING_ANON flag.
>
> There are several ways to fix this issue:
> (1) Setting the PAGE_MAPPING_ANON flag for target page's ->mapping when saving
> 'anon_vma', but this can confuse PageAnon() for PFN walkers, since the target
> page has not built mappings yet.
> (2) Getting the page lock to call page_mapping() in __dump_page() to avoid crashing
> the system, however, there are still some PFN walkers that call page_mapping()
> without holding the page lock, such as compaction.
> (3) Using target page->private field to save the 'anon_vma' pointer and 2 bits
> page state, just as page->mapping records an anonymous page, which can remove
> the page_mapping() impact for PFN walkers and also seems a simple way.
>
> So I choose option 3 to fix this issue, and this can also fix other potential
> issues for PFN walkers, such as compaction.
>
> Fixes: 64c8902ed441 ("migrate_pages: split unmap_and_move() to _unmap() and _move()")
> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>

Good catch!  Thanks!

Reviewed-by: "Huang, Ying" <ying.huang@intel.com>

> ---
>  mm/migrate.c | 27 ++++++++++-----------------
>  1 file changed, 10 insertions(+), 17 deletions(-)
>
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 397f2a6e34cb..bad3039d165e 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -1025,38 +1025,31 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
>  }
>  
>  /*
> - * To record some information during migration, we use some unused
> - * fields (mapping and private) of struct folio of the newly allocated
> - * destination folio.  This is safe because nobody is using them
> - * except us.
> + * To record some information during migration, we use unused private
> + * field of struct folio of the newly allocated destination folio.
> + * This is safe because nobody is using it except us.
>   */
> -union migration_ptr {
> -	struct anon_vma *anon_vma;
> -	struct address_space *mapping;
> -};
> -
>  enum {
>  	PAGE_WAS_MAPPED = BIT(0),
>  	PAGE_WAS_MLOCKED = BIT(1),
> +	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
>  };
>  
>  static void __migrate_folio_record(struct folio *dst,
> -				   unsigned long old_page_state,
> +				   int old_page_state,
>  				   struct anon_vma *anon_vma)
>  {
> -	union migration_ptr ptr = { .anon_vma = anon_vma };
> -	dst->mapping = ptr.mapping;
> -	dst->private = (void *)old_page_state;
> +	dst->private = (void *)anon_vma + old_page_state;
>  }
>  
>  static void __migrate_folio_extract(struct folio *dst,
>  				   int *old_page_state,
>  				   struct anon_vma **anon_vmap)
>  {
> -	union migration_ptr ptr = { .mapping = dst->mapping };
> -	*anon_vmap = ptr.anon_vma;
> -	*old_page_state = (unsigned long)dst->private;
> -	dst->mapping = NULL;
> +	unsigned long private = (unsigned long)dst->private;
> +
> +	*anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
> +	*old_page_state = private & PAGE_OLD_STATES;
>  	dst->private = NULL;
>  }

Patch

diff --git a/mm/migrate.c b/mm/migrate.c
index 397f2a6e34cb..bad3039d165e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1025,38 +1025,31 @@  static int move_to_new_folio(struct folio *dst, struct folio *src,
 }
 
 /*
- * To record some information during migration, we use some unused
- * fields (mapping and private) of struct folio of the newly allocated
- * destination folio.  This is safe because nobody is using them
- * except us.
+ * To record some information during migration, we use unused private
+ * field of struct folio of the newly allocated destination folio.
+ * This is safe because nobody is using it except us.
  */
-union migration_ptr {
-	struct anon_vma *anon_vma;
-	struct address_space *mapping;
-};
-
 enum {
 	PAGE_WAS_MAPPED = BIT(0),
 	PAGE_WAS_MLOCKED = BIT(1),
+	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
 };
 
 static void __migrate_folio_record(struct folio *dst,
-				   unsigned long old_page_state,
+				   int old_page_state,
 				   struct anon_vma *anon_vma)
 {
-	union migration_ptr ptr = { .anon_vma = anon_vma };
-	dst->mapping = ptr.mapping;
-	dst->private = (void *)old_page_state;
+	dst->private = (void *)anon_vma + old_page_state;
 }
 
 static void __migrate_folio_extract(struct folio *dst,
 				   int *old_page_state,
 				   struct anon_vma **anon_vmap)
 {
-	union migration_ptr ptr = { .mapping = dst->mapping };
-	*anon_vmap = ptr.anon_vma;
-	*old_page_state = (unsigned long)dst->private;
-	dst->mapping = NULL;
+	unsigned long private = (unsigned long)dst->private;
+
+	*anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
+	*old_page_state = private & PAGE_OLD_STATES;
 	dst->private = NULL;
 }
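
For context on how these two helpers are used (paraphrased from mm/migrate.c
around this hunk, not part of the diff): the unmap stage records the state into
dst->private and the move stage recovers it, roughly:

	/* In migrate_folio_unmap(), after unmapping the source folio: */
	__migrate_folio_record(dst, old_page_state, anon_vma);

	/* Later, in migrate_folio_move(): */
	__migrate_folio_extract(dst, &old_page_state, &anon_vma);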