diff mbox

[v2] Btrfs: btrfs_release_extent_buffer_page didn't free pages of dummy extent

Message ID 1423051837-25290-1-git-send-email-forrestl@synology.com (mailing list archive)
State Superseded
Headers show

Commit Message

Forrest Liu Feb. 4, 2015, 12:10 p.m. UTC
btrfs_release_extent_buffer_page() can't properly handle dummy extent
buffers that are allocated by btrfs_clone_extent_buffer().

The reference count of pages allocated by btrfs_clone_extent_buffer()
was 2: one from alloc_page(), and another from attach_extent_buffer_page().

Signed-off-by: Chien-Kuan Yeh <ckya@synology.com>
Signed-off-by: Forrest Liu <forrestl@synology.com>
---
changelog:
v2: do not call PagePrivate() if page is NULL 

 fs/btrfs/extent_io.c | 51 ++++++++++++++++++++++++++-------------------------
 1 file changed, 26 insertions(+), 25 deletions(-)

Comments

Forrest Liu Feb. 4, 2015, 12:16 p.m. UTC | #1
Hi,

The following commands can reproduce the memory leak:

mntpath=/volume2
while [ 1 ]; do
        btrfs inspect-internal inode-resolve 256 $mntpath >/dev/null
done

The patch was tested on 3.19-rc7

Thanks,
Forrest

2015-02-04 20:10 GMT+08:00 Forrest Liu <forrestl@synology.com>:
> btrfs_release_extent_buffer_page() can't handle dummy extent that
> allocated by btrfs_clone_extent_buffer() properly.
>
> Reference count of pages that allocated by btrfs_clone_extent_buffer()
> was 2, 1 by alloc_page(), and another by attach_extent_buffer_page().
>
> Signed-off-by: Chien-Kuan Yeh <ckya@synology.com>
> Signed-off-by: Forrest Liu <forrestl@synology.com>
> ---
> changelog:
> v2: do not call PagePrivate() if page is NULL
>
>  fs/btrfs/extent_io.c | 51 ++++++++++++++++++++++++++-------------------------
>  1 file changed, 26 insertions(+), 25 deletions(-)
>
> diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
> index 790dbae..9de93ee 100644
> --- a/fs/btrfs/extent_io.c
> +++ b/fs/btrfs/extent_io.c
> @@ -4554,36 +4554,37 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
>         do {
>                 index--;
>                 page = eb->pages[index];
> -               if (page && mapped) {
> +               if (!page)
> +                       continue;
> +               if (mapped)
>                         spin_lock(&page->mapping->private_lock);
> +               /*
> +                * We do this since we'll remove the pages after we've
> +                * removed the eb from the radix tree, so we could race
> +                * and have this page now attached to the new eb.  So
> +                * only clear page_private if it's still connected to
> +                * this eb.
> +                */
> +               if (PagePrivate(page) &&
> +                   page->private == (unsigned long)eb) {
> +                       BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
> +                       BUG_ON(PageDirty(page));
> +                       BUG_ON(PageWriteback(page));
>                         /*
> -                        * We do this since we'll remove the pages after we've
> -                        * removed the eb from the radix tree, so we could race
> -                        * and have this page now attached to the new eb.  So
> -                        * only clear page_private if it's still connected to
> -                        * this eb.
> +                        * We need to make sure we haven't be attached
> +                        * to a new eb.
>                          */
> -                       if (PagePrivate(page) &&
> -                           page->private == (unsigned long)eb) {
> -                               BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
> -                               BUG_ON(PageDirty(page));
> -                               BUG_ON(PageWriteback(page));
> -                               /*
> -                                * We need to make sure we haven't be attached
> -                                * to a new eb.
> -                                */
> -                               ClearPagePrivate(page);
> -                               set_page_private(page, 0);
> -                               /* One for the page private */
> -                               page_cache_release(page);
> -                       }
> -                       spin_unlock(&page->mapping->private_lock);
> -
> -               }
> -               if (page) {
> -                       /* One for when we alloced the page */
> +                       ClearPagePrivate(page);
> +                       set_page_private(page, 0);
> +                       /* One for the page private */
>                         page_cache_release(page);
>                 }
> +
> +               if (mapped)
> +                       spin_unlock(&page->mapping->private_lock);
> +
> +               /* One for when we alloced the page */
> +               page_cache_release(page);
>         } while (index != 0);
>  }
>
> --
> 1.9.1
>
--
To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff mbox

Patch

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 790dbae..9de93ee 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4554,36 +4554,37 @@  static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
 	do {
 		index--;
 		page = eb->pages[index];
-		if (page && mapped) {
+		if (!page)
+			continue;
+		if (mapped)
 			spin_lock(&page->mapping->private_lock);
+		/*
+		 * We do this since we'll remove the pages after we've
+		 * removed the eb from the radix tree, so we could race
+		 * and have this page now attached to the new eb.  So
+		 * only clear page_private if it's still connected to
+		 * this eb.
+		 */
+		if (PagePrivate(page) &&
+		    page->private == (unsigned long)eb) {
+			BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
+			BUG_ON(PageDirty(page));
+			BUG_ON(PageWriteback(page));
 			/*
-			 * We do this since we'll remove the pages after we've
-			 * removed the eb from the radix tree, so we could race
-			 * and have this page now attached to the new eb.  So
-			 * only clear page_private if it's still connected to
-			 * this eb.
+			 * We need to make sure we haven't be attached
+			 * to a new eb.
 			 */
-			if (PagePrivate(page) &&
-			    page->private == (unsigned long)eb) {
-				BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
-				BUG_ON(PageDirty(page));
-				BUG_ON(PageWriteback(page));
-				/*
-				 * We need to make sure we haven't be attached
-				 * to a new eb.
-				 */
-				ClearPagePrivate(page);
-				set_page_private(page, 0);
-				/* One for the page private */
-				page_cache_release(page);
-			}
-			spin_unlock(&page->mapping->private_lock);
-
-		}
-		if (page) {
-			/* One for when we alloced the page */
+			ClearPagePrivate(page);
+			set_page_private(page, 0);
+			/* One for the page private */
 			page_cache_release(page);
 		}
+
+		if (mapped)
+			spin_unlock(&page->mapping->private_lock);
+
+		/* One for when we alloced the page */
+		page_cache_release(page);
 	} while (index != 0);
 }