@@ -3130,6 +3130,45 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
__free_extent_buffer(eb);
}
+
+/*
+ * We may reuse an extent buffer whose device space has been released. If the
+ * length of that extent buffer is smaller than we expect, we must enlarge it,
+ * and before doing so we must release any extent buffer that intersects the
+ * enlarged range.
+ *
+ * Don't worry about the state of the extent buffers that are about to be
+ * released: each is just a stale image left in memory. Its device space has
+ * already been released, otherwise btrfs could not have allocated that space
+ * to another extent buffer.
+ *
+ * Note: the caller must hold io_tree->buffer_lock.
+ */
+static int btrfs_enlarge_extent_buffer(struct extent_io_tree *tree,
+ struct extent_buffer *eb,
+ unsigned long newlen)
+{
+ struct rb_node *next;
+ struct extent_buffer *next_eb;
+
+ /* Release every extent buffer that intersects the enlarged range. */
+ next = rb_next(&eb->rb_node);
+ while (next) {
+ next_eb = rb_entry(next, struct extent_buffer, rb_node);
+ if (next_eb->start >= eb->start + newlen)
+ break;
+
+ /* Someone else still holds a reference, so we can't release it. */
+ if (atomic_read(&next_eb->refs) > 1)
+ return 1;
+
+ /* Fetch the successor before rb_erase() removes this node. */
+ next = rb_next(next);
+ rb_erase(&next_eb->rb_node, &tree->buffer);
+ btrfs_release_extent_buffer(next_eb);
+ }
+
+ /* No intersecting buffers remain; it is safe to grow this one. */
+ eb->len = newlen;
+ set_page_extent_head(eb->first_page, newlen);
+ return 0;
+}
+
struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
u64 start, unsigned long len,
struct page *page0,
@@ -3147,10 +3186,49 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
spin_lock(&tree->buffer_lock);
eb = buffer_search(tree, start);
if (eb) {
- atomic_inc(&eb->refs);
- spin_unlock(&tree->buffer_lock);
- mark_page_accessed(eb->first_page);
- return eb;
+ /*
+ * If this extent buffer's device space was released some
+ * time ago and has since been reallocated to store other
+ * metadata, but the buffer itself has not been released,
+ * we may find the old extent buffer here and reuse it.
+ *
+ * But we must adjust it to match the new length first.
+ */
+ if (eb->len >= len) {
+ if (eb->len > len) {
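+ /*
+ * Shrinking: num_pages was computed from the new,
+ * smaller len, so this releases every page beyond
+ * the new end of the buffer.
+ */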
+ btrfs_release_extent_buffer_page(eb, num_pages);
+
+ eb->len = len;
+ set_page_extent_head(eb->first_page, len);
+ }
+
+ atomic_inc(&eb->refs);
+ spin_unlock(&tree->buffer_lock);
+ mark_page_accessed(eb->first_page);
+ return eb;
+ } else {
+ int ret;
+
+ /*
+ * eb->len < len here, so this extent buffer is
+ * being reused as a new, larger one; nobody else
+ * may still hold a reference to it.
+ */
+ BUG_ON(atomic_read(&eb->refs) != 1);
+
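+ /*
+ * Pages of the old, shorter buffer are kept;
+ * resume page allocation just past them.
+ */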
+ i = num_extent_pages(eb->start, eb->len);
+ index += i;
+
+ ret = btrfs_enlarge_extent_buffer(tree, eb, len);
+ if (ret) {
+ /* A conflicting buffer is still in use; give up. */
+ spin_unlock(&tree->buffer_lock);
+ return NULL;
+ }
+
+ /*
+ * Take the buffer out of the tree; it is re-inserted
+ * below once all of its pages have been allocated.
+ */
+ rb_erase(&eb->rb_node, &tree->buffer);
+ spin_unlock(&tree->buffer_lock);
+ mark_page_accessed(eb->first_page);
+ goto eb_alloc_pages;
+ }
}
spin_unlock(&tree->buffer_lock);
@@ -3170,6 +3248,8 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
} else {
i = 0;
}
+
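+/* Pages below index i already exist; allocate the rest. */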
+eb_alloc_pages:
for (; i < num_pages; i++, index++) {
p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
if (!p) {
@@ -3197,6 +3277,8 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
/* add one reference for the caller */
atomic_inc(&exists->refs);
spin_unlock(&tree->buffer_lock);
+
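+ /* A racing allocation of the same range must use the same length. */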
+ BUG_ON(exists->len != eb->len);
goto free_eb;
}
/* add one reference for the tree */