@@ -26,6 +26,7 @@
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
+#include "dedupe.h"
static struct kmem_cache *btrfs_ordered_extent_cache;
@@ -184,7 +185,8 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
*/
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
u64 start, u64 len, u64 disk_len,
- int type, int dio, int compress_type)
+ int type, int dio, int compress_type,
+ struct btrfs_dedupe_hash *hash)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ordered_inode_tree *tree;
@@ -204,6 +206,33 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
entry->inode = igrab(inode);
entry->compress_type = compress_type;
entry->truncated_len = (u64)-1;
+ entry->hash = NULL;
+	/*
+	 * A hash hit means we have already incremented the extent's delayed
+	 * ref.
+	 * We must handle this even if another process is trying to
+	 * turn off dedupe; otherwise we will leak a reference.
+	 */
+ if (hash && (hash->bytenr || root->fs_info->dedupe_enabled)) {
+ struct btrfs_dedupe_info *dedupe_info;
+
+ dedupe_info = root->fs_info->dedupe_info;
+ if (WARN_ON(dedupe_info == NULL)) {
+ kmem_cache_free(btrfs_ordered_extent_cache,
+ entry);
+ return -EINVAL;
+ }
+ entry->hash = btrfs_dedupe_alloc_hash(dedupe_info->hash_algo);
+ if (!entry->hash) {
+ kmem_cache_free(btrfs_ordered_extent_cache, entry);
+ return -ENOMEM;
+ }
+ entry->hash->bytenr = hash->bytenr;
+ entry->hash->num_bytes = hash->num_bytes;
+ memcpy(entry->hash->hash, hash->hash,
+ btrfs_hash_sizes[dedupe_info->hash_algo]);
+ }
+
if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
set_bit(type, &entry->flags);
@@ -250,15 +279,23 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
{
return __btrfs_add_ordered_extent(inode, file_offset, start, len,
disk_len, type, 0,
- BTRFS_COMPRESS_NONE);
+ BTRFS_COMPRESS_NONE, NULL);
}
+int btrfs_add_ordered_extent_dedupe(struct inode *inode, u64 file_offset,
+ u64 start, u64 len, u64 disk_len, int type,
+ struct btrfs_dedupe_hash *hash)
+{
+ return __btrfs_add_ordered_extent(inode, file_offset, start, len,
+ disk_len, type, 0,
+ BTRFS_COMPRESS_NONE, hash);
+}
int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
u64 start, u64 len, u64 disk_len, int type)
{
return __btrfs_add_ordered_extent(inode, file_offset, start, len,
disk_len, type, 1,
- BTRFS_COMPRESS_NONE);
+ BTRFS_COMPRESS_NONE, NULL);
}
int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
@@ -267,7 +304,7 @@ int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
{
return __btrfs_add_ordered_extent(inode, file_offset, start, len,
disk_len, type, 0,
- compress_type);
+ compress_type, NULL);
}
/*
@@ -577,6 +614,7 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
list_del(&sum->list);
kfree(sum);
}
+ kfree(entry->hash);
kmem_cache_free(btrfs_ordered_extent_cache, entry);
}
}
@@ -139,6 +139,16 @@ struct btrfs_ordered_extent {
struct completion completion;
struct btrfs_work flush_work;
struct list_head work_list;
+
+	/*
+	 * For in-band deduplication.
+	 * If hash is NULL, there is no deduplication.
+	 * If hash->bytenr is zero, this is a dedupe miss; the hash will
+	 * be added into the dedupe tree.
+	 * If hash->bytenr is non-zero, this is a dedupe hit; the extent
+	 * ref has *ALREADY* been increased.
+	 */
+ struct btrfs_dedupe_hash *hash;
};
/*
@@ -172,6 +182,9 @@ int btrfs_dec_test_first_ordered_pending(struct inode *inode,
int uptodate);
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
u64 start, u64 len, u64 disk_len, int type);
+int btrfs_add_ordered_extent_dedupe(struct inode *inode, u64 file_offset,
+ u64 start, u64 len, u64 disk_len, int type,
+ struct btrfs_dedupe_hash *hash);
int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
u64 start, u64 len, u64 disk_len, int type);
int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,