@@ -830,7 +830,6 @@ static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
qrecord->data_rsv = reserved;
qrecord->data_rsv_refroot = generic_ref->ref_root;
}
- qrecord->bytenr = generic_ref->bytenr;
qrecord->num_bytes = generic_ref->num_bytes;
qrecord->old_roots = NULL;
}
@@ -861,11 +860,12 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
int ret;
ret = btrfs_qgroup_trace_extent_nolock(fs_info,
- delayed_refs, qrecord);
+ delayed_refs, qrecord,
+ head_ref->bytenr);
if (ret) {
/* Clean up if insertion fails or item exists. */
xa_release(&delayed_refs->dirty_extents,
- qrecord->bytenr >> fs_info->sectorsize_bits);
+ head_ref->bytenr >> fs_info->sectorsize_bits);
/* Caller responsible for freeing qrecord on error. */
if (ret < 0)
return ERR_PTR(ret);
@@ -1076,7 +1076,7 @@ static int add_delayed_ref(struct btrfs_trans_handle *trans,
kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
if (qrecord_inserted)
- return btrfs_qgroup_trace_extent_post(trans, record);
+ return btrfs_qgroup_trace_extent_post(trans, record, head_ref->bytenr);
return 0;
free_record:
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -2001,17 +2001,18 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
* Return <0 for insertion failure, caller can free @record safely.
*/
int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
- struct btrfs_delayed_ref_root *delayed_refs,
- struct btrfs_qgroup_extent_record *record)
+ struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_qgroup_extent_record *record,
+ u64 bytenr)
{
struct btrfs_qgroup_extent_record *existing, *ret;
- const unsigned long index = (record->bytenr >> fs_info->sectorsize_bits);
+ const unsigned long index = (bytenr >> fs_info->sectorsize_bits);
if (!btrfs_qgroup_full_accounting(fs_info))
return 1;
lockdep_assert_held(&delayed_refs->lock);
- trace_btrfs_qgroup_trace_extent(fs_info, record);
+ trace_btrfs_qgroup_trace_extent(fs_info, record, bytenr);
xa_lock(&delayed_refs->dirty_extents);
existing = xa_load(&delayed_refs->dirty_extents, index);
@@ -2056,7 +2057,8 @@ int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
* transaction committing, but not now as qgroup accounting will be wrong again.
*/
int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
- struct btrfs_qgroup_extent_record *qrecord)
+ struct btrfs_qgroup_extent_record *qrecord,
+ u64 bytenr)
{
struct btrfs_backref_walk_ctx ctx = { 0 };
int ret;
@@ -2087,7 +2089,7 @@ int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
if (trans->fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
return 0;
- ctx.bytenr = qrecord->bytenr;
+ ctx.bytenr = bytenr;
ctx.fs_info = trans->fs_info;
ret = btrfs_find_all_roots(&ctx, true);
@@ -2144,12 +2146,11 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
}
delayed_refs = &trans->transaction->delayed_refs;
- record->bytenr = bytenr;
record->num_bytes = num_bytes;
record->old_roots = NULL;
spin_lock(&delayed_refs->lock);
- ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
+ ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record, bytenr);
spin_unlock(&delayed_refs->lock);
if (ret) {
/* Clean up if insertion fails or item exists. */
@@ -2157,7 +2158,7 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
kfree(record);
return 0;
}
- return btrfs_qgroup_trace_extent_post(trans, record);
+ return btrfs_qgroup_trace_extent_post(trans, record, bytenr);
}
/*
@@ -3033,14 +3034,16 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
delayed_refs = &trans->transaction->delayed_refs;
qgroup_to_skip = delayed_refs->qgroup_to_skip;
xa_for_each(&delayed_refs->dirty_extents, index, record) {
+ const u64 bytenr = (index << fs_info->sectorsize_bits);
+
num_dirty_extents++;
- trace_btrfs_qgroup_account_extents(fs_info, record);
+ trace_btrfs_qgroup_account_extents(fs_info, record, bytenr);
if (!ret && !(fs_info->qgroup_flags &
BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)) {
struct btrfs_backref_walk_ctx ctx = { 0 };
- ctx.bytenr = record->bytenr;
+ ctx.bytenr = bytenr;
ctx.fs_info = fs_info;
/*
@@ -3082,7 +3085,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
ulist_del(record->old_roots, qgroup_to_skip,
0);
}
- ret = btrfs_qgroup_account_extent(trans, record->bytenr,
+ ret = btrfs_qgroup_account_extent(trans, bytenr,
record->num_bytes,
record->old_roots,
new_roots);
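
For reference, a minimal standalone sketch (not kernel code, and not part of the patch) of the bytenr/index round trip the hunks above rely on: the dirty_extents xarray is keyed by the extent bytenr right-shifted by fs_info->sectorsize_bits, so btrfs_qgroup_account_extents() can recover the bytenr by shifting the slot index left again. The sector size and bytenr values below are arbitrary examples.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Example only: 4K sectors, standing in for fs_info->sectorsize_bits. */
	const unsigned int sectorsize_bits = 12;
	/* Any sector-aligned extent start address. */
	const uint64_t bytenr = 0x40000000ULL;

	/* Storing a qgroup record: the xarray slot is the bytenr in sector units. */
	const unsigned long index = bytenr >> sectorsize_bits;

	/* Accounting the extents later: the bytenr is recovered from the slot index. */
	const uint64_t recovered = (uint64_t)index << sectorsize_bits;

	assert(recovered == bytenr);
	return 0;
}

This is also why the record's bytenr no longer needs to be stored in struct btrfs_qgroup_extent_record: it is implied by the record's position in the dirty_extents xarray.
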
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -125,7 +125,12 @@ struct btrfs_inode;
* Record a dirty extent, and info qgroup to update quota on it
*/
struct btrfs_qgroup_extent_record {
- u64 bytenr;
+ /*
+ * The bytenr of the extent is given by its index in the dirty_extents
+ * xarray of struct btrfs_delayed_ref_root left shifted by
+ * fs_info->sectorsize_bits.
+ */
+
u64 num_bytes;
/*
@@ -343,9 +348,11 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info);
int btrfs_qgroup_trace_extent_nolock(
struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
- struct btrfs_qgroup_extent_record *record);
+ struct btrfs_qgroup_extent_record *record,
+ u64 bytenr);
int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
- struct btrfs_qgroup_extent_record *qrecord);
+ struct btrfs_qgroup_extent_record *qrecord,
+ u64 bytenr);
int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
u64 num_bytes);
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -1706,9 +1706,10 @@ DEFINE_EVENT(btrfs__qgroup_rsv_data, btrfs_qgroup_release_data,
DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_qgroup_extent_record *rec),
+ const struct btrfs_qgroup_extent_record *rec,
+ u64 bytenr),
- TP_ARGS(fs_info, rec),
+ TP_ARGS(fs_info, rec, bytenr),
TP_STRUCT__entry_btrfs(
__field( u64, bytenr )
@@ -1716,7 +1717,7 @@ DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
),
TP_fast_assign_btrfs(fs_info,
- __entry->bytenr = rec->bytenr;
+ __entry->bytenr = bytenr;
__entry->num_bytes = rec->num_bytes;
),
@@ -1727,17 +1728,19 @@ DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_account_extents,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_qgroup_extent_record *rec),
+ const struct btrfs_qgroup_extent_record *rec,
+ u64 bytenr),
- TP_ARGS(fs_info, rec)
+ TP_ARGS(fs_info, rec, bytenr)
);
DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_trace_extent,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_qgroup_extent_record *rec),
+ const struct btrfs_qgroup_extent_record *rec,
+ u64 bytenr),
- TP_ARGS(fs_info, rec)
+ TP_ARGS(fs_info, rec, bytenr)
);
TRACE_EVENT(qgroup_num_dirty_extents,