@@ -475,13 +475,6 @@ static void btrfs_discard_workfn(struct work_struct *work)
discard_ctl->discard_extent_bytes += trimmed;
}
- /*
- * Updated without locks as this is inside the workfn and nothing else
- * is reading the values
- */
- discard_ctl->prev_discard = trimmed;
- discard_ctl->prev_discard_time = ktime_get_ns();
-
/* Determine next steps for a block_group */
if (block_group->discard_cursor >= btrfs_block_group_end(block_group)) {
if (discard_state == BTRFS_DISCARD_BITMAPS) {
@@ -497,7 +490,10 @@ static void btrfs_discard_workfn(struct work_struct *work)
}
}
+ now = ktime_get_ns();
spin_lock(&discard_ctl->lock);
+ discard_ctl->prev_discard = trimmed;
+ discard_ctl->prev_discard_time = now;
discard_ctl->block_group = NULL;
spin_unlock(&discard_ctl->lock);
Because only one discard worker may be running at any given point, it would
have been safe to modify ->prev_discard and ->prev_discard_time without
synchronisation, if not for the @override flag in btrfs_discard_schedule_work()
and delayed_work_pending() returning false while the workfn is running. That
may lead to torn reads of the u64 values on some architectures, but it's not a
big problem as it only slightly affects the discard rate.

Suggested-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/btrfs/discard.c | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)
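For illustration only, here is a minimal userspace sketch (not kernel code) of
the locking pattern the patch moves to: the worker publishes prev_discard and
prev_discard_time under the same lock that the scheduling path takes before
reading them, so a reader can never observe a torn u64. A pthread mutex stands
in for discard_ctl->lock, ktime_get_ns_sketch() is a hypothetical stand-in for
ktime_get_ns(), and the struct and field names merely mirror the btrfs ones.

/*
 * Minimal sketch of "publish two u64 fields under the reader's lock".
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct discard_ctl {
	pthread_mutex_t lock;        /* stands in for discard_ctl->lock */
	uint64_t prev_discard;       /* bytes trimmed by the last pass */
	uint64_t prev_discard_time;  /* ns timestamp of the last pass */
};

static uint64_t ktime_get_ns_sketch(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Worker side: mirrors the tail of the workfn after the patch. */
static void workfn_publish(struct discard_ctl *ctl, uint64_t trimmed)
{
	uint64_t now = ktime_get_ns_sketch();   /* taken outside the lock */

	pthread_mutex_lock(&ctl->lock);
	ctl->prev_discard = trimmed;
	ctl->prev_discard_time = now;
	pthread_mutex_unlock(&ctl->lock);
}

/* Scheduler side: reads both values in one critical section. */
static uint64_t schedule_read(struct discard_ctl *ctl, uint64_t *when)
{
	uint64_t trimmed;

	pthread_mutex_lock(&ctl->lock);
	trimmed = ctl->prev_discard;
	*when = ctl->prev_discard_time;
	pthread_mutex_unlock(&ctl->lock);
	return trimmed;
}

int main(void)
{
	struct discard_ctl ctl = { .lock = PTHREAD_MUTEX_INITIALIZER };
	uint64_t when;

	workfn_publish(&ctl, 1u << 20);
	printf("prev_discard=%llu at %llu ns\n",
	       (unsigned long long)schedule_read(&ctl, &when),
	       (unsigned long long)when);
	return 0;
}

As in the second hunk above, the timestamp is computed before entering the
critical section, so the clock read does not lengthen the time spent holding
the lock.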