@@ -172,7 +172,7 @@ struct scrub_stripe {
};
struct scrub_ctx {
- struct scrub_stripe stripes[SCRUB_STRIPES_PER_SCTX];
+ struct scrub_stripe *stripes;
struct scrub_stripe *raid56_data_stripes;
struct btrfs_fs_info *fs_info;
int first_free;
@@ -181,6 +181,9 @@ struct scrub_ctx {
int readonly;
int sectors_per_bio;
+ /* Number of stripes we have in @stripes. */
+ int nr_stripes;
+
/* State of IO submission throttling affecting the associated device */
ktime_t throttle_deadline;
u64 throttle_sent;
@@ -308,16 +311,24 @@ static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
scrub_pause_off(fs_info);
}
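+/*
+ * Free the stripes array allocated by alloc_scrub_stripes().
+ *
+ * Safe to call even if the array was never allocated, and leaves @sctx
+ * ready for another alloc_scrub_stripes() call.
+ */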
+static void free_scrub_stripes(struct scrub_ctx *sctx)
+{
+ if (!sctx->stripes)
+ return;
+
+ for (int i = 0; i < sctx->nr_stripes; i++)
+ release_scrub_stripe(&sctx->stripes[i]);
+ kfree(sctx->stripes);
+ /* Also reset cur_stripe so a stale queue index cannot outlive the array. */
+ sctx->cur_stripe = 0;
+ sctx->nr_stripes = 0;
+ sctx->stripes = NULL;
+}
+
static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
- int i;
-
if (!sctx)
return;
- for (i = 0; i < SCRUB_STRIPES_PER_SCTX; i++)
- release_scrub_stripe(&sctx->stripes[i]);
-
+ free_scrub_stripes(sctx);
kfree(sctx);
}
@@ -331,7 +342,6 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
struct btrfs_fs_info *fs_info, int is_dev_replace)
{
struct scrub_ctx *sctx;
- int i;
sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
if (!sctx)
@@ -339,14 +349,6 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
refcount_set(&sctx->refs, 1);
sctx->is_dev_replace = is_dev_replace;
sctx->fs_info = fs_info;
- for (i = 0; i < SCRUB_STRIPES_PER_SCTX; i++) {
- int ret;
-
- ret = init_scrub_stripe(fs_info, &sctx->stripes[i]);
- if (ret < 0)
- goto nomem;
- sctx->stripes[i].sctx = sctx;
- }
sctx->first_free = 0;
atomic_set(&sctx->cancel_req, 0);
@@ -1659,6 +1661,7 @@ static int flush_scrub_stripes(struct scrub_ctx *sctx)
const int nr_stripes = sctx->cur_stripe;
int ret = 0;
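+ /* The queued stripe count can never exceed the size of the @stripes array. */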
+ ASSERT(nr_stripes <= sctx->nr_stripes);
if (!nr_stripes)
return 0;
@@ -1753,8 +1756,11 @@ static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *
struct scrub_stripe *stripe;
int ret;
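+ /* scrub_stripe() must have allocated the stripes array by this point. */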
+ ASSERT(sctx->stripes);
+ ASSERT(sctx->nr_stripes);
+
/* No available slot, submit all stripes and wait for them. */
- if (sctx->cur_stripe >= SCRUB_STRIPES_PER_SCTX) {
+ if (sctx->cur_stripe >= sctx->nr_stripes) {
ret = flush_scrub_stripes(sctx);
if (ret < 0)
return ret;
@@ -2076,6 +2082,30 @@ static int scrub_simple_stripe(struct scrub_ctx *sctx,
return ret;
}
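+/*
+ * Allocate and initialize @nr_stripes stripes for @sctx.
+ *
+ * On failure, everything allocated so far is released and @sctx is left
+ * as if the call had never happened.
+ */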
+static int alloc_scrub_stripes(struct scrub_ctx *sctx, int nr_stripes)
+{
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
+ int ret;
+
+ ASSERT(!sctx->stripes);
+ ASSERT(!sctx->nr_stripes);
+ sctx->stripes = kcalloc(nr_stripes, sizeof(*sctx->stripes),
+ GFP_KERNEL);
+ if (!sctx->stripes)
+ return -ENOMEM;
+ sctx->nr_stripes = nr_stripes;
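+ /*
+ * The array is zeroed by kcalloc(), so on failure the cleanup path can
+ * safely call release_scrub_stripe() even on never-initialized slots.
+ */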
+ for (int i = 0; i < sctx->nr_stripes; i++) {
+ ret = init_scrub_stripe(fs_info, &sctx->stripes[i]);
+ if (ret < 0)
+ goto cleanup;
+ sctx->stripes[i].sctx = sctx;
+ }
+ return 0;
+cleanup:
+ free_scrub_stripes(sctx);
+ return ret;
+}
+
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
struct btrfs_block_group *bg,
struct extent_map *em,
@@ -2102,6 +2132,10 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
scrub_blocked_if_needed(fs_info);
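+ /*
+ * The stripes array is allocated here instead of scrub_setup_ctx(),
+ * and freed again before this function returns.
+ */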
+ ret = alloc_scrub_stripes(sctx, SCRUB_STRIPES_PER_SCTX);
+ if (ret < 0)
+ return ret;
+
if (sctx->is_dev_replace &&
btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
mutex_lock(&sctx->wr_lock);
@@ -2224,6 +2258,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
kfree(sctx->raid56_data_stripes);
sctx->raid56_data_stripes = NULL;
}
+ free_scrub_stripes(sctx);
if (sctx->is_dev_replace && ret >= 0) {
int ret2;