--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -87,7 +87,7 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
{
struct cache *ca;
struct bucket *b;
- unsigned long next = c->nbuckets * c->sb.bucket_size / 1024;
+ unsigned long next = c->nbuckets * c->cache->sb.bucket_size / 1024;
int r;
atomic_sub(sectors, &c->rescale);
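The three alloc.c hunks in this file are the mechanical half of the patch: struct cache_set is about to lose its private copy of struct cache_sb (see the bcache.h hunk below), so every c->sb.field access becomes c->cache->sb.field, reading the superblock through the set's single cache device. Since bcache only ever attaches one cache device to a set, the two copies always described the same superblock, and keeping both meant keeping them synchronized.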
@@ -583,7 +583,7 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
struct open_bucket, list);
found:
if (!ret->sectors_free && KEY_PTRS(alloc)) {
- ret->sectors_free = c->sb.bucket_size;
+ ret->sectors_free = c->cache->sb.bucket_size;
bkey_copy(&ret->key, alloc);
bkey_init(alloc);
}
@@ -677,7 +677,7 @@ bool bch_alloc_sectors(struct cache_set *c,
&PTR_CACHE(c, &b->key, i)->sectors_written);
}
- if (b->sectors_free < c->sb.block_size)
+ if (b->sectors_free < c->cache->sb.block_size)
b->sectors_free = 0;
/*
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -517,8 +517,6 @@ struct cache_set {
atomic_t idle_counter;
atomic_t at_max_writeback_rate;
- struct cache_sb sb;
-
struct cache *cache;
struct bcache_device **devices;
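This bcache.h hunk is the core removal. A minimal sketch of the resulting ownership, with simplified fields and assumed comments rather than the full kernel definitions:

	struct cache_sb {
		unsigned int	block_size;	/* in sectors */
		unsigned int	bucket_size;	/* in sectors */
		/* ... */
	};

	struct cache_set;			/* forward declaration */

	struct cache {				/* one per cache device */
		struct cache_sb	sb;		/* the only in-memory copy */
		struct cache_set *set;		/* owning set */
		/* ... */
	};

	struct cache_set {
		struct cache	*cache;		/* single cache per set */
		/* struct cache_sb sb;   <- removed by this patch */
		/* ... */
	};

With the embedded copy gone there is exactly one in-memory superblock to keep current, which is what lets bcache_write_super() shrink later in this patch.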
@@ -799,7 +797,7 @@ static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
{
- return s & (c->sb.bucket_size - 1);
+ return s & (c->cache->sb.bucket_size - 1);
}
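bucket_remainder() can mask instead of dividing because bucket_size is always a power of two (bch_cache_set_alloc() below keeps c->bucket_bits = ilog2(sb->bucket_size), and journal_reclaim() uses the matching shift, bucket_size >> c->block_bits, for a power-of-two division). The same mask idiom detects misaligned I/O in check_should_bypass() further down. A standalone illustration, assuming 1024-sector buckets:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t bucket_size = 1024;	/* sectors; power of two */
		unsigned int bucket_bits = 10;	/* ilog2(bucket_size) */
		uint64_t s = 123456789;		/* arbitrary sector offset */

		/* x % 2^n == x & (2^n - 1), and x / 2^n == x >> n */
		assert((s & (bucket_size - 1)) == s % bucket_size);
		assert((s >> bucket_bits) == s / bucket_size);
		return 0;
	}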
static inline struct cache *PTR_CACHE(struct cache_set *c,
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -117,7 +117,7 @@ static void bch_btree_init_next(struct btree *b)
if (b->written < btree_blocks(b))
bch_bset_init_next(&b->keys, write_block(b),
- bset_magic(&b->c->sb));
+ bset_magic(&b->c->cache->sb));
}
@@ -155,7 +155,7 @@ void bch_btree_node_read_done(struct btree *b)
* See the comment around cache_set->fill_iter.
*/
iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
- iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
+ iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
iter->used = 0;
#ifdef CONFIG_BCACHE_DEBUG
@@ -178,7 +178,7 @@ void bch_btree_node_read_done(struct btree *b)
goto err;
err = "bad magic";
- if (i->magic != bset_magic(&b->c->sb))
+ if (i->magic != bset_magic(&b->c->cache->sb))
goto err;
err = "bad checksum";
@@ -219,7 +219,7 @@ void bch_btree_node_read_done(struct btree *b)
if (b->written < btree_blocks(b))
bch_bset_init_next(&b->keys, write_block(b),
- bset_magic(&b->c->sb));
+ bset_magic(&b->c->cache->sb));
out:
mempool_free(iter, &b->c->fill_iter);
return;
@@ -423,7 +423,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
do_btree_node_write(b);
- atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->sb.block_size,
+ atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size,
&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
b->written += set_blocks(i, block_bytes(b->c->cache));
@@ -738,7 +738,7 @@ void bch_btree_cache_free(struct cache_set *c)
if (c->verify_data)
list_move(&c->verify_data->list, &c->btree_cache);
- free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->sb)));
+ free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb)));
#endif
list_splice(&c->btree_cache_freeable,
@@ -785,7 +785,8 @@ int bch_btree_cache_alloc(struct cache_set *c)
mutex_init(&c->verify_lock);
c->verify_ondisk = (void *)
- __get_free_pages(GFP_KERNEL|__GFP_COMP, ilog2(meta_bucket_pages(&c->sb)));
+ __get_free_pages(GFP_KERNEL|__GFP_COMP,
+ ilog2(meta_bucket_pages(&c->cache->sb)));
if (!c->verify_ondisk) {
/*
* Don't worry about the mca_rereserve buckets
@@ -1108,7 +1109,7 @@ struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
}
b->parent = parent;
- bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
+ bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb));
mutex_unlock(&c->bucket_lock);
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -194,7 +194,7 @@ static inline unsigned int bset_block_offset(struct btree *b, struct bset *i)
static inline void set_gc_sectors(struct cache_set *c)
{
- atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
+ atomic_set(&c->sectors_to_gc, c->cache->sb.bucket_size * c->nbuckets / 16);
}
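set_gc_sectors() arms the incremental garbage-collection trigger at one sixteenth of the cache's capacity; only the source of bucket_size changes here, not the policy. As a worked example (illustrative numbers): 100000 buckets of 1024 sectors is 102,400,000 sectors, about 48.8 GiB at 512-byte sectors, so sectors_to_gc starts at 6,400,000 sectors and GC is rescheduled after roughly 3.05 GiB of writes.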
void bkey_put(struct cache_set *c, struct bkey *k);
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -54,7 +54,7 @@ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
size_t bucket = PTR_BUCKET_NR(c, k, i);
size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
- if (KEY_SIZE(k) + r > c->sb.bucket_size ||
+ if (KEY_SIZE(k) + r > c->cache->sb.bucket_size ||
bucket < ca->sb.first_bucket ||
bucket >= ca->sb.nbuckets)
return true;
@@ -75,7 +75,7 @@ static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
size_t bucket = PTR_BUCKET_NR(c, k, i);
size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
- if (KEY_SIZE(k) + r > c->sb.bucket_size)
+ if (KEY_SIZE(k) + r > c->cache->sb.bucket_size)
return "bad, length too big";
if (bucket < ca->sb.first_bucket)
return "bad, short offset";
@@ -136,7 +136,7 @@ static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
size_t n = PTR_BUCKET_NR(b->c, k, j);
pr_cont(" bucket %zu", n);
- if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
+ if (n >= b->c->cache->sb.first_bucket && n < b->c->cache->sb.nbuckets)
pr_cont(" prio %i",
PTR_BUCKET(b->c, k, j)->prio);
}
--- a/drivers/md/bcache/features.c
+++ b/drivers/md/bcache/features.c
@@ -30,7 +30,7 @@ static struct feature feature_list[] = {
for (f = &feature_list[0]; f->compat != 0; f++) { \
if (f->compat != BCH_FEATURE_ ## type) \
continue; \
- if (BCH_HAS_ ## type ## _FEATURE(&c->sb, f->mask)) { \
+ if (BCH_HAS_ ## type ## _FEATURE(&c->cache->sb, f->mask)) { \
if (first) { \
out += snprintf(out, buf + size - out, \
"["); \
@@ -44,7 +44,7 @@ static struct feature feature_list[] = {
\
out += snprintf(out, buf + size - out, "%s", f->string);\
\
- if (BCH_HAS_ ## type ## _FEATURE(&c->sb, f->mask)) \
+ if (BCH_HAS_ ## type ## _FEATURE(&c->cache->sb, f->mask)) \
out += snprintf(out, buf + size - out, "]"); \
\
first = false; \
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -26,7 +26,7 @@ struct bio *bch_bbio_alloc(struct cache_set *c)
struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
struct bio *bio = &b->bio;
- bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->sb));
+ bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->cache->sb));
return bio;
}
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -666,7 +666,7 @@ static void journal_reclaim(struct cache_set *c)
bkey_init(k);
SET_KEY_PTRS(k, 1);
- c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
+ c->journal.blocks_free = ca->sb.bucket_size >> c->block_bits;
out:
if (!journal_full(&c->journal))
@@ -735,7 +735,7 @@ static void journal_write_unlocked(struct closure *cl)
struct journal_write *w = c->journal.cur;
struct bkey *k = &c->journal.key;
unsigned int i, sectors = set_blocks(w->data, block_bytes(ca)) *
- c->sb.block_size;
+ ca->sb.block_size;
struct bio *bio;
struct bio_list list;
@@ -762,7 +762,7 @@ static void journal_write_unlocked(struct closure *cl)
bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);
w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
- w->data->magic = jset_magic(&c->sb);
+ w->data->magic = jset_magic(&ca->sb);
w->data->version = BCACHE_JSET_VERSION;
w->data->last_seq = last_seq(&c->journal);
w->data->csum = csum_set(w->data);
@@ -838,6 +838,7 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
size_t sectors;
struct closure cl;
bool wait = false;
+ struct cache *ca = c->cache;
closure_init_stack(&cl);
@@ -847,10 +848,10 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
struct journal_write *w = c->journal.cur;
sectors = __set_blocks(w->data, w->data->keys + nkeys,
- block_bytes(c->cache)) * c->sb.block_size;
+ block_bytes(ca)) * ca->sb.block_size;
if (sectors <= min_t(size_t,
- c->journal.blocks_free * c->sb.block_size,
+ c->journal.blocks_free * ca->sb.block_size,
PAGE_SECTORS << JSET_BITS))
return w;
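Hoisting struct cache *ca = c->cache to the top of journal_wait_for_write() saves repeating the c->cache dereference inside the retry loop. The check itself caps a prospective journal entry at whichever is smaller: the free journal space on disk or the fixed in-memory jset buffer. A minimal userspace sketch of that comparison, assuming 4 KiB pages (so PAGE_SECTORS is 8) and bcache's JSET_BITS of 3:

	#include <stdbool.h>
	#include <stddef.h>

	#define PAGE_SECTORS	8	/* 4 KiB page / 512 B sectors (assumed) */
	#define JSET_BITS	3	/* jset buffer spans 1 << 3 pages */

	/* Does an entry of 'sectors' fit in the current journal write? */
	static bool jset_fits(size_t sectors, size_t blocks_free,
			      size_t block_sectors)
	{
		size_t disk_space = blocks_free * block_sectors;
		size_t buf_space = (size_t)PAGE_SECTORS << JSET_BITS;

		return sectors <= (disk_space < buf_space ? disk_space
							  : buf_space);
	}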
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -394,8 +394,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
goto skip;
}
- if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
- bio_sectors(bio) & (c->sb.block_size - 1)) {
+ if (bio->bi_iter.bi_sector & (c->cache->sb.block_size - 1) ||
+ bio_sectors(bio) & (c->cache->sb.block_size - 1)) {
pr_debug("skipping unaligned io\n");
goto skip;
}
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -350,16 +350,10 @@ void bcache_write_super(struct cache_set *c)
down(&c->sb_write_mutex);
closure_init(cl, &c->cl);
- c->sb.seq++;
+ ca->sb.seq++;
- if (c->sb.version > version)
- version = c->sb.version;
-
- ca->sb.version = version;
- ca->sb.seq = c->sb.seq;
- ca->sb.last_mount = c->sb.last_mount;
-
- SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
+ if (ca->sb.version < version)
+ ca->sb.version = version;
bio_init(bio, ca->sb_bv, 1);
bio_set_dev(bio, ca->bdev);
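The block deleted above existed only to reconcile two superblock copies: it folded the set-level version, seq, last_mount and SYNC flag back into the cache's own sb before each write. With the set-level copy gone, ca->sb is the single source of truth, so the function simply increments its sequence number and raises ca->sb.version to the baseline version if it is older.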
@@ -477,7 +471,7 @@ static int __uuid_write(struct cache_set *c)
{
BKEY_PADDED(key) k;
struct closure cl;
- struct cache *ca;
+ struct cache *ca = c->cache;
unsigned int size;
closure_init_stack(&cl);
@@ -486,13 +480,12 @@ static int __uuid_write(struct cache_set *c)
if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true))
return 1;
- size = meta_bucket_pages(&c->sb) * PAGE_SECTORS;
+ size = meta_bucket_pages(&ca->sb) * PAGE_SECTORS;
SET_KEY_SIZE(&k.key, size);
uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
closure_sync(&cl);
/* Only one bucket used for uuid write */
- ca = PTR_CACHE(c, &k.key, 0);
atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written);
bkey_copy(&c->uuid_bucket, &k.key);
@@ -1205,7 +1198,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
return -EINVAL;
}
- if (dc->sb.block_size < c->sb.block_size) {
+ if (dc->sb.block_size < c->cache->sb.block_size) {
/* Will die */
pr_err("Couldn't attach %s: block size less than set's block size\n",
dc->backing_dev_name);
@@ -1664,6 +1657,9 @@ static void cache_set_free(struct closure *cl)
bch_journal_free(c);
mutex_lock(&bch_register_lock);
+ bch_bset_sort_state_free(&c->sort);
+ free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->cache->sb)));
+
ca = c->cache;
if (ca) {
ca->set = NULL;
@@ -1671,8 +1667,6 @@ static void cache_set_free(struct closure *cl)
kobject_put(&ca->kobj);
}
- bch_bset_sort_state_free(&c->sort);
- free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->sb)));
if (c->moving_gc_wq)
destroy_workqueue(c->moving_gc_wq);
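Note the ordering in cache_set_free(): the sort-state and uuids frees are hoisted above the cache handling because free_pages() now sizes the uuids allocation via c->cache->sb, and dropping the last reference with kobject_put(&ca->kobj) may release the cache, so touching c->cache->sb after that point would be a use-after-free.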
@@ -1838,6 +1832,7 @@ void bch_cache_set_unregister(struct cache_set *c)
struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
{
int iter_size;
+ struct cache *ca = container_of(sb, struct cache, sb);
struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
if (!c)
@@ -1860,23 +1855,15 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
bch_cache_accounting_init(&c->accounting, &c->cl);
memcpy(c->set_uuid, sb->set_uuid, 16);
- c->sb.block_size = sb->block_size;
- c->sb.bucket_size = sb->bucket_size;
- c->sb.nr_in_set = sb->nr_in_set;
- c->sb.last_mount = sb->last_mount;
- c->sb.version = sb->version;
- if (c->sb.version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
- c->sb.feature_compat = sb->feature_compat;
- c->sb.feature_ro_compat = sb->feature_ro_compat;
- c->sb.feature_incompat = sb->feature_incompat;
- }
+ c->cache = ca;
+ c->cache->set = c;
c->bucket_bits = ilog2(sb->bucket_size);
c->block_bits = ilog2(sb->block_size);
- c->nr_uuids = meta_bucket_bytes(&c->sb) / sizeof(struct uuid_entry);
+ c->nr_uuids = meta_bucket_bytes(sb) / sizeof(struct uuid_entry);
c->devices_max_used = 0;
atomic_set(&c->attached_dev_nr, 0);
- c->btree_pages = meta_bucket_pages(&c->sb);
+ c->btree_pages = meta_bucket_pages(sb);
if (c->btree_pages > BTREE_MAX_PAGES)
c->btree_pages = max_t(int, c->btree_pages / 4,
BTREE_MAX_PAGES);
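bch_cache_set_alloc() now receives the superblock that is embedded in its caller's struct cache, so the container_of() in the hunk above can walk back from the sb member to the enclosing cache; this is safe because every cache_sb handed to this function is embedded that way. A standalone rendition of the idiom, with hypothetical types:

	#include <assert.h>
	#include <stddef.h>

	/* Userspace stand-in for the kernel macro. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct sb	{ int version; };
	struct cache	{ int dummy; struct sb sb; };	/* hypothetical */

	int main(void)
	{
		struct cache ca;

		/* Recover the enclosing struct from a member pointer. */
		assert(container_of(&ca.sb, struct cache, sb) == &ca);
		return 0;
	}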
@@ -1914,7 +1901,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
if (mempool_init_kmalloc_pool(&c->bio_meta, 2,
sizeof(struct bbio) +
- sizeof(struct bio_vec) * meta_bucket_pages(&c->sb)))
+ sizeof(struct bio_vec) * meta_bucket_pages(sb)))
goto err;
if (mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size))
@@ -1924,7 +1911,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
goto err;
- c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, &c->sb);
+ c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, sb);
if (!c->uuids)
goto err;
@@ -2104,7 +2091,7 @@ static int run_cache_set(struct cache_set *c)
goto err;
closure_sync(&cl);
- c->sb.last_mount = (u32)ktime_get_real_seconds();
+ c->cache->sb.last_mount = (u32)ktime_get_real_seconds();
bcache_write_super(c);
list_for_each_entry_safe(dc, t, &uncached_devices, list)
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -35,7 +35,7 @@ static uint64_t __calc_target_rate(struct cached_dev *dc)
* This is the size of the cache, minus the amount used for
* flash-only devices
*/
- uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
+ uint64_t cache_sectors = c->nbuckets * c->cache->sb.bucket_size -
atomic_long_read(&c->flash_dev_dirty_sectors);
/*