@@ -407,7 +407,7 @@ static const struct iomap_ops blkdev_iomap_ops = {
static int blkdev_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create)
{
- bh->b_bdev = I_BDEV(inode);
+ bh_set_bdev_file(bh, inode->i_private);
bh->b_blocknr = iblock;
set_buffer_mapped(bh);
return 0;
@@ -381,7 +381,7 @@ static int read_file_page(struct file *file, unsigned long index,
}
bh->b_blocknr = block;
- bh->b_bdev = inode->i_sb->s_bdev;
+ bh_set_bdev_file(bh, inode->i_sb->s_bdev_file);
if (count < blocksize)
count = 0;
else
@@ -365,7 +365,7 @@ affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_resul
err_alloc:
brelse(ext_bh);
clear_buffer_mapped(bh_result);
- bh_result->b_bdev = NULL;
+ bh_set_bdev_file(bh_result, NULL);
// unlock cache
affs_unlock_ext(inode);
return -ENOSPC;
@@ -129,7 +129,7 @@ static void buffer_io_error(struct buffer_head *bh, char *msg)
if (!test_bit(BH_Quiet, &bh->b_state))
printk_ratelimited(KERN_ERR
"Buffer I/O error on dev %pg, logical block %llu%s\n",
- bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
+ bh_bdev(bh), (unsigned long long)bh->b_blocknr, msg);
}
/*
@@ -1367,7 +1367,7 @@ lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
for (i = 0; i < BH_LRU_SIZE; i++) {
struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
- if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
+ if (bh && bh->b_blocknr == block && bh_bdev(bh) == bdev &&
bh->b_size == size) {
if (i) {
while (i) {
@@ -1564,7 +1564,7 @@ static void discard_buffer(struct buffer_head * bh)
lock_buffer(bh);
clear_buffer_dirty(bh);
- bh->b_bdev = NULL;
+ bh_set_bdev_file(bh, NULL);
b_state = READ_ONCE(bh->b_state);
do {
} while (!try_cmpxchg(&bh->b_state, &b_state,
@@ -2005,7 +2005,7 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
{
loff_t offset = (loff_t)block << inode->i_blkbits;
- bh->b_bdev = iomap_bdev(iomap);
+ bh_set_bdev_file(bh, iomap->bdev_file);
/*
* Block points to offset in file we need to map, iomap contains
@@ -2781,7 +2781,7 @@ static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
if (buffer_prio(bh))
opf |= REQ_PRIO;
- bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
+ bio = bio_alloc(bh_bdev(bh), 1, opf, GFP_NOIO);
fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
@@ -673,7 +673,7 @@ static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio,
sector = start_sector << (sdio->blkbits - 9);
nr_pages = bio_max_segs(sdio->pages_in_io);
BUG_ON(nr_pages <= 0);
- dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages);
+ dio_bio_alloc(dio, sdio, bh_bdev(map_bh), sector, nr_pages);
sdio->boundary = 0;
out:
return ret;
@@ -948,7 +948,7 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
map_bh->b_blocknr << sdio->blkfactor;
if (buffer_new(map_bh)) {
clean_bdev_aliases(
- map_bh->b_bdev,
+ bh_bdev(map_bh),
map_bh->b_blocknr,
map_bh->b_size >> i_blkbits);
}
@@ -80,7 +80,7 @@
} while (0)
# define ea_bdebug(bh, f...) do { \
printk(KERN_DEBUG "block %pg:%lu: ", \
- bh->b_bdev, (unsigned long) bh->b_blocknr); \
+ bh_bdev(bh), (unsigned long) bh->b_blocknr); \
printk(f); \
printk("\n"); \
} while (0)
@@ -384,7 +384,7 @@ int ext4_multi_mount_protect(struct super_block *sb,
BUILD_BUG_ON(sizeof(mmp->mmp_bdevname) < BDEVNAME_SIZE);
snprintf(mmp->mmp_bdevname, sizeof(mmp->mmp_bdevname),
- "%pg", bh->b_bdev);
+ "%pg", bh_bdev(bh));
/*
* Start a kernel thread to update the MMP block periodically.
@@ -93,8 +93,7 @@ struct ext4_io_end_vec *ext4_last_io_end_vec(ext4_io_end_t *io_end)
static void buffer_io_error(struct buffer_head *bh)
{
printk_ratelimited(KERN_ERR "Buffer I/O error on device %pg, logical block %llu\n",
- bh->b_bdev,
- (unsigned long long)bh->b_blocknr);
+ bh_bdev(bh), (unsigned long long)bh->b_blocknr);
}
static void ext4_finish_bio(struct bio *bio)
@@ -397,7 +396,7 @@ static void io_submit_init_bio(struct ext4_io_submit *io,
* bio_alloc will _always_ be able to allocate a bio if
* __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
*/
- bio = bio_alloc(bh->b_bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOIO);
+ bio = bio_alloc(bh_bdev(bh), BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOIO);
fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio->bi_end_io = ext4_end_bio;
@@ -68,7 +68,7 @@
inode->i_sb->s_id, inode->i_ino, ##__VA_ARGS__)
# define ea_bdebug(bh, fmt, ...) \
printk(KERN_DEBUG "block %pg:%lu: " fmt "\n", \
- bh->b_bdev, (unsigned long)bh->b_blocknr, ##__VA_ARGS__)
+ bh_bdev(bh), (unsigned long)bh->b_blocknr, ##__VA_ARGS__)
#else
# define ea_idebug(inode, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
# define ea_bdebug(bh, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
@@ -622,7 +622,7 @@ static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
spin_unlock(&sdp->sd_ail_lock);
}
}
- bh->b_bdev = NULL;
+ bh_set_bdev_file(bh, NULL);
clear_buffer_mapped(bh);
clear_buffer_req(bh);
clear_buffer_new(bh);
@@ -218,7 +218,7 @@ static void gfs2_submit_bhs(blk_opf_t opf, struct buffer_head *bhs[], int num)
struct buffer_head *bh = *bhs;
struct bio *bio;
- bio = bio_alloc(bh->b_bdev, num, opf, GFP_NOIO);
+ bio = bio_alloc(bh_bdev(bh), num, opf, GFP_NOIO);
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
while (num > 0) {
bh = *bhs;
@@ -1014,7 +1014,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
clear_buffer_mapped(bh);
clear_buffer_new(bh);
clear_buffer_req(bh);
- bh->b_bdev = NULL;
+ bh_set_bdev_file(bh, NULL);
}
}
@@ -434,7 +434,7 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
folio_set_bh(new_bh, new_folio, new_offset);
new_bh->b_size = bh_in->b_size;
- new_bh->b_bdev = journal->j_dev;
+ bh_set_bdev_file(new_bh, journal->j_dev_file);
new_bh->b_blocknr = blocknr;
new_bh->b_private = bh_in;
set_buffer_mapped(new_bh);
@@ -929,7 +929,7 @@ static void warn_dirty_buffer(struct buffer_head *bh)
"JBD2: Spotted dirty metadata buffer (dev = %pg, blocknr = %llu). "
"There's a risk of filesystem corruption in case of system "
"crash.\n",
- bh->b_bdev, (unsigned long long)bh->b_blocknr);
+ bh_bdev(bh), (unsigned long long)bh->b_blocknr);
}
/* Call t_frozen trigger and copy buffer data into jh->b_frozen_data. */
@@ -990,7 +990,7 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
/* If it takes too long to lock the buffer, trace it */
time_lock = jbd2_time_diff(start_lock, jiffies);
if (time_lock > HZ/10)
- trace_jbd2_lock_buffer_stall(bh->b_bdev->bd_dev,
+ trace_jbd2_lock_buffer_stall(bh_bdev(bh)->bd_dev,
jiffies_to_msecs(time_lock));
/* We now hold the buffer lock so it is safe to query the buffer
@@ -2374,7 +2374,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
write_unlock(&journal->j_state_lock);
jbd2_journal_put_journal_head(jh);
/* Already zapped buffer? Nothing to do... */
- if (!bh->b_bdev)
+ if (!bh_bdev(bh))
return 0;
return -EBUSY;
}
@@ -2428,7 +2428,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
clear_buffer_new(bh);
clear_buffer_delay(bh);
clear_buffer_unwritten(bh);
- bh->b_bdev = NULL;
+ bh_set_bdev_file(bh, NULL);
return may_free;
}
@@ -126,7 +126,7 @@ static void map_buffer_to_folio(struct folio *folio, struct buffer_head *bh,
do {
if (block == page_block) {
page_bh->b_state = bh->b_state;
- page_bh->b_bdev = bh->b_bdev;
+ bh_copy_bdev_file(page_bh, bh);
page_bh->b_blocknr = bh->b_blocknr;
break;
}
@@ -216,7 +216,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
page_block++;
block_in_file++;
}
- bdev = map_bh->b_bdev;
+ bdev = bh_bdev(map_bh);
}
/*
@@ -272,7 +272,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
page_block++;
block_in_file++;
}
- bdev = map_bh->b_bdev;
+ bdev = bh_bdev(map_bh);
}
if (first_hole != blocks_per_page) {
@@ -515,7 +515,7 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
boundary_block = bh->b_blocknr;
-			boundary_bdev = bh->b_bdev;
+			boundary_bdev = bh_bdev(bh);
}
- bdev = bh->b_bdev;
+ bdev = bh_bdev(bh);
} while ((bh = bh->b_this_page) != head);
if (first_unmapped)
@@ -565,7 +565,7 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
}
page_block++;
boundary = buffer_boundary(&map_bh);
- bdev = map_bh.b_bdev;
+ bdev = bh_bdev(&map_bh);
if (block_in_file == last_block)
break;
block_in_file++;
@@ -59,7 +59,7 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
BUG();
}
memset(bh->b_data, 0, i_blocksize(inode));
- bh->b_bdev = inode->i_sb->s_bdev;
+ bh_set_bdev_file(bh, inode->i_sb->s_bdev_file);
bh->b_blocknr = blocknr;
set_buffer_mapped(bh);
set_buffer_uptodate(bh);
@@ -118,7 +118,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
goto found;
}
set_buffer_mapped(bh);
- bh->b_bdev = inode->i_sb->s_bdev;
+ bh_set_bdev_file(bh, inode->i_sb->s_bdev_file);
bh->b_blocknr = pblocknr; /* set block address for read */
bh->b_end_io = end_buffer_read_sync;
get_bh(bh);
@@ -84,7 +84,7 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
}
if (!buffer_mapped(bh)) {
- bh->b_bdev = inode->i_sb->s_bdev;
+ bh_set_bdev_file(bh, inode->i_sb->s_bdev_file);
set_buffer_mapped(bh);
}
bh->b_blocknr = pbn;
@@ -89,7 +89,7 @@ static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
if (buffer_uptodate(bh))
goto failed_bh;
- bh->b_bdev = sb->s_bdev;
+ bh_set_bdev_file(bh, sb->s_bdev_file);
err = nilfs_mdt_insert_new_block(inode, block, bh, init_block);
if (likely(!err)) {
get_bh(bh);
@@ -111,7 +111,7 @@ void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
dbh->b_blocknr = sbh->b_blocknr;
- dbh->b_bdev = sbh->b_bdev;
+ bh_copy_bdev_file(dbh, sbh);
bh = dbh;
bits = sbh->b_state & (BIT(BH_Uptodate) | BIT(BH_Mapped));
@@ -216,7 +216,7 @@ static void nilfs_copy_folio(struct folio *dst, struct folio *src,
lock_buffer(dbh);
dbh->b_state = sbh->b_state & mask;
dbh->b_blocknr = sbh->b_blocknr;
- dbh->b_bdev = sbh->b_bdev;
+ bh_copy_bdev_file(dbh, sbh);
sbh = sbh->b_this_page;
dbh = dbh->b_this_page;
} while (dbh != dbufs);
@@ -609,7 +609,7 @@ static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
lbo = ((u64)lcn << cluster_bits) + off;
set_buffer_mapped(bh);
- bh->b_bdev = sb->s_bdev;
+ bh_set_bdev_file(bh, sb->s_bdev_file);
bh->b_blocknr = lbo >> sb->s_blocksize_bits;
valid = ni->i_valid;
@@ -2332,7 +2332,7 @@ static void tb_buffer_sanity_check(struct super_block *sb,
"in tree %s[%d] (%b)",
descr, level, bh);
- if (bh->b_bdev != sb->s_bdev)
+ if (bh_bdev(bh) != sb->s_bdev)
reiserfs_panic(sb, "jmacd-4", "buffer has wrong "
"device %s[%d] (%b)",
descr, level, bh);
@@ -618,7 +618,7 @@ static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
if (buffer_journaled(bh)) {
reiserfs_warning(NULL, "clm-2084",
"pinned buffer %lu:%pg sent to disk",
- bh->b_blocknr, bh->b_bdev);
+ bh->b_blocknr, bh_bdev(bh));
}
if (uptodate)
set_buffer_uptodate(bh);
@@ -156,7 +156,7 @@ static int scnprintf_buffer_head(char *buf, size_t size, struct buffer_head *bh)
{
return scnprintf(buf, size,
"dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
- bh->b_bdev, bh->b_size,
+ bh_bdev(bh), bh->b_size,
(unsigned long long)bh->b_blocknr,
atomic_read(&(bh->b_count)),
bh->b_state, bh->b_page,
@@ -561,7 +561,7 @@ static int print_super_block(struct buffer_head *bh)
return 1;
}
- printk("%pg\'s super block is in block %llu\n", bh->b_bdev,
+ printk("%pg\'s super block is in block %llu\n", bh_bdev(bh),
(unsigned long long)bh->b_blocknr);
printk("Reiserfs version %s\n", version);
printk("Block count %u\n", sb_block_count(rs));
@@ -331,7 +331,7 @@ static inline int key_in_buffer(
|| chk_path->path_length > MAX_HEIGHT,
"PAP-5050: pointer to the key(%p) is NULL or invalid path length(%d)",
key, chk_path->path_length);
- RFALSE(!PATH_PLAST_BUFFER(chk_path)->b_bdev,
+ RFALSE(!bh_bdev(PATH_PLAST_BUFFER(chk_path)),
"PAP-5060: device must not be NODEV");
if (comp_keys(get_lkey(chk_path, sb), key) == 1)
@@ -187,7 +187,7 @@ void reiserfs_unmap_buffer(struct buffer_head *bh)
clear_buffer_mapped(bh);
clear_buffer_req(bh);
clear_buffer_new(bh);
- bh->b_bdev = NULL;
+ bh_set_bdev_file(bh, NULL);
unlock_buffer(bh);
}
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/blk_types.h>
+#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
@@ -136,6 +137,23 @@ BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)
+static __always_inline void bh_set_bdev_file(struct buffer_head *bh,
+ struct file *bdev_file)
+{
+ bh->b_bdev = bdev_file ? file_bdev(bdev_file) : NULL;
+}
+
+static __always_inline void bh_copy_bdev_file(struct buffer_head *dbh,
+					      const struct buffer_head *sbh)
+{
+	dbh->b_bdev = sbh->b_bdev;
+}
+
+static __always_inline struct block_device *bh_bdev(const struct buffer_head *bh)
+{
+	return bh->b_bdev;
+}
+
static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
{
/*
@@ -377,7 +395,7 @@ static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
set_buffer_mapped(bh);
- bh->b_bdev = sb->s_bdev;
+ bh_set_bdev_file(bh, sb->s_bdev_file);
bh->b_blocknr = block;
bh->b_size = sb->s_blocksize;
}
@@ -26,7 +26,7 @@ DECLARE_EVENT_CLASS(block_buffer,
),
TP_fast_assign(
- __entry->dev = bh->b_bdev->bd_dev;
+ __entry->dev = bh_bdev(bh)->bd_dev;
__entry->sector = bh->b_blocknr;
__entry->size = bh->b_size;
),