@@ -675,6 +675,8 @@ enum {
 #define EXT4_GET_BLOCKS_IO_SUBMIT		0x0400
 /* Caller is in the atomic contex, find extent if it has been cached */
 #define EXT4_GET_BLOCKS_CACHED_NOWAIT		0x0800
+/* Provision blocks on underlying storage */
+#define EXT4_GET_BLOCKS_PROVISION		0x1000
 /*
 * The bit position of these flags must not overlap with any of the
@@ -4356,6 +4356,13 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
}
}
+ /* Attempt to provision blocks on underlying storage */
+ if (flags & EXT4_GET_BLOCKS_PROVISION) {
+ err = sb_issue_provision(inode->i_sb, pblk, ar.len, GFP_NOFS);
+ if (err)
+ goto out;
+ }
+
/*
* Cache the extent and update transaction to commit on fdatasync only
* when it is _not_ an unwritten extent.
@@ -4690,7 +4697,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
/* Return error if mode is not supported */
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
- FALLOC_FL_INSERT_RANGE))
+ FALLOC_FL_INSERT_RANGE | FALLOC_FL_PROVISION))
return -EOPNOTSUPP;
inode_lock(inode);
@@ -4750,6 +4757,12 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
if (ret)
goto out;
+ /* Ensure that preallocation provisions the blocks on the underlying
+ * storage device.
+ */
+ if (mode & FALLOC_FL_PROVISION)
+ flags |= EXT4_GET_BLOCKS_PROVISION;
+
ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
if (ret)
goto out;
@@ -640,6 +640,15 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
 	if (err)
 		goto cleanup;
+	/* Attempt to provision blocks on underlying storage */
+	if (flags & EXT4_GET_BLOCKS_PROVISION) {
+		err = sb_issue_provision(inode->i_sb,
+					 le32_to_cpu(chain[depth-1].key),
+					 ar.len, GFP_NOFS);
+		/*
+		 * Jump to cleanup (not out) so the buffer heads held in
+		 * chain[] are released via brelse() on failure.
+		 */
+		if (err)
+			goto cleanup;
+	}
+
 	map->m_flags |= EXT4_MAP_NEW;
 	ext4_update_inode_fsync_trans(handle, inode, 1);
@@ -1107,6 +1107,17 @@ static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
gfp_mask, 0);
}
+static inline int sb_issue_provision(struct super_block *sb, sector_t block,
+ sector_t nr_blocks, gfp_t gfp_mask)
+{
+ return blkdev_issue_provision(sb->s_bdev,
+ block << (sb->s_blocksize_bits -
+ SECTOR_SHIFT),
+ nr_blocks << (sb->s_blocksize_bits -
+ SECTOR_SHIFT),
+ gfp_mask);
+}
+
static inline bool bdev_is_partition(struct block_device *bdev)
{
return bdev->bd_partno;