[12/18] btrfs: migrate defrag.c to use block size terminology

Message ID ed1c6f9daf9d0b2fa253a6d9b632ee53b64a1c06.1734514696.git.wqu@suse.com
State New
Series btrfs: migrate to "block size" to describe the

Commit Message

Qu Wenruo Dec. 18, 2024, 9:41 a.m. UTC
Straightforward rename from "sector" to "block".
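
For reference, the mapping is purely mechanical, with no functional change
(all names as they appear in the diff below):

	fs_info->sectorsize       ->  fs_info->blocksize
	fs_info->sectorsize_bits  ->  fs_info->blocksize_bits
	sectors_defragged         ->  blocks_defragged
	max_sectors               ->  max_blocks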

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/defrag.c | 52 +++++++++++++++++++++++------------------------
 1 file changed, 26 insertions(+), 26 deletions(-)

Patch

diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c
index 968dae953948..7a96505957b3 100644
--- a/fs/btrfs/defrag.c
+++ b/fs/btrfs/defrag.c
@@ -272,7 +272,7 @@ static int btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
 	if (ret < 0)
 		goto cleanup;
 
-	cur = max(cur + fs_info->sectorsize, range.start);
+	cur = max(cur + fs_info->blocksize, range.start);
 	goto again;
 
 cleanup:
@@ -749,14 +749,14 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	struct extent_map *em;
-	const u32 sectorsize = BTRFS_I(inode)->root->fs_info->sectorsize;
+	const u32 blocksize = BTRFS_I(inode)->root->fs_info->blocksize;
 
 	/*
 	 * Hopefully we have this extent in the tree already, try without the
 	 * full extent lock.
 	 */
 	read_lock(&em_tree->lock);
-	em = lookup_extent_mapping(em_tree, start, sectorsize);
+	em = lookup_extent_mapping(em_tree, start, blocksize);
 	read_unlock(&em_tree->lock);
 
 	/*
@@ -775,7 +775,7 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
 
 	if (!em) {
 		struct extent_state *cached = NULL;
-		u64 end = start + sectorsize - 1;
+		u64 end = start + blocksize - 1;
 
 		/* Get the big lock and read metadata off disk. */
 		if (!locked)
@@ -1199,7 +1199,7 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
 	struct defrag_target_range *tmp;
 	LIST_HEAD(target_list);
 	struct folio **folios;
-	const u32 sectorsize = inode->root->fs_info->sectorsize;
+	const u32 blocksize = inode->root->fs_info->blocksize;
 	u64 last_index = (start + len - 1) >> PAGE_SHIFT;
 	u64 start_index = start >> PAGE_SHIFT;
 	unsigned int nr_pages = last_index - start_index + 1;
@@ -1207,7 +1207,7 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
 	int i;
 
 	ASSERT(nr_pages <= CLUSTER_SIZE / PAGE_SIZE);
-	ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(len, sectorsize));
+	ASSERT(IS_ALIGNED(start, blocksize) && IS_ALIGNED(len, blocksize));
 
 	folios = kcalloc(nr_pages, sizeof(struct folio *), GFP_NOFS);
 	if (!folios)
@@ -1270,11 +1270,11 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
 			      struct file_ra_state *ra,
 			      u64 start, u32 len, u32 extent_thresh,
 			      u64 newer_than, bool do_compress,
-			      unsigned long *sectors_defragged,
-			      unsigned long max_sectors,
+			      unsigned long *blocks_defragged,
+			      unsigned long max_blocks,
 			      u64 *last_scanned_ret)
 {
-	const u32 sectorsize = inode->root->fs_info->sectorsize;
+	const u32 blocksize = inode->root->fs_info->blocksize;
 	struct defrag_target_range *entry;
 	struct defrag_target_range *tmp;
 	LIST_HEAD(target_list);
@@ -1290,14 +1290,14 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
 		u32 range_len = entry->len;
 
 		/* Reached or beyond the limit */
-		if (max_sectors && *sectors_defragged >= max_sectors) {
+		if (max_blocks && *blocks_defragged >= max_blocks) {
 			ret = 1;
 			break;
 		}
 
-		if (max_sectors)
+		if (max_blocks)
 			range_len = min_t(u32, range_len,
-				(max_sectors - *sectors_defragged) * sectorsize);
+				(max_blocks - *blocks_defragged) * blocksize);
 
 		/*
 		 * If defrag_one_range() has updated last_scanned_ret,
@@ -1315,7 +1315,7 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
 		/*
 		 * Here we may not defrag any range if holes are punched before
 		 * we locked the pages.
-		 * But that's fine, it only affects the @sectors_defragged
+		 * But that's fine, it only affects the @blocks_defragged
 		 * accounting.
 		 */
 		ret = defrag_one_range(inode, entry->start, range_len,
@@ -1323,8 +1323,8 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
 				       last_scanned_ret);
 		if (ret < 0)
 			break;
-		*sectors_defragged += range_len >>
-				      inode->root->fs_info->sectorsize_bits;
+		*blocks_defragged += range_len >>
+				      inode->root->fs_info->blocksize_bits;
 	}
 out:
 	list_for_each_entry_safe(entry, tmp, &target_list, list) {
@@ -1343,11 +1343,11 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
  * @ra:		   readahead state
  * @range:	   defrag options including range and flags
  * @newer_than:	   minimum transid to defrag
- * @max_to_defrag: max number of sectors to be defragged, if 0, the whole inode
+ * @max_to_defrag: max number of blocks to be defragged, if 0, the whole inode
  *		   will be defragged.
  *
  * Return <0 for error.
- * Return >=0 for the number of sectors defragged, and range->start will be updated
+ * Return >=0 for the number of blocks defragged, and range->start will be updated
  * to indicate the file offset where next defrag should be started at.
  * (Mostly for autodefrag, which sets @max_to_defrag thus we may exit early without
  *  defragging all the range).
@@ -1357,7 +1357,7 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
 		      u64 newer_than, unsigned long max_to_defrag)
 {
 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
-	unsigned long sectors_defragged = 0;
+	unsigned long blocks_defragged = 0;
 	u64 isize = i_size_read(inode);
 	u64 cur;
 	u64 last_byte;
@@ -1394,8 +1394,8 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
 	}
 
 	/* Align the range */
-	cur = round_down(range->start, fs_info->sectorsize);
-	last_byte = round_up(last_byte, fs_info->sectorsize) - 1;
+	cur = round_down(range->start, fs_info->blocksize);
+	last_byte = round_up(last_byte, fs_info->blocksize) - 1;
 
 	/*
 	 * Make writeback start from the beginning of the range, so that the
@@ -1406,7 +1406,7 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
 		inode->i_mapping->writeback_index = start_index;
 
 	while (cur < last_byte) {
-		const unsigned long prev_sectors_defragged = sectors_defragged;
+		const unsigned long prev_blocks_defragged = blocks_defragged;
 		u64 last_scanned = cur;
 		u64 cluster_end;
 
@@ -1434,10 +1434,10 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
 			BTRFS_I(inode)->defrag_compress = compress_type;
 		ret = defrag_one_cluster(BTRFS_I(inode), ra, cur,
 				cluster_end + 1 - cur, extent_thresh,
-				newer_than, do_compress, &sectors_defragged,
+				newer_than, do_compress, &blocks_defragged,
 				max_to_defrag, &last_scanned);
 
-		if (sectors_defragged > prev_sectors_defragged)
+		if (blocks_defragged > prev_blocks_defragged)
 			balance_dirty_pages_ratelimited(inode->i_mapping);
 
 		btrfs_inode_unlock(BTRFS_I(inode), 0);
@@ -1456,9 +1456,9 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
 	 * in next run.
 	 */
 	range->start = cur;
-	if (sectors_defragged) {
+	if (blocks_defragged) {
 		/*
-		 * We have defragged some sectors, for compression case they
+		 * We have defragged some blocks, for compression case they
 		 * need to be written back immediately.
 		 */
 		if (range->flags & BTRFS_DEFRAG_RANGE_START_IO) {
@@ -1471,7 +1471,7 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
 			btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
 		else if (range->compress_type == BTRFS_COMPRESS_ZSTD)
 			btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
-		ret = sectors_defragged;
+		ret = blocks_defragged;
 	}
 	if (do_compress) {
 		btrfs_inode_lock(BTRFS_I(inode), 0);
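
A note on the accounting touched above: defrag_one_cluster() converts the
defragged byte length to a block count with a shift rather than a divide.
A minimal sketch of the equivalence, assuming blocksize is a power of two
(which btrfs enforces, so blocksize_bits == ilog2(blocksize)); the helper
name here is hypothetical, for illustration only:

	/*
	 * range_len is blocksize-aligned (see the ASSERT in
	 * defrag_one_range()), so the shift is an exact division:
	 *
	 *     range_len >> blocksize_bits == range_len / blocksize
	 */
	static inline unsigned long defrag_len_to_blocks(u32 range_len,
							 u32 blocksize_bits)
	{
		return range_len >> blocksize_bits;
	}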