@@ -56,6 +56,10 @@ xfs_buf_log_format_size(
(blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
}
+/*
+ * We only need to worry about discontiguous ranges that straddle multiple
+ * pages of unmapped buffers; everything else has a contiguous data region
+ * we can copy from.
+ */
static inline bool
xfs_buf_item_straddle(
struct xfs_buf *bp,
@@ -65,6 +69,9 @@ xfs_buf_item_straddle(
{
void *first, *last;
+ if (bp->b_page_count == 1 || !(bp->b_flags & XBF_UNMAPPED))
+ return false;
+
first = xfs_buf_offset(bp, offset + (first_bit << XFS_BLF_SHIFT));
last = xfs_buf_offset(bp,
offset + ((first_bit + nbits) << XFS_BLF_SHIFT));
@@ -132,11 +139,13 @@ xfs_buf_item_size_segment(
return;
slow_scan:
- /* Count the first bit we jumped out of the above loop from */
- (*nvecs)++;
- *nbytes += XFS_BLF_CHUNK;
+ ASSERT(bp->b_addr == NULL);
last_bit = first_bit;
+ nbits = 1;
while (last_bit != -1) {
+
+ *nbytes += XFS_BLF_CHUNK;
+
/*
* This takes the bit number to start looking from and
* returns the next set bit from there. It returns -1
@@ -151,6 +160,8 @@ xfs_buf_item_size_segment(
* else keep scanning the current set of bits.
*/
if (next_bit == -1) {
+ if (first_bit != last_bit)
+ (*nvecs)++;
break;
} else if (next_bit != last_bit + 1 ||
xfs_buf_item_straddle(bp, offset, first_bit, nbits)) {
@@ -162,7 +173,6 @@ xfs_buf_item_size_segment(
last_bit++;
nbits++;
}
- *nbytes += XFS_BLF_CHUNK;
}
}