@@ -488,3 +488,48 @@ _sweave_reflink_rainbow_delalloc() {
_pwrite_byte 0x62 $((blksz * i)) $blksz $dfile.chk
done
}
+
+# Run dedupe on a directory (or a file). $DUPEREMOVE_PROG is required if
+# you call this function, and the -d -r options are recommended.
+#
+# The 1st argument can be used to force whether extents within the same
+# file are deduped (use "same" if you run on a single file). If it's not
+# set to "same" or "nosame", the function uses "same" by default, except
+# when testing on a large filesystem.
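+#
+# For example (an illustrative sketch; the file names are hypothetical):
+#
+#   _duperemove -dr -b 65536 $SCRATCH_MNT/        # default option resolution
+#   _duperemove same -dr $SCRATCH_MNT/singlefile  # force dedupe within the file
+#   _duperemove nosame -dr $SCRATCH_MNT/testdir   # never dedupe within the file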
+_duperemove()
+{
+ local dedupe_opt=""
+
+	# Decide whether to allow dedupe of extents within the same file.
+	# If "same" or "nosame" is specified, follow that option; otherwise
+	# default to "nosame" when testing on a large filesystem.
+ if [ "$1" = "same" ]; then
+ dedupe_opt="same"
+ shift
+ elif [ "$1" = "nosame" ]; then
+ dedupe_opt="nosame"
+ shift
+ elif [ "$LARGE_SCRATCH_DEV" = "yes" ]; then
+		# Don't allow dedupe of extents within the same file when
+		# testing on a large filesystem: with the --large-fs option,
+		# xfstests preallocates a huge file that takes up most of the
+		# free space at the start of every test.
+ dedupe_opt="nosame"
+ fi
+
+	# If the checks above set $dedupe_opt, use that option; otherwise
+	# fall back to "same".
+ if [ -n "$dedupe_opt" ]; then
+ dedupe_opt="--dedupe-options=${dedupe_opt}"
+ else
+ dedupe_opt="--dedupe-options=same"
+ fi
+
+ $DUPEREMOVE_PROG $dedupe_opt "$@"
+}
@@ -21,18 +21,18 @@ fssize=$((2 * 1024 * 1024 * 1024))
_scratch_mkfs_sized $fssize > $seqres.full 2>&1
_scratch_mount >> $seqres.full 2>&1
+testfile="$SCRATCH_MNT/${seq}.file"
# fill the fs with a big file has same contents
-$XFS_IO_PROG -f -c "pwrite -S 0x55 0 $fssize" $SCRATCH_MNT/${seq}.file \
- >> $seqres.full 2>&1
-md5sum $SCRATCH_MNT/${seq}.file > ${tmp}.md5sum
+$XFS_IO_PROG -f -c "pwrite -S 0x55 0 $fssize" $testfile >> $seqres.full 2>&1
+md5sum $testfile > ${tmp}.md5sum
echo "= before cycle mount ="
# Dedupe with 1M blocksize
-$DUPEREMOVE_PROG -dr --dedupe-options=same -b 1048576 $SCRATCH_MNT/ >>$seqres.full 2>&1
+_duperemove -dr -b 1048576 $SCRATCH_MNT/ >>$seqres.full 2>&1
# Verify integrity
md5sum -c --quiet ${tmp}.md5sum
# Dedupe with 64k blocksize
-$DUPEREMOVE_PROG -dr --dedupe-options=same -b 65536 $SCRATCH_MNT/ >>$seqres.full 2>&1
+_duperemove -dr -b 65536 $SCRATCH_MNT/ >>$seqres.full 2>&1
# Verify integrity again
md5sum -c --quiet ${tmp}.md5sum
@@ -35,8 +35,7 @@ function iterate_dedup_verify()
$FSSTRESS_PROG $fsstress_opts -d $noisedir \
-n 200 -p $((5 * LOAD_FACTOR)) >/dev/null 2>&1
# Too many output, so only save error output
- $DUPEREMOVE_PROG -dr --dedupe-options=same $dupdir \
- >/dev/null 2>$seqres.full
+ _duperemove same -dr $dupdir >/dev/null 2>$seqres.full
md5sum -c --quiet $md5file$index
src=$dest
dest=$dupdir/$((index + 1))
@@ -67,7 +67,7 @@ for ((i = 0; i < $((2 * LOAD_FACTOR)); i++)); do
# dupremove processes in an arbitrary order, which leaves the
# memory in an inconsistent state long enough for the assert
# to trip.
- cmd="$DUPEREMOVE_PROG -dr --dedupe-options=same $testdir"
+ cmd="_duperemove -dr $testdir"
bash -c "$cmd" >> $seqres.full 2>&1
done 2>&1 | sed -e '/Terminated/d' &
dedup_pids="$! $dedup_pids"
When testing on a large fs (--large-fs), xfstests allocates a large file
in SCRATCH_MNT/ at the start of every test. generic/559~561 try to dedupe
extents within the same file, so they waste lots of time dealing with
that large file. So bring in a common function named _duperemove(), which
decides whether to allow dedupe of extents within the same file: if the
1st argument is "same" or "nosame", follow that option; otherwise use
"same" by default, except when testing on a large filesystem.

Signed-off-by: Zorro Lang <zlang@redhat.com>
---
 common/reflink    | 45 +++++++++++++++++++++++++++++++++++++++++++++++
 tests/generic/559 | 10 +++++-----
 tests/generic/560 |  3 +--
 tests/generic/561 |  4 ++--
 4 files changed, 53 insertions(+), 9 deletions(-)
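As a sketch of the net effect (using the generic/559 call above; no
options beyond those already in the patch are assumed):

    # on a normal-sized scratch fs the default resolves to "same", so
    _duperemove -dr -b 65536 $SCRATCH_MNT/
    # runs:
    $DUPEREMOVE_PROG --dedupe-options=same -dr -b 65536 $SCRATCH_MNT/
    # while on a --large-fs run with no explicit 1st argument it runs:
    $DUPEREMOVE_PROG --dedupe-options=nosame -dr -b 65536 $SCRATCH_MNT/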