new file mode 100755
@@ -0,0 +1,105 @@
+#! /bin/bash
+# FSQA Test No. 118
+#
+# Test that if we delete a snapshot, fsync the directory that contained it and
+# then crash, the next time we mount the filesystem the log replay procedure
+# succeeds and the snapshot is no longer present.
+#
+#-----------------------------------------------------------------------
+#
+# Copyright (C) 2016 SUSE Linux Products GmbH. All Rights Reserved.
+# Author: Filipe Manana <fdmanana@suse.com>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it would be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+#-----------------------------------------------------------------------
+#
+
+seq=`basename $0`
+seqres=$RESULT_DIR/$seq
+echo "QA output created by $seq"
+tmp=/tmp/$$
+status=1 # failure is the default!
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+_cleanup()
+{
+ _cleanup_flakey
+ cd /
+ rm -f $tmp.*
+}
+
+# get standard environment, filters and checks
+. ./common/rc
+. ./common/filter
+. ./common/dmflakey
+
+# real QA test starts here
+_supported_fs btrfs
+_supported_os Linux
+_require_scratch
+_require_dm_target flakey
+_require_metadata_journaling $SCRATCH_DEV
+
+rm -f $seqres.full
+
+_scratch_mkfs >>$seqres.full 2>&1
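+# Set up a dm-flakey device on top of the scratch device and mount it, so
+# that we can simulate a power failure later by dropping all writes.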
+_init_flakey
+_mount_flakey
+
+# Create a snapshot at the root of our filesystem (the mount point path),
+# delete it, fsync the mount point path, simulate a power failure and mount
+# again to replay the log. Mounting should succeed and the snapshot should no
+# longer be visible.
+_run_btrfs_util_prog subvolume snapshot $SCRATCH_MNT $SCRATCH_MNT/snap1
+_run_btrfs_util_prog subvolume delete $SCRATCH_MNT/snap1
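+# Fsync the directory that contained the snapshot entry, so that its removal
+# is durably recorded in the fsync log.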
+$XFS_IO_PROG -c "fsync" $SCRATCH_MNT
+
+echo "Filesystem content before first power failure:"
+ls -R $SCRATCH_MNT | _filter_scratch
+
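+# Simulate a power failure: drop all writes not yet flushed to the scratch
+# device, then remount the filesystem, which triggers log replay.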
+_flakey_drop_and_remount
+
+echo "Filesystem content after first power failure:"
+# This must match what we had before the power failure; we don't expect the
+# snapshot to be present anymore.
+ls -R $SCRATCH_MNT | _filter_scratch
+
+# Same scenario as above, but this time the snapshot is created inside a
+# directory and not directly under the root (the mount point path).
+mkdir $SCRATCH_MNT/testdir
+_run_btrfs_util_prog subvolume snapshot $SCRATCH_MNT $SCRATCH_MNT/testdir/snap2
+_run_btrfs_util_prog subvolume delete $SCRATCH_MNT/testdir/snap2
+$XFS_IO_PROG -c "fsync" $SCRATCH_MNT/testdir
+
+echo "Filesystem content before second power failure:"
+ls -R $SCRATCH_MNT | _filter_scratch
+
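+# Simulate a second power failure, triggering log replay again on mount.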
+_flakey_drop_and_remount
+
+echo "Filesystem content after second power failure:"
+# This must match what we had before the power failure; we don't expect the
+# snapshot to be present anymore.
+ls -R $SCRATCH_MNT | _filter_scratch
+
+_unmount_flakey
+
+status=0
+exit
new file mode 100644
@@ -0,0 +1,15 @@
+QA output created by 118
+Filesystem content before first power failure:
+SCRATCH_MNT:
+Filesystem content after first power failure:
+SCRATCH_MNT:
+Filesystem content before second power failure:
+SCRATCH_MNT:
+testdir
+
+SCRATCH_MNT/testdir:
+Filesystem content after second power failure:
+SCRATCH_MNT:
+testdir
+
+SCRATCH_MNT/testdir:
@@ -118,3 +118,4 @@
115 auto qgroup
116 auto quick metadata
117 auto quick send clone
+118 auto quick snapshot metadata