@@ -81,6 +81,13 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
{
int i;

+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /* lockdep really cares that we take all of these spinlocks
+ * in the right order. If any of the locks in the path are not
+ * currently blocking, it is going to complain. So, make really
+ * really sure by forcing the path to blocking before we clear
+ * the path blocking.
+ */
if (held) {
btrfs_set_lock_blocking_rw(held, held_rw);
if (held_rw == BTRFS_WRITE_LOCK)
@@ -89,6 +96,7 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
held_rw = BTRFS_READ_LOCK_BLOCKING;
}
btrfs_set_path_blocking(p);
+#endif

for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
if (p->nodes[i] && p->locks[i]) {
@@ -100,8 +108,10 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
}
}

+#ifdef CONFIG_DEBUG_LOCK_ALLOC
if (held)
btrfs_clear_lock_blocking_rw(held, held_rw);
+#endif
}

/* this also releases the path */
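With the CONFIG_DEBUG_LOCK_ALLOC guard back in place, the force-to-blocking pass above only runs when lockdep is built in: lockdep tracks the order in which these per-level spinlocks are taken, so the path is first converted entirely to blocking and then retaken by the loop in a single consistent top-down order, while production builds skip the extra round trip. As a rough userspace analogy of that fixed-order walk (POSIX rwlocks standing in for the extent_buffer locks; MAX_LEVEL and the helper name are invented for this sketch, and plain unlocking stands in for clearing the blocking state):

#include <pthread.h>

#define MAX_LEVEL 8	/* stand-in for BTRFS_MAX_LEVEL */

static pthread_rwlock_t level_lock[MAX_LEVEL];

/* Touch the per-level locks in one fixed top-down order, mirroring
 * the for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) loop in the hunk
 * above; a single consistent order is what a lock-ordering checker
 * wants to see. */
static void drop_path_locks_top_down(int held[])
{
	for (int i = MAX_LEVEL - 1; i >= 0; i--) {
		if (held[i]) {
			pthread_rwlock_unlock(&level_lock[i]);
			held[i] = 0;
		}
	}
}

int main(void)
{
	int held[MAX_LEVEL] = { 0 };

	for (int i = 0; i < MAX_LEVEL; i++) {
		pthread_rwlock_init(&level_lock[i], NULL);
		pthread_rwlock_rdlock(&level_lock[i]);
		held[i] = 1;
	}
	drop_path_locks_top_down(held);
	return 0;
}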
@@ -2906,7 +2916,7 @@ cow_done:
}
p->locks[level] = BTRFS_WRITE_LOCK;
} else {
- err = btrfs_tree_read_lock_atomic(b);
+ err = btrfs_try_tree_read_lock(b);
if (!err) {
btrfs_set_path_blocking(p);
btrfs_tree_read_lock(b);
@@ -3038,7 +3048,7 @@ again:
}

level = btrfs_header_level(b);
- err = btrfs_tree_read_lock_atomic(b);
+ err = btrfs_try_tree_read_lock(b);
if (!err) {
btrfs_set_path_blocking(p);
btrfs_tree_read_lock(b);
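Both callsites now share one shape: attempt the non-waiting spinning read lock first, and only on failure set the whole path to blocking and take the blocking read lock. A minimal userspace sketch of that fast-path/slow-path pattern, assuming a POSIX rwlock in place of the kernel's extent_buffer lock (the names are illustrative, not from the patch):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t eb_lock = PTHREAD_RWLOCK_INITIALIZER;

static void read_lock_path(void)
{
	/* Fast path: take the read lock only if nobody makes us wait. */
	if (pthread_rwlock_tryrdlock(&eb_lock) != 0) {
		/*
		 * Slow path: the kernel sets the whole path to blocking
		 * (btrfs_set_path_blocking) before it is allowed to
		 * sleep; this sketch simply waits on the lock instead.
		 */
		pthread_rwlock_rdlock(&eb_lock);
	}
	printf("read lock acquired\n");
	pthread_rwlock_unlock(&eb_lock);
}

int main(void)
{
	read_lock_path();
	return 0;
}

The point of the fast path is that the caller keeps spinning-lock semantics whenever the lock is uncontended, and pays the blocking-conversion cost only when it would otherwise have to wait.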
@@ -132,26 +132,6 @@ again:
}

/*
- * take a spinning read lock.
- * returns 1 if we get the read lock and 0 if we don't
- * this won't wait for blocking writers
- */
-int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
-{
- if (atomic_read(&eb->blocking_writers))
- return 0;
-
- read_lock(&eb->lock);
- if (atomic_read(&eb->blocking_writers)) {
- read_unlock(&eb->lock);
- return 0;
- }
- atomic_inc(&eb->read_locks);
- atomic_inc(&eb->spinning_readers);
- return 1;
-}
-
-/*
* returns 1 if we get the read lock and 0 if we don't
* this won't wait for blocking writers
*/
@@ -182,7 +162,9 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
atomic_read(&eb->blocking_readers))
return 0;

-write_lock(&eb->lock);
+ if (!write_trylock(&eb->lock))
+ return 0;
+
if (atomic_read(&eb->blocking_writers) ||
atomic_read(&eb->blocking_readers)) {
write_unlock(&eb->lock);
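The write_trylock() change keeps btrfs_try_tree_write_lock() honest about its "try" semantics: a bare write_lock() can queue behind current readers and spin, while write_trylock() simply gives up on contention. Note also the recheck of the blocking counters after the lock is taken, since a blocker can appear between the pre-check and the acquisition. A hedged sketch of that check/trylock/recheck shape using C11 atomics and a POSIX rwlock (the variable names are invented for illustration):

#include <pthread.h>
#include <stdatomic.h>

static pthread_rwlock_t eb_lock = PTHREAD_RWLOCK_INITIALIZER;
static atomic_int blocking_writers;
static atomic_int blocking_readers;

/* Returns 1 with the write lock held, 0 if it cannot be taken
 * without waiting. */
static int try_write_lock(void)
{
	/* Cheap pre-check before touching the lock at all. */
	if (atomic_load(&blocking_writers) || atomic_load(&blocking_readers))
		return 0;

	/* Never queue behind other lock holders; give up instead. */
	if (pthread_rwlock_trywrlock(&eb_lock) != 0)
		return 0;

	/* Recheck: a blocker may have appeared between the two steps. */
	if (atomic_load(&blocking_writers) || atomic_load(&blocking_readers)) {
		pthread_rwlock_unlock(&eb_lock);
		return 0;
	}
	return 1;
}

int main(void)
{
	if (try_write_lock())
		pthread_rwlock_unlock(&eb_lock);
	return 0;
}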
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -35,8 +35,6 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw);
void btrfs_assert_tree_locked(struct extent_buffer *eb);
int btrfs_try_tree_read_lock(struct extent_buffer *eb);
int btrfs_try_tree_write_lock(struct extent_buffer *eb);
-int btrfs_tree_read_lock_atomic(struct extent_buffer *eb);
-

static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
{