--- a/fs/netfs/write_collect.c
+++ b/fs/netfs/write_collect.c
@@ -466,7 +466,7 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
cancel:
/* Remove if completely consumed. */
- spin_lock(&wreq->lock);
+ spin_lock_bh(&wreq->lock);
remove = front;
list_del_init(&front->rreq_link);
@@ -482,7 +482,7 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
}
}
- spin_unlock(&wreq->lock);
+ spin_unlock_bh(&wreq->lock);
netfs_put_subrequest(remove, false,
notes & SAW_FAILURE ?
netfs_sreq_trace_put_cancel :
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -189,7 +189,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
* the list. The collector only goes nextwards and uses the lock to
* remove entries off of the front.
*/
- spin_lock(&wreq->lock);
+ spin_lock_bh(&wreq->lock);
list_add_tail(&subreq->rreq_link, &stream->subrequests);
if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
stream->front = subreq;
@@ -200,7 +200,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
}
}
- spin_unlock(&wreq->lock);
+ spin_unlock_bh(&wreq->lock);
stream->construct = subreq;
}
Use bh-disabling spinlocks when accessing rreq->lock because, in the
future, it may be twiddled from softirq context when cleanup is driven
from cache backend DIO completion.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
---
 fs/netfs/write_collect.c | 4 ++--
 fs/netfs/write_issue.c   | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)
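
For illustration, below is a minimal, self-contained sketch of the locking
pattern the patch prepares for. It is not taken from the netfs sources; the
demo_req, demo_subreq, demo_issue and demo_complete names are hypothetical.
The issuing side runs in process context and appends to the tail of a list
under the _bh lock variants, so that a softirq (such as a future cache
backend DIO completion) arriving on the same CPU cannot interrupt a section
that already holds the lock and then deadlock trying to take it again. The
completion side, running in softirq context, removes entries from the front
and can use the plain variants because bottom halves are already disabled
there.

/*
 * Sketch only: a list shared between process context (appends at the
 * tail) and a softirq-driven completion path (removes from the front).
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_req {
	spinlock_t lock;
	struct list_head subrequests;
};

struct demo_subreq {
	struct list_head link;
};

/* Process context: take the lock with BHs disabled so a softirq on this
 * CPU cannot spin on a lock we already hold. */
static void demo_issue(struct demo_req *req, struct demo_subreq *subreq)
{
	spin_lock_bh(&req->lock);
	list_add_tail(&subreq->link, &req->subrequests);
	spin_unlock_bh(&req->lock);
}

/* Softirq context (e.g. a DIO completion): BHs are already disabled, so
 * the plain lock/unlock variants are sufficient here. */
static void demo_complete(struct demo_req *req)
{
	struct demo_subreq *front;

	spin_lock(&req->lock);
	front = list_first_entry_or_null(&req->subrequests,
					 struct demo_subreq, link);
	if (front)
		list_del_init(&front->link);
	spin_unlock(&req->lock);

	kfree(front);	/* kfree(NULL) is a no-op */
}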