@@ -179,11 +179,10 @@ static void run_ordered_work(struct btrfs_workqueue *wq,
struct list_head *list = &wq->ordered_list;
struct btrfs_work *work;
spinlock_t *lock = &wq->list_lock;
- unsigned long flags;
bool free_self = false;

while (1) {
- spin_lock_irqsave(lock, flags);
+ spin_lock(lock);
if (list_empty(list))
break;
work = list_entry(list->next, struct btrfs_work,
@@ -207,13 +206,13 @@ static void run_ordered_work(struct btrfs_workqueue *wq,
if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
break;
trace_btrfs_ordered_sched(work);
- spin_unlock_irqrestore(lock, flags);
+ spin_unlock(lock);
work->ordered_func(work);

/* now take the lock again and drop our item from the list */
- spin_lock_irqsave(lock, flags);
+ spin_lock(lock);
list_del(&work->ordered_list);
- spin_unlock_irqrestore(lock, flags);
+ spin_unlock(lock);

if (work == self) {
/*
@@ -248,7 +247,7 @@ static void run_ordered_work(struct btrfs_workqueue *wq,
trace_btrfs_all_work_done(wq->fs_info, work);
}
}
- spin_unlock_irqrestore(lock, flags);
+ spin_unlock(lock);

if (free_self) {
self->ordered_free(self);
@@ -307,14 +306,12 @@ void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,

void btrfs_queue_work(struct btrfs_workqueue *wq, struct btrfs_work *work)
{
- unsigned long flags;
-
work->wq = wq;
thresh_queue_hook(wq);
if (work->ordered_func) {
- spin_lock_irqsave(&wq->list_lock, flags);
+ spin_lock(&wq->list_lock);
list_add_tail(&work->ordered_list, &wq->ordered_list);
- spin_unlock_irqrestore(&wq->list_lock, flags);
+ spin_unlock(&wq->list_lock);
}
trace_btrfs_work_queued(work);
queue_work(wq->normal_wq, &work->normal_work);
btrfs_queue_work with an ordered_func is never called from irq context,
so remove the irq disabling for btrfs_workqueue.list_lock.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 fs/btrfs/async-thread.c | 17 +++++++----------
 1 file changed, 7 insertions(+), 10 deletions(-)
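
For readers less familiar with the locking rule being applied here, a minimal
sketch follows. It is illustrative only and not part of the patch; the
demo_queue/demo_enqueue names are made up for the example. The point is that
the irqsave/irqrestore spinlock variants are only needed when a lock can also
be acquired from interrupt context; a lock that is only ever taken from
process context (syscall paths and workqueue workers, as is the case for
btrfs_workqueue.list_lock) can use the plain spin_lock()/spin_unlock() pair.

/* Illustrative sketch, not btrfs code: a list protected by a spinlock that
 * is only ever taken from process context, so no irqsave/irqrestore needed.
 */
#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_queue {			/* hypothetical type for illustration */
	spinlock_t		lock;
	struct list_head	items;
};

struct demo_item {
	struct list_head	list;
};

static void demo_queue_init(struct demo_queue *q)
{
	spin_lock_init(&q->lock);
	INIT_LIST_HEAD(&q->items);
}

/* Called from process context only (e.g. a syscall path or a worker). */
static void demo_enqueue(struct demo_queue *q, struct demo_item *item)
{
	/*
	 * No interrupt handler ever takes q->lock, so disabling local
	 * interrupts around the critical section would only add cost.
	 */
	spin_lock(&q->lock);
	list_add_tail(&item->list, &q->items);
	spin_unlock(&q->lock);
}

/*
 * If the same lock could also be taken from hard-irq context, the
 * process-context side would instead need:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&q->lock, flags);
 *	...
 *	spin_unlock_irqrestore(&q->lock, flags);
 *
 * to avoid deadlocking against an interrupt arriving on the same CPU
 * while the lock is held.
 */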