@@ -1129,39 +1129,88 @@ static void submit_bio_wait_endio(struct bio *bio)
complete(bio->bi_private);
 }
 
+static int __submit_bio_wait(struct bio *bio, struct completion *done,
+		int state)
+{
+ long timeout;
+ long ret = 0;
+
+ bio->bi_private = done;
+ bio->bi_end_io = submit_bio_wait_endio;
+ bio->bi_opf |= REQ_SYNC;
+ submit_bio(bio);
+
+ /* Prevent hang_check timer from firing at us during very long I/O */
+ timeout = sysctl_hung_task_timeout_secs * (HZ / 2);
+ if (!timeout)
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ while (ret == 0)
+ ret = __wait_for_completion_io(done, timeout, state);
+
+ if (ret < 0)
+ return ret;
+
+ return blk_status_to_errno(bio->bi_status);
+}
+
/**
- * submit_bio_wait - submit a bio, and wait until it completes
+ * submit_bio_wait - Submit a bio, and wait for it to complete.
* @bio: The &struct bio which describes the I/O
*
* Simple wrapper around submit_bio(). Returns 0 on success, or the error from
* bio_endio() on failure.
*
- * WARNING: Unlike to how submit_bio() is usually used, this function does not
- * result in bio reference to be consumed. The caller must drop the reference
- * on his own.
+ * WARNING: Unlike how submit_bio() is usually used, this function does
+ * not result in a bio reference being consumed. The caller must drop
+ * the reference.
*/
int submit_bio_wait(struct bio *bio)
{
DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);
- unsigned long hang_check;
+ return __submit_bio_wait(bio, &done, TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(submit_bio_wait);
 
- bio->bi_private = &done;
- bio->bi_end_io = submit_bio_wait_endio;
- bio->bi_opf |= REQ_SYNC;
- submit_bio(bio);
-
+/**
+ * submit_bio_killable - Submit a bio and killably wait for completion.
+ * @bio: The &struct bio which describes the I/O
+ * @async_end_io: Callback invoked if the wait is interrupted
+ *
+ * Submits the bio to the block device and waits for it to complete.
+ * If the wait is interrupted by a fatal signal, @async_end_io will be
+ * called when the I/O eventually completes.  Unlike submit_bio_wait(),
+ * this call consumes a reference to the bio (unless we receive a
+ * fatal signal, in which case @async_end_io must consume it instead).
+ *
+ * Return: 0 if the bio completed successfully, -ERESTARTSYS if we
+ * received a fatal signal, or another negative errno if the bio
+ * could not complete.
+ */
+int submit_bio_killable(struct bio *bio, bio_end_io_t async_end_io)
+{
+ DECLARE_COMPLETION_ONSTACK_MAP(cmpl, bio->bi_disk->lockdep_map);
+ int err = __submit_bio_wait(bio, &cmpl, TASK_KILLABLE);
+
- /* Prevent hang_check timer from firing at us during very long I/O */
- hang_check = sysctl_hung_task_timeout_secs;
- if (hang_check)
- while (!wait_for_completion_io_timeout(&done,
- hang_check * (HZ/2)))
- ;
- else
- wait_for_completion_io(&done);
-
+ if (likely(err != -ERESTARTSYS))
+ goto completed;
+
- return blk_status_to_errno(bio->bi_status);
+ bio->bi_end_io = async_end_io;
+ synchronize_rcu();
+ /*
+ * Nobody else can touch the completion now, but the bio may have
+ * completed while we waited for the grace period. It doesn't
+ * really matter what error we return since the task is about to
+ * die, but we must not leak the bio.
+ */
+ if (!cmpl.done)
+ return err;
+
+ err = blk_status_to_errno(bio->bi_status);
+completed:
+ bio_put(bio);
+ return err;
}
-EXPORT_SYMBOL(submit_bio_wait);
+EXPORT_SYMBOL(submit_bio_killable);
 
/**
* bio_advance - increment/complete a bio by some number of bytes
@@ -431,6 +431,7 @@ static inline void bio_wouldblock_error(struct bio *bio)
 
 struct request_queue;
extern int submit_bio_wait(struct bio *bio);
+extern int submit_bio_killable(struct bio *bio, bio_end_io_t async_end_io);
 extern void bio_advance(struct bio *, unsigned);
 
extern void bio_init(struct bio *bio, struct bio_vec *table,
@@ -108,6 +108,7 @@ extern unsigned long wait_for_completion_timeout(struct completion *x,
unsigned long timeout);
extern unsigned long wait_for_completion_io_timeout(struct completion *x,
unsigned long timeout);
+extern long __wait_for_completion_io(struct completion *x, long timeout, int state);
extern long wait_for_completion_interruptible_timeout(
struct completion *x, unsigned long timeout);
extern long wait_for_completion_killable_timeout(
@@ -117,11 +117,12 @@ wait_for_common(struct completion *x, long timeout, int state)
return __wait_for_common(x, schedule_timeout, timeout, state);
 }
 
-static long __sched
-wait_for_common_io(struct completion *x, long timeout, int state)
+long __sched
+__wait_for_completion_io(struct completion *x, long timeout, int state)
{
return __wait_for_common(x, io_schedule_timeout, timeout, state);
}
+EXPORT_SYMBOL_GPL(__wait_for_completion_io);
 
/**
* wait_for_completion: - waits for completion of a task
@@ -168,7 +169,7 @@ EXPORT_SYMBOL(wait_for_completion_timeout);
*/
void __sched wait_for_completion_io(struct completion *x)
{
- wait_for_common_io(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
+ __wait_for_completion_io(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
 EXPORT_SYMBOL(wait_for_completion_io);
 
@@ -188,7 +189,7 @@ EXPORT_SYMBOL(wait_for_completion_io);
unsigned long __sched
wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
{
- return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE);
+ return __wait_for_completion_io(x, timeout, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_io_timeout);
This new function allows the user to interrupt the I/O wait with a
fatal signal, as long as the caller provides an alternative function
to clean up once the I/O does complete.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 block/bio.c                | 87 +++++++++++++++++++++++++++++---------
 include/linux/bio.h        |  1 +
 include/linux/completion.h |  1 +
 kernel/sched/completion.c  |  9 ++--
 4 files changed, 75 insertions(+), 23 deletions(-)
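
For illustration, here is how a caller might use the new interface.
This is a minimal sketch, not part of the patch: the names
example_read_sector() and example_read_end_io() are hypothetical, and
the bio setup is the era-appropriate boilerplate assumed from context.
The point is the ownership rule in the submit_bio_killable()
kernel-doc: on -ERESTARTSYS the bio and its payload belong to
@async_end_io; on any other return value the bio reference has already
been dropped.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

/* Runs only if the wait was interrupted by a fatal signal. */
static void example_read_end_io(struct bio *bio)
{
	/* The issuing task is gone; free the payload and the bio. */
	__free_page(bio_first_page_all(bio));
	bio_put(bio);
}

/* Read one page at @sector from @bdev, waiting killably. */
static int example_read_sector(struct block_device *bdev, sector_t sector)
{
	struct page *page;
	struct bio *bio;
	int err;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	bio = bio_alloc(GFP_KERNEL, 1);
	if (!bio) {
		__free_page(page);
		return -ENOMEM;
	}
	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_READ;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	err = submit_bio_killable(bio, example_read_end_io);
	if (err == -ERESTARTSYS) {
		/* Interrupted: page and bio now belong to the callback. */
		return err;
	}

	if (!err) {
		/* ... use the page contents ... */
	}

	/* Completed or failed: submit_bio_killable() already put the bio. */
	__free_page(page);
	return err;
}

Note that the callback must not touch anything on the issuing task's
stack: by the time the interrupted I/O finally completes, that task may
already have exited. Everything the callback needs has to be reachable
from the bio itself.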