@@ -382,7 +382,8 @@ xfs_submit_ioend_bio(
atomic_inc(&ioend->io_remaining);
bio->bi_private = ioend;
bio->bi_end_io = xfs_end_bio;
- bio->bi_rw = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
+ bio->bi_op = REQ_OP_WRITE;
+ bio->bi_rw = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);
submit_bio(bio);
}
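This first hunk shows the core idea of the series: the operation type moves
into the new bi_op field, while bi_rw is left carrying only modifier flags
such as WRITE_SYNC. A minimal sketch of the resulting submission pattern
(the helper name is hypothetical; the fields and constants come from the
patch itself):

        /* hypothetical caller illustrating the bi_op/bi_rw split */
        static void example_submit_write(struct bio *bio, bool sync)
        {
                bio->bi_op = REQ_OP_WRITE;              /* what the request does */
                bio->bi_rw = sync ? WRITE_SYNC : 0;     /* how it should behave */
                submit_bio(bio);
        }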
@@ -1124,7 +1124,8 @@ xfs_buf_ioapply_map(
int map,
int *buf_offset,
int *count,
- int rw)
+ int op,
+ int op_flags)
{
int page_index;
int total_nr_pages = bp->b_page_count;
@@ -1163,7 +1164,8 @@ next_chunk:
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = xfs_buf_bio_end_io;
bio->bi_private = bp;
- bio->bi_rw = rw;
+ bio->bi_op = op;
+ bio->bi_rw = op_flags;

for (; size && nr_pages; nr_pages--, page_index++) {
int rbytes, nbytes = PAGE_SIZE - offset;
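xfs_buf_ioapply_map() now receives the operation and its flags as separate
parameters and stamps them onto every bio it builds for a map segment. A
hypothetical call for a plain metadata read, assuming the new signature,
would look like:

        xfs_buf_ioapply_map(bp, i, &offset, &size, REQ_OP_READ, REQ_META);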
@@ -1207,7 +1209,8 @@ _xfs_buf_ioapply(
struct xfs_buf *bp)
{
struct blk_plug plug;
- int rw;
+ int op;
+ int op_flags = 0;
int offset;
int size;
int i;
@@ -1226,14 +1229,13 @@ _xfs_buf_ioapply(
bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue;

if (bp->b_flags & XBF_WRITE) {
+ op = REQ_OP_WRITE;
if (bp->b_flags & XBF_SYNCIO)
- rw = WRITE_SYNC;
- else
- rw = WRITE;
+ op_flags = WRITE_SYNC;
if (bp->b_flags & XBF_FUA)
- rw |= REQ_FUA;
+ op_flags |= REQ_FUA;
if (bp->b_flags & XBF_FLUSH)
- rw |= REQ_FLUSH;
+ op_flags |= REQ_FLUSH;

/*
* Run the write verifier callback function if it exists. If
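On the write side, the XBF_* buffer flags now translate into request flags
OR'd into op_flags instead of into the old combined rw value. A hypothetical
helper, equivalent to the logic above and shown only to make the mapping
explicit:

        /* hypothetical helper; mirrors the write-path flag mapping above */
        static int xbf_to_write_flags(struct xfs_buf *bp)
        {
                int op_flags = 0;

                if (bp->b_flags & XBF_SYNCIO)
                        op_flags = WRITE_SYNC;          /* synchronous write-out */
                if (bp->b_flags & XBF_FUA)
                        op_flags |= REQ_FUA;            /* force unit access */
                if (bp->b_flags & XBF_FLUSH)
                        op_flags |= REQ_FLUSH;          /* preflush the device cache */

                return op_flags;
        }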
@@ -1263,13 +1265,14 @@ _xfs_buf_ioapply(
}
}
} else if (bp->b_flags & XBF_READ_AHEAD) {
- rw = READA;
+ op = REQ_OP_READ;
+ op_flags = REQ_RAHEAD;
} else {
- rw = READ;
+ op = REQ_OP_READ;
}

/* we only use the buffer cache for meta-data */
- rw |= REQ_META;
+ op_flags |= REQ_META;

/*
* Walk all the vectors issuing IO on them. Set up the initial offset
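The read side reduces to a single REQ_OP_READ with an optional readahead
hint, and REQ_META is always OR'd in because XFS uses the buffer cache only
for metadata. Condensed into a hypothetical equivalent:

        op = REQ_OP_READ;
        op_flags = (bp->b_flags & XBF_READ_AHEAD) ? REQ_RAHEAD : 0;
        op_flags |= REQ_META;   /* buffer cache is metadata-only */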
@@ -1281,7 +1284,7 @@ _xfs_buf_ioapply(
size = BBTOB(bp->b_io_length);
blk_start_plug(&plug);
for (i = 0; i < bp->b_map_count; i++) {
- xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
+ xfs_buf_ioapply_map(bp, i, &offset, &size, op, op_flags);
if (bp->b_error)
break;
if (size <= 0)
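The last hunk only widens the argument list; the plugged loop itself keeps
its shape. For context, a sketch of the submission loop as it reads after
the change (blk_finish_plug() is outside the quoted hunk but presumably
closes the plug in the full function, letting the block layer merge the
per-map bios before they are dispatched):

        blk_start_plug(&plug);
        for (i = 0; i < bp->b_map_count; i++) {
                xfs_buf_ioapply_map(bp, i, &offset, &size, op, op_flags);
                if (bp->b_error)
                        break;          /* stop on the first submission error */
                if (size <= 0)
                        break;          /* all requested bytes issued */
        }
        blk_finish_plug(&plug);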