@@ -669,10 +669,10 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
- if (bio->bi_rw & REQ_DISCARD)
+ if (bio->bi_op == REQ_OP_DISCARD)
goto integrity_clone;
- if (bio->bi_rw & REQ_WRITE_SAME) {
+ if (bio->bi_op == REQ_OP_WRITE_SAME) {
bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
goto integrity_clone;
}
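Throughout these hunks the operation moves out of the bi_rw flag bits and into the new bi_op field, so bitwise tests become equality tests. A minimal sketch of the shape these checks assume (illustrative values and helper body, not the exact upstream definitions):

/* bi_op carries exactly one operation; bi_rw keeps the remaining
 * modifier flags (REQ_SYNC, REQ_FUA, REQ_FLUSH, ...). */
enum req_op {
	REQ_OP_READ,
	REQ_OP_WRITE,
	REQ_OP_DISCARD,		/* discard sectors */
	REQ_OP_WRITE_SAME,	/* write the same block repeatedly */
};

/* Assumed helper shape: every non-read op moves data toward the device. */
static inline bool op_is_write(int op)
{
	return op != REQ_OP_READ;
}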
@@ -1795,7 +1795,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
* Discards need a mutable bio_vec to accommodate the payload
* required by the DSM TRIM and UNMAP commands.
*/
- if (bio->bi_rw & REQ_DISCARD)
+ if (bio->bi_op == REQ_OP_DISCARD)
split = bio_clone_bioset(bio, gfp, bs);
else
split = bio_clone_fast(bio, gfp, bs);
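The comment above is why the two clone paths differ: bio_clone_fast() shares the source biovec, while bio_clone_bioset() copies it so a driver can later rewrite it with a TRIM/UNMAP payload. A minimal sketch of that choice as a hypothetical helper:

/* Hypothetical helper, for illustration only. */
static struct bio *clone_for_split(struct bio *bio, gfp_t gfp,
				   struct bio_set *bs)
{
	if (bio->bi_op == REQ_OP_DISCARD)
		return bio_clone_bioset(bio, gfp, bs);	/* private biovec */
	return bio_clone_fast(bio, gfp, bs);		/* shared biovec */
}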
@@ -1151,8 +1151,7 @@ static struct request *__get_request(struct request_list *rl, int op,
blk_rq_init(q, rq);
blk_rq_set_rl(rq, rl);
- /* tmp compat - allow users to check either one for the op */
- rq->cmd_flags = op | op_flags | REQ_ALLOCED;
+ rq->cmd_flags = op_flags | REQ_ALLOCED;
rq->op = op;
/* init elvpriv */
@@ -1704,8 +1703,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
{
req->cmd_type = REQ_TYPE_FS;
- /* tmp compat. Allow users to set bi_op or bi_rw */
- req->cmd_flags |= (bio->bi_rw | bio->bi_op) & REQ_COMMON_MASK;
+ req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
if (bio->bi_rw & REQ_RAHEAD)
req->cmd_flags |= REQ_FAILFAST_MASK;
@@ -1855,9 +1853,9 @@ static void handle_bad_sector(struct bio *bio)
char b[BDEVNAME_SIZE];
printk(KERN_INFO "attempt to access beyond end of device\n");
- printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
+ printk(KERN_INFO "%s: rw=%d,%ld, want=%Lu, limit=%Lu\n",
bdevname(bio->bi_bdev, b),
- bio->bi_rw,
+ bio->bi_op, bio->bi_rw,
(unsigned long long)bio_end_sector(bio),
(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
}
@@ -1978,14 +1976,14 @@ generic_make_request_checks(struct bio *bio)
}
}
- if ((bio->bi_rw & REQ_DISCARD) &&
+ if ((bio->bi_op == REQ_OP_DISCARD) &&
(!blk_queue_discard(q) ||
((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) {
err = -EOPNOTSUPP;
goto end_io;
}
- if (bio->bi_rw & REQ_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) {
+ if (bio->bi_op == REQ_OP_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) {
err = -EOPNOTSUPP;
goto end_io;
}
@@ -2039,12 +2037,6 @@ blk_qc_t generic_make_request(struct bio *bio)
struct bio_list bio_list_on_stack;
blk_qc_t ret = BLK_QC_T_NONE;
- /* tmp compat. Allow users to set either one or both.
- * This will be removed when we have converted
- * everyone in the next patches.
- */
- bio->bi_rw |= bio->bi_op;
-
if (!generic_make_request_checks(bio))
goto out;
@@ -2114,12 +2106,6 @@ EXPORT_SYMBOL(generic_make_request);
*/
blk_qc_t submit_bio(struct bio *bio)
{
- /* tmp compat. Allow users to set either one or both.
- * This will be removed when we have converted
- * everyone in the next patches.
- */
- bio->bi_rw |= bio->bi_op;
-
/*
* If it's a regular read/write or a barrier with data attached,
* go through the normal accounting stuff before submission.
@@ -2127,12 +2113,12 @@ blk_qc_t submit_bio(struct bio *bio)
if (bio_has_data(bio)) {
unsigned int count;
- if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
+ if (unlikely(bio->bi_op == REQ_OP_WRITE_SAME))
count = bdev_logical_block_size(bio->bi_bdev) >> 9;
else
count = bio_sectors(bio);
- if (bio->bi_rw & WRITE) {
+ if (op_is_write(bio->bi_op)) {
count_vm_events(PGPGOUT, count);
} else {
task_io_account_read(bio->bi_iter.bi_size);
@@ -2143,7 +2129,7 @@ blk_qc_t submit_bio(struct bio *bio)
char b[BDEVNAME_SIZE];
printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
current->comm, task_pid_nr(current),
- (bio->bi_rw & WRITE) ? "WRITE" : "READ",
+ op_is_write(bio->bi_op) ? "WRITE" : "READ",
(unsigned long long)bio->bi_iter.bi_sector,
bdevname(bio->bi_bdev, b),
count);
@@ -2993,8 +2979,6 @@ EXPORT_SYMBOL_GPL(__blk_end_request_err);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio)
{
- /* tmp compat. Allow users to set bi_op or bi_rw */
- rq->cmd_flags |= bio_data_dir(bio);
rq->op = bio->bi_op;
if (bio_has_data(bio))
@@ -172,9 +172,9 @@ void blk_queue_split(struct request_queue *q, struct bio **bio,
struct bio *split, *res;
unsigned nsegs;
- if ((*bio)->bi_rw & REQ_DISCARD)
+ if ((*bio)->bi_op == REQ_OP_DISCARD)
split = blk_bio_discard_split(q, *bio, bs, &nsegs);
- else if ((*bio)->bi_rw & REQ_WRITE_SAME)
+ else if ((*bio)->bi_op == REQ_OP_WRITE_SAME)
split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
else
split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
@@ -213,10 +213,10 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
* This should probably be returning 0, but blk_add_request_payload()
* (Christoph!!!!)
*/
- if (bio->bi_rw & REQ_DISCARD)
+ if (bio->bi_op == REQ_OP_DISCARD)
return 1;
- if (bio->bi_rw & REQ_WRITE_SAME)
+ if (bio->bi_op == REQ_OP_WRITE_SAME)
return 1;
fbio = bio;
@@ -389,7 +389,7 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
nsegs = 0;
cluster = blk_queue_cluster(q);
- if (bio->bi_rw & REQ_DISCARD) {
+ if (bio->bi_op == REQ_OP_DISCARD) {
/*
* This is a hack - drivers should be neither modifying the
* biovec, nor relying on bi_vcnt - but because of
@@ -404,7 +404,7 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
return 0;
}
- if (bio->bi_rw & REQ_WRITE_SAME) {
+ if (bio->bi_op == REQ_OP_WRITE_SAME) {
single_segment:
*sg = sglist;
bvec = bio_iovec(bio);
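Both special cases above exist because a discard (or write-same) bio carries at most one biovec that drivers may repurpose: for discards, a payload page (e.g. the SCSI UNMAP parameter list) is stuffed into the single reserved bvec, which is why the segment count is forced to 1 and why that biovec must not be shared. A schematic sketch, with an assumed helper name, of what blk_add_request_payload()-style code does:

/* Illustrative only; not copied from a real driver. */
static void attach_discard_payload(struct bio *bio, struct page *page,
				   unsigned int len)
{
	bio->bi_io_vec[0].bv_page = page;
	bio->bi_io_vec[0].bv_offset = 0;
	bio->bi_io_vec[0].bv_len = len;
	bio->bi_vcnt = 1;	/* hence the forced single segment above */
}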
@@ -443,7 +443,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
}
if (q->dma_drain_size && q->dma_drain_needed(rq)) {
- if (rq->cmd_flags & REQ_WRITE)
+ if (op_is_write(rq->op))
memset(q->dma_drain_buffer, 0, q->dma_drain_size);
sg_unmark_end(sg);
@@ -170,8 +170,7 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
rq->q = q;
rq->mq_ctx = ctx;
rq->op = op;
- /* tmp compat - allow users to check either one for the op */
- rq->cmd_flags |= op | op_flags;
+ rq->cmd_flags |= op_flags;
/* do not touch atomic flags, it needs atomic ops against the timer */
rq->cpu = -1;
INIT_HLIST_NODE(&rq->hash);
@@ -1124,7 +1124,7 @@ static int atapi_drain_needed(struct request *rq)
if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC))
return 0;
- if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_WRITE))
+ if (!blk_rq_bytes(rq) || op_is_write(rq->op))
return 0;
return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
@@ -339,7 +339,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
goto io_error;
- if (unlikely(bio->bi_rw & REQ_DISCARD)) {
+ if (unlikely(bio->bi_op == REQ_OP_DISCARD)) {
if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
bio->bi_iter.bi_size & PAGE_MASK)
goto io_error;
@@ -1603,15 +1603,16 @@ static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
return 0;
}
-static u32 bio_flags_to_wire(struct drbd_connection *connection, unsigned long bi_rw)
+static u32 bio_flags_to_wire(struct drbd_connection *connection,
+ struct bio *bio)
{
if (connection->agreed_pro_version >= 95)
- return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
- (bi_rw & REQ_FUA ? DP_FUA : 0) |
- (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
- (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
+ return (bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
+ (bio->bi_rw & REQ_FUA ? DP_FUA : 0) |
+ (bio->bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
+ (bio->bi_op == REQ_OP_DISCARD ? DP_DISCARD : 0);
else
- return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
+ return bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
}
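For context, the peer decodes these DP_* bits back into local request flags on receive; a hedged sketch of that inverse mapping (helper name illustrative, not part of this patch), with DP_DISCARD now selecting REQ_OP_DISCARD in bi_op rather than a bi_rw flag:

static unsigned long wire_flags_to_bio_flags(u32 dpf)
{
	return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
	       (dpf & DP_FUA ? REQ_FUA : 0) |
	       (dpf & DP_FLUSH ? REQ_FLUSH : 0);
}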
/* Used to send write or TRIM aka REQ_DISCARD requests
@@ -1636,7 +1637,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
p->sector = cpu_to_be64(req->i.sector);
p->block_id = (unsigned long)req;
p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
- dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
+ dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio);
if (device->state.conn >= C_SYNC_SOURCE &&
device->state.conn <= C_PAUSED_SYNC_T)
dp_flags |= DP_MAY_SET_IN_SYNC;
@@ -174,7 +174,7 @@ void drbd_peer_request_endio(struct bio *bio)
struct drbd_peer_request *peer_req = bio->bi_private;
struct drbd_device *device = peer_req->peer_device->device;
int is_write = bio_data_dir(bio) == WRITE;
- int is_discard = !!(bio->bi_rw & REQ_DISCARD);
+ int is_discard = (bio->bi_op == REQ_OP_DISCARD);
if (bio->bi_error && __ratelimit(&drbd_ratelimit_state))
drbd_warn(device, "%s: error=%d s=%llus\n",
@@ -248,7 +248,7 @@ void drbd_request_endio(struct bio *bio)
/* to avoid recursion in __req_mod */
if (unlikely(bio->bi_error)) {
- if (bio->bi_rw & REQ_DISCARD)
+ if (bio->bi_op == REQ_OP_DISCARD)
what = (bio->bi_error == -EOPNOTSUPP)
? DISCARD_COMPLETED_NOTSUPP
: DISCARD_COMPLETED_WITH_ERROR;
@@ -447,7 +447,7 @@ static int lo_req_flush(struct loop_device *lo, struct request *rq)
static inline void handle_partial_read(struct loop_cmd *cmd, long bytes)
{
- if (bytes < 0 || (cmd->rq->cmd_flags & REQ_WRITE))
+ if (bytes < 0 || op_is_write(cmd->rq->op))
return;
if (unlikely(bytes < blk_rq_bytes(cmd->rq))) {
@@ -535,7 +535,7 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
- if (rq->cmd_flags & REQ_WRITE) {
+ if (op_is_write(rq->op)) {
if (rq->cmd_flags & REQ_FLUSH)
ret = lo_req_flush(lo, rq);
else if (rq->op == REQ_OP_DISCARD)
@@ -1666,7 +1666,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
static void loop_handle_cmd(struct loop_cmd *cmd)
{
- const bool write = cmd->rq->cmd_flags & REQ_WRITE;
+ const bool write = op_is_write(cmd->rq->op);
struct loop_device *lo = cmd->rq->q->queuedata;
int ret = 0;
@@ -3375,7 +3375,7 @@ static void rbd_queue_workfn(struct work_struct *work)
if (rq->op == REQ_OP_DISCARD)
op_type = OBJ_OP_DISCARD;
- else if (rq->cmd_flags & REQ_WRITE)
+ else if (rq->op == REQ_OP_WRITE)
op_type = OBJ_OP_WRITE;
else
op_type = OBJ_OP_READ;
@@ -705,7 +705,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
dma_cnt[i] = 0;
}
- if (bio->bi_rw & REQ_DISCARD) {
+ if (bio->bi_op == REQ_OP_DISCARD) {
bv_len = bio->bi_iter.bi_size;
while (bv_len > 0) {
@@ -462,7 +462,7 @@ static void process_page(unsigned long data)
le32_to_cpu(desc->local_addr)>>9,
le32_to_cpu(desc->transfer_size));
dump_dmastat(card, control);
- } else if ((bio->bi_rw & REQ_WRITE) &&
+ } else if (op_is_write(bio->bi_op) &&
le32_to_cpu(desc->local_addr) >> 9 ==
card->init_size) {
card->init_size += le32_to_cpu(desc->transfer_size) >> 9;
@@ -852,7 +852,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
offset = (bio->bi_iter.bi_sector &
(SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
- if (unlikely(bio->bi_rw & REQ_DISCARD)) {
+ if (unlikely(bio->bi_op == REQ_OP_DISCARD)) {
zram_bio_discard(zram, index, offset, bio);
bio_endio(bio);
return;
@@ -206,7 +206,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive,
memcpy(rq->cmd, pc->c, 12);
pc->rq = rq;
- if (rq->cmd_flags & REQ_WRITE)
+ if (cmd == WRITE)
pc->flags |= PC_FLAG_WRITING;
pc->flags |= PC_FLAG_DMA_OK;
@@ -904,7 +904,7 @@ static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
struct nvm_rq *rqd;
int err;
- if (bio->bi_rw & REQ_DISCARD) {
+ if (bio->bi_op == REQ_OP_DISCARD) {
rrpc_discard(rrpc, bio);
return BLK_QC_T_NONE;
}
@@ -378,7 +378,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
- (bio->bi_rw & REQ_DISCARD))
+ (bio->bi_op == REQ_OP_DISCARD))
goto skip;
if (mode == CACHE_MODE_NONE ||
@@ -899,7 +899,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
* But check_overlapping drops dirty keys for which io hasn't started,
* so we still want to call it.
*/
- if (bio->bi_rw & REQ_DISCARD)
+ if (bio->bi_op == REQ_OP_DISCARD)
s->iop.bypass = true;
if (should_writeback(dc, s->orig_bio,
@@ -913,7 +913,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
s->iop.bio = s->orig_bio;
bio_get(s->iop.bio);
- if (!(bio->bi_rw & REQ_DISCARD) ||
+ if (bio->bi_op != REQ_OP_DISCARD ||
blk_queue_discard(bdev_get_queue(dc->bdev)))
closure_bio_submit(bio, cl);
} else if (s->iop.writeback) {
@@ -993,7 +993,7 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q,
cached_dev_read(dc, s);
}
} else {
- if ((bio->bi_rw & REQ_DISCARD) &&
+ if ((bio->bi_op == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(dc->bdev)))
bio_endio(bio);
else
@@ -1104,7 +1104,7 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q,
&KEY(d->id, bio->bi_iter.bi_sector, 0),
&KEY(d->id, bio_end_sector(bio), 0));
- s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0;
+ s->iop.bypass = (bio->bi_op == REQ_OP_DISCARD);
s->iop.writeback = true;
s->iop.bio = bio;
@@ -788,7 +788,8 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
spin_lock_irqsave(&cache->lock, flags);
if (cache->need_tick_bio &&
- !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) {
+ !(bio->bi_rw & (REQ_FUA | REQ_FLUSH)) &&
+ bio->bi_op != REQ_OP_DISCARD) {
pb->tick = true;
cache->need_tick_bio = false;
}
@@ -851,7 +852,7 @@ static void inc_ds(struct cache *cache, struct bio *bio,
static bool accountable_bio(struct cache *cache, struct bio *bio)
{
return ((bio->bi_bdev == cache->origin_dev->bdev) &&
- !(bio->bi_rw & REQ_DISCARD));
+ bio->bi_op != REQ_OP_DISCARD);
}
static void accounted_begin(struct cache *cache, struct bio *bio)
@@ -1062,7 +1063,8 @@ static void dec_io_migrations(struct cache *cache)
static bool discard_or_flush(struct bio *bio)
{
- return bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD);
+ return bio->bi_op == REQ_OP_DISCARD ||
+ bio->bi_rw & (REQ_FLUSH | REQ_FUA);
}
static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
@@ -1975,7 +1977,7 @@ static void process_deferred_bios(struct cache *cache)
if (bio->bi_rw & REQ_FLUSH)
process_flush_bio(cache, bio);
- else if (bio->bi_rw & REQ_DISCARD)
+ else if (bio->bi_op == REQ_OP_DISCARD)
process_discard_bio(cache, &structs, bio);
else
process_bio(cache, &structs, bio);
@@ -1916,7 +1916,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
* - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
* - for REQ_DISCARD caller must use flush if IO ordering matters
*/
- if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
+ if (unlikely(bio->bi_rw & REQ_FLUSH || bio->bi_op == REQ_OP_DISCARD)) {
bio->bi_bdev = cc->dev->bdev;
if (bio_sectors(bio))
bio->bi_iter.bi_sector = cc->start +
@@ -557,7 +557,7 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
int i = 0;
bool flush_bio = (bio->bi_rw & REQ_FLUSH);
bool fua_bio = (bio->bi_rw & REQ_FUA);
- bool discard_bio = (bio->bi_rw & REQ_DISCARD);
+ bool discard_bio = (bio->bi_op == REQ_OP_DISCARD);
pb->block = NULL;
@@ -626,7 +626,7 @@ static void write_callback(unsigned long error, void *context)
* If the bio is discard, return an error, but do not
* degrade the array.
*/
- if (bio->bi_rw & REQ_DISCARD) {
+ if (bio->bi_op == REQ_OP_DISCARD) {
bio->bi_error = -EOPNOTSUPP;
bio_endio(bio);
return;
@@ -665,7 +665,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
.client = ms->io_client,
};
- if (bio->bi_rw & REQ_DISCARD) {
+ if (bio->bi_op == REQ_OP_DISCARD) {
io_req.bi_op = REQ_OP_DISCARD;
io_req.mem.type = DM_IO_KMEM;
io_req.mem.ptr.addr = NULL;
@@ -705,7 +705,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
while ((bio = bio_list_pop(writes))) {
if ((bio->bi_rw & REQ_FLUSH) ||
- (bio->bi_rw & REQ_DISCARD)) {
+ (bio->bi_op == REQ_OP_DISCARD)) {
bio_list_add(&sync, bio);
continue;
}
@@ -1253,7 +1253,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
* We need to dec pending if this was a write.
*/
if (rw == WRITE) {
- if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)))
+ if (!(bio->bi_rw & REQ_FLUSH) && bio->bi_op != REQ_OP_DISCARD)
dm_rh_dec(ms->rh, bio_record->write_region);
return error;
}
@@ -403,7 +403,7 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
return;
}
- if (bio->bi_rw & REQ_DISCARD)
+ if (bio->bi_op == REQ_OP_DISCARD)
return;
/* We must inform the log that the sync count has changed. */
@@ -526,7 +526,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
struct bio *bio;
for (bio = bios->head; bio; bio = bio->bi_next) {
- if (bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))
+ if (bio->bi_rw & REQ_FLUSH || bio->bi_op == REQ_OP_DISCARD)
continue;
rh_inc(rh, dm_rh_bio_to_region(rh, bio));
}
@@ -292,8 +292,8 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev;
return DM_MAPIO_REMAPPED;
}
- if (unlikely(bio->bi_rw & REQ_DISCARD) ||
- unlikely(bio->bi_rw & REQ_WRITE_SAME)) {
+ if (unlikely(bio->bi_op == REQ_OP_DISCARD) ||
+ unlikely(bio->bi_op == REQ_OP_WRITE_SAME)) {
target_bio_nr = dm_bio_get_target_bio_nr(bio);
BUG_ON(target_bio_nr >= sc->stripes);
return stripe_map_range(sc, bio, target_bio_nr);
@@ -705,7 +705,7 @@ static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
struct dm_thin_endio_hook *h;
- if (bio->bi_rw & REQ_DISCARD)
+ if (bio->bi_op == REQ_OP_DISCARD)
return;
h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -868,7 +868,8 @@ static void __inc_remap_and_issue_cell(void *context,
struct bio *bio;
while ((bio = bio_list_pop(&cell->bios))) {
- if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))
+ if (bio->bi_rw & (REQ_FLUSH | REQ_FUA) ||
+ bio->bi_op == REQ_OP_DISCARD)
bio_list_add(&info->defer_bios, bio);
else {
inc_all_io_entry(info->tc->pool, bio);
@@ -1646,7 +1647,8 @@ static void __remap_and_issue_shared_cell(void *context,
while ((bio = bio_list_pop(&cell->bios))) {
if ((bio_data_dir(bio) == WRITE) ||
- (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)))
+ (bio->bi_rw & (REQ_FLUSH | REQ_FUA) ||
+ bio->bi_op == REQ_OP_DISCARD))
bio_list_add(&info->defer_bios, bio);
else {
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -2035,7 +2037,7 @@ static void process_thin_deferred_bios(struct thin_c *tc)
break;
}
- if (bio->bi_rw & REQ_DISCARD)
+ if (bio->bi_op == REQ_OP_DISCARD)
pool->process_discard(tc, bio);
else
pool->process_bio(tc, bio);
@@ -2122,7 +2124,7 @@ static void process_thin_deferred_cells(struct thin_c *tc)
return;
}
- if (cell->holder->bi_rw & REQ_DISCARD)
+ if (cell->holder->bi_op == REQ_OP_DISCARD)
pool->process_discard_cell(tc, cell);
else
pool->process_cell(tc, cell);
@@ -2558,7 +2560,8 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
- if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
+ if (bio->bi_rw & (REQ_FLUSH | REQ_FUA) ||
+ bio->bi_op == REQ_OP_DISCARD) {
thin_defer_bio_with_throttle(tc, bio);
return DM_MAPIO_SUBMITTED;
}
@@ -1053,7 +1053,7 @@ static void clone_endio(struct bio *bio)
}
}
- if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) &&
+ if (unlikely(r == -EREMOTEIO && (bio->bi_op == REQ_OP_WRITE_SAME) &&
!bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
disable_write_same(md);
@@ -1737,9 +1737,9 @@ static int __split_and_process_non_flush(struct clone_info *ci)
struct dm_target *ti;
unsigned len;
- if (unlikely(bio->bi_rw & REQ_DISCARD))
+ if (unlikely(bio->bi_op == REQ_OP_DISCARD))
return __send_discard(ci);
- else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
+ else if (unlikely(bio->bi_op == REQ_OP_WRITE_SAME))
return __send_write_same(ci);
ti = dm_table_find_target(ci->map, ci->sector);
@@ -252,7 +252,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
split->bi_iter.bi_sector = split->bi_iter.bi_sector -
start_sector + data_offset;
- if (unlikely((split->bi_rw & REQ_DISCARD) &&
+ if (unlikely((split->bi_op == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
/* Just ignore it */
bio_endio(split);
@@ -488,7 +488,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
split->bi_iter.bi_sector = sector + zone->dev_start +
tmp_dev->data_offset;
- if (unlikely((split->bi_rw & REQ_DISCARD) &&
+ if (unlikely((split->bi_op == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
/* Just ignore it */
bio_endio(split);
@@ -824,7 +824,7 @@ void osd_req_write(struct osd_request *or,
{
_osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len);
WARN_ON(or->out.bio || or->out.total_bytes);
- WARN_ON(0 == (bio->bi_rw & REQ_WRITE));
+ WARN_ON(!op_is_write(bio->bi_op));
or->out.bio = bio;
or->out.total_bytes = len;
}
@@ -875,7 +875,7 @@ void osd_req_read(struct osd_request *or,
{
_osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
WARN_ON(or->in.bio || or->in.total_bytes);
- WARN_ON(bio->bi_rw & REQ_WRITE);
+ WARN_ON(op_is_write(bio->bi_op));
or->in.bio = bio;
or->in.total_bytes = len;
}
@@ -211,9 +211,9 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
return io->ci_result;
io->ci_lockreq = CILR_NEVER;
- rw = head->bi_rw;
+ rw = bio_data_dir(head);
for (bio = head; bio ; bio = bio->bi_next) {
- LASSERT(rw == bio->bi_rw);
+ LASSERT(rw == bio_data_dir(bio));
offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
bio_for_each_segment(bvec, bio, iter) {
@@ -304,9 +304,9 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
/* TODO: need to split the bio, too bad. */
LASSERT(first->bi_vcnt <= LLOOP_MAX_SEGMENTS);
- rw = first->bi_rw;
+ rw = bio_data_dir(first);
bio = &lo->lo_bio;
- while (*bio && (*bio)->bi_rw == rw) {
+ while (*bio && bio_data_dir(*bio) == rw) {
CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u \n",
(unsigned long long)(*bio)->bi_iter.bi_sector,
(*bio)->bi_iter.bi_size,
@@ -106,18 +106,23 @@ static inline bool bio_has_data(struct bio *bio)
{
if (bio &&
bio->bi_iter.bi_size &&
- !(bio->bi_rw & REQ_DISCARD))
+ bio->bi_op != REQ_OP_DISCARD)
return true;
return false;
}
+static inline bool bio_no_advance_iter(struct bio *bio)
+{
+ return bio->bi_op == REQ_OP_DISCARD || bio->bi_op == REQ_OP_WRITE_SAME;
+}
+
static inline bool bio_is_rw(struct bio *bio)
{
if (!bio_has_data(bio))
return false;
- if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
+ if (bio_no_advance_iter(bio))
return false;
return true;
@@ -225,7 +230,7 @@ static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
{
iter->bi_sector += bytes >> 9;
- if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
+ if (bio_no_advance_iter(bio))
iter->bi_size -= bytes;
else
bvec_iter_advance(bio->bi_io_vec, iter, bytes);
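A worked example of the new helper's effect: advancing a discard bio by 1 MiB moves bi_sector forward but, because bio_no_advance_iter() is true, only shrinks bi_size instead of walking the (payload-less) biovec.

/* Illustrative call site: complete 1 MiB of a larger discard. */
bio_advance_iter(bio, &bio->bi_iter, 1 << 20);
/* bi_sector += 2048, bi_size -= 1048576; bi_idx/bi_bvec_done untouched */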
@@ -253,10 +258,10 @@ static inline unsigned bio_segments(struct bio *bio)
* differently:
*/
- if (bio->bi_rw & REQ_DISCARD)
+ if (bio->bi_op == REQ_OP_DISCARD)
return 1;
- if (bio->bi_rw & REQ_WRITE_SAME)
+ if (bio->bi_op == REQ_OP_WRITE_SAME)
return 1;
bio_for_each_segment(bv, bio, iter)
@@ -2448,33 +2448,24 @@ static inline bool op_is_write(int op)
}
/*
- * return READ, READA, or WRITE
+ * return data direction, READ or WRITE
*/
-static inline int bio_rw(struct bio *bio)
+static inline int bio_data_dir(struct bio *bio)
{
- /*
- * tmp cpmpat. Allow users to set either op or rw, until
- * all code is converted in the next patches.
- */
if (op_is_write(bio->bi_op))
return WRITE;
-
- return bio->bi_rw & (RW_MASK | RWA_MASK);
+ return READ;
}
/*
- * return data direction, READ or WRITE
+ * return READ, READA, or WRITE
*/
-static inline int bio_data_dir(struct bio *bio)
+static inline int bio_rw(struct bio *bio)
{
- /*
- * tmp cpmpat. Allow users to set either op or rw, until
- * all code is converted in the next patches.
- */
- if (op_is_write(bio->bi_op))
- return WRITE;
+ if (bio->bi_rw & RWA_MASK)
+ return READA;
- return bio->bi_rw & 1;
+ return bio_data_dir(bio);
}
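An illustrative caller of the reordered helpers: the data direction now derives solely from bi_op, while READA remains a bi_rw hint layered on top by bio_rw(). The function below is hypothetical, for demonstration only.

static void example_account_bio(struct bio *bio)
{
	if (bio_data_dir(bio) == WRITE)
		pr_debug("write of %u bytes\n", bio->bi_iter.bi_size);
	else if (bio_rw(bio) == READA)
		pr_debug("readahead, may fail without retry\n");
	else
		pr_debug("normal read\n");
}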
extern void check_disk_size_change(struct gendisk *disk,