@@ -539,7 +539,7 @@ static void bd_clear_claiming(struct block_device *whole, void *holder)
* @bdev: block device of interest
* @holder: holder that has claimed @bdev
*
- * Finish exclusive open of a block device. Mark the device as exlusively
+ * Finish exclusive open of a block device. Mark the device as exclusively
* open by the holder and wake up all waiters for exclusive open to finish.
*/
static void bd_finish_claiming(struct block_device *bdev, void *holder)
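The doc-comment fixed above describes the end of the claim handshake: record @holder on the device, mark the pending claim as settled, and wake every task waiting for the exclusive open to finish. As a rough userspace model of that wake-all-waiters pattern (the struct toy_bdev layout, the toy_finish_claiming() name and the pthread condition variable are stand-ins invented here, not the kernel implementation):

#include <pthread.h>
#include <stdbool.h>

/*
 * Toy model of the claim handshake: once the exclusive open succeeds,
 * record the holder, clear the "claim in progress" state and wake all
 * waiters. Names and types here are invented for illustration only.
 */
struct toy_bdev {
	pthread_mutex_t lock;
	pthread_cond_t claim_done;
	bool claiming;		/* an exclusive open is in flight */
	void *holder;		/* non-NULL once exclusively opened */
};

static void toy_finish_claiming(struct toy_bdev *bdev, void *holder)
{
	pthread_mutex_lock(&bdev->lock);
	bdev->holder = holder;		/* device is now exclusively open... */
	bdev->claiming = false;		/* ...and the claim is no longer pending */
	pthread_cond_broadcast(&bdev->claim_done);	/* wake all waiters */
	pthread_mutex_unlock(&bdev->lock);
}

int main(void)
{
	struct toy_bdev bdev = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.claim_done = PTHREAD_COND_INITIALIZER,
		.claiming = true,
	};
	int holder;

	toy_finish_claiming(&bdev, &holder);
	return bdev.holder == &holder ? 0 : 1;
}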
@@ -1963,7 +1963,7 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
* As for throughput, we ask bfq_better_to_idle() whether we
* still need to plug I/O dispatching. If bfq_better_to_idle()
* says no, then plugging is not needed any longer, either to
- * boost throughput or to perserve service guarantees. Then
+ * boost throughput or to preserve service guarantees. Then
* the best option is to stop plugging I/O, as not doing so
* would certainly lower throughput. We may end up in this
* case if: (1) upon a dispatch attempt, we detected that it
@@ -2491,7 +2491,7 @@ static int bfq_request_merge(struct request_queue *q, struct request **req,
if (__rq && elv_bio_merge_ok(__rq, bio)) {
*req = __rq;
- if (blk_discard_mergable(__rq))
+ if (blk_discard_mergeable(__rq))
return ELEVATOR_DISCARD_MERGE;
return ELEVATOR_FRONT_MERGE;
}
@@ -37,7 +37,7 @@ EXPORT_SYMBOL_GPL(blkg_rwstat_exit);
* @pd: policy private data of interest
* @rwstat: rwstat to print
*
- * Print @rwstat to @sf for the device assocaited with @pd.
+ * Print @rwstat to @sf for the device associated with @pd.
*/
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
const struct blkg_rwstat_sample *rwstat)
@@ -63,7 +63,7 @@ static const struct sysfs_ops blk_ia_range_sysfs_ops = {
};
/*
- * Independent access range entries are not freed individually, but alltogether
+ * Independent access range entries are not freed individually, but all together
* with struct blk_independent_access_ranges and its array of ranges. Since
* kobject_add() takes a reference on the parent kobject contained in
* struct blk_independent_access_ranges, the array of independent access range
@@ -790,7 +790,7 @@ static void blk_account_io_merge_request(struct request *req)
static enum elv_merge blk_try_req_merge(struct request *req,
struct request *next)
{
- if (blk_discard_mergable(req))
+ if (blk_discard_mergeable(req))
return ELEVATOR_DISCARD_MERGE;
else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
return ELEVATOR_BACK_MERGE;
@@ -864,7 +864,7 @@ static struct request *attempt_merge(struct request_queue *q,
req->__data_len += blk_rq_bytes(next);
- if (!blk_discard_mergable(req))
+ if (!blk_discard_mergeable(req))
elv_merge_requests(q, req, next);
/*
@@ -947,7 +947,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
- if (blk_discard_mergable(rq))
+ if (blk_discard_mergeable(rq))
return ELEVATOR_DISCARD_MERGE;
else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
return ELEVATOR_BACK_MERGE;
@@ -148,7 +148,7 @@ static inline bool rq_mergeable(struct request *rq)
* 2) Otherwise, the request will be normal read/write requests. The ranges
* need to be contiguous.
*/
-static inline bool blk_discard_mergable(struct request *req)
+static inline bool blk_discard_mergeable(struct request *req)
{
if (req_op(req) == REQ_OP_DISCARD &&
queue_max_discard_segments(req->q) > 1)
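This helper, together with the blk_try_merge() and blk_try_req_merge() hunks above, spells out the merge-decision order: a discard request on a queue that accepts more than one discard segment can always take another range, while ordinary read/write requests merge only when request and bio are sector-contiguous. A compilable userspace sketch of just the branches visible in this patch (the toy_* names and simplified types are invented for illustration):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-ins for the kernel types involved in the decision. */
enum toy_req_op { TOY_OP_READ, TOY_OP_WRITE, TOY_OP_DISCARD };
enum toy_elv_merge { TOY_NO_MERGE, TOY_BACK_MERGE, TOY_DISCARD_MERGE };

struct toy_request {
	enum toy_req_op op;
	uint64_t pos;			/* first sector of the request */
	uint64_t sectors;		/* number of sectors it covers */
	unsigned int max_discard_segments;	/* queue limit */
};

/* Models blk_discard_mergeable(): a discard can take another segment as
 * long as the queue accepts multi-segment discards. */
static bool toy_discard_mergeable(const struct toy_request *req)
{
	return req->op == TOY_OP_DISCARD && req->max_discard_segments > 1;
}

/* Models the check order seen in blk_try_merge(): discard first, then
 * sector contiguity for a back merge. */
static enum toy_elv_merge toy_try_merge(const struct toy_request *req,
					uint64_t bio_sector)
{
	if (toy_discard_mergeable(req))
		return TOY_DISCARD_MERGE;
	if (req->pos + req->sectors == bio_sector)
		return TOY_BACK_MERGE;
	return TOY_NO_MERGE;
}

int main(void)
{
	struct toy_request rq = { TOY_OP_DISCARD, 2048, 16, 8 };

	printf("discard: %d\n", toy_try_merge(&rq, 4096));	/* TOY_DISCARD_MERGE */
	rq.op = TOY_OP_WRITE;
	printf("contiguous: %d\n", toy_try_merge(&rq, 2064));	/* TOY_BACK_MERGE */
	printf("gap: %d\n", toy_try_merge(&rq, 9999));		/* TOY_NO_MERGE */
	return 0;
}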
@@ -310,7 +310,7 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
if (__rq && elv_bio_merge_ok(__rq, bio)) {
*req = __rq;
- if (blk_discard_mergable(__rq))
+ if (blk_discard_mergeable(__rq))
return ELEVATOR_DISCARD_MERGE;
return ELEVATOR_BACK_MERGE;
}
@@ -142,7 +142,7 @@ struct kyber_cpu_latency {
*/
struct kyber_ctx_queue {
/*
- * Used to ensure operations on rq_list and kcq_map to be an atmoic one.
+ * Used to ensure operations on rq_list and kcq_map to be an atomic one.
* Also protect the rqs on rq_list when merge.
*/
spinlock_t lock;
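The comment corrected here is about making the two updates, linking a request onto rq_list and setting the matching bit in kcq_map, appear as one atomic step to anyone taking the lock. A minimal userspace sketch of that pairing (toy_* names, a pthread mutex standing in for the spinlock, and a single-word bitmap are assumptions made for illustration):

#include <pthread.h>

/*
 * Toy per-context queue: the lock makes "link the request onto the list"
 * and "set the bit in the map" one observable step, which is the role the
 * spinlock plays for rq_list/kcq_map above. All names here are invented.
 */
struct toy_rq {
	struct toy_rq *next;
	unsigned long sector;
};

struct toy_ctx_queue {
	pthread_mutex_t lock;
	struct toy_rq *rq_list;		/* pending requests */
	unsigned long kcq_map;		/* one bit per domain with pending work */
};

static void toy_kcq_insert(struct toy_ctx_queue *kcq, struct toy_rq *rq,
			   unsigned int domain)
{
	pthread_mutex_lock(&kcq->lock);
	rq->next = kcq->rq_list;
	kcq->rq_list = rq;
	kcq->kcq_map |= 1UL << domain;	/* both updates become visible together */
	pthread_mutex_unlock(&kcq->lock);
}

int main(void)
{
	struct toy_ctx_queue kcq = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct toy_rq rq = { .sector = 2048 };

	toy_kcq_insert(&kcq, &rq, 1);
	return (kcq.rq_list == &rq && (kcq.kcq_map & (1UL << 1))) ? 0 : 1;
}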
@@ -734,7 +734,7 @@ static int dd_request_merge(struct request_queue *q, struct request **rq,
if (elv_bio_merge_ok(__rq, bio)) {
*rq = __rq;
- if (blk_discard_mergable(__rq))
+ if (blk_discard_mergeable(__rq))
return ELEVATOR_DISCARD_MERGE;
return ELEVATOR_FRONT_MERGE;
}
Fix assorted spelling mistakes in block layer comments and rename the
misspelled blk_discard_mergable() helper to blk_discard_mergeable(),
updating all of its callers. No functional change.

Signed-off-by: Yang Hau <yuanyanghau@gmail.com>
---
 block/bdev.c              | 2 +-
 block/bfq-iosched.c       | 4 ++--
 block/blk-cgroup-rwstat.c | 2 +-
 block/blk-ia-ranges.c     | 2 +-
 block/blk-merge.c         | 6 +++---
 block/blk.h               | 2 +-
 block/elevator.c          | 2 +-
 block/kyber-iosched.c     | 2 +-
 block/mq-deadline.c       | 2 +-
 9 files changed, 12 insertions(+), 12 deletions(-)