@@ -902,6 +902,19 @@ static void elv_ioq_served(struct io_queue *ioq, unsigned long served)
queue_charge = group_charge = served;
/*
+ * For single ioq schedulers we don't expire the queue if there are
+ * no other competing groups. It might happen that a queue goes
+ * unexpired for a long time and then a new group is created and IO
+ * arrives in that new group. In that case, we don't want to charge
+ * the old queue for the whole period it went unexpired.
+ */
+
+ if (elv_ioq_charge_one_slice(ioq) && queue_charge > allocated_slice)
+ queue_charge = group_charge = allocated_slice;
+
+ elv_clear_ioq_charge_one_slice(ioq);
+
+ /*
* We don't want to charge more than allocated slice otherwise this
* queue can miss one dispatch round doubling max latencies. On the
* other hand we don't want to charge less than allocated slice as
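In effect, the new check bounds how much an unexpired queue can be billed when it is finally expired. A standalone sketch of that arithmetic, with made-up numbers and the helper name cap_queue_charge() invented purely for illustration:

#include <stdio.h>

/* Mirrors the hunk above: when charge_one_slice is set, bill at most one
 * allocated slice; otherwise bill the full served time. */
static unsigned long cap_queue_charge(unsigned long served,
				      unsigned long allocated_slice,
				      int charge_one_slice)
{
	if (charge_one_slice && served > allocated_slice)
		return allocated_slice;
	return served;
}

int main(void)
{
	/* Queue ran unexpired for 500 time units but the slice is 100. */
	printf("%lu\n", cap_queue_charge(500, 100, 1));	/* 100 */
	/* Served less than a slice: charge what was actually used. */
	printf("%lu\n", cap_queue_charge(80, 100, 1));	/* 80 */
	return 0;
}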
@@ -2143,6 +2156,37 @@ void elv_reset_request_ioq(struct request_queue *q, struct request *rq)
}
}
+static inline int is_only_root_group(void)
+{
+ if (list_empty(&io_root_cgroup.css.cgroup->children))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * For single ioq schedulers we can optimize by not expiring the queue
+ * every time a slice is used up. This avoids some unnecessary overhead,
+ * especially in AS, where we wait for requests from the previous queue
+ * to finish before the new queue is scheduled in.
+ */
+static inline int single_ioq_no_timed_expiry(struct request_queue *q)
+{
+ struct elv_fq_data *efqd = q->elevator->efqd;
+ struct io_queue *ioq = elv_active_ioq(q->elevator);
+
+ if (!elv_iosched_single_ioq(q->elevator))
+ return 0;
+
+ if (!is_only_root_group())
+ return 0;
+
+ if (efqd->busy_queues == 1 && ioq == efqd->root_group->ioq)
+ return 1;
+
+ return 0;
+}
+
#else /* CONFIG_GROUP_IOSCHED */
static inline unsigned int iog_weight(struct io_group *iog) { return 0; }
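The predicate above combines three conditions. Reduced to plain booleans (a userspace sketch with invented parameter names, not kernel code), the decision is:

#include <stdbool.h>
#include <stdio.h>

/* Skip timed expiry only for a single-ioq elevator, a flat (root-only)
 * cgroup hierarchy, and exactly one busy queue, which must be the root
 * group's queue. */
static bool no_timed_expiry(bool single_ioq_sched, bool only_root_group,
			    int busy_queues, bool active_is_root_ioq)
{
	return single_ioq_sched && only_root_group &&
	       busy_queues == 1 && active_is_root_ioq;
}

int main(void)
{
	/* noop/deadline/AS with only the root cgroup: expiry can be skipped. */
	printf("%d\n", no_timed_expiry(true, true, 1, true));	/* 1 */
	/* More than one busy queue: fairness requires normal expiry. */
	printf("%d\n", no_timed_expiry(true, true, 2, false));	/* 0 */
	return 0;
}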
@@ -2188,6 +2232,17 @@ int elv_iog_should_idle(struct io_queue *ioq) { return 0; }
EXPORT_SYMBOL(elv_iog_should_idle);
static int elv_ioq_should_wait_busy(struct io_queue *ioq) { return 0; }
+static inline int is_only_root_group(void)
+{
+ return 1;
+}
+
+/* Never expire the single ioq in flat mode */
+static inline int single_ioq_no_timed_expiry(struct request_queue *q)
+{
+ return 1;
+}
+
#endif /* CONFIG_GROUP_IOSCHED */
/*
@@ -2794,6 +2849,16 @@ void *elv_select_ioq(struct request_queue *q, int force)
goto expire;
}
+ /*
+ * If only the root group is present, don't expire the queue for
+ * single queue ioschedulers (noop, deadline, AS).
+ */
+
+ if (single_ioq_no_timed_expiry(q)) {
+ elv_mark_ioq_charge_one_slice(ioq);
+ goto keep_queue;
+ }
+
/* We are waiting for this group to become busy before it expires.*/
if (elv_iog_wait_busy(iog)) {
ioq = NULL;
@@ -3015,6 +3080,16 @@ void elv_ioq_completed_request(struct request_queue *q, struct request *rq)
elv_clear_ioq_slice_new(ioq);
}
+ /*
+ * If only the root group is present, don't expire the queue
+ * for single queue ioschedulers (noop, deadline, AS); expiry
+ * would be unnecessary overhead.
+ */
+ if (single_ioq_no_timed_expiry(q)) {
+ elv_mark_ioq_charge_one_slice(ioq);
+ elv_log_ioq(efqd, ioq, "single ioq no timed expiry");
+ goto done;
+ }
/*
* If there are no requests waiting in this queue, and
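Putting the pieces together: both call sites keep the queue and set the hint, and elv_ioq_served() later applies the cap and clears it. A toy, userspace-only walkthrough of that lifecycle (struct toy_ioq and the function names below are invented stand-ins, not patch code):

#include <stdio.h>

struct toy_ioq {
	int charge_one_slice;
	unsigned long charged;
};

/* Stands in for elv_select_ioq()/elv_ioq_completed_request(): the queue is
 * kept, and we note that at most one slice should be charged on expiry. */
static void keep_queue(struct toy_ioq *ioq)
{
	ioq->charge_one_slice = 1;
}

/* Stands in for elv_ioq_served(): apply the one-slice cap, then drop it. */
static void account_served(struct toy_ioq *ioq, unsigned long served,
			   unsigned long allocated_slice)
{
	if (ioq->charge_one_slice && served > allocated_slice)
		served = allocated_slice;
	ioq->charge_one_slice = 0;
	ioq->charged += served;
}

int main(void)
{
	struct toy_ioq ioq = { 0, 0 };

	keep_queue(&ioq);
	/* A new group appears after the queue ran unexpired for 500 units. */
	account_served(&ioq, 500, 100);
	printf("charged %lu\n", ioq.charged);	/* 100, not 500 */
	return 0;
}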
@@ -242,6 +242,8 @@ enum elv_queue_state_flags {
ELV_QUEUE_FLAG_slice_new, /* no requests dispatched in slice */
ELV_QUEUE_FLAG_sync, /* synchronous queue */
ELV_QUEUE_FLAG_must_expire, /* expire queue even slice is left */
+ ELV_QUEUE_FLAG_charge_one_slice, /* Charge the queue for at most
+ * one time slice length */
};
#define ELV_IO_QUEUE_FLAG_FNS(name) \
@@ -265,6 +267,7 @@ ELV_IO_QUEUE_FLAG_FNS(idle_window)
ELV_IO_QUEUE_FLAG_FNS(slice_new)
ELV_IO_QUEUE_FLAG_FNS(sync)
ELV_IO_QUEUE_FLAG_FNS(must_expire)
+ELV_IO_QUEUE_FLAG_FNS(charge_one_slice)
#ifdef CONFIG_GROUP_IOSCHED
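For completeness, ELV_IO_QUEUE_FLAG_FNS() is what generates the elv_mark_ioq_charge_one_slice(), elv_clear_ioq_charge_one_slice() and elv_ioq_charge_one_slice() helpers used in the earlier hunks. A standalone sketch of the presumable expansion, in the style of CFQ's CFQ_CFQQ_FNS(); the toy struct and flag value below are stand-ins for the real definitions:

#include <stdio.h>

struct io_queue { unsigned int flags; };
enum { ELV_QUEUE_FLAG_charge_one_slice = 9 };	/* made-up bit position */

/* Generates set/clear/test accessors for one bit in ioq->flags. */
#define ELV_IO_QUEUE_FLAG_FNS(name)					\
static inline void elv_mark_ioq_##name(struct io_queue *ioq)		\
{									\
	ioq->flags |= (1 << ELV_QUEUE_FLAG_##name);			\
}									\
static inline void elv_clear_ioq_##name(struct io_queue *ioq)		\
{									\
	ioq->flags &= ~(1 << ELV_QUEUE_FLAG_##name);			\
}									\
static inline int elv_ioq_##name(struct io_queue *ioq)			\
{									\
	return (ioq->flags & (1 << ELV_QUEUE_FLAG_##name)) != 0;	\
}

ELV_IO_QUEUE_FLAG_FNS(charge_one_slice)

int main(void)
{
	struct io_queue ioq = { 0 };

	elv_mark_ioq_charge_one_slice(&ioq);
	printf("%d\n", elv_ioq_charge_one_slice(&ioq));	/* 1 */
	elv_clear_ioq_charge_one_slice(&ioq);
	printf("%d\n", elv_ioq_charge_one_slice(&ioq));	/* 0 */
	return 0;
}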