@@ -58,9 +58,13 @@ enum {
* So, we cap these to a reasonable value.
*/
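+/*
+ * XXX: depths pinned to 1; the depth-adjustment heuristics are compiled
+ * out below, so these values stay in effect.
+ */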
static const unsigned int kyber_depth[] = {
- [KYBER_READ] = 256,
- [KYBER_SYNC_WRITE] = 128,
- [KYBER_OTHER] = 64,
+ [KYBER_READ] = 1,
+ [KYBER_SYNC_WRITE] = 1,
+ [KYBER_OTHER] = 1,
};

/*
@@ -126,6 +130,7 @@ enum {
#define IS_GOOD(status) ((status) > 0)
#define IS_BAD(status) ((status) < 0)

+#if 0 /* disable the latency tracking and depth adjustment heuristics */
static int kyber_lat_status(struct blk_stat_callback *cb,
unsigned int sched_domain, u64 target)
{
@@ -243,6 +248,7 @@ static void kyber_adjust_other_depth(struct kyber_queue_data *kqd,
if (depth != orig_depth)
sbitmap_queue_resize(&kqd->domain_tokens[KYBER_OTHER], depth);
}
+#endif /* latency heuristics disabled */

/*
* Apply heuristics for limiting queue depths based on gathered latency
@@ -250,6 +256,8 @@ static void kyber_adjust_other_depth(struct kyber_queue_data *kqd,
*/
static void kyber_stat_timer_fn(struct blk_stat_callback *cb)
{
+ return; /* heuristics disabled: never read stats or resize depths */
+#if 0 /* everything below is dead code */
struct kyber_queue_data *kqd = cb->data;
int read_status, write_status;

@@ -269,6 +277,7 @@ static void kyber_stat_timer_fn(struct blk_stat_callback *cb)
((IS_BAD(read_status) || IS_BAD(write_status) ||
kqd->domain_tokens[KYBER_OTHER].sb.depth < kyber_depth[KYBER_OTHER])))
blk_stat_activate_msecs(kqd->cb, 100);
+#endif /* end of dead code */
}

static unsigned int kyber_sched_tags_shift(struct kyber_queue_data *kqd)
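
Note on the net effect: each scheduling domain's token pool is created with a
depth of 1 and, with the heuristics compiled out, is never resized, so at most
one request per domain holds a token at a time (a request that cannot get a
token simply stays queued until one is freed). As a rough userspace analogy
(a hypothetical illustration using C11 atomics, with invented get_token() and
put_token() names rather than the kernel's sbitmap_queue API), a depth-1
token pool degenerates into a try-lock:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical analogy, not kernel code: one token == one flag bit. */
static atomic_flag token = ATOMIC_FLAG_INIT;

static bool get_token(void)
{
	/* Succeeds only if the flag was clear, i.e. the single token was free. */
	return !atomic_flag_test_and_set(&token);
}

static void put_token(void)
{
	atomic_flag_clear(&token);
}

int main(void)
{
	printf("%d\n", get_token());	/* 1: the single token is acquired */
	printf("%d\n", get_token());	/* 0: pool exhausted */
	put_token();
	printf("%d\n", get_token());	/* 1: token available again */
	return 0;
}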