@@ -131,6 +131,14 @@ struct iothrottle_node {
struct iothrottle_stat stat;
};
+/*
+ * This is a trick to avoid unneeded overhead when io-throttle is not used at
+ * all. We keep a counter of the io-throttle rules; if the counter is zero, we
+ * return immediately from the io-throttle hooks, without accounting I/O and
+ * without checking whether any limiting rules need to be applied.
+ */
+static atomic_t iothrottle_node_count __read_mostly;
+
/**
* struct iothrottle - throttling rules for a cgroup
* @css: pointer to the cgroup state
@@ -193,6 +201,7 @@ static void iothrottle_insert_node(struct iothrottle *iot,
{
WARN_ON_ONCE(!cgroup_is_locked());
list_add_rcu(&n->node, &iot->list);
+ atomic_inc(&iothrottle_node_count);
}
/*
@@ -214,6 +223,7 @@ iothrottle_delete_node(struct iothrottle *iot, struct iothrottle_node *n)
{
WARN_ON_ONCE(!cgroup_is_locked());
list_del_rcu(&n->node);
+ atomic_dec(&iothrottle_node_count);
}
/*
@@ -250,8 +260,10 @@ static void iothrottle_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
* reference to the list.
*/
if (!list_empty(&iot->list))
- list_for_each_entry_safe(n, p, &iot->list, node)
+ list_for_each_entry_safe(n, p, &iot->list, node) {
kfree(n);
+ atomic_dec(&iothrottle_node_count);
+ }
kfree(iot);
}
@@ -836,7 +848,7 @@ cgroup_io_throttle(struct bio *bio, struct block_device *bdev, ssize_t bytes)
unsigned long long sleep;
int type, can_sleep = 1;
- if (iothrottle_disabled())
+ if (iothrottle_disabled() || !atomic_read(&iothrottle_node_count))
return 0;
if (unlikely(!bdev))
return 0;
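
For reference, below is a minimal userspace sketch of the same fast-path pattern: a single atomic counter of configured rules lets the hot-path hook bail out with one atomic read when no rules exist, and is only updated on the (rare) insert/delete paths. It uses C11 atomics in place of the kernel's atomic_t, and all names (rule_count, add_rule, del_rule, io_throttle_hook) are made up for illustration; this is not the patch's code.

/*
 * Sketch of the zero-rule fast path, assuming C11 <stdatomic.h>.
 * Illustrative only; names do not come from the io-throttle patch.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int rule_count;		/* number of configured throttle rules */

static void add_rule(void)		/* rule insertion path (rare) */
{
	atomic_fetch_add(&rule_count, 1);
}

static void del_rule(void)		/* rule removal path (rare) */
{
	atomic_fetch_sub(&rule_count, 1);
}

static int io_throttle_hook(size_t bytes)
{
	/* Fast path: no rules configured, skip accounting entirely. */
	if (atomic_load(&rule_count) == 0)
		return 0;

	/* Slow path: account the I/O and evaluate the limiting rules. */
	printf("accounting %zu bytes against %d rule(s)\n",
	       bytes, atomic_load(&rule_count));
	return 0;
}

int main(void)
{
	io_throttle_hook(4096);		/* returns immediately: no rules */
	add_rule();
	io_throttle_hook(4096);		/* takes the accounting path */
	del_rule();
	return 0;
}

The cost in the common no-rules case is one atomic read per hook invocation, which is why the counter in the patch is also marked __read_mostly.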