@@ -3086,6 +3086,42 @@ static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
break;
}
+#ifdef CONFIG_CGROUP_DRM
+static unsigned int
+__get_class(struct drm_i915_file_private *fpriv, const struct i915_request *rq)
+{
+ unsigned int class;
+
+ class = rq->context->engine->uabi_class;
+
+ if (WARN_ON_ONCE(class >= ARRAY_SIZE(fpriv->client->throttle)))
+ class = 0;
+
+ return class;
+}
+
+static void copy_priority(struct i915_sched_attr *attr,
+ const struct i915_execbuffer *eb,
+ const struct i915_request *rq)
+{
+ struct drm_i915_file_private *file_priv = eb->file->driver_priv;
+ int prio;
+
+ *attr = eb->gem_context->sched;
+
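+ /*
+ * A non-zero throttle entry is a (negative) priority override set by
+ * the cgroup controller while this client is over its GPU budget.
+ */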
+ prio = file_priv->client->throttle[__get_class(file_priv, rq)];
+ if (prio)
+ attr->priority = prio;
+}
+#else
+static void copy_priority(struct i915_sched_attr *attr,
+ const struct i915_execbuffer *eb,
+ const struct i915_request *rq)
+{
+ *attr = eb->gem_context->sched;
+}
+#endif
+
static int eb_request_add(struct i915_execbuffer *eb, struct i915_request *rq,
int err, bool last_parallel)
{
@@ -3102,7 +3138,7 @@ static int eb_request_add(struct i915_execbuffer *eb, struct i915_request *rq,
/* Check that the context wasn't destroyed before submission */
if (likely(!intel_context_is_closed(eb->context))) {
- attr = eb->gem_context->sched;
+ copy_priority(&attr, eb, rq);
} else {
/* Serialise with context_close via the add_to_timeline */
i915_request_set_error_once(rq, -ENOENT);
@@ -1794,6 +1794,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
#ifdef CONFIG_CGROUP_DRM
static const struct drm_cgroup_ops i915_drm_cgroup_ops = {
.active_time_us = i915_drm_cgroup_get_active_time_us,
+ .signal_budget = i915_drm_cgroup_signal_budget,
};
#endif
@@ -4,6 +4,7 @@
*/
#include <linux/kernel.h>
+#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -122,6 +123,138 @@ u64 i915_drm_cgroup_get_active_time_us(struct drm_file *file)
return busy;
}
+
+int i915_drm_cgroup_signal_budget(struct drm_file *file, u64 usage, u64 budget)
+{
+ struct drm_i915_file_private *fpriv = file->driver_priv;
+ u64 class_usage[I915_LAST_UABI_ENGINE_CLASS + 1];
+ u64 class_last[I915_LAST_UABI_ENGINE_CLASS + 1];
+ struct i915_drm_client *client = fpriv->client;
+ struct drm_i915_private *i915 = fpriv->i915;
+ struct intel_engine_cs *engine;
+ bool over = usage > budget;
+ struct task_struct *task;
+ struct pid *pid;
+ unsigned int i;
+ ktime_t unused;
+ int ret = 0;
+ u64 t;
+
+ if (!supports_stats(i915))
+ return -EINVAL;
+
+ if (usage == 0 && budget == 0)
+ return 0;
+
+ rcu_read_lock();
+ pid = rcu_dereference(file->pid);
+ task = pid_task(pid, PIDTYPE_TGID);
+ if (over) {
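+ /*
+ * Count consecutive over-budget signals; on counter wrap jump to two
+ * so the special values zero (under budget) and one (first, baseline
+ * only, signal) are not reused.
+ */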
+ client->over_budget++;
+ if (!client->over_budget)
+ client->over_budget = 2;
+
+ drm_dbg(&i915->drm, "%s[%u] over budget (%llu/%llu)\n",
+ task ? task->comm : "<unknown>", pid_vnr(pid),
+ usage, budget);
+ } else {
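+ /* Dropped back under budget; clear all throttling state. */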
+ client->over_budget = 0;
+ memset(client->class_last, 0, sizeof(client->class_last));
+ memset(client->throttle, 0, sizeof(client->throttle));
+
+ drm_dbg(&i915->drm, "%s[%u] un-throttled; under budget\n",
+ task ? task->comm : "<unknown>", pid_vnr(pid));
+
+ rcu_read_unlock();
+ return 0;
+ }
+ rcu_read_unlock();
+
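+ /*
+ * Sample total busy time per physical engine class and turn it into
+ * a delta against the previous signal.
+ */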
+ memset(class_usage, 0, sizeof(class_usage));
+ for_each_uabi_engine(engine, i915)
+ class_usage[engine->uabi_class] +=
+ ktime_to_ns(intel_engine_get_busy_time(engine, &unused));
+
+ memcpy(class_last, client->class_last, sizeof(class_last));
+ memcpy(client->class_last, class_usage, sizeof(class_last));
+
+ for (i = 0; i < ARRAY_SIZE(uabi_class_names); i++)
+ class_usage[i] -= class_last[i];
+
+ t = client->last;
+ client->last = ktime_get_raw_ns();
+ t = client->last - t;
+ /* Convert the sampling period to microseconds once, outside the loop. */
+ t = DIV_ROUND_UP_ULL(t, 1000);
+
+ /*
+ * The first over-budget signal only establishes the baseline sampled
+ * above, so skip throttling until the next one arrives.
+ */
+ if (client->over_budget == 1)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(uabi_class_names); i++) {
+ u64 client_class_usage[I915_LAST_UABI_ENGINE_CLASS + 1];
+ unsigned int capacity, rel_usage;
+
+ if (!i915->engine_uabi_class_count[i])
+ continue;
+
+ class_usage[i] = DIV_ROUND_CLOSEST_ULL(class_usage[i], 1000);
+ rel_usage = DIV_ROUND_CLOSEST_ULL(class_usage[i] * 100ULL,
+ t *
+ i915->engine_uabi_class_count[i]);
+ if (rel_usage < 95) {
+ /* Physical class not oversubscribed. */
+ if (client->throttle[i]) {
+ client->throttle[i] = 0;
+
+ rcu_read_lock();
+ pid = rcu_dereference(file->pid);
+ task = pid_task(pid, PIDTYPE_TGID);
+ drm_dbg(&i915->drm,
+ "%s[%u] un-throttled; physical class %s utilisation %u%%\n",
+ task ? task->comm : "<unknown>",
+ pid_vnr(pid),
+ uabi_class_names[i],
+ rel_usage);
+ rcu_read_unlock();
+ }
+ continue;
+ }
+
+ client_class_usage[i] =
+ get_class_active_ns(client, i915, i, &capacity);
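+ /*
+ * Client is active on an oversubscribed class: map how far the
+ * cgroup is over budget (in per-mille) onto the negative user
+ * priority range, clamping at the minimum priority.
+ */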
+ if (client_class_usage[i]) {
+ int permille;
+
+ ret |= 1;
+
+ permille = DIV_ROUND_CLOSEST_ULL((usage - budget) *
+ 1000,
+ budget);
+ client->throttle[i] =
+ DIV_ROUND_CLOSEST(permille *
+ I915_CONTEXT_MIN_USER_PRIORITY,
+ 1000);
+ if (client->throttle[i] <
+ I915_CONTEXT_MIN_USER_PRIORITY)
+ client->throttle[i] =
+ I915_CONTEXT_MIN_USER_PRIORITY;
+
+ rcu_read_lock();
+ pid = rcu_dereference(file->pid);
+ task = pid_task(pid, PIDTYPE_TGID);
+ drm_dbg(&i915->drm,
+ "%s[%u] %d‰ over budget, throttled to priority %d; physical class %s utilisation %u%%\n",
+ task ? task->comm : "<unknown>",
+ pid_vnr(pid),
+ permille,
+ client->throttle[i],
+ uabi_class_names[i],
+ rel_usage);
+ rcu_read_unlock();
+ }
+ }
+
+ return ret;
+}
#endif
#ifdef CONFIG_PROC_FS
@@ -47,6 +47,13 @@ struct i915_drm_client {
* @past_runtime: Accumulation of pphwsp runtimes from closed contexts.
*/
atomic64_t past_runtime[I915_LAST_UABI_ENGINE_CLASS + 1];
+
+#ifdef CONFIG_CGROUP_DRM
+ /* Per-class scheduling priority override applied while over budget. */
+ int throttle[I915_LAST_UABI_ENGINE_CLASS + 1];
+ /* Number of consecutive over-budget signals; zero when under budget. */
+ unsigned int over_budget;
+ /* Timestamp and per-class busy times recorded at the previous signal. */
+ u64 last;
+ u64 class_last[I915_LAST_UABI_ENGINE_CLASS + 1];
+#endif
};
static inline struct i915_drm_client *
@@ -91,5 +98,7 @@ i915_drm_client_add_context_objects(struct i915_drm_client *client,
#endif
u64 i915_drm_cgroup_get_active_time_us(struct drm_file *file);
+int i915_drm_cgroup_signal_budget(struct drm_file *file,
+ u64 usage, u64 budget);
#endif /* !__I915_DRM_CLIENT_H__ */