@@ -257,6 +257,7 @@ proto_context_create(struct drm_i915_private *i915, unsigned int flags)
if (i915->params.enable_hangcheck)
pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
pc->sched.priority = I915_PRIORITY_NORMAL;
+ pc->sched.nice = task_nice(current);
if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
if (!HAS_EXECLISTS(i915)) {
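New contexts snapshot the creating task's nice value via task_nice(current),
so a process that re-nices itself before creating contexts gets matching GPU
scheduling behaviour for free. A minimal userspace sketch of that flow,
assuming only the standard setpriority(2)/getpriority(2) calls (the DRM
context-creation ioctls are elided):

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	/* Mark this process as background work: nice +10. */
	if (setpriority(PRIO_PROCESS, 0, 10))
		perror("setpriority");

	/*
	 * Any GEM context created from here on starts with
	 * pc->sched.nice == 10 and, while left at the default
	 * priority, a correspondingly lower effective GPU priority.
	 */
	printf("current nice: %d\n", getpriority(PRIO_PROCESS, 0));
	return 0;
}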
@@ -250,7 +250,7 @@ static struct i915_priolist *to_priolist(struct rb_node *rb)
static int rq_prio(const struct i915_request *rq)
{
- return READ_ONCE(rq->sched.attr.priority);
+ return i915_request_priority(rq);
}
static int effective_prio(const struct i915_request *rq)
@@ -8,9 +8,33 @@
#include <linux/types.h>
#include "i915_drm_client.h"
+#include "gem/i915_gem_context.h"
#include "i915_gem.h"
#include "i915_utils.h"
+static int
+clients_notify(struct notifier_block *nb, unsigned long val, void *ptr)
+{
+ struct i915_drm_clients *clients =
+ container_of(nb, typeof(*clients), prio_notifier);
+ struct i915_drm_client *client;
+ unsigned long flags;
+
+ read_lock_irqsave(&clients->lock, flags);
+ hash_for_each_possible(clients->tasks, client, node, (uintptr_t)ptr) {
+ struct i915_gem_context *ctx;
+
+ if (client->owner != ptr)
+ continue;
+
+ list_for_each_entry_rcu(ctx, &client->ctx_list, client_link)
+ ctx->sched.nice = (int)val;
+ }
+ read_unlock_irqrestore(&clients->lock, flags);
+
+ return NOTIFY_DONE;
+}
+
void i915_drm_clients_init(struct i915_drm_clients *clients,
struct drm_i915_private *i915)
{
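The callback above runs on every nice change of a tracked task: val carries
the new nice value and ptr the task_struct pointer used as the hash key.
hash_for_each_possible() visits every entry in the bucket the key hashes
into, so the explicit client->owner != ptr test is what filters out
unrelated clients whose owners merely collide in the 64-bucket table. A
stripped-down sketch of that pattern, with hypothetical demo_* names:

#include <linux/hashtable.h>
#include <linux/types.h>

struct demo_client {
	void *owner;
	struct hlist_node node;
};

static DECLARE_HASHTABLE(demo_tasks, 6); /* 2^6 = 64 buckets */

static void demo_for_each_owned(void *owner)
{
	struct demo_client *dc;

	/* Walks one bucket, not the whole table... */
	hash_for_each_possible(demo_tasks, dc, node, (uintptr_t)owner) {
		if (dc->owner != owner) /* ...so skip key collisions. */
			continue;
		/* dc really belongs to 'owner': act on it. */
	}
}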
@@ -21,6 +45,10 @@ void i915_drm_clients_init(struct i915_drm_clients *clients,
rwlock_init(&clients->lock);
hash_init(clients->tasks);
+
+ memset(&clients->prio_notifier, 0, sizeof(clients->prio_notifier));
+ clients->prio_notifier.notifier_call = clients_notify;
+ register_user_nice_notifier(&clients->prio_notifier);
}
struct i915_drm_client *i915_drm_client_add(struct i915_drm_clients *clients)
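register_user_nice_notifier() is not a mainline API; it is added by the
companion scheduler patch of this series. A plausible sketch of that side,
assuming an atomic notifier chain kicked from set_user_nice() with the new
value and the task as payload (matching how clients_notify() interprets val
and ptr):

#include <linux/notifier.h>
#include <linux/sched.h>

static ATOMIC_NOTIFIER_HEAD(user_nice_notifier);

void register_user_nice_notifier(struct notifier_block *nb)
{
	atomic_notifier_chain_register(&user_nice_notifier, nb);
}

void unregister_user_nice_notifier(struct notifier_block *nb)
{
	atomic_notifier_chain_unregister(&user_nice_notifier, nb);
}

/* Called from set_user_nice() once the new value is committed. */
static void user_nice_notify(struct task_struct *p, long nice)
{
	atomic_notifier_call_chain(&user_nice_notifier, nice, p);
}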
@@ -63,9 +91,9 @@ void __i915_drm_client_free(struct kref *kref)
struct xarray *xa = &clients->xarray;
unsigned long flags;
- write_lock(&clients->lock);
+ write_lock_irq(&clients->lock);
hash_del(&client->node);
- write_unlock(&clients->lock);
+ write_unlock_irq(&clients->lock);
xa_lock_irqsave(xa, flags);
__xa_erase(xa, client->id);
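The plain write_lock() calls become _irq variants because clients->lock is
now also taken by the nice notifier, whose read side uses
read_lock_irqsave(). Assuming the notifier can fire from a context that
interrupts this CPU, an unprotected writer would deadlock in the classic
single-CPU interleaving (illustrative):

	CPU0, process context           CPU0, interrupt
	write_lock(&clients->lock);
	  <interrupt fires>
	                                read_lock_irqsave(&clients->lock, ...);
	                                /* spins forever; the writer below
	                                   it can never run to unlock */

Disabling interrupts for the duration of the write side closes the window.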
@@ -75,6 +103,8 @@ void __i915_drm_client_free(struct kref *kref)
void i915_drm_clients_fini(struct i915_drm_clients *clients)
{
+ unregister_user_nice_notifier(&clients->prio_notifier);
+
GEM_BUG_ON(!xa_empty(&clients->xarray));
xa_destroy(&clients->xarray);
}
@@ -88,12 +118,12 @@ void i915_drm_client_update_owner(struct i915_drm_client *client,
return;
clients = client->clients;
- write_lock(&clients->lock);
+ write_lock_irq(&clients->lock);
if (READ_ONCE(client->owner) != owner) {
if (client->owner)
hash_del(&client->node);
client->owner = owner;
hash_add(clients->tasks, &client->node, (uintptr_t)owner);
}
- write_unlock(&clients->lock);
+ write_unlock_irq(&clients->lock);
}
@@ -9,6 +9,7 @@
#include <linux/hashtable.h>
#include <linux/kref.h>
#include <linux/list.h>
+#include <linux/notifier.h>
#include <linux/rwlock.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
@@ -24,6 +25,8 @@ struct i915_drm_clients {
rwlock_t lock;
DECLARE_HASHTABLE(tasks, 6);
+
+ struct notifier_block prio_notifier;
};
struct i915_drm_client {
@@ -1930,7 +1930,7 @@ static int print_sched_attr(const struct i915_sched_attr *attr,
return x;
x += snprintf(buf + x, len - x,
- " prio=%d", attr->priority);
+ " prio=%d nice=%d", attr->priority, attr->nice);
return x;
}
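With both fields printed, a request from a default-priority context owned by
a nice 10 task would show up in engine dumps roughly as (illustrative
fragment, surrounding fields elided):

	prio=0 nice=10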
@@ -399,6 +399,11 @@ long i915_request_wait(struct i915_request *rq,
#define I915_WAIT_PRIORITY BIT(1) /* small priority bump for the request */
#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
+static inline int i915_request_priority(const struct i915_request *rq)
+{
+ return i915_sched_attr_priority(&rq->sched.attr);
+}
+
void i915_request_show(struct drm_printer *m,
const struct i915_request *rq,
const char *prefix,
@@ -155,7 +155,7 @@ lock_sched_engine(struct i915_sched_node *node,
static void __i915_schedule(struct i915_sched_node *node,
const struct i915_sched_attr *attr)
{
- const int prio = max(attr->priority, node->attr.priority);
+ const int prio = max(i915_sched_attr_priority(attr), node->attr.priority);
struct i915_sched_engine *sched_engine;
struct i915_dependency *dep, *p;
struct i915_dependency stack;
@@ -305,6 +305,7 @@ void i915_sched_node_init(struct i915_sched_node *node)
void i915_sched_node_reinit(struct i915_sched_node *node)
{
node->attr.priority = I915_PRIORITY_INVALID;
+ node->attr.nice = 0;
node->semaphores = 0;
node->flags = 0;
@@ -38,6 +38,20 @@ void i915_sched_node_fini(struct i915_sched_node *node);
void i915_schedule(struct i915_request *request,
const struct i915_sched_attr *attr);
+static inline int i915_sched_attr_priority(const struct i915_sched_attr *attr)
+{
+ int prio = attr->priority;
+
+	/*
+	 * Only contexts left at I915_CONTEXT_DEFAULT_PRIORITY (zero) follow
+	 * the nice setting; an explicit uAPI priority takes precedence.
+	 */
+ if (!prio)
+ prio = -attr->nice;
+
+ return prio;
+}
+
struct list_head *
i915_sched_lookup_priolist(struct i915_sched_engine *sched_engine, int prio);
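A standalone illustration of this mapping (plain userspace C with demo-only
names, not driver code): contexts left at the default priority, zero, track
the owner's nice with the sign inverted, while any explicit priority wins
outright.

#include <stdio.h>

struct demo_attr { /* stand-in for struct i915_sched_attr */
	int priority;
	int nice;
};

static int demo_effective_priority(const struct demo_attr *attr)
{
	return attr->priority ? attr->priority : -attr->nice;
}

int main(void)
{
	const struct demo_attr cases[] = {
		{ .priority = 0,   .nice = 0 },  /* default, nice 0  ->   0 */
		{ .priority = 0,   .nice = 10 }, /* default, nice 10 -> -10 */
		{ .priority = 0,   .nice = -5 }, /* default, nice -5 ->   5 */
		{ .priority = 512, .nice = 10 }, /* explicit priority wins  */
	};

	for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("prio=%4d nice=%3d -> effective %4d\n",
		       cases[i].priority, cases[i].nice,
		       demo_effective_priority(&cases[i]));

	return 0;
}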
@@ -29,6 +29,14 @@ struct i915_sched_attr {
* The &drm_i915_private.kernel_context is assigned the lowest priority.
*/
int priority;
+
+ /**
+ * @nice: context nice level
+ *
+	 * Nice level follows the CPU nice value of the owning process and
+	 * only takes effect while @priority is left at the default (zero).
+ */
+ int nice;
};
/*
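For completeness, userspace opts a context out of nice-following simply by
setting any non-zero explicit priority through the long-standing
context-param uAPI, which i915_sched_attr_priority() then prefers. A sketch
assuming libdrm, an open render-node fd and an already-created context:

#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int set_ctx_priority(int fd, uint32_t ctx_id, int prio)
{
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.param = I915_CONTEXT_PARAM_PRIORITY,
		.value = prio, /* non-zero: effective priority no longer
				  tracks the owner's nice */
	};

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}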