[RFC -v3 2/3] sched: add yield_to function

Message ID 1294248190.31375.14.camel@marge.simson.net (mailing list archive)

Commit Message

Mike Galbraith Jan. 5, 2011, 5:23 p.m. UTC

Patch

Index: linux-2.6/include/linux/sched.h
===================================================================
--- linux-2.6.orig/include/linux/sched.h
+++ linux-2.6/include/linux/sched.h
@@ -1056,6 +1056,7 @@  struct sched_class {
 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
 	void (*yield_task) (struct rq *rq);
+	int (*yield_to_task) (struct task_struct *p, int preempt);
 
 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
 
Index: linux-2.6/kernel/sched.c
===================================================================
--- linux-2.6.orig/kernel/sched.c
+++ linux-2.6/kernel/sched.c
@@ -5327,6 +5327,66 @@  void __sched yield(void)
 }
 EXPORT_SYMBOL(yield);
 
+/**
+ * yield_to - yield the current processor to another thread in
+ * your thread group, or accelerate that thread toward the
+ * processor it's on.
+ * @p: target task
+ * @preempt: whether task preemption is allowed or not
+ *
+ * It's the caller's job to ensure that the target task struct
+ * can't go away on us before we can do any checks.
+ */
+void __sched yield_to(struct task_struct *p, int preempt)
+{
+	struct task_struct *curr = current;
+	struct rq *rq, *p_rq;
+	unsigned long flags;
+	int yield = 0;
+
+	local_irq_save(flags);
+	rq = this_rq();
+
+again:
+	p_rq = task_rq(p);
+	double_rq_lock(rq, p_rq);
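+	/* p may have migrated before we locked both rqs; if so, retry. */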
+	if (task_rq(p) != p_rq) {
+		double_rq_unlock(rq, p_rq);
+		goto again;
+	}
+
+	if (!curr->sched_class->yield_to_task)
+		goto out;
+
+	if (curr->sched_class != p->sched_class)
+		goto out;
+
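+	/* The target must be runnable, and not currently running on a cpu. */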
+	if (task_running(p_rq, p) || p->state)
+		goto out;
+
+	if (!same_thread_group(p, curr))
+		goto out;
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	if (task_group(p) != task_group(curr))
+		goto out;
+#endif
+
+	yield = curr->sched_class->yield_to_task(p, preempt);
+
+out:
+	double_rq_unlock(rq, p_rq);
+	local_irq_restore(flags);
+
+	if (yield) {
+		set_current_state(TASK_RUNNING);
+		schedule();
+	}
+}
+EXPORT_SYMBOL_GPL(yield_to);
+
 /*
  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
Index: linux-2.6/kernel/sched_fair.c
===================================================================
--- linux-2.6.orig/kernel/sched_fair.c
+++ linux-2.6/kernel/sched_fair.c
@@ -1337,6 +1337,57 @@  static void yield_task_fair(struct rq *r
 }
 
 #ifdef CONFIG_SMP
+static void pull_task(struct rq *src_rq, struct task_struct *p,
+		      struct rq *this_rq, int this_cpu);
+#endif
+
+static int yield_to_task_fair(struct task_struct *p, int preempt)
+{
+	struct sched_entity *se = &current->se;
+	struct sched_entity *pse = &p->se;
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	struct cfs_rq *p_cfs_rq = cfs_rq_of(pse);
+	int local = cfs_rq == p_cfs_rq;
+	int this_cpu = smp_processor_id();
+
+	if (!pse->on_rq)
+		return 0;
+
+#ifdef CONFIG_SMP
+	/*
+	 * If this yield is important enough to want to preempt instead
+	 * of only dropping a ->next hint, we're alone, and the target
+	 * is not alone, pull the target to this cpu.
+	 *
+ * NOTE: the target may be alone in its cfs_rq if another class
+ * task or another task group is currently executing on its cpu.
+	 * In this case, we still pull, to accelerate it toward the cpu.
+	 */
+	if (!local && preempt && cfs_rq->nr_running == 1 &&
+			cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
+		pull_task(task_rq(p), p, this_rq(), this_cpu);
+		p_cfs_rq = cfs_rq_of(pse);
+		local = 1;
+	}
+#endif
+
+	/* We know whether we want to preempt or not, but are we allowed? */
+	preempt &= same_thread_group(p, task_of(p_cfs_rq->curr));
+
+	if (local)
+		clear_buddies(cfs_rq, se);
+	else if (preempt) {
+		clear_buddies(p_cfs_rq, p_cfs_rq->curr);
+		resched_task(task_of(p_cfs_rq->curr));
+	}
+
+	/* Tell the scheduler that we'd really like pse to run next. */
+	p_cfs_rq->next = pse;
+
+	return local;
+}
+
+#ifdef CONFIG_SMP
 
 static void task_waking_fair(struct rq *rq, struct task_struct *p)
 {
@@ -4143,6 +4194,7 @@  static const struct sched_class fair_sch
 	.enqueue_task		= enqueue_task_fair,
 	.dequeue_task		= dequeue_task_fair,
 	.yield_task		= yield_task_fair,
+	.yield_to_task		= yield_to_task_fair,
 
 	.check_preempt_curr	= check_preempt_wakeup,
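
For illustration, a minimal sketch of an in-kernel caller of the new API. It is not part of this patch; struct my_resource, its owner field, and my_resource_wait() are assumptions invented for the example. In this version yield_to() only succeeds for a CFS task in the caller's thread group, so the natural caller is a thread spinning on a resource held by a sibling thread, donating the rest of its slice to the holder instead of burning it. As the kerneldoc requires, the caller pins the target task struct before calling in.

#include <linux/sched.h>
#include <linux/rcupdate.h>

/* Hypothetical resource that records which thread currently owns it. */
struct my_resource {
	struct task_struct *owner;	/* NULL when free */
};

static void my_resource_wait(struct my_resource *res)
{
	struct task_struct *owner;

	for (;;) {
		/*
		 * Pin the owner's task struct under RCU so it can't go
		 * away while yield_to() inspects it.
		 */
		rcu_read_lock();
		owner = rcu_dereference(res->owner);
		if (owner)
			get_task_struct(owner);
		rcu_read_unlock();

		if (!owner)
			break;	/* resource was released */

		/*
		 * Donate our time to the owner. With preempt == 1 the
		 * fair class may pull the owner to this cpu, or resched
		 * the remote task it is queued behind.
		 */
		yield_to(owner, 1);
		put_task_struct(owner);
	}
}

When the target runs on another cpu, preempt == 0 only drops a ->next buddy hint there, so the target is favored at the next pick rather than run immediately; preempt == 1 is for callers that consider the yield worth a migration or a resched of the remote cpu.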