From patchwork Mon May  6 06:56:02 2019
X-Patchwork-Submitter: Jürgen Groß
X-Patchwork-Id: 10930597
From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Cc: Juergen Gross, Tim Deegan, Stefano Stabellini, Wei Liu,
 Konrad Rzeszutek Wilk, George Dunlap, Andrew Cooper, Ian Jackson,
 Dario Faggioli, Julien Grall, Jan Beulich
Date: Mon, 6 May 2019 08:56:02 +0200
Message-Id: <20190506065644.7415-4-jgross@suse.com>
In-Reply-To: <20190506065644.7415-1-jgross@suse.com>
References: <20190506065644.7415-1-jgross@suse.com>
Subject: [Xen-devel] [PATCH RFC V2 03/45] xen/sched: alloc struct sched_item for each vcpu

Allocate a struct sched_item for each vcpu. This removes the need to
have it locally on the stack in schedule.c.
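In effect the wrapper now lives exactly as long as its vcpu, instead of
being rebuilt on the stack at each call site. The following is a
deliberately simplified, self-contained sketch of that ownership model
(editor's illustration, not part of the patch: the types are cut-down
stand-ins, and calloc()/free() stand in for Xen's xzalloc()/xfree()):

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-ins for the Xen types. */
    struct vcpu;
    struct sched_item { struct vcpu *vcpu; };
    struct vcpu { struct sched_item *sched_item; };

    /* Mirrors the new sched_init_vcpu() flow: allocate the wrapper
     * once, link it both ways, and undo everything on failure so no
     * dangling pointer is left behind. */
    static int init_vcpu(struct vcpu *v)
    {
        struct sched_item *item = calloc(1, sizeof(*item)); /* ~xzalloc */
        if ( item == NULL )
            return 1;
        v->sched_item = item;
        item->vcpu = v;
        return 0;
    }

    /* Mirrors sched_destroy_vcpu(): the wrapper dies with the vcpu. */
    static void destroy_vcpu(struct vcpu *v)
    {
        free(v->sched_item); /* ~xfree */
        v->sched_item = NULL;
    }

    int main(void)
    {
        struct vcpu v = { 0 };
        if ( init_vcpu(&v) )
            return 1;
        printf("back-pointer intact: %d\n", v.sched_item->vcpu == &v);
        destroy_vcpu(&v);
        return 0;
    }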
Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/common/schedule.c   | 67 +++++++++++++++++++++++--------------------
 xen/include/xen/sched.h |  2 ++
 2 files changed, 33 insertions(+), 36 deletions(-)

diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 27d8d36504..071289b9c0 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -252,10 +252,15 @@ static void sched_spin_unlock_double(spinlock_t *lock1, spinlock_t *lock2,
 int sched_init_vcpu(struct vcpu *v, unsigned int processor)
 {
     struct domain *d = v->domain;
-    struct sched_item item = { .vcpu = v };
+    struct sched_item *item;
 
     v->processor = processor;
 
+    if ( (item = xzalloc(struct sched_item)) == NULL )
+        return 1;
+    v->sched_item = item;
+    item->vcpu = v;
+
     /* Initialise the per-vcpu timers. */
     init_timer(&v->periodic_timer, vcpu_periodic_timer_fn,
                v, v->processor);
@@ -264,9 +269,13 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
     init_timer(&v->poll_timer, poll_timer_fn,
                v, v->processor);
 
-    v->sched_priv = sched_alloc_vdata(dom_scheduler(d), &item, d->sched_priv);
+    v->sched_priv = sched_alloc_vdata(dom_scheduler(d), item, d->sched_priv);
     if ( v->sched_priv == NULL )
+    {
+        v->sched_item = NULL;
+        xfree(item);
         return 1;
+    }
 
     /*
      * Initialize affinity settings. The idler, and potentially
@@ -285,7 +294,7 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
     }
     else
     {
-        sched_insert_item(dom_scheduler(d), &item);
+        sched_insert_item(dom_scheduler(d), item);
     }
 
     return 0;
@@ -306,7 +315,6 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
     void *vcpudata;
     struct scheduler *old_ops;
     void *old_domdata;
-    struct sched_item item;
 
     for_each_vcpu ( d, v )
     {
@@ -327,8 +335,8 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
 
     for_each_vcpu ( d, v )
     {
-        item.vcpu = v;
-        vcpu_priv[v->vcpu_id] = sched_alloc_vdata(c->sched, &item, domdata);
+        vcpu_priv[v->vcpu_id] = sched_alloc_vdata(c->sched, v->sched_item,
+                                                  domdata);
         if ( vcpu_priv[v->vcpu_id] == NULL )
         {
             for_each_vcpu ( d, v )
@@ -346,8 +354,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
 
     for_each_vcpu ( d, v )
     {
-        item.vcpu = v;
-        sched_remove_item(old_ops, &item);
+        sched_remove_item(old_ops, v->sched_item);
     }
 
     d->cpupool = c;
@@ -358,7 +365,6 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
     {
         spinlock_t *lock;
 
-        item.vcpu = v;
         vcpudata = v->sched_priv;
 
         migrate_timer(&v->periodic_timer, new_p);
@@ -383,7 +389,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
 
         new_p = cpumask_cycle(new_p, c->cpu_valid);
 
-        sched_insert_item(c->sched, &item);
+        sched_insert_item(c->sched, v->sched_item);
 
         sched_free_vdata(old_ops, vcpudata);
     }
@@ -401,15 +407,17 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
 
 void sched_destroy_vcpu(struct vcpu *v)
 {
-    struct sched_item item = { .vcpu = v };
+    struct sched_item *item = v->sched_item;
 
     kill_timer(&v->periodic_timer);
     kill_timer(&v->singleshot_timer);
     kill_timer(&v->poll_timer);
     if ( test_and_clear_bool(v->is_urgent) )
         atomic_dec(&per_cpu(schedule_data, v->processor).urgent_count);
-    sched_remove_item(vcpu_scheduler(v), &item);
+    sched_remove_item(vcpu_scheduler(v), item);
     sched_free_vdata(vcpu_scheduler(v), v->sched_priv);
+    xfree(item);
+    v->sched_item = NULL;
 }
 
 int sched_init_domain(struct domain *d, int poolid)
@@ -453,8 +461,6 @@ void sched_destroy_domain(struct domain *d)
 
 void vcpu_sleep_nosync_locked(struct vcpu *v)
 {
-    struct sched_item item = { .vcpu = v };
-
     ASSERT(spin_is_locked(per_cpu(schedule_data,v->processor).schedule_lock));
 
     if ( likely(!vcpu_runnable(v)) )
@@ -462,7 +468,7 @@ void vcpu_sleep_nosync_locked(struct vcpu *v)
         if ( v->runstate.state == RUNSTATE_runnable )
             vcpu_runstate_change(v, RUNSTATE_offline, NOW());
 
-        sched_sleep(vcpu_scheduler(v), &item);
+        sched_sleep(vcpu_scheduler(v), v->sched_item);
     }
 }
 
@@ -494,7 +500,6 @@ void vcpu_wake(struct vcpu *v)
 {
     unsigned long flags;
     spinlock_t *lock;
-    struct sched_item item = { .vcpu = v };
 
     TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
 
@@ -504,7 +509,7 @@ void vcpu_wake(struct vcpu *v)
     {
         if ( v->runstate.state >= RUNSTATE_blocked )
             vcpu_runstate_change(v, RUNSTATE_runnable, NOW());
-        sched_wake(vcpu_scheduler(v), &item);
+        sched_wake(vcpu_scheduler(v), v->sched_item);
     }
     else if ( !(v->pause_flags & VPF_blocked) )
     {
@@ -543,7 +548,6 @@ void vcpu_unblock(struct vcpu *v)
 static void vcpu_move_locked(struct vcpu *v, unsigned int new_cpu)
 {
     unsigned int old_cpu = v->processor;
-    struct sched_item item = { .vcpu = v };
 
     /*
      * Transfer urgency status to new CPU before switching CPUs, as
@@ -560,7 +564,7 @@ static void vcpu_move_locked(struct vcpu *v, unsigned int new_cpu)
      * Actual CPU switch to new CPU. This is safe because the lock
      * pointer can't change while the current lock is held.
      */
-    sched_migrate(vcpu_scheduler(v), &item, new_cpu);
+    sched_migrate(vcpu_scheduler(v), v->sched_item, new_cpu);
 }
 
 /*
@@ -602,7 +606,6 @@ static void vcpu_migrate_finish(struct vcpu *v)
     unsigned int old_cpu, new_cpu;
     spinlock_t *old_lock, *new_lock;
     bool_t pick_called = 0;
-    struct sched_item item = { .vcpu = v };
 
     /*
      * If the vcpu is currently running, this will be handled by
@@ -639,7 +642,7 @@ static void vcpu_migrate_finish(struct vcpu *v)
             break;
 
         /* Select a new CPU. */
-        new_cpu = sched_pick_cpu(vcpu_scheduler(v), &item);
+        new_cpu = sched_pick_cpu(vcpu_scheduler(v), v->sched_item);
         if ( (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
              cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
             break;
@@ -709,7 +712,6 @@ void restore_vcpu_affinity(struct domain *d)
     {
         spinlock_t *lock;
         unsigned int old_cpu = v->processor;
-        struct sched_item item = { .vcpu = v };
 
         ASSERT(!vcpu_runnable(v));
 
@@ -745,7 +747,7 @@ void restore_vcpu_affinity(struct domain *d)
         v->processor = cpumask_any(cpumask_scratch_cpu(cpu));
 
         lock = vcpu_schedule_lock_irq(v);
-        v->processor = sched_pick_cpu(vcpu_scheduler(v), &item);
+        v->processor = sched_pick_cpu(vcpu_scheduler(v), v->sched_item);
         spin_unlock_irq(lock);
 
         if ( old_cpu != v->processor )
@@ -857,9 +859,7 @@ static int cpu_disable_scheduler_check(unsigned int cpu)
 void sched_set_affinity(
     struct vcpu *v, const cpumask_t *hard, const cpumask_t *soft)
 {
-    struct sched_item item = { .vcpu = v };
-
-    sched_adjust_affinity(dom_scheduler(v->domain), &item, hard, soft);
+    sched_adjust_affinity(dom_scheduler(v->domain), v->sched_item, hard, soft);
 
     if ( hard )
         cpumask_copy(v->cpu_hard_affinity, hard);
@@ -1032,10 +1032,9 @@ static long do_poll(struct sched_poll *sched_poll)
 long vcpu_yield(void)
 {
     struct vcpu * v=current;
-    struct sched_item item = { .vcpu = v };
     spinlock_t *lock = vcpu_schedule_lock_irq(v);
 
-    sched_yield(vcpu_scheduler(v), &item);
+    sched_yield(vcpu_scheduler(v), v->sched_item);
     vcpu_schedule_unlock_irq(lock, v);
 
     SCHED_STAT_CRANK(vcpu_yield);
@@ -1530,8 +1529,6 @@ static void schedule(void)
 
 void context_saved(struct vcpu *prev)
 {
-    struct sched_item item = { .vcpu = prev };
-
     /* Clear running flag /after/ writing context to memory. */
     smp_wmb();
 
@@ -1540,7 +1537,7 @@ void context_saved(struct vcpu *prev)
     /* Check for migration request /after/ clearing running flag. */
     smp_mb();
 
-    sched_context_saved(vcpu_scheduler(prev), &item);
+    sched_context_saved(vcpu_scheduler(prev), prev->sched_item);
 
     vcpu_migrate_finish(prev);
 }
@@ -1596,7 +1593,6 @@ static int cpu_schedule_up(unsigned int cpu)
     else
     {
         struct vcpu *idle = idle_vcpu[cpu];
-        struct sched_item item = { .vcpu = idle };
 
         /*
          * During (ACPI?) suspend the idle vCPU for this pCPU is not freed,
@@ -1610,7 +1606,7 @@ static int cpu_schedule_up(unsigned int cpu)
          */
         ASSERT(idle->sched_priv == NULL);
 
-        idle->sched_priv = sched_alloc_vdata(&ops, &item,
+        idle->sched_priv = sched_alloc_vdata(&ops, idle->sched_item,
                                              idle->domain->sched_priv);
         if ( idle->sched_priv == NULL )
             return -ENOMEM;
@@ -1803,7 +1799,6 @@ void __init scheduler_init(void)
 int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
 {
     struct vcpu *idle;
-    struct sched_item item;
     void *ppriv, *ppriv_old, *vpriv, *vpriv_old;
     struct scheduler *old_ops = per_cpu(scheduler, cpu);
     struct scheduler *new_ops = (c == NULL) ? &ops : c->sched;
@@ -1839,11 +1834,11 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
      * sched_priv field of the per-vCPU info of the idle domain.
      */
     idle = idle_vcpu[cpu];
-    item.vcpu = idle;
     ppriv = sched_alloc_pdata(new_ops, cpu);
     if ( IS_ERR(ppriv) )
        return PTR_ERR(ppriv);
-    vpriv = sched_alloc_vdata(new_ops, &item, idle->domain->sched_priv);
+    vpriv = sched_alloc_vdata(new_ops, idle->sched_item,
+                              idle->domain->sched_priv);
     if ( vpriv == NULL )
     {
         sched_free_pdata(new_ops, ppriv, cpu);
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 946a71ffdc..85f9119d48 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -140,6 +140,7 @@ void evtchn_destroy(struct domain *d); /* from domain_kill */
 void evtchn_destroy_final(struct domain *d); /* from complete_domain_destroy */
 
 struct waitqueue_vcpu;
+struct sched_item;
 
 struct vcpu
 {
@@ -160,6 +161,7 @@ struct vcpu
 
     struct timer     poll_timer;    /* timeout for SCHEDOP_poll */
 
+    struct sched_item *sched_item;
     void            *sched_priv;    /* scheduler-specific data */
 
     struct vcpu_runstate_info runstate;
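
An aside on the sched.h hunk: the bare "struct sched_item;" forward
declaration is enough because struct vcpu only stores a pointer to the
item; the complete type is needed only where the pointer is
dereferenced. A minimal stand-alone illustration of the idiom
(editor's example with made-up names, not the Xen definitions):

    /* A pointer to an incomplete type is fine in the header... */
    struct widget;                 /* forward declaration only */

    struct owner {
        struct widget *w;          /* OK: the size of a pointer is known */
        /* struct widget embedded;    would not compile here */
    };

    /* ...the full definition lives with the code that dereferences it. */
    struct widget { int payload; };

    int main(void) { struct owner o = { 0 }; return o.w != 0; }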