
[2/2] Unified the kvm requests in one function

Message ID 1343971845-17995-2-git-send-email-Bharat.Bhushan@freescale.com (mailing list archive)
State New, archived

Commit Message

Bharat Bhushan Aug. 3, 2012, 5:30 a.m. UTC
I am sending this as a separate patch for ease of review.
Once reviewed, I will merge it with the watchdog patch.

Signed-off-by: Bharat Bhushan <bharat.bhushan@freescale.com>
---
 arch/powerpc/kvm/booke.c |   63 +++++++++++++++++++++++++++-------------------
 1 files changed, 37 insertions(+), 26 deletions(-)

Comments

Alexander Graf Aug. 7, 2012, 10:23 a.m. UTC | #1
On 03.08.2012, at 07:30, Bharat Bhushan wrote:

> I am sending this as a separate patch for ease of review.
> Once reviewed, I will merge it with the watchdog patch.

Please don't merge, but rather make 2 different patches out of it:

  1) Introduce kvmppc_handle_requests()
  2) Implement watchdog, including request bits
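
A rough sketch of what the first of those two patches could carry on its own (this is not code from the thread, just the existing KVM_REQ_PENDING_TIMER handling lifted out of kvmppc_core_check_exceptions()); the int return value and the KVM_REQ_WATCHDOG check would then arrive with the watchdog patch:

static void kvmppc_handle_requests(struct kvm_vcpu *vcpu)
{
	if (!vcpu->requests)
		return;

	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) {
		/* same barrier as in the code being moved */
		smp_mb();
		update_timer_ints(vcpu);
	}
}

Splitting it this way keeps the first patch a pure code movement, which is easy to verify, and confines all behavioural change to the watchdog patch.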

> 
> Signed-off-by: Bharat Bhushan <bharat.bhushan@freescale.com>
> ---
> arch/powerpc/kvm/booke.c |   63 +++++++++++++++++++++++++++-------------------
> 1 files changed, 37 insertions(+), 26 deletions(-)
> 
> diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
> index a0f5922..68ed863 100644
> --- a/arch/powerpc/kvm/booke.c
> +++ b/arch/powerpc/kvm/booke.c
> @@ -525,18 +525,31 @@ static void update_timer_ints(struct kvm_vcpu *vcpu)
> 		kvmppc_core_dequeue_watchdog(vcpu);
> }
> 
> +static int kvmppc_handle_requests(struct kvm_run *run, struct kvm_vcpu *vcpu)
> +{
> +	int ret = 0;
> +
> +	if (!vcpu->requests)
> +		return 0;
> +
> +	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) {
> +		smp_mb();
> +		update_timer_ints(vcpu);
> +	}
> +
> +	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
> +		run->exit_reason = KVM_EXIT_WATCHDOG;
> +		ret = 1;
> +	}
> +
> +	return ret;
> +}
> +
> static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
> {
> 	unsigned long *pending = &vcpu->arch.pending_exceptions;
> 	unsigned int priority;
> 
> -	if (vcpu->requests) {
> -		if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) {
> -			smp_mb();
> -			update_timer_ints(vcpu);
> -		}
> -	}
> -
> 	priority = __ffs(*pending);
> 	while (priority < BOOKE_IRQPRIO_MAX) {
> 		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
> @@ -578,7 +591,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
>  *
>  * returns !0 if a signal is pending and check_signal is true
>  */
> -static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
> +static int kvmppc_prepare_to_enter(struct kvm_run *run, struct kvm_vcpu *vcpu)

You can always get to run via vcpu->run. Every time we pass run in as a parameter, we're using a deprecated kvm calling convention.
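
For illustration only (not code from the thread), the same helper could pick up kvm_run through vcpu->run, assuming vcpu->run has already been set up by the time the KVM_RUN path gets here:

static int kvmppc_handle_requests(struct kvm_vcpu *vcpu)
{
	int ret = 0;

	if (!vcpu->requests)
		return 0;

	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) {
		smp_mb();
		update_timer_ints(vcpu);
	}

	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
		/* tell userspace why we are exiting */
		vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
		ret = 1;
	}

	return ret;
}

kvmppc_prepare_to_enter() could then keep its original vcpu-only prototype, and the signal_pending() case would likewise set vcpu->run->exit_reason = KVM_EXIT_INTR directly.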

Otherwise looks good to me :)


Alex


Patch

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index a0f5922..68ed863 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -525,18 +525,31 @@  static void update_timer_ints(struct kvm_vcpu *vcpu)
 		kvmppc_core_dequeue_watchdog(vcpu);
 }
 
+static int kvmppc_handle_requests(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	int ret = 0;
+
+	if (!vcpu->requests)
+		return 0;
+
+	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) {
+		smp_mb();
+		update_timer_ints(vcpu);
+	}
+
+	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
+		run->exit_reason = KVM_EXIT_WATCHDOG;
+		ret = 1;
+	}
+
+	return ret;
+}
+
 static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
 {
 	unsigned long *pending = &vcpu->arch.pending_exceptions;
 	unsigned int priority;
 
-	if (vcpu->requests) {
-		if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) {
-			smp_mb();
-			update_timer_ints(vcpu);
-		}
-	}
-
 	priority = __ffs(*pending);
 	while (priority < BOOKE_IRQPRIO_MAX) {
 		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
@@ -578,7 +591,7 @@  int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
  *
  * returns !0 if a signal is pending and check_signal is true
  */
-static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
+static int kvmppc_prepare_to_enter(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
 	int r = 0;
 
@@ -593,9 +606,14 @@  static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
 
 		if (signal_pending(current)) {
 			r = 1;
+			run->exit_reason = KVM_EXIT_INTR;
 			break;
 		}
 
+		r = kvmppc_handle_requests(run, vcpu);
+		if (r)
+			break;
+
 		if (kvmppc_core_prepare_to_enter(vcpu)) {
 			/* interrupts got enabled in between, so we
 			   are back at square 1 */
@@ -623,15 +641,11 @@  int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	}
 
 	local_irq_disable();
-	if (kvmppc_prepare_to_enter(vcpu)) {
-		kvm_run->exit_reason = KVM_EXIT_INTR;
-		ret = -EINTR;
-		goto out;
-	}
-
-	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
-		kvm_run->exit_reason = KVM_EXIT_WATCHDOG;
-		ret = 0;
+	if (kvmppc_prepare_to_enter(kvm_run, vcpu)) {
+		if (kvm_run->exit_reason == KVM_EXIT_INTR)
+			ret = -EINTR;
+		else
+			ret = 0;
 		goto out;
 	}
 
@@ -1090,18 +1104,15 @@  int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 */
 	if (!(r & RESUME_HOST)) {
 		local_irq_disable();
-		if (kvmppc_prepare_to_enter(vcpu)) {
-			run->exit_reason = KVM_EXIT_INTR;
-			r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
-			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
+		if (kvmppc_prepare_to_enter(run, vcpu)) {
+			if (run->exit_reason == KVM_EXIT_INTR) {
+				r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
+				kvmppc_account_exit(vcpu, SIGNAL_EXITS);
+			} else
+				r = RESUME_HOST | (r & RESUME_FLAG_NV);
 		}
 	}
 
-	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
-		run->exit_reason = KVM_EXIT_WATCHDOG;
-		r = RESUME_HOST | (r & RESUME_FLAG_NV);
-	}
-
 	return r;
 }