diff mbox

[v5,12/13] x86/traps: move some PV specific functions to pv/traps.c

Message ID 20170626162842.482-13-wei.liu2@citrix.com (mailing list archive)
State New, archived
Headers show

Commit Message

Wei Liu June 26, 2017, 4:28 p.m. UTC
These functions must be moved at the same time. Also move the softirq_trap
structure, because it is only used by them.

Fix some coding style issues while moving code.

Signed-off-by: Wei Liu <wei.liu2@citrix.com>
---
 xen/arch/x86/pv/traps.c     | 104 ++++++++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/traps.c        |  88 -------------------------------------
 xen/include/asm-x86/traps.h |   6 ---
 3 files changed, 104 insertions(+), 94 deletions(-)

Comments

Andrew Cooper June 27, 2017, 6:12 p.m. UTC | #1
On 26/06/17 17:28, Wei Liu wrote:
> @@ -148,6 +150,108 @@ void init_int80_direct_trap(struct vcpu *v)
>          tb->flags = TBF_EXCEPTION | (TI_GET_IF(ti) ? TBF_INTERRUPT : 0);
>  }
>  
> +struct softirq_trap {
> +    struct domain *domain; /* domain to inject trap */
> +    struct vcpu *vcpu;     /* vcpu to inject trap */
> +    int processor;         /* physical cpu to inject trap */
> +};
> +
> +static DEFINE_PER_CPU(struct softirq_trap, softirq_trap);
> +
> +static void nmi_mce_softirq(void)
> +{
> +    int cpu = smp_processor_id();
> +    struct softirq_trap *st = &per_cpu(softirq_trap, cpu);
> +
> +    BUG_ON(st->vcpu == NULL);
> +
> +    /*
> +     * Set the tmp value unconditionally, so that the check in the iret
> +     * hypercall works.
> +     */
> +    cpumask_copy(st->vcpu->cpu_hard_affinity_tmp,
> +                 st->vcpu->cpu_hard_affinity);
> +
> +    if ( (cpu != st->processor) ||
> +         (st->processor != st->vcpu->processor) )
> +    {
> +
> +        /*
> +	 * We are on a different physical cpu.  Make sure to wakeup the vcpu on
> +	 * the specified processor.
> +         */

You have some stray tabs here.

> +        vcpu_set_hard_affinity(st->vcpu, cpumask_of(st->processor));
> +
> +        /* Affinity is restored in the iret hypercall. */
> +    }
> +
> +    /*
> +     * Only used to defer wakeup of domain/vcpu to a safe (non-NMI/MCE)
> +     * context.
> +     */
> +    vcpu_kick(st->vcpu);
> +    st->vcpu = NULL;
> +}
> +
> +void __init pv_trap_init(void)
> +{
> +    /* The 32-on-64 hypercall vector is only accessible from ring 1. */
> +    _set_gate(idt_table + HYPERCALL_VECTOR,
> +              SYS_DESC_trap_gate, 1, entry_int82);
> +
> +    /* Fast trap for int80 (faster than taking the #GP-fixup path). */
> +    _set_gate(idt_table + LEGACY_SYSCALL_VECTOR, SYS_DESC_trap_gate, 3,
> +              &int80_direct_trap);
> +
> +    open_softirq(NMI_MCE_SOFTIRQ, nmi_mce_softirq);
> +}
> +
> +int pv_raise_interrupt(struct vcpu *v, uint8_t trap_nr)

Please s/trap_nr/vector/ as you are moving it.  Unlike
guest_has_trap_callback(), the uint8_t here isn't important.

> +{
> +    struct softirq_trap *st = &per_cpu(softirq_trap, smp_processor_id());
> +
> +    switch ( trap_nr )
> +    {
> +    case TRAP_nmi:
> +        if ( cmpxchgptr(&st->vcpu, NULL, v) )
> +            return -EBUSY;
> +        if ( !test_and_set_bool(v->nmi_pending) )
> +        {
> +            st->domain = v->domain;
> +            st->processor = v->processor;
> +
> +            /* Not safe to wake up a vcpu here */
> +            raise_softirq(NMI_MCE_SOFTIRQ);
> +            return 0;
> +        }
> +        st->vcpu = NULL;
> +        break;
> +
> +    case TRAP_machine_check:
> +        if ( cmpxchgptr(&st->vcpu, NULL, v) )
> +            return -EBUSY;
> +
> +        /*
> +	 * We are called by the machine check (exception or polling) handlers
> +	 * on the physical CPU that reported a machine check error.
> +         */

And more tabs here.

Otherwise, Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Jan Beulich June 27, 2017, 6:34 p.m. UTC | #2
>>> Wei Liu <wei.liu2@citrix.com> 06/26/17 6:46 PM >>>
>@@ -148,6 +150,108 @@ void init_int80_direct_trap(struct vcpu *v)
>         tb->flags = TBF_EXCEPTION | (TI_GET_IF(ti) ? TBF_INTERRUPT : 0);
> }
>
>+struct softirq_trap {
>+    struct domain *domain; /* domain to inject trap */
>+    struct vcpu *vcpu;     /* vcpu to inject trap */
>+    int processor;         /* physical cpu to inject trap */

unsigned int please as you go.

>+static void nmi_mce_softirq(void)
>+{
>+    int cpu = smp_processor_id();

Same here.

Jan
diff mbox

Patch

diff --git a/xen/arch/x86/pv/traps.c b/xen/arch/x86/pv/traps.c
index 7d2f9aa638..1fce7df0c0 100644
--- a/xen/arch/x86/pv/traps.c
+++ b/xen/arch/x86/pv/traps.c
@@ -19,9 +19,11 @@ 
  * Copyright (c) 2017 Citrix Systems Ltd.
  */
 
+#include <xen/event.h>
 #include <xen/hypercall.h>
 #include <xen/lib.h>
 #include <xen/trace.h>
+#include <xen/softirq.h>
 
 #include <asm/apic.h>
 #include <asm/shared.h>
@@ -148,6 +150,108 @@  void init_int80_direct_trap(struct vcpu *v)
         tb->flags = TBF_EXCEPTION | (TI_GET_IF(ti) ? TBF_INTERRUPT : 0);
 }
 
+struct softirq_trap {
+    struct domain *domain; /* domain to inject trap */
+    struct vcpu *vcpu;     /* vcpu to inject trap */
+    int processor;         /* physical cpu to inject trap */
+};
+
+static DEFINE_PER_CPU(struct softirq_trap, softirq_trap);
+
+static void nmi_mce_softirq(void)
+{
+    int cpu = smp_processor_id();
+    struct softirq_trap *st = &per_cpu(softirq_trap, cpu);
+
+    BUG_ON(st->vcpu == NULL);
+
+    /*
+     * Set the tmp value unconditionally, so that the check in the iret
+     * hypercall works.
+     */
+    cpumask_copy(st->vcpu->cpu_hard_affinity_tmp,
+                 st->vcpu->cpu_hard_affinity);
+
+    if ( (cpu != st->processor) ||
+         (st->processor != st->vcpu->processor) )
+    {
+
+        /*
+	 * We are on a different physical cpu.  Make sure to wakeup the vcpu on
+	 * the specified processor.
+         */
+        vcpu_set_hard_affinity(st->vcpu, cpumask_of(st->processor));
+
+        /* Affinity is restored in the iret hypercall. */
+    }
+
+    /*
+     * Only used to defer wakeup of domain/vcpu to a safe (non-NMI/MCE)
+     * context.
+     */
+    vcpu_kick(st->vcpu);
+    st->vcpu = NULL;
+}
+
+void __init pv_trap_init(void)
+{
+    /* The 32-on-64 hypercall vector is only accessible from ring 1. */
+    _set_gate(idt_table + HYPERCALL_VECTOR,
+              SYS_DESC_trap_gate, 1, entry_int82);
+
+    /* Fast trap for int80 (faster than taking the #GP-fixup path). */
+    _set_gate(idt_table + LEGACY_SYSCALL_VECTOR, SYS_DESC_trap_gate, 3,
+              &int80_direct_trap);
+
+    open_softirq(NMI_MCE_SOFTIRQ, nmi_mce_softirq);
+}
+
+int pv_raise_interrupt(struct vcpu *v, uint8_t trap_nr)
+{
+    struct softirq_trap *st = &per_cpu(softirq_trap, smp_processor_id());
+
+    switch ( trap_nr )
+    {
+    case TRAP_nmi:
+        if ( cmpxchgptr(&st->vcpu, NULL, v) )
+            return -EBUSY;
+        if ( !test_and_set_bool(v->nmi_pending) )
+        {
+            st->domain = v->domain;
+            st->processor = v->processor;
+
+            /* Not safe to wake up a vcpu here */
+            raise_softirq(NMI_MCE_SOFTIRQ);
+            return 0;
+        }
+        st->vcpu = NULL;
+        break;
+
+    case TRAP_machine_check:
+        if ( cmpxchgptr(&st->vcpu, NULL, v) )
+            return -EBUSY;
+
+        /*
+	 * We are called by the machine check (exception or polling) handlers
+	 * on the physical CPU that reported a machine check error.
+         */
+        if ( !test_and_set_bool(v->mce_pending) )
+        {
+            st->domain = v->domain;
+            st->processor = v->processor;
+
+            /* not safe to wake up a vcpu here */
+            raise_softirq(NMI_MCE_SOFTIRQ);
+            return 0;
+        }
+        st->vcpu = NULL;
+        break;
+    }
+
+    /* Delivery failed */
+    return -EIO;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index f12a52032a..4d6f42d168 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1540,39 +1540,6 @@  void do_general_protection(struct cpu_user_regs *regs)
     panic("GENERAL PROTECTION FAULT\n[error_code=%04x]", regs->error_code);
 }
 
-static DEFINE_PER_CPU(struct softirq_trap, softirq_trap);
-
-static void nmi_mce_softirq(void)
-{
-    int cpu = smp_processor_id();
-    struct softirq_trap *st = &per_cpu(softirq_trap, cpu);
-
-    BUG_ON(st->vcpu == NULL);
-
-    /* Set the tmp value unconditionally, so that
-     * the check in the iret hypercall works. */
-    cpumask_copy(st->vcpu->cpu_hard_affinity_tmp,
-                 st->vcpu->cpu_hard_affinity);
-
-    if ((cpu != st->processor)
-       || (st->processor != st->vcpu->processor))
-    {
-        /* We are on a different physical cpu.
-         * Make sure to wakeup the vcpu on the
-         * specified processor.
-         */
-        vcpu_set_hard_affinity(st->vcpu, cpumask_of(st->processor));
-
-        /* Affinity is restored in the iret hypercall. */
-    }
-
-    /* Only used to defer wakeup of domain/vcpu to
-     * a safe (non-NMI/MCE) context.
-     */
-    vcpu_kick(st->vcpu);
-    st->vcpu = NULL;
-}
-
 static void pci_serr_softirq(void)
 {
     printk("\n\nNMI - PCI system error (SERR)\n");
@@ -1934,19 +1901,6 @@  void __init init_idt_traps(void)
     this_cpu(compat_gdt_table) = boot_cpu_compat_gdt_table;
 }
 
-void __init pv_trap_init(void)
-{
-    /* The 32-on-64 hypercall vector is only accessible from ring 1. */
-    _set_gate(idt_table + HYPERCALL_VECTOR,
-              SYS_DESC_trap_gate, 1, entry_int82);
-
-    /* Fast trap for int80 (faster than taking the #GP-fixup path). */
-    _set_gate(idt_table + LEGACY_SYSCALL_VECTOR, SYS_DESC_trap_gate, 3,
-              &int80_direct_trap);
-
-    open_softirq(NMI_MCE_SOFTIRQ, nmi_mce_softirq);
-}
-
 extern void (*const autogen_entrypoints[NR_VECTORS])(void);
 void __init trap_init(void)
 {
@@ -1979,48 +1933,6 @@  void __init trap_init(void)
     open_softirq(PCI_SERR_SOFTIRQ, pci_serr_softirq);
 }
 
-int pv_raise_interrupt(struct vcpu *v, uint8_t trap_nr)
-{
-    struct softirq_trap *st = &per_cpu(softirq_trap, smp_processor_id());
-
-    switch (trap_nr) {
-    case TRAP_nmi:
-        if ( cmpxchgptr(&st->vcpu, NULL, v) )
-            return -EBUSY;
-        if ( !test_and_set_bool(v->nmi_pending) ) {
-               st->domain = v->domain;
-               st->processor = v->processor;
-
-               /* not safe to wake up a vcpu here */
-               raise_softirq(NMI_MCE_SOFTIRQ);
-               return 0;
-        }
-        st->vcpu = NULL;
-        break;
-
-    case TRAP_machine_check:
-        if ( cmpxchgptr(&st->vcpu, NULL, v) )
-            return -EBUSY;
-
-        /* We are called by the machine check (exception or polling) handlers
-         * on the physical CPU that reported a machine check error. */
-
-        if ( !test_and_set_bool(v->mce_pending) ) {
-                st->domain = v->domain;
-                st->processor = v->processor;
-
-                /* not safe to wake up a vcpu here */
-                raise_softirq(NMI_MCE_SOFTIRQ);
-                return 0;
-        }
-        st->vcpu = NULL;
-        break;
-    }
-
-    /* delivery failed */
-    return -EIO;
-}
-
 void activate_debugregs(const struct vcpu *curr)
 {
     ASSERT(curr == current);
diff --git a/xen/include/asm-x86/traps.h b/xen/include/asm-x86/traps.h
index 1e3f9c7fad..8d903ec91b 100644
--- a/xen/include/asm-x86/traps.h
+++ b/xen/include/asm-x86/traps.h
@@ -19,12 +19,6 @@ 
 #ifndef ASM_TRAP_H
 #define ASM_TRAP_H
 
-struct softirq_trap {
-	struct domain *domain;  /* domain to inject trap */
-	struct vcpu *vcpu;	/* vcpu to inject trap */
-	int processor;		/* physical cpu to inject trap */
-};
-
 struct cpu_user_regs;
 
 void async_exception_cleanup(struct vcpu *);