diff mbox series

[v4,1/3] target/arm: Support SError injection

Message ID 20200218020416.50244-2-gshan@redhat.com (mailing list archive)
State New, archived
Headers show
Series hw/arm/virt: Simulate NMI Injection | expand

Commit Message

Gavin Shan Feb. 18, 2020, 2:04 a.m. UTC
This supports SError injection, which will be used by the "virt" board to
simulate the behavior of NMI injection in the next patch. As Peter Maydell
suggested, this adds a new interrupt (ARM_CPU_SERROR), which is parallel
to CPU_INTERRUPT_HARD. The backend depends on whether kvm is enabled or not.
kvm_vcpu_ioctl(cpu, KVM_SET_VCPU_EVENTS) is leveraged to inject SError
or data abort to guest. When TCG is enabled, the behavior is simulated
by injecting SError and data abort to guest.

Signed-off-by: Gavin Shan <gshan@redhat.com>
---
 target/arm/cpu.c      | 69 +++++++++++++++++++++++++++++++++++--------
 target/arm/cpu.h      | 20 ++++++++-----
 target/arm/helper.c   | 12 ++++++++
 target/arm/m_helper.c |  8 +++++
 target/arm/machine.c  |  3 +-
 5 files changed, 91 insertions(+), 21 deletions(-)

Comments

Marc Zyngier Feb. 18, 2020, 4:28 p.m. UTC | #1
On 2020-02-18 02:04, Gavin Shan wrote:
> This supports SError injection, which will be used by "virt" board to
> simulating the behavior of NMI injection in next patch. As Peter 
> Maydell
> suggested, this adds a new interrupt (ARM_CPU_SERROR), which is 
> parallel
> to CPU_INTERRUPT_HARD. The backend depends on if kvm is enabled or not.
> kvm_vcpu_ioctl(cpu, KVM_SET_VCPU_EVENTS) is leveraged to inject SError
> or data abort to guest. When TCG is enabled, the behavior is simulated
> by injecting SError and data abort to guest.

s/and/or/ (you can't inject both at the same time).

> 
> Signed-off-by: Gavin Shan <gshan@redhat.com>
> ---
>  target/arm/cpu.c      | 69 +++++++++++++++++++++++++++++++++++--------
>  target/arm/cpu.h      | 20 ++++++++-----
>  target/arm/helper.c   | 12 ++++++++
>  target/arm/m_helper.c |  8 +++++
>  target/arm/machine.c  |  3 +-
>  5 files changed, 91 insertions(+), 21 deletions(-)
> 
> diff --git a/target/arm/cpu.c b/target/arm/cpu.c
> index de733aceeb..e5750080bc 100644
> --- a/target/arm/cpu.c
> +++ b/target/arm/cpu.c
> @@ -78,7 +78,7 @@ static bool arm_cpu_has_work(CPUState *cs)
>          && cs->interrupt_request &
>          (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
>           | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ
> -         | CPU_INTERRUPT_EXITTB);
> +         | CPU_INTERRUPT_SERROR | CPU_INTERRUPT_EXITTB);
>  }
> 
>  void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn 
> *hook,
> @@ -449,6 +449,9 @@ static inline bool arm_excp_unmasked(CPUState *cs,
> unsigned int excp_idx,
>              return false;
>          }
>          return !(env->daif & PSTATE_I);
> +    case EXCP_SERROR:
> +       pstate_unmasked = !(env->daif & PSTATE_A);
> +       break;

nit: Consider keeping the physical interrupts together, as they are 
closely
related.

>      default:
>          g_assert_not_reached();
>      }
> @@ -538,6 +541,15 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int
> interrupt_request)
> 
>      /* The prioritization of interrupts is IMPLEMENTATION DEFINED. */
> 
> +    if (interrupt_request & CPU_INTERRUPT_SERROR) {
> +        excp_idx = EXCP_SERROR;
> +        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, 
> secure);
> +        if (arm_excp_unmasked(cs, excp_idx, target_el,
> +                              cur_el, secure, hcr_el2)) {
> +            goto found;
> +        }
> +    }
> +
>      if (interrupt_request & CPU_INTERRUPT_FIQ) {
>          excp_idx = EXCP_FIQ;
>          target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, 
> secure);
> @@ -570,6 +582,7 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int
> interrupt_request)
>              goto found;
>          }
>      }
> +
>      return false;
> 
>   found:
> @@ -585,7 +598,7 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState
> *cs, int interrupt_request)
>      CPUClass *cc = CPU_GET_CLASS(cs);
>      ARMCPU *cpu = ARM_CPU(cs);
>      CPUARMState *env = &cpu->env;
> -    bool ret = false;
> +    uint32_t excp_idx;
> 
>      /* ARMv7-M interrupt masking works differently than -A or -R.
>       * There is no FIQ/IRQ distinction. Instead of I and F bits
> @@ -594,13 +607,26 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState
> *cs, int interrupt_request)
>       * (which depends on state like BASEPRI, FAULTMASK and the
>       * currently active exception).
>       */
> -    if (interrupt_request & CPU_INTERRUPT_HARD
> -        && (armv7m_nvic_can_take_pending_exception(env->nvic))) {
> -        cs->exception_index = EXCP_IRQ;
> -        cc->do_interrupt(cs);
> -        ret = true;
> +    if (!armv7m_nvic_can_take_pending_exception(env->nvic)) {
> +        return false;
> +    }
> +
> +    if (interrupt_request & CPU_INTERRUPT_SERROR) {
> +        excp_idx = EXCP_SERROR;
> +        goto found;
> +    }
> +
> +    if (interrupt_request & CPU_INTERRUPT_HARD) {
> +        excp_idx = EXCP_IRQ;
> +        goto found;
>      }
> -    return ret;
> +
> +    return false;
> +
> +found:
> +    cs->exception_index = excp_idx;
> +    cc->do_interrupt(cs);
> +    return true;
>  }
>  #endif
> 
> @@ -656,7 +682,8 @@ static void arm_cpu_set_irq(void *opaque, int irq,
> int level)
>          [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
>          [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
>          [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
> -        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
> +        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ,
> +        [ARM_CPU_SERROR] = CPU_INTERRUPT_SERROR,
>      };
> 
>      if (level) {
> @@ -676,6 +703,7 @@ static void arm_cpu_set_irq(void *opaque, int irq,
> int level)
>          break;
>      case ARM_CPU_IRQ:
>      case ARM_CPU_FIQ:
> +    case ARM_CPU_SERROR:
>          if (level) {
>              cpu_interrupt(cs, mask[irq]);
>          } else {
> @@ -693,8 +721,10 @@ static void arm_cpu_kvm_set_irq(void *opaque, int
> irq, int level)
>      ARMCPU *cpu = opaque;
>      CPUARMState *env = &cpu->env;
>      CPUState *cs = CPU(cpu);
> +    struct kvm_vcpu_events events;
>      uint32_t linestate_bit;
>      int irq_id;
> +    bool inject_irq = true;
> 
>      switch (irq) {
>      case ARM_CPU_IRQ:
> @@ -705,6 +735,14 @@ static void arm_cpu_kvm_set_irq(void *opaque, int
> irq, int level)
>          irq_id = KVM_ARM_IRQ_CPU_FIQ;
>          linestate_bit = CPU_INTERRUPT_FIQ;
>          break;
> +    case ARM_CPU_SERROR:
> +        if (!kvm_has_vcpu_events()) {
> +            return;
> +        }
> +
> +        inject_irq = false;
> +        linestate_bit = CPU_INTERRUPT_SERROR;
> +        break;
>      default:
>          g_assert_not_reached();
>      }
> @@ -714,7 +752,14 @@ static void arm_cpu_kvm_set_irq(void *opaque, int
> irq, int level)
>      } else {
>          env->irq_line_state &= ~linestate_bit;
>      }
> -    kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, 
> !!level);
> +
> +    if (inject_irq) {

You could just have (linestate_bit != CPU_INTERRUPT_SERROR) here, and 
not have
inject_irq at all.

> +        kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, 
> !!level);
> +    } else if (level) {

Is there any case where you'd call this function with a SError and level 
== 0?
And even if it happens, you could exit early in the above switch 
statement.

> +        memset(&events, 0, sizeof(events));
> +        events.exception.serror_pending = 1;
> +        kvm_vcpu_ioctl(cs, KVM_SET_VCPU_EVENTS, &events);
> +    }
>  #endif
>  }
> 
> @@ -1064,9 +1109,9 @@ static void arm_cpu_initfn(Object *obj)
>          /* VIRQ and VFIQ are unused with KVM but we add them to 
> maintain
>           * the same interface as non-KVM CPUs.
>           */
> -        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
> +        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 
> ARM_CPU_NUM_IRQ);
>      } else {
> -        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
> +        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 
> ARM_CPU_NUM_IRQ);
>      }
> 
>      qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
> diff --git a/target/arm/cpu.h b/target/arm/cpu.h
> index e943ffe8a9..23e9f7ee2d 100644
> --- a/target/arm/cpu.h
> +++ b/target/arm/cpu.h
> @@ -49,6 +49,7 @@
>  #define EXCP_LAZYFP         20   /* v7M fault during lazy FP stacking 
> */
>  #define EXCP_LSERR          21   /* v8M LSERR SecureFault */
>  #define EXCP_UNALIGNED      22   /* v7M UNALIGNED UsageFault */
> +#define EXCP_SERROR         23   /* SError Interrupt */
>  /* NB: add new EXCP_ defines to the array in arm_log_exception() too 
> */
> 
>  #define ARMV7M_EXCP_RESET   1
> @@ -79,9 +80,10 @@ enum {
>  };
> 
>  /* ARM-specific interrupt pending bits.  */
> -#define CPU_INTERRUPT_FIQ   CPU_INTERRUPT_TGT_EXT_1
> -#define CPU_INTERRUPT_VIRQ  CPU_INTERRUPT_TGT_EXT_2
> -#define CPU_INTERRUPT_VFIQ  CPU_INTERRUPT_TGT_EXT_3
> +#define CPU_INTERRUPT_FIQ     CPU_INTERRUPT_TGT_EXT_1
> +#define CPU_INTERRUPT_VIRQ    CPU_INTERRUPT_TGT_EXT_2
> +#define CPU_INTERRUPT_VFIQ    CPU_INTERRUPT_TGT_EXT_3
> +#define CPU_INTERRUPT_SERROR  CPU_INTERRUPT_TGT_EXT_4
> 
>  /* The usual mapping for an AArch64 system register to its AArch32
>   * counterpart is for the 32 bit world to have access to the lower
> @@ -97,11 +99,13 @@ enum {
>  #define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t))
>  #endif
> 
> -/* Meanings of the ARMCPU object's four inbound GPIO lines */
> -#define ARM_CPU_IRQ 0
> -#define ARM_CPU_FIQ 1
> -#define ARM_CPU_VIRQ 2
> -#define ARM_CPU_VFIQ 3
> +/* ARMCPU object's inbound GPIO lines */
> +#define ARM_CPU_IRQ     0
> +#define ARM_CPU_FIQ     1
> +#define ARM_CPU_VIRQ    2
> +#define ARM_CPU_VFIQ    3
> +#define ARM_CPU_SERROR  4
> +#define ARM_CPU_NUM_IRQ 5

This probably should be turned into an enum, given that it is going to
grow again on the following patch.

> 
>  /* ARM-specific extra insn start words:
>   * 1: Conditional execution bits
> diff --git a/target/arm/helper.c b/target/arm/helper.c
> index 366dbcf460..3f00af4c41 100644
> --- a/target/arm/helper.c
> +++ b/target/arm/helper.c
> @@ -1969,6 +1969,12 @@ static uint64_t isr_read(CPUARMState *env,
> const ARMCPRegInfo *ri)
>          }
>      }
> 
> +    if (!allow_virt || !(hcr_el2 & HCR_AMO)) {

nit: It would be nicer to write this as

        if (!(allow_virt && (hcr_el2 & HCR_AMO)))

which fits the current code better, and makes a slightly less ugly
rewrite in the following patch.

> +        if (cs->interrupt_request & CPU_INTERRUPT_SERROR) {
> +            ret |= CPSR_A;
> +        }
> +    }
> +
>      /* External aborts are not possible in QEMU so A bit is always 
> clear */

nit: this comment seems obsolete now.

Thanks,

         M.
Gavin Shan Feb. 18, 2020, 11:09 p.m. UTC | #2
Hi Marc,

On 2/19/20 3:28 AM, Marc Zyngier wrote:
> On 2020-02-18 02:04, Gavin Shan wrote:
>> This supports SError injection, which will be used by "virt" board to
>> simulating the behavior of NMI injection in next patch. As Peter Maydell
>> suggested, this adds a new interrupt (ARM_CPU_SERROR), which is parallel
>> to CPU_INTERRUPT_HARD. The backend depends on if kvm is enabled or not.
>> kvm_vcpu_ioctl(cpu, KVM_SET_VCPU_EVENTS) is leveraged to inject SError
>> or data abort to guest. When TCG is enabled, the behavior is simulated
>> by injecting SError and data abort to guest.
> 
> s/and/or/ (you can't inject both at the same time).
> 

Absolutely, it will be corrected in v5, which will be put on hold. I hope to receive
comments from Peter and Richard before doing another respin :)

>>
>> Signed-off-by: Gavin Shan <gshan@redhat.com>
>> ---
>>  target/arm/cpu.c      | 69 +++++++++++++++++++++++++++++++++++--------
>>  target/arm/cpu.h      | 20 ++++++++-----
>>  target/arm/helper.c   | 12 ++++++++
>>  target/arm/m_helper.c |  8 +++++
>>  target/arm/machine.c  |  3 +-
>>  5 files changed, 91 insertions(+), 21 deletions(-)
>>
>> diff --git a/target/arm/cpu.c b/target/arm/cpu.c
>> index de733aceeb..e5750080bc 100644
>> --- a/target/arm/cpu.c
>> +++ b/target/arm/cpu.c
>> @@ -78,7 +78,7 @@ static bool arm_cpu_has_work(CPUState *cs)
>>          && cs->interrupt_request &
>>          (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
>>           | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ
>> -         | CPU_INTERRUPT_EXITTB);
>> +         | CPU_INTERRUPT_SERROR | CPU_INTERRUPT_EXITTB);
>>  }
>>
>>  void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
>> @@ -449,6 +449,9 @@ static inline bool arm_excp_unmasked(CPUState *cs,
>> unsigned int excp_idx,
>>              return false;
>>          }
>>          return !(env->daif & PSTATE_I);
>> +    case EXCP_SERROR:
>> +       pstate_unmasked = !(env->daif & PSTATE_A);
>> +       break;
> 
> nit: Consider keeping the physical interrupts together, as they are closely
> related.
> 

Sorry, I didn't get the point. Maybe you're suggesting something like below?
If yes, I'm not sure if it's necessary.

     pstate_unmasked = !(env->daif & (PSTATE_A | PSTATE_I));

I think PSTATE_A is enough to mask out SError according to ARMv8 architecture
reference manual (D1.7), as below:

    A, I, F Asynchronous exception mask bits:
    A
       SError interrupt mask bit.
    I
       IRQ interrupt mask bit.
    F
       FIQ interrupt mask bit.

>>      default:
>>          g_assert_not_reached();
>>      }
>> @@ -538,6 +541,15 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int
>> interrupt_request)
>>
>>      /* The prioritization of interrupts is IMPLEMENTATION DEFINED. */
>>
>> +    if (interrupt_request & CPU_INTERRUPT_SERROR) {
>> +        excp_idx = EXCP_SERROR;
>> +        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
>> +        if (arm_excp_unmasked(cs, excp_idx, target_el,
>> +                              cur_el, secure, hcr_el2)) {
>> +            goto found;
>> +        }
>> +    }
>> +
>>      if (interrupt_request & CPU_INTERRUPT_FIQ) {
>>          excp_idx = EXCP_FIQ;
>>          target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
>> @@ -570,6 +582,7 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int
>> interrupt_request)
>>              goto found;
>>          }
>>      }
>> +
>>      return false;
>>
>>   found:
>> @@ -585,7 +598,7 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState
>> *cs, int interrupt_request)
>>      CPUClass *cc = CPU_GET_CLASS(cs);
>>      ARMCPU *cpu = ARM_CPU(cs);
>>      CPUARMState *env = &cpu->env;
>> -    bool ret = false;
>> +    uint32_t excp_idx;
>>
>>      /* ARMv7-M interrupt masking works differently than -A or -R.
>>       * There is no FIQ/IRQ distinction. Instead of I and F bits
>> @@ -594,13 +607,26 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState
>> *cs, int interrupt_request)
>>       * (which depends on state like BASEPRI, FAULTMASK and the
>>       * currently active exception).
>>       */
>> -    if (interrupt_request & CPU_INTERRUPT_HARD
>> -        && (armv7m_nvic_can_take_pending_exception(env->nvic))) {
>> -        cs->exception_index = EXCP_IRQ;
>> -        cc->do_interrupt(cs);
>> -        ret = true;
>> +    if (!armv7m_nvic_can_take_pending_exception(env->nvic)) {
>> +        return false;
>> +    }
>> +
>> +    if (interrupt_request & CPU_INTERRUPT_SERROR) {
>> +        excp_idx = EXCP_SERROR;
>> +        goto found;
>> +    }
>> +
>> +    if (interrupt_request & CPU_INTERRUPT_HARD) {
>> +        excp_idx = EXCP_IRQ;
>> +        goto found;
>>      }
>> -    return ret;
>> +
>> +    return false;
>> +
>> +found:
>> +    cs->exception_index = excp_idx;
>> +    cc->do_interrupt(cs);
>> +    return true;
>>  }
>>  #endif
>>
>> @@ -656,7 +682,8 @@ static void arm_cpu_set_irq(void *opaque, int irq,
>> int level)
>>          [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
>>          [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
>>          [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
>> -        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
>> +        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ,
>> +        [ARM_CPU_SERROR] = CPU_INTERRUPT_SERROR,
>>      };
>>
>>      if (level) {
>> @@ -676,6 +703,7 @@ static void arm_cpu_set_irq(void *opaque, int irq,
>> int level)
>>          break;
>>      case ARM_CPU_IRQ:
>>      case ARM_CPU_FIQ:
>> +    case ARM_CPU_SERROR:
>>          if (level) {
>>              cpu_interrupt(cs, mask[irq]);
>>          } else {
>> @@ -693,8 +721,10 @@ static void arm_cpu_kvm_set_irq(void *opaque, int
>> irq, int level)
>>      ARMCPU *cpu = opaque;
>>      CPUARMState *env = &cpu->env;
>>      CPUState *cs = CPU(cpu);
>> +    struct kvm_vcpu_events events;
>>      uint32_t linestate_bit;
>>      int irq_id;
>> +    bool inject_irq = true;
>>
>>      switch (irq) {
>>      case ARM_CPU_IRQ:
>> @@ -705,6 +735,14 @@ static void arm_cpu_kvm_set_irq(void *opaque, int
>> irq, int level)
>>          irq_id = KVM_ARM_IRQ_CPU_FIQ;
>>          linestate_bit = CPU_INTERRUPT_FIQ;
>>          break;
>> +    case ARM_CPU_SERROR:
>> +        if (!kvm_has_vcpu_events()) {
>> +            return;
>> +        }
>> +
>> +        inject_irq = false;
>> +        linestate_bit = CPU_INTERRUPT_SERROR;
>> +        break;
>>      default:
>>          g_assert_not_reached();
>>      }
>> @@ -714,7 +752,14 @@ static void arm_cpu_kvm_set_irq(void *opaque, int
>> irq, int level)
>>      } else {
>>          env->irq_line_state &= ~linestate_bit;
>>      }
>> -    kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
>> +
>> +    if (inject_irq) {
> 
> You could just have (linestate_bit != CPU_INTERRUPT_SERROR) here, and not have
> inject_irq at all.
> 

Sure, will be improved in v5.

>> +        kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
>> +    } else if (level) {
> 
> Is there any case where you'd call this function with a SError and level == 0?
> And even if it happens, you could exit early in the above switch statement.
> 

The combination of SError and level == 0 doesn't exist for now, meaning the
SError always comes with level == 1. We can't exit early in the above switch
statement because env->irq_line_state needs to be updated accordingly.

>> +        memset(&events, 0, sizeof(events));
>> +        events.exception.serror_pending = 1;
>> +        kvm_vcpu_ioctl(cs, KVM_SET_VCPU_EVENTS, &events);
>> +    }
>>  #endif
>>  }
>>
>> @@ -1064,9 +1109,9 @@ static void arm_cpu_initfn(Object *obj)
>>          /* VIRQ and VFIQ are unused with KVM but we add them to maintain
>>           * the same interface as non-KVM CPUs.
>>           */
>> -        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
>> +        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, ARM_CPU_NUM_IRQ);
>>      } else {
>> -        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
>> +        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, ARM_CPU_NUM_IRQ);
>>      }
>>
>>      qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
>> diff --git a/target/arm/cpu.h b/target/arm/cpu.h
>> index e943ffe8a9..23e9f7ee2d 100644
>> --- a/target/arm/cpu.h
>> +++ b/target/arm/cpu.h
>> @@ -49,6 +49,7 @@
>>  #define EXCP_LAZYFP         20   /* v7M fault during lazy FP stacking */
>>  #define EXCP_LSERR          21   /* v8M LSERR SecureFault */
>>  #define EXCP_UNALIGNED      22   /* v7M UNALIGNED UsageFault */
>> +#define EXCP_SERROR         23   /* SError Interrupt */
>>  /* NB: add new EXCP_ defines to the array in arm_log_exception() too */
>>
>>  #define ARMV7M_EXCP_RESET   1
>> @@ -79,9 +80,10 @@ enum {
>>  };
>>
>>  /* ARM-specific interrupt pending bits.  */
>> -#define CPU_INTERRUPT_FIQ   CPU_INTERRUPT_TGT_EXT_1
>> -#define CPU_INTERRUPT_VIRQ  CPU_INTERRUPT_TGT_EXT_2
>> -#define CPU_INTERRUPT_VFIQ  CPU_INTERRUPT_TGT_EXT_3
>> +#define CPU_INTERRUPT_FIQ     CPU_INTERRUPT_TGT_EXT_1
>> +#define CPU_INTERRUPT_VIRQ    CPU_INTERRUPT_TGT_EXT_2
>> +#define CPU_INTERRUPT_VFIQ    CPU_INTERRUPT_TGT_EXT_3
>> +#define CPU_INTERRUPT_SERROR  CPU_INTERRUPT_TGT_EXT_4
>>
>>  /* The usual mapping for an AArch64 system register to its AArch32
>>   * counterpart is for the 32 bit world to have access to the lower
>> @@ -97,11 +99,13 @@ enum {
>>  #define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t))
>>  #endif
>>
>> -/* Meanings of the ARMCPU object's four inbound GPIO lines */
>> -#define ARM_CPU_IRQ 0
>> -#define ARM_CPU_FIQ 1
>> -#define ARM_CPU_VIRQ 2
>> -#define ARM_CPU_VFIQ 3
>> +/* ARMCPU object's inbound GPIO lines */
>> +#define ARM_CPU_IRQ     0
>> +#define ARM_CPU_FIQ     1
>> +#define ARM_CPU_VIRQ    2
>> +#define ARM_CPU_VFIQ    3
>> +#define ARM_CPU_SERROR  4
>> +#define ARM_CPU_NUM_IRQ 5
> 
> This probably should be turned into an enum, given that it is going to
> grow again on the following patch.
> 

Nice idea, will do in v5.

>>
>>  /* ARM-specific extra insn start words:
>>   * 1: Conditional execution bits
>> diff --git a/target/arm/helper.c b/target/arm/helper.c
>> index 366dbcf460..3f00af4c41 100644
>> --- a/target/arm/helper.c
>> +++ b/target/arm/helper.c
>> @@ -1969,6 +1969,12 @@ static uint64_t isr_read(CPUARMState *env,
>> const ARMCPRegInfo *ri)
>>          }
>>      }
>>
>> +    if (!allow_virt || !(hcr_el2 & HCR_AMO)) {
> 
> nit: It would be nicer to write this as
> 
>         if (!(allow_virt && (hcr_el2 & HCR_AMO)))
> 
> which fits the current code better, and makes a slightly less ugly
> rewrite in the following patch.
> 

Yeah, I originally had the code as suggested, but then changed it to the code as-is.
Anyway, I will change it accordingly in v5.

>> +        if (cs->interrupt_request & CPU_INTERRUPT_SERROR) {
>> +            ret |= CPSR_A;
>> +        }
>> +    }
>> +
>>      /* External aborts are not possible in QEMU so A bit is always clear */
> 
> nit: this comment seems obsolete now.
> 

Yep, will be corrected in v5.

Thanks,
Gavin
Marc Zyngier Feb. 19, 2020, 8:03 a.m. UTC | #3
On Wed, 19 Feb 2020 10:09:39 +1100
Gavin Shan <gshan@redhat.com> wrote:

> Hi Marc,
> 
> On 2/19/20 3:28 AM, Marc Zyngier wrote:
> > On 2020-02-18 02:04, Gavin Shan wrote:  
> >> This supports SError injection, which will be used by "virt" board to
> >> simulating the behavior of NMI injection in next patch. As Peter Maydell
> >> suggested, this adds a new interrupt (ARM_CPU_SERROR), which is parallel
> >> to CPU_INTERRUPT_HARD. The backend depends on if kvm is enabled or not.
> >> kvm_vcpu_ioctl(cpu, KVM_SET_VCPU_EVENTS) is leveraged to inject SError
> >> or data abort to guest. When TCG is enabled, the behavior is simulated
> >> by injecting SError and data abort to guest.  
> > 
> > s/and/or/ (you can't inject both at the same time).
> >   
> 
> Absolutely, will be corrected in v5, which will be hold. I hope to receive
> comments from Peter and Richard before going to do another respin :)

Sure, there is no hurry at all.

> 
> >>
> >> Signed-off-by: Gavin Shan <gshan@redhat.com>
> >> ---
> >>  target/arm/cpu.c      | 69 ++++++++++++++++++++++++++++++++  
> +++--------
> >>  target/arm/cpu.h      | 20 ++++++++-----
> >>  target/arm/helper.c   | 12 ++++++++
> >>  target/arm/m_helper.c |  8 +++++
> >>  target/arm/machine.c  |  3 +-
> >>  5 files changed, 91 insertions(+), 21 deletions(-)
> >>
> >> diff --git a/target/arm/cpu.c b/target/arm/cpu.c
> >> index de733aceeb..e5750080bc 100644
> >> --- a/target/arm/cpu.c
> >> +++ b/target/arm/cpu.c
> >> @@ -78,7 +78,7 @@ static bool arm_cpu_has_work(CPUState *cs)
> >>          && cs->interrupt_request &
> >>          (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
> >>           | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ
> >> -         | CPU_INTERRUPT_EXITTB);
> >> +         | CPU_INTERRUPT_SERROR | CPU_INTERRUPT_EXITTB)  
> ;
> >>  }
> >>
> >>  void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *  
> hook,
> >> @@ -449,6 +449,9 @@ static inline bool arm_excp_unmasked(CPUState *cs,
> >> unsigned int excp_idx,
> >>              return false;
> >>          }
> >>          return !(env->daif & PSTATE_I);
> >> +    case EXCP_SERROR:
> >> +       pstate_unmasked = !(env->daif & PSTATE_A);
> >> +       break;  
> > 
> > nit: Consider keeping the physical interrupts together, as they are closely
> > related.
> >   
> >   
> 
> Sorry, I didn't get the point. Maybe you're suggesting something like below?
> If yes, I'm not sure if it's necessary.
> 
>      pstate_unmasked = !(env->daif & (PSTATE_A | PSTATE_I));
> 
> I think PSTATE_A is enough to mask out SError according to ARMv8 architecture
> reference manual (D1.7), as below:
> 
>     A, I, F Asynchronous exception mask bits:
>     A
>        SError interrupt mask bit.
>     I
>        IRQ interrupt mask bit.
>     F
>        FIQ interrupt mask bit.

No, all I'm suggesting is that you keep the cases for IRQ, FIQ and
SError close together in the switch statement, instead of placing
SError after the virtual interrupts. Given that they use the same code
pattern, it makes sense to order them this way. But as I said, this is
a nit, and not something that affects the outcome of this code.

> >>      default:
> >>          g_assert_not_reached();
> >>      }
> >> @@ -538,6 +541,15 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int
> >> interrupt_request)
> >>
> >>      /* The prioritization of interrupts is IMPLEMENTATION DEFIN  
> ED. */
> >>
> >> +    if (interrupt_request & CPU_INTERRUPT_SERROR) {
> >> +        excp_idx = EXCP_SERROR;
> >> +        target_el = arm_phys_excp_target_el(cs, excp_id  
> x, cur_el, secure);
> >> +        if (arm_excp_unmasked(cs, excp_idx, target_el,
> >> +                         
>        cur_el, secure, hcr_el2)) {
> >> +            goto found;
> >> +        }
> >> +    }
> >> +
> >>      if (interrupt_request & CPU_INTERRUPT_FIQ) {
> >>          excp_idx = EXCP_FIQ;
> >>          target_el = arm_phys_excp_target_el(cs, excp_  
> idx, cur_el, secure);
> >> @@ -570,6 +582,7 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int
> >> interrupt_request)
> >>              goto found;
> >>          }
> >>      }
> >> +
> >>      return false;
> >>
> >>   found:
> >> @@ -585,7 +598,7 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState
> >> *cs, int interrupt_request)
> >>      CPUClass *cc = CPU_GET_CLASS(cs);
> >>      ARMCPU *cpu = ARM_CPU(cs);
> >>      CPUARMState *env = &cpu->env;
> >> -    bool ret = false;
> >> +    uint32_t excp_idx;
> >>
> >>      /* ARMv7-M interrupt masking works differently than -A or -  
> R.
> >>       * There is no FIQ/IRQ distinction. Instead of I and F bi  
> ts
> >> @@ -594,13 +607,26 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState
> >> *cs, int interrupt_request)
> >>       * (which depends on state like BASEPRI, FAULTMASK and th  
> e
> >>       * currently active exception).
> >>       */
> >> -    if (interrupt_request & CPU_INTERRUPT_HARD
> >> -        && (armv7m_nvic_can_take_pending_exception(env->n  
> vic))) {
> >> -        cs->exception_index = EXCP_IRQ;
> >> -        cc->do_interrupt(cs);
> >> -        ret = true;
> >> +    if (!armv7m_nvic_can_take_pending_exception(env->nvic)) {
> >> +        return false;
> >> +    }
> >> +
> >> +    if (interrupt_request & CPU_INTERRUPT_SERROR) {
> >> +        excp_idx = EXCP_SERROR;
> >> +        goto found;
> >> +    }
> >> +
> >> +    if (interrupt_request & CPU_INTERRUPT_HARD) {
> >> +        excp_idx = EXCP_IRQ;
> >> +        goto found;
> >>      }
> >> -    return ret;
> >> +
> >> +    return false;
> >> +
> >> +found:
> >> +    cs->exception_index = excp_idx;
> >> +    cc->do_interrupt(cs);
> >> +    return true;
> >>  }
> >>  #endif
> >>
> >> @@ -656,7 +682,8 @@ static void arm_cpu_set_irq(void *opaque, int irq,
> >> int level)
> >>          [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
> >>          [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
> >>          [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
> >> -        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
> >> +        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ,
> >> +        [ARM_CPU_SERROR] = CPU_INTERRUPT_SERROR,
> >>      };
> >>
> >>      if (level) {
> >> @@ -676,6 +703,7 @@ static void arm_cpu_set_irq(void *opaque, int irq,
> >> int level)
> >>          break;
> >>      case ARM_CPU_IRQ:
> >>      case ARM_CPU_FIQ:
> >> +    case ARM_CPU_SERROR:
> >>          if (level) {
> >>              cpu_interrupt(cs, mask[irq]);
> >>          } else {
> >> @@ -693,8 +721,10 @@ static void arm_cpu_kvm_set_irq(void *opaque, int
> >> irq, int level)
> >>      ARMCPU *cpu = opaque;
> >>      CPUARMState *env = &cpu->env;
> >>      CPUState *cs = CPU(cpu);
> >> +    struct kvm_vcpu_events events;
> >>      uint32_t linestate_bit;
> >>      int irq_id;
> >> +    bool inject_irq = true;
> >>
> >>      switch (irq) {
> >>      case ARM_CPU_IRQ:
> >> @@ -705,6 +735,14 @@ static void arm_cpu_kvm_set_irq(void *opaque, int
> >> irq, int level)
> >>          irq_id = KVM_ARM_IRQ_CPU_FIQ;
> >>          linestate_bit = CPU_INTERRUPT_FIQ;
> >>          break;
> >> +    case ARM_CPU_SERROR:
> >> +        if (!kvm_has_vcpu_events()) {
> >> +            return;
> >> +        }
> >> +
> >> +        inject_irq = false;
> >> +        linestate_bit = CPU_INTERRUPT_SERROR;
> >> +        break;
> >>      default:
> >>          g_assert_not_reached();
> >>      }
> >> @@ -714,7 +752,14 @@ static void arm_cpu_kvm_set_irq(void *opaque, int
> >> irq, int level)
> >>      } else {
> >>          env->irq_line_state &= ~linestate_bit;
> >>      }
> >> -    kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
> >> +
> >> +    if (inject_irq) {  
> > 
> > You could just have (linestate_bit != CPU_INTERRUPT_SERROR) here, and n  
> ot have
> > inject_irq at all.
> >   
> 
> Sure, will be improved in v5.
> 
> >> +        kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_C  
> PU, irq_id, !!level);
> >> +    } else if (level) {  
> > 
> > Is there any case where you'd call this function with a SError and level == 0?
> > And even if it happens, you could exit early in the above switch statemen  
> t.
> >   
> 
> The combination of SError and level == 0 isn't existing for now,
> meaning the SError is always coming with level == 1. We can't exit
> early in above switch statement because env->irq_line_state needs to
> be updated accordingly.

I'm not sure level==0 makes much sense. A common implementation of
SError is as an edge interrupt (it is consumed by being handled, and
you can't "retire" it). I'm not familiar enough with QEMU's interrupt
delivery mechanism, but I'd expect SError to differ significantly from
IRQ/FIQ in that respect.

	M.
diff mbox series

Patch

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index de733aceeb..e5750080bc 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -78,7 +78,7 @@  static bool arm_cpu_has_work(CPUState *cs)
         && cs->interrupt_request &
         (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
          | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ
-         | CPU_INTERRUPT_EXITTB);
+         | CPU_INTERRUPT_SERROR | CPU_INTERRUPT_EXITTB);
 }
 
 void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
@@ -449,6 +449,9 @@  static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
             return false;
         }
         return !(env->daif & PSTATE_I);
+    case EXCP_SERROR:
+       pstate_unmasked = !(env->daif & PSTATE_A);
+       break;
     default:
         g_assert_not_reached();
     }
@@ -538,6 +541,15 @@  bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
 
     /* The prioritization of interrupts is IMPLEMENTATION DEFINED. */
 
+    if (interrupt_request & CPU_INTERRUPT_SERROR) {
+        excp_idx = EXCP_SERROR;
+        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
+        if (arm_excp_unmasked(cs, excp_idx, target_el,
+                              cur_el, secure, hcr_el2)) {
+            goto found;
+        }
+    }
+
     if (interrupt_request & CPU_INTERRUPT_FIQ) {
         excp_idx = EXCP_FIQ;
         target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
@@ -570,6 +582,7 @@  bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
             goto found;
         }
     }
+
     return false;
 
  found:
@@ -585,7 +598,7 @@  static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
     CPUClass *cc = CPU_GET_CLASS(cs);
     ARMCPU *cpu = ARM_CPU(cs);
     CPUARMState *env = &cpu->env;
-    bool ret = false;
+    uint32_t excp_idx;
 
     /* ARMv7-M interrupt masking works differently than -A or -R.
      * There is no FIQ/IRQ distinction. Instead of I and F bits
@@ -594,13 +607,26 @@  static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
      * (which depends on state like BASEPRI, FAULTMASK and the
      * currently active exception).
      */
-    if (interrupt_request & CPU_INTERRUPT_HARD
-        && (armv7m_nvic_can_take_pending_exception(env->nvic))) {
-        cs->exception_index = EXCP_IRQ;
-        cc->do_interrupt(cs);
-        ret = true;
+    if (!armv7m_nvic_can_take_pending_exception(env->nvic)) {
+        return false;
+    }
+
+    if (interrupt_request & CPU_INTERRUPT_SERROR) {
+        excp_idx = EXCP_SERROR;
+        goto found;
+    }
+
+    if (interrupt_request & CPU_INTERRUPT_HARD) {
+        excp_idx = EXCP_IRQ;
+        goto found;
     }
-    return ret;
+
+    return false;
+
+found:
+    cs->exception_index = excp_idx;
+    cc->do_interrupt(cs);
+    return true;
 }
 #endif
 
@@ -656,7 +682,8 @@  static void arm_cpu_set_irq(void *opaque, int irq, int level)
         [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
         [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
         [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
-        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
+        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ,
+        [ARM_CPU_SERROR] = CPU_INTERRUPT_SERROR,
     };
 
     if (level) {
@@ -676,6 +703,7 @@  static void arm_cpu_set_irq(void *opaque, int irq, int level)
         break;
     case ARM_CPU_IRQ:
     case ARM_CPU_FIQ:
+    case ARM_CPU_SERROR:
         if (level) {
             cpu_interrupt(cs, mask[irq]);
         } else {
@@ -693,8 +721,10 @@  static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
     ARMCPU *cpu = opaque;
     CPUARMState *env = &cpu->env;
     CPUState *cs = CPU(cpu);
+    struct kvm_vcpu_events events;
     uint32_t linestate_bit;
     int irq_id;
+    bool inject_irq = true;
 
     switch (irq) {
     case ARM_CPU_IRQ:
@@ -705,6 +735,14 @@  static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
         irq_id = KVM_ARM_IRQ_CPU_FIQ;
         linestate_bit = CPU_INTERRUPT_FIQ;
         break;
+    case ARM_CPU_SERROR:
+        if (!kvm_has_vcpu_events()) {
+            return;
+        }
+
+        inject_irq = false;
+        linestate_bit = CPU_INTERRUPT_SERROR;
+        break;
     default:
         g_assert_not_reached();
     }
@@ -714,7 +752,14 @@  static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
     } else {
         env->irq_line_state &= ~linestate_bit;
     }
-    kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
+
+    if (inject_irq) {
+        kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
+    } else if (level) {
+        memset(&events, 0, sizeof(events));
+        events.exception.serror_pending = 1;
+        kvm_vcpu_ioctl(cs, KVM_SET_VCPU_EVENTS, &events);
+    }
 #endif
 }
 
@@ -1064,9 +1109,9 @@  static void arm_cpu_initfn(Object *obj)
         /* VIRQ and VFIQ are unused with KVM but we add them to maintain
          * the same interface as non-KVM CPUs.
          */
-        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
+        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, ARM_CPU_NUM_IRQ);
     } else {
-        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
+        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, ARM_CPU_NUM_IRQ);
     }
 
     qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index e943ffe8a9..23e9f7ee2d 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -49,6 +49,7 @@ 
 #define EXCP_LAZYFP         20   /* v7M fault during lazy FP stacking */
 #define EXCP_LSERR          21   /* v8M LSERR SecureFault */
 #define EXCP_UNALIGNED      22   /* v7M UNALIGNED UsageFault */
+#define EXCP_SERROR         23   /* SError Interrupt */
 /* NB: add new EXCP_ defines to the array in arm_log_exception() too */
 
 #define ARMV7M_EXCP_RESET   1
@@ -79,9 +80,10 @@  enum {
 };
 
 /* ARM-specific interrupt pending bits.  */
-#define CPU_INTERRUPT_FIQ   CPU_INTERRUPT_TGT_EXT_1
-#define CPU_INTERRUPT_VIRQ  CPU_INTERRUPT_TGT_EXT_2
-#define CPU_INTERRUPT_VFIQ  CPU_INTERRUPT_TGT_EXT_3
+#define CPU_INTERRUPT_FIQ     CPU_INTERRUPT_TGT_EXT_1
+#define CPU_INTERRUPT_VIRQ    CPU_INTERRUPT_TGT_EXT_2
+#define CPU_INTERRUPT_VFIQ    CPU_INTERRUPT_TGT_EXT_3
+#define CPU_INTERRUPT_SERROR  CPU_INTERRUPT_TGT_EXT_4
 
 /* The usual mapping for an AArch64 system register to its AArch32
  * counterpart is for the 32 bit world to have access to the lower
@@ -97,11 +99,13 @@  enum {
 #define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t))
 #endif
 
-/* Meanings of the ARMCPU object's four inbound GPIO lines */
-#define ARM_CPU_IRQ 0
-#define ARM_CPU_FIQ 1
-#define ARM_CPU_VIRQ 2
-#define ARM_CPU_VFIQ 3
+/* ARMCPU object's inbound GPIO lines */
+#define ARM_CPU_IRQ     0
+#define ARM_CPU_FIQ     1
+#define ARM_CPU_VIRQ    2
+#define ARM_CPU_VFIQ    3
+#define ARM_CPU_SERROR  4
+#define ARM_CPU_NUM_IRQ 5
 
 /* ARM-specific extra insn start words:
  * 1: Conditional execution bits
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 366dbcf460..3f00af4c41 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -1969,6 +1969,12 @@  static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
         }
     }
 
+    if (!allow_virt || !(hcr_el2 & HCR_AMO)) {
+        if (cs->interrupt_request & CPU_INTERRUPT_SERROR) {
+            ret |= CPSR_A;
+        }
+    }
+
     /* External aborts are not possible in QEMU so A bit is always clear */
     return ret;
 }
@@ -8598,6 +8604,7 @@  void arm_log_exception(int idx)
             [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
             [EXCP_LSERR] = "v8M LSERR UsageFault",
             [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
+            [EXCP_SERROR] = "SError Interrupt",
         };
 
         if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
@@ -8923,6 +8930,7 @@  static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
         addr = 0x0c;
         break;
     case EXCP_DATA_ABORT:
+    case EXCP_SERROR:
         env->cp15.dfar_s = env->exception.vaddress;
         qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
                       (uint32_t)env->exception.vaddress);
@@ -9051,6 +9059,7 @@  static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
         offset = 4;
         break;
     case EXCP_DATA_ABORT:
+    case EXCP_SERROR:
         A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
         A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
         qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
@@ -9213,6 +9222,9 @@  static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
     case EXCP_VFIQ:
         addr += 0x100;
         break;
+    case EXCP_SERROR:
+        addr += 0x180;
+        break;
     default:
         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
     }
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
index 33d414a684..a7271cc386 100644
--- a/target/arm/m_helper.c
+++ b/target/arm/m_helper.c
@@ -2211,6 +2211,14 @@  void arm_v7m_cpu_do_interrupt(CPUState *cs)
          * v7m_preserve_fp_state() helper function.
          */
         break;
+    case EXCP_SERROR:
+        env->v7m.cfsr[M_REG_NS] |=
+            (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
+        env->v7m.bfar = env->exception.vaddress;
+        qemu_log_mask(CPU_LOG_INT,
+                      "...with CFSR.PRECISERR and BFAR 0x%x\n",
+                      env->v7m.bfar);
+        break;
     default:
         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
         return; /* Never happens.  Keep compiler happy.  */
diff --git a/target/arm/machine.c b/target/arm/machine.c
index 241890ac8c..e2ad2f156e 100644
--- a/target/arm/machine.c
+++ b/target/arm/machine.c
@@ -714,7 +714,8 @@  static int cpu_post_load(void *opaque, int version_id)
 
         env->irq_line_state = cs->interrupt_request &
             (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ |
-             CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VFIQ);
+             CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VFIQ |
+             CPU_INTERRUPT_SERROR);
     }
 
     /* Update the values list from the incoming migration data.