diff mbox series

[v3,1/2] target/arm: Support SError injection

Message ID 20200214055950.62477-2-gshan@redhat.com (mailing list archive)
State New, archived
Headers show
Series hw/arm/virt: Simulate NMI Injection | expand

Commit Message

Gavin Shan Feb. 14, 2020, 5:59 a.m. UTC
This supports SError injection, which will be used by the "virt" board to
simulate the behavior of NMI injection in the next patch. As Peter Maydell
suggested, this adds a new interrupt (ARM_CPU_SERROR), which is parallel
to CPU_INTERRUPT_HARD. The backend depends on whether KVM is enabled or not.
When KVM is enabled, kvm_vcpu_ioctl(cpu, KVM_SET_VCPU_EVENTS) is leveraged
to inject SError or data abort to the guest. When TCG is enabled, the
behavior is simulated by injecting SError and data abort to the guest.

Signed-off-by: Gavin Shan <gshan@redhat.com>
---
 target/arm/cpu.c      | 69 +++++++++++++++++++++++++++++++++++--------
 target/arm/cpu.h      | 17 ++++++-----
 target/arm/helper.c   |  6 ++++
 target/arm/m_helper.c |  8 +++++
 4 files changed, 81 insertions(+), 19 deletions(-)

Comments

Richard Henderson Feb. 16, 2020, 3:41 a.m. UTC | #1
On 2/13/20 9:59 PM, Gavin Shan wrote:
> diff --git a/target/arm/cpu.c b/target/arm/cpu.c
> index b0762a76c4..180e29fb83 100644
> --- a/target/arm/cpu.c
> +++ b/target/arm/cpu.c
> @@ -78,7 +78,7 @@ static bool arm_cpu_has_work(CPUState *cs)
>          && cs->interrupt_request &
>          (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
>           | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ
> -         | CPU_INTERRUPT_EXITTB);
> +         | ARM_CPU_SERROR | CPU_INTERRUPT_EXITTB);

CPU_INTERRUPT_SERROR, not ARM_CPU_SERROR.

> @@ -570,6 +573,16 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
>              goto found;
>          }
>      }
> +
> +    if (interrupt_request & CPU_INTERRUPT_SERROR) {
> +        excp_idx = EXCP_SERROR;
> +        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
> +        if (arm_excp_unmasked(cs, excp_idx, target_el,
> +                              cur_el, secure, hcr_el2)) {
> +            goto found;
> +        }
> +    }
> +
>      return false;
>  
>   found:

If you're intending to use Serror for NMI, perhaps it should be the first bit
tested, not the last.  Otherwise some bug that leaves a normal hard interrupt
line high will keep delivering the interrupt, and not the Serror.

As the comment at the top of the function says, the priority is implementation
defined, so we can put it anywhere we like.

> @@ -594,13 +607,26 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
>       * (which depends on state like BASEPRI, FAULTMASK and the
>       * currently active exception).
>       */
> -    if (interrupt_request & CPU_INTERRUPT_HARD
> -        && (armv7m_nvic_can_take_pending_exception(env->nvic))) {
> -        cs->exception_index = EXCP_IRQ;
> -        cc->do_interrupt(cs);
> -        ret = true;
> +    if (!armv7m_nvic_can_take_pending_exception(env->nvic)) {
> +        return false;
> +    }
> +
> +    if (interrupt_request & CPU_INTERRUPT_HARD) {
> +        excp_idx = EXCP_IRQ;
> +        goto found;
>      }
> -    return ret;
> +
> +    if (interrupt_request & CPU_INTERRUPT_SERROR) {
> +        excp_idx = EXCP_SERROR;
> +        goto found;
> +    }

Likewise.

> -        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
> +        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 5);
>      } else {
> -        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
> +        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 5);

I wonder if we should have an ARM_CPU_NUM_IRQ define so that this is more
automatic.

> @@ -98,10 +100,11 @@ enum {
>  #endif
>  
>  /* Meanings of the ARMCPU object's four inbound GPIO lines */
> -#define ARM_CPU_IRQ 0
> -#define ARM_CPU_FIQ 1
> -#define ARM_CPU_VIRQ 2
> -#define ARM_CPU_VFIQ 3
> +#define ARM_CPU_IRQ    0
> +#define ARM_CPU_FIQ    1
> +#define ARM_CPU_VIRQ   2
> +#define ARM_CPU_VFIQ   3
> +#define ARM_CPU_SERROR 4

Comment is now wrong about the count.


r~
Marc Zyngier Feb. 16, 2020, 12:34 p.m. UTC | #2
Hi Gavin,

On 2020-02-14 05:59, Gavin Shan wrote:
> This supports SError injection, which will be used by "virt" board to
> simulating the behavior of NMI injection in next patch. As Peter 
> Maydell
> suggested, this adds a new interrupt (ARM_CPU_SERROR), which is 
> parallel
> to CPU_INTERRUPT_HARD. The backend depends on if kvm is enabled or not.
> kvm_vcpu_ioctl(cpu, KVM_SET_VCPU_EVENTS) is leveraged to inject SError
> or data abort to guest. When TCG is enabled, the behavior is simulated
> by injecting SError and data abort to guest.
> 
> Signed-off-by: Gavin Shan <gshan@redhat.com>
> ---
>  target/arm/cpu.c      | 69 +++++++++++++++++++++++++++++++++++--------
>  target/arm/cpu.h      | 17 ++++++-----
>  target/arm/helper.c   |  6 ++++
>  target/arm/m_helper.c |  8 +++++
>  4 files changed, 81 insertions(+), 19 deletions(-)
> 

[...]

> @@ -656,7 +682,8 @@ static void arm_cpu_set_irq(void *opaque, int irq,
> int level)
>          [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
>          [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
>          [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
> -        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
> +        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ,
> +        [ARM_CPU_SERROR] = CPU_INTERRUPT_SERROR,

I'm a bit concerned with this. It makes sense for a host, but doesn't
allow the SError signal to be virtualised (there should be a VSError
signal in this list that can be injected via HCR_EL2.VA, just like
VIRQ is injected by HCR_EL2.VI).

Given that people use QEMU as a development platform for hypervisors,
I'd really like this functionality to be supported from day-1.

There is also the whole RAS stuff, which is quite a lot of work, but let's
start at least with the full ARMv8.0 semantics.

Thanks,

         M.
Gavin Shan Feb. 16, 2020, 11:42 p.m. UTC | #3
On 2/16/20 2:41 PM, Richard Henderson wrote:
> On 2/13/20 9:59 PM, Gavin Shan wrote:
>> diff --git a/target/arm/cpu.c b/target/arm/cpu.c
>> index b0762a76c4..180e29fb83 100644
>> --- a/target/arm/cpu.c
>> +++ b/target/arm/cpu.c
>> @@ -78,7 +78,7 @@ static bool arm_cpu_has_work(CPUState *cs)
>>           && cs->interrupt_request &
>>           (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
>>            | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ
>> -         | CPU_INTERRUPT_EXITTB);
>> +         | ARM_CPU_SERROR | CPU_INTERRUPT_EXITTB);
> 
> CPU_INTERRUPT_SERROR, not ARM_CPU_SERROR.
> 

Yep, will be corrected in v4.

>> @@ -570,6 +573,16 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
>>               goto found;
>>           }
>>       }
>> +
>> +    if (interrupt_request & CPU_INTERRUPT_SERROR) {
>> +        excp_idx = EXCP_SERROR;
>> +        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
>> +        if (arm_excp_unmasked(cs, excp_idx, target_el,
>> +                              cur_el, secure, hcr_el2)) {
>> +            goto found;
>> +        }
>> +    }
>> +
>>       return false;
>>   
>>    found:
> 
> If you're intending to use Serror for NMI, perhaps it should be the first bit
> tested, not the last.  Otherwise some bug that leaves a normal hard interrupt
> line high will keep delivering the interrupt, and not the Serror.
> 
> As the comment at the top of the function says, the priority is implementation
> defined, so we can put it anywhere we like.
> 

Yes, SError will have highest priority in v4.

>> @@ -594,13 +607,26 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
>>        * (which depends on state like BASEPRI, FAULTMASK and the
>>        * currently active exception).
>>        */
>> -    if (interrupt_request & CPU_INTERRUPT_HARD
>> -        && (armv7m_nvic_can_take_pending_exception(env->nvic))) {
>> -        cs->exception_index = EXCP_IRQ;
>> -        cc->do_interrupt(cs);
>> -        ret = true;
>> +    if (!armv7m_nvic_can_take_pending_exception(env->nvic)) {
>> +        return false;
>> +    }
>> +
>> +    if (interrupt_request & CPU_INTERRUPT_HARD) {
>> +        excp_idx = EXCP_IRQ;
>> +        goto found;
>>       }
>> -    return ret;
>> +
>> +    if (interrupt_request & CPU_INTERRUPT_SERROR) {
>> +        excp_idx = EXCP_SERROR;
>> +        goto found;
>> +    }
> 
> Likewise.
> 

Thanks, SError will have highest priority in v4.

>> -        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
>> +        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 5);
>>       } else {
>> -        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
>> +        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 5);
> 
> I wonder if we should have an ARM_CPU_NUM_IRQ define so that this is more
> automatic.
> 

Yes, it makes sense. ARM_CPU_NUM_IRQ will be introduced in v4.

>> @@ -98,10 +100,11 @@ enum {
>>   #endif
>>   
>>   /* Meanings of the ARMCPU object's four inbound GPIO lines */
>> -#define ARM_CPU_IRQ 0
>> -#define ARM_CPU_FIQ 1
>> -#define ARM_CPU_VIRQ 2
>> -#define ARM_CPU_VFIQ 3
>> +#define ARM_CPU_IRQ    0
>> +#define ARM_CPU_FIQ    1
>> +#define ARM_CPU_VIRQ   2
>> +#define ARM_CPU_VFIQ   3
>> +#define ARM_CPU_SERROR 4
> 
> Comment is now wrong about the count.
> 

Yes, it will be corrected to "ARMCPU object's inbound GPIO lines" in v4.

Thanks,
Gavin
Gavin Shan Feb. 17, 2020, 2:59 a.m. UTC | #4
Hi Marc,

On 2/16/20 11:34 PM, Marc Zyngier wrote:
> On 2020-02-14 05:59, Gavin Shan wrote:
>> This supports SError injection, which will be used by "virt" board to
>> simulating the behavior of NMI injection in next patch. As Peter Maydell
>> suggested, this adds a new interrupt (ARM_CPU_SERROR), which is parallel
>> to CPU_INTERRUPT_HARD. The backend depends on if kvm is enabled or not.
>> kvm_vcpu_ioctl(cpu, KVM_SET_VCPU_EVENTS) is leveraged to inject SError
>> or data abort to guest. When TCG is enabled, the behavior is simulated
>> by injecting SError and data abort to guest.
>>
>> Signed-off-by: Gavin Shan <gshan@redhat.com>
>> ---
>>  target/arm/cpu.c      | 69 +++++++++++++++++++++++++++++++++++--------
>>  target/arm/cpu.h      | 17 ++++++-----
>>  target/arm/helper.c   |  6 ++++
>>  target/arm/m_helper.c |  8 +++++
>>  4 files changed, 81 insertions(+), 19 deletions(-)
>>
> 
> [...]
> 
>> @@ -656,7 +682,8 @@ static void arm_cpu_set_irq(void *opaque, int irq,
>> int level)
>>          [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
>>          [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
>>          [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
>> -        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
>> +        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ,
>> +        [ARM_CPU_SERROR] = CPU_INTERRUPT_SERROR,
> 
> I'm a bit concerned with this. It makes sense for a host, but doesn't
> allow the SError signal to be virtualised (there should be a VSError
> signal in this list that can be injected via HCR_EL2.VA, just like
> VIRQ is injected by HCR_EL2.VI).
> 
> Given that people use QEMU as a development platform for hypervisors,
> I'd really like this functionality to be supported from day-1.
> 
> There is also the whole RAS stuff which quite a lot of work, but let's
> start at least with the full ARMv8.0 semantics.
> 

Thanks for the comments. Yes, I think it's reasonable to support virtual
SError as well. Let's have a separate patch to support it in v4. I think
you were talking about HCR_EL2.VSE, which is defined as below in target/arm/cpu.h:

#define HCR_VSE       (1ULL << 8)

Thanks,
Gavin
diff mbox series

Patch

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index b0762a76c4..180e29fb83 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -78,7 +78,7 @@  static bool arm_cpu_has_work(CPUState *cs)
         && cs->interrupt_request &
         (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
          | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ
-         | CPU_INTERRUPT_EXITTB);
+         | ARM_CPU_SERROR | CPU_INTERRUPT_EXITTB);
 }
 
 void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
@@ -449,6 +449,9 @@  static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
             return false;
         }
         return !(env->daif & PSTATE_I);
+    case EXCP_SERROR:
+       pstate_unmasked = !(env->daif & PSTATE_A);
+       break;
     default:
         g_assert_not_reached();
     }
@@ -570,6 +573,16 @@  bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
             goto found;
         }
     }
+
+    if (interrupt_request & CPU_INTERRUPT_SERROR) {
+        excp_idx = EXCP_SERROR;
+        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
+        if (arm_excp_unmasked(cs, excp_idx, target_el,
+                              cur_el, secure, hcr_el2)) {
+            goto found;
+        }
+    }
+
     return false;
 
  found:
@@ -585,7 +598,7 @@  static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
     CPUClass *cc = CPU_GET_CLASS(cs);
     ARMCPU *cpu = ARM_CPU(cs);
     CPUARMState *env = &cpu->env;
-    bool ret = false;
+    uint32_t excp_idx;
 
     /* ARMv7-M interrupt masking works differently than -A or -R.
      * There is no FIQ/IRQ distinction. Instead of I and F bits
@@ -594,13 +607,26 @@  static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
      * (which depends on state like BASEPRI, FAULTMASK and the
      * currently active exception).
      */
-    if (interrupt_request & CPU_INTERRUPT_HARD
-        && (armv7m_nvic_can_take_pending_exception(env->nvic))) {
-        cs->exception_index = EXCP_IRQ;
-        cc->do_interrupt(cs);
-        ret = true;
+    if (!armv7m_nvic_can_take_pending_exception(env->nvic)) {
+        return false;
+    }
+
+    if (interrupt_request & CPU_INTERRUPT_HARD) {
+        excp_idx = EXCP_IRQ;
+        goto found;
     }
-    return ret;
+
+    if (interrupt_request & CPU_INTERRUPT_SERROR) {
+        excp_idx = EXCP_SERROR;
+        goto found;
+    }
+
+    return false;
+
+found:
+    cs->exception_index = excp_idx;
+    cc->do_interrupt(cs);
+    return true;
 }
 #endif
 
@@ -656,7 +682,8 @@  static void arm_cpu_set_irq(void *opaque, int irq, int level)
         [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
         [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
         [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
-        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
+        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ,
+        [ARM_CPU_SERROR] = CPU_INTERRUPT_SERROR,
     };
 
     if (level) {
@@ -676,6 +703,7 @@  static void arm_cpu_set_irq(void *opaque, int irq, int level)
         break;
     case ARM_CPU_IRQ:
     case ARM_CPU_FIQ:
+    case ARM_CPU_SERROR:
         if (level) {
             cpu_interrupt(cs, mask[irq]);
         } else {
@@ -693,8 +721,10 @@  static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
     ARMCPU *cpu = opaque;
     CPUARMState *env = &cpu->env;
     CPUState *cs = CPU(cpu);
+    struct kvm_vcpu_events events;
     uint32_t linestate_bit;
     int irq_id;
+    bool inject_irq = true;
 
     switch (irq) {
     case ARM_CPU_IRQ:
@@ -705,6 +735,14 @@  static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
         irq_id = KVM_ARM_IRQ_CPU_FIQ;
         linestate_bit = CPU_INTERRUPT_FIQ;
         break;
+    case ARM_CPU_SERROR:
+        if (!kvm_has_vcpu_events()) {
+            return;
+        }
+
+        inject_irq = false;
+        linestate_bit = CPU_INTERRUPT_SERROR;
+        break;
     default:
         g_assert_not_reached();
     }
@@ -714,7 +752,14 @@  static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
     } else {
         env->irq_line_state &= ~linestate_bit;
     }
-    kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
+
+    if (inject_irq) {
+        kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
+    } else if (level) {
+        memset(&events, 0, sizeof(events));
+        events.exception.serror_pending = 1;
+        kvm_vcpu_ioctl(cs, KVM_SET_VCPU_EVENTS, &events);
+    }
 #endif
 }
 
@@ -1064,9 +1109,9 @@  static void arm_cpu_initfn(Object *obj)
         /* VIRQ and VFIQ are unused with KVM but we add them to maintain
          * the same interface as non-KVM CPUs.
          */
-        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
+        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 5);
     } else {
-        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
+        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 5);
     }
 
     qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 0b3036c484..9a82378d6d 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -49,6 +49,7 @@ 
 #define EXCP_LAZYFP         20   /* v7M fault during lazy FP stacking */
 #define EXCP_LSERR          21   /* v8M LSERR SecureFault */
 #define EXCP_UNALIGNED      22   /* v7M UNALIGNED UsageFault */
+#define EXCP_SERROR         23   /* SError Interrupt */
 /* NB: add new EXCP_ defines to the array in arm_log_exception() too */
 
 #define ARMV7M_EXCP_RESET   1
@@ -79,9 +80,10 @@  enum {
 };
 
 /* ARM-specific interrupt pending bits.  */
-#define CPU_INTERRUPT_FIQ   CPU_INTERRUPT_TGT_EXT_1
-#define CPU_INTERRUPT_VIRQ  CPU_INTERRUPT_TGT_EXT_2
-#define CPU_INTERRUPT_VFIQ  CPU_INTERRUPT_TGT_EXT_3
+#define CPU_INTERRUPT_FIQ    CPU_INTERRUPT_TGT_EXT_1
+#define CPU_INTERRUPT_VIRQ   CPU_INTERRUPT_TGT_EXT_2
+#define CPU_INTERRUPT_VFIQ   CPU_INTERRUPT_TGT_EXT_3
+#define CPU_INTERRUPT_SERROR CPU_INTERRUPT_TGT_EXT_4
 
 /* The usual mapping for an AArch64 system register to its AArch32
  * counterpart is for the 32 bit world to have access to the lower
@@ -98,10 +100,11 @@  enum {
 #endif
 
 /* Meanings of the ARMCPU object's four inbound GPIO lines */
-#define ARM_CPU_IRQ 0
-#define ARM_CPU_FIQ 1
-#define ARM_CPU_VIRQ 2
-#define ARM_CPU_VFIQ 3
+#define ARM_CPU_IRQ    0
+#define ARM_CPU_FIQ    1
+#define ARM_CPU_VIRQ   2
+#define ARM_CPU_VFIQ   3
+#define ARM_CPU_SERROR 4
 
 /* ARM-specific extra insn start words:
  * 1: Conditional execution bits
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 7d15d5c933..0a3b7e5be2 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -8487,6 +8487,7 @@  void arm_log_exception(int idx)
             [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
             [EXCP_LSERR] = "v8M LSERR UsageFault",
             [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
+            [EXCP_SERROR] = "SError Interrupt",
         };
 
         if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
@@ -8789,6 +8790,7 @@  static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
         addr = 0x0c;
         break;
     case EXCP_DATA_ABORT:
+    case EXCP_SERROR:
         env->cp15.dfar_s = env->exception.vaddress;
         qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
                       (uint32_t)env->exception.vaddress);
@@ -8917,6 +8919,7 @@  static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
         offset = 4;
         break;
     case EXCP_DATA_ABORT:
+    case EXCP_SERROR:
         A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
         A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
         qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
@@ -9078,6 +9081,9 @@  static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
     case EXCP_VFIQ:
         addr += 0x100;
         break;
+    case EXCP_SERROR:
+        addr += 0x180;
+        break;
     default:
         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
     }
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
index 33d414a684..a7271cc386 100644
--- a/target/arm/m_helper.c
+++ b/target/arm/m_helper.c
@@ -2211,6 +2211,14 @@  void arm_v7m_cpu_do_interrupt(CPUState *cs)
          * v7m_preserve_fp_state() helper function.
          */
         break;
+    case EXCP_SERROR:
+        env->v7m.cfsr[M_REG_NS] |=
+            (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
+        env->v7m.bfar = env->exception.vaddress;
+        qemu_log_mask(CPU_LOG_INT,
+                      "...with CFSR.PRECISERR and BFAR 0x%x\n",
+                      env->v7m.bfar);
+        break;
     default:
         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
         return; /* Never happens.  Keep compiler happy.  */