
[v3,1/4] LoongArch: KVM: Add HW Binary Translation extension support

Message ID 20240527074644.836699-2-maobibo@loongson.cn (mailing list archive)
State New, archived
Series LoongArch: KVM: Add Binary Translation extension support

Commit Message

Bibo Mao May 27, 2024, 7:46 a.m. UTC
Loongson Binary Translation (LBT) is used to accelerate binary translation. Its
hardware context consists of four scratch registers (scr0 to scr3), the x86/ARM
eflags register (eflags) and the x87 FPU stack pointer (ftop).

Like the FPU extension, a late (lazy) enabling method is used for LBT: the LBT
context is saved and restored on the vcpu context switch path.
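
For illustration only, the per-vcpu LBT state described above can be pictured
as a small register file. The sketch below is assumed from the description
(scr0 to scr3, eflags, ftop); it is not necessarily the layout this series uses:

/* Hypothetical sketch of the per-vcpu LBT context, for illustration */
struct loongarch_lbt {
	unsigned long scr0;	/* scratch registers scr0..scr3 */
	unsigned long scr1;
	unsigned long scr2;
	unsigned long scr3;
	unsigned long eflags;	/* emulated x86/ARM eflags */
	unsigned long ftop;	/* x87 FPU stack pointer */
};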

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
---
 arch/loongarch/include/asm/kvm_host.h |  8 ++++
 arch/loongarch/include/asm/kvm_vcpu.h | 10 +++++
 arch/loongarch/kvm/exit.c             |  9 ++++
 arch/loongarch/kvm/vcpu.c             | 59 ++++++++++++++++++++++++++-
 4 files changed, 85 insertions(+), 1 deletion(-)

Comments

Huacai Chen June 23, 2024, 10:11 a.m. UTC | #1
Hi, Bibo,

On Mon, May 27, 2024 at 3:46 PM Bibo Mao <maobibo@loongson.cn> wrote:
>
> Loongson Binary Translation (LBT) is used to accelerate binary translation. Its
> hardware context consists of four scratch registers (scr0 to scr3), the x86/ARM
> eflags register (eflags) and the x87 FPU stack pointer (ftop).
>
> Like the FPU extension, a late (lazy) enabling method is used for LBT: the LBT
> context is saved and restored on the vcpu context switch path.
>
> Signed-off-by: Bibo Mao <maobibo@loongson.cn>
> ---
>  arch/loongarch/include/asm/kvm_host.h |  8 ++++
>  arch/loongarch/include/asm/kvm_vcpu.h | 10 +++++
>  arch/loongarch/kvm/exit.c             |  9 ++++
>  arch/loongarch/kvm/vcpu.c             | 59 ++++++++++++++++++++++++++-
>  4 files changed, 85 insertions(+), 1 deletion(-)
>
> diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
> index 2eb2f7572023..88023ab59486 100644
> --- a/arch/loongarch/include/asm/kvm_host.h
> +++ b/arch/loongarch/include/asm/kvm_host.h
> @@ -133,6 +133,7 @@ enum emulation_result {
>  #define KVM_LARCH_LASX         (0x1 << 2)
>  #define KVM_LARCH_SWCSR_LATEST (0x1 << 3)
>  #define KVM_LARCH_HWCSR_USABLE (0x1 << 4)
> +#define KVM_LARCH_LBT          (0x1 << 5)
>
>  struct kvm_vcpu_arch {
>         /*
> @@ -166,6 +167,7 @@ struct kvm_vcpu_arch {
>
>         /* FPU state */
>         struct loongarch_fpu fpu FPU_ALIGN;
> +       struct loongarch_lbt lbt;
>
>         /* CSR state */
>         struct loongarch_csrs *csr;
> @@ -235,6 +237,12 @@ static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
>         return arch->cpucfg[2] & CPUCFG2_LASX;
>  }
>
> +static inline bool kvm_guest_has_lbt(struct kvm_vcpu_arch *arch)
> +{
> +       return arch->cpucfg[2] & (CPUCFG2_X86BT | CPUCFG2_ARMBT
> +                                       | CPUCFG2_MIPSBT);
> +}
> +
>  /* Debug: dump vcpu state */
>  int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
>
> diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
> index d7e51300a89f..ec46009be29b 100644
> --- a/arch/loongarch/include/asm/kvm_vcpu.h
> +++ b/arch/loongarch/include/asm/kvm_vcpu.h
> @@ -75,6 +75,16 @@ static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
>  static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
>  #endif
>
> +#ifdef CONFIG_CPU_HAS_LBT
> +int kvm_own_lbt(struct kvm_vcpu *vcpu);
> +#else
> +static inline int kvm_own_lbt(struct kvm_vcpu *vcpu) { return -EINVAL; }
> +static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
> +static inline void kvm_enable_lbt_fpu(struct kvm_vcpu *vcpu,
> +                                       unsigned long fcsr) { }
> +static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu) { }
> +#endif
It is better to keep symmetry here. That means also declaring
kvm_lose_lbt() for CONFIG_CPU_HAS_LBT, and moving the last two functions
into the .c file, since they are static.
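
A minimal sketch of the suggested symmetric layout (assuming kvm_lose_lbt()
is made non-static so it can be declared here, and that kvm_enable_lbt_fpu()
and kvm_check_fcsr() move into vcpu.c as static helpers):

#ifdef CONFIG_CPU_HAS_LBT
int kvm_own_lbt(struct kvm_vcpu *vcpu);
void kvm_lose_lbt(struct kvm_vcpu *vcpu);
#else
static inline int kvm_own_lbt(struct kvm_vcpu *vcpu) { return -EINVAL; }
static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
#endif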

> +
>  void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
>  void kvm_reset_timer(struct kvm_vcpu *vcpu);
>  void kvm_save_timer(struct kvm_vcpu *vcpu);
> diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
> index e2abd97fb13f..e1bd81d27fd8 100644
> --- a/arch/loongarch/kvm/exit.c
> +++ b/arch/loongarch/kvm/exit.c
> @@ -835,6 +835,14 @@ static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
>         return ret;
>  }
>
> +static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu)
> +{
> +       if (kvm_own_lbt(vcpu))
> +               kvm_queue_exception(vcpu, EXCCODE_INE, 0);
> +
> +       return RESUME_GUEST;
> +}
> +
>  /*
>   * LoongArch KVM callback handling for unimplemented guest exiting
>   */
> @@ -867,6 +875,7 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
>         [EXCCODE_LASXDIS]               = kvm_handle_lasx_disabled,
>         [EXCCODE_GSPR]                  = kvm_handle_gspr,
>         [EXCCODE_HVC]                   = kvm_handle_hypercall,
> +       [EXCCODE_BTDIS]                 = kvm_handle_lbt_disabled,
>  };
>
>  int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault)
> diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
> index 382796f1d3e6..8f80d1a2dcbb 100644
> --- a/arch/loongarch/kvm/vcpu.c
> +++ b/arch/loongarch/kvm/vcpu.c
> @@ -6,6 +6,7 @@
>  #include <linux/kvm_host.h>
>  #include <linux/entry-kvm.h>
>  #include <asm/fpu.h>
> +#include <asm/lbt.h>
>  #include <asm/loongarch.h>
>  #include <asm/setup.h>
>  #include <asm/time.h>
> @@ -952,12 +953,64 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
>         return 0;
>  }
>
> +#ifdef CONFIG_CPU_HAS_LBT
> +int kvm_own_lbt(struct kvm_vcpu *vcpu)
> +{
> +       if (!kvm_guest_has_lbt(&vcpu->arch))
> +               return -EINVAL;
> +
> +       preempt_disable();
> +       set_csr_euen(CSR_EUEN_LBTEN);
> +
> +       _restore_lbt(&vcpu->arch.lbt);
> +       vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
> +       preempt_enable();
> +       return 0;
> +}
> +
> +static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
> +{
> +       preempt_disable();
> +       if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
> +               _save_lbt(&vcpu->arch.lbt);
> +               clear_csr_euen(CSR_EUEN_LBTEN);
> +               vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
> +       }
> +       preempt_enable();
> +}
> +
> +static void kvm_enable_lbt_fpu(struct kvm_vcpu *vcpu, unsigned long fcsr)
It is better to rename it to kvm_own_lbt_tm().

> +{
> +       /*
> +        * If TM is enabled, the TOP register save/restore will
> +        * trigger an LBT exception, so enable LBT in advance here.
> +        */
> +       if (fcsr & FPU_CSR_TM)
> +               kvm_own_lbt(vcpu);
> +}
> +
> +static void kvm_check_fcsr(struct kvm_vcpu *vcpu)
> +{
> +       unsigned long fcsr;
> +
> +       if (vcpu->arch.aux_inuse & KVM_LARCH_FPU)
> +               if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) {
The condition can be simplified to:
if ((vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LBT)) == KVM_LARCH_FPU)
(note the parentheses around the & expression: == binds tighter than & in C).
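
Applying that simplification, the whole helper could read (a sketch only):

static void kvm_check_fcsr(struct kvm_vcpu *vcpu)
{
	unsigned long fcsr;

	if ((vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LBT)) ==
	    KVM_LARCH_FPU) {
		fcsr = read_fcsr(LOONGARCH_FCSR0);
		kvm_enable_lbt_fpu(vcpu, fcsr);
	}
}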

> +                       fcsr = read_fcsr(LOONGARCH_FCSR0);
> +                       kvm_enable_lbt_fpu(vcpu, fcsr);
> +               }
> +}
> +#endif
> +
>  /* Enable FPU and restore context */
>  void kvm_own_fpu(struct kvm_vcpu *vcpu)
>  {
>         preempt_disable();
>
> -       /* Enable FPU */
> +       /*
> +        * Enable FPU for guest
> +        * We set FR and FRE according to guest context
> +        */
> +       kvm_enable_lbt_fpu(vcpu, vcpu->arch.fpu.fcsr);
>         set_csr_euen(CSR_EUEN_FPEN);
>
>         kvm_restore_fpu(&vcpu->arch.fpu);
> @@ -977,6 +1030,7 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu)
>         preempt_disable();
>
>         /* Enable LSX for guest */
> +       kvm_enable_lbt_fpu(vcpu, vcpu->arch.fpu.fcsr);
>         set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
>         switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
>         case KVM_LARCH_FPU:
> @@ -1011,6 +1065,7 @@ int kvm_own_lasx(struct kvm_vcpu *vcpu)
>
>         preempt_disable();
>
> +       kvm_enable_lbt_fpu(vcpu, vcpu->arch.fpu.fcsr);
>         set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
>         switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
>         case KVM_LARCH_LSX:
> @@ -1042,6 +1097,7 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
>  {
>         preempt_disable();
>
> +       kvm_check_fcsr(vcpu);
>         if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
>                 kvm_save_lasx(&vcpu->arch.fpu);
>                 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
> @@ -1064,6 +1120,7 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
>                 /* Disable FPU */
>                 clear_csr_euen(CSR_EUEN_FPEN);
>         }
> +       kvm_lose_lbt(vcpu);
>
>         preempt_enable();
>  }
> --
> 2.39.3
>
Bibo Mao June 24, 2024, 1:51 a.m. UTC | #2
On 2024/6/23 6:11 PM, Huacai Chen wrote:
> Hi, Bibo,
> 
> On Mon, May 27, 2024 at 3:46 PM Bibo Mao <maobibo@loongson.cn> wrote:
>>
>> Loongson Binary Translation (LBT) is used to accelerate binary translation. Its
>> hardware context consists of four scratch registers (scr0 to scr3), the x86/ARM
>> eflags register (eflags) and the x87 FPU stack pointer (ftop).
>>
>> Like the FPU extension, a late (lazy) enabling method is used for LBT: the LBT
>> context is saved and restored on the vcpu context switch path.
>>
>> Signed-off-by: Bibo Mao <maobibo@loongson.cn>
>> ---
>>   arch/loongarch/include/asm/kvm_host.h |  8 ++++
>>   arch/loongarch/include/asm/kvm_vcpu.h | 10 +++++
>>   arch/loongarch/kvm/exit.c             |  9 ++++
>>   arch/loongarch/kvm/vcpu.c             | 59 ++++++++++++++++++++++++++-
>>   4 files changed, 85 insertions(+), 1 deletion(-)
>>
>> diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
>> index 2eb2f7572023..88023ab59486 100644
>> --- a/arch/loongarch/include/asm/kvm_host.h
>> +++ b/arch/loongarch/include/asm/kvm_host.h
>> @@ -133,6 +133,7 @@ enum emulation_result {
>>   #define KVM_LARCH_LASX         (0x1 << 2)
>>   #define KVM_LARCH_SWCSR_LATEST (0x1 << 3)
>>   #define KVM_LARCH_HWCSR_USABLE (0x1 << 4)
>> +#define KVM_LARCH_LBT          (0x1 << 5)
>>
>>   struct kvm_vcpu_arch {
>>          /*
>> @@ -166,6 +167,7 @@ struct kvm_vcpu_arch {
>>
>>          /* FPU state */
>>          struct loongarch_fpu fpu FPU_ALIGN;
>> +       struct loongarch_lbt lbt;
>>
>>          /* CSR state */
>>          struct loongarch_csrs *csr;
>> @@ -235,6 +237,12 @@ static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
>>          return arch->cpucfg[2] & CPUCFG2_LASX;
>>   }
>>
>> +static inline bool kvm_guest_has_lbt(struct kvm_vcpu_arch *arch)
>> +{
>> +       return arch->cpucfg[2] & (CPUCFG2_X86BT | CPUCFG2_ARMBT
>> +                                       | CPUCFG2_MIPSBT);
>> +}
>> +
>>   /* Debug: dump vcpu state */
>>   int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
>>
>> diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
>> index d7e51300a89f..ec46009be29b 100644
>> --- a/arch/loongarch/include/asm/kvm_vcpu.h
>> +++ b/arch/loongarch/include/asm/kvm_vcpu.h
>> @@ -75,6 +75,16 @@ static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
>>   static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
>>   #endif
>>
>> +#ifdef CONFIG_CPU_HAS_LBT
>> +int kvm_own_lbt(struct kvm_vcpu *vcpu);
>> +#else
>> +static inline int kvm_own_lbt(struct kvm_vcpu *vcpu) { return -EINVAL; }
>> +static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
>> +static inline void kvm_enable_lbt_fpu(struct kvm_vcpu *vcpu,
>> +                                       unsigned long fcsr) { }
>> +static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu) { }
>> +#endif
> It is better to keep symmetry here. That means also declaring
> kvm_lose_lbt() for CONFIG_CPU_HAS_LBT, and moving the last two functions
> into the .c file, since they are static.
Sure, will do it this way in the next version.

Regards
Bibo Mao

Patch

diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
index 2eb2f7572023..88023ab59486 100644
--- a/arch/loongarch/include/asm/kvm_host.h
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -133,6 +133,7 @@  enum emulation_result {
 #define KVM_LARCH_LASX		(0x1 << 2)
 #define KVM_LARCH_SWCSR_LATEST	(0x1 << 3)
 #define KVM_LARCH_HWCSR_USABLE	(0x1 << 4)
+#define KVM_LARCH_LBT		(0x1 << 5)
 
 struct kvm_vcpu_arch {
 	/*
@@ -166,6 +167,7 @@  struct kvm_vcpu_arch {
 
 	/* FPU state */
 	struct loongarch_fpu fpu FPU_ALIGN;
+	struct loongarch_lbt lbt;
 
 	/* CSR state */
 	struct loongarch_csrs *csr;
@@ -235,6 +237,12 @@  static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
 	return arch->cpucfg[2] & CPUCFG2_LASX;
 }
 
+static inline bool kvm_guest_has_lbt(struct kvm_vcpu_arch *arch)
+{
+	return arch->cpucfg[2] & (CPUCFG2_X86BT | CPUCFG2_ARMBT
+					| CPUCFG2_MIPSBT);
+}
+
 /* Debug: dump vcpu state */
 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
 
diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
index d7e51300a89f..ec46009be29b 100644
--- a/arch/loongarch/include/asm/kvm_vcpu.h
+++ b/arch/loongarch/include/asm/kvm_vcpu.h
@@ -75,6 +75,16 @@  static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
 static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
 #endif
 
+#ifdef CONFIG_CPU_HAS_LBT
+int kvm_own_lbt(struct kvm_vcpu *vcpu);
+#else
+static inline int kvm_own_lbt(struct kvm_vcpu *vcpu) { return -EINVAL; }
+static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
+static inline void kvm_enable_lbt_fpu(struct kvm_vcpu *vcpu,
+					unsigned long fcsr) { }
+static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu) { }
+#endif
+
 void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
 void kvm_reset_timer(struct kvm_vcpu *vcpu);
 void kvm_save_timer(struct kvm_vcpu *vcpu);
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
index e2abd97fb13f..e1bd81d27fd8 100644
--- a/arch/loongarch/kvm/exit.c
+++ b/arch/loongarch/kvm/exit.c
@@ -835,6 +835,14 @@  static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
 	return ret;
 }
 
+static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu)
+{
+	if (kvm_own_lbt(vcpu))
+		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
+
+	return RESUME_GUEST;
+}
+
 /*
  * LoongArch KVM callback handling for unimplemented guest exiting
  */
@@ -867,6 +875,7 @@  static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
 	[EXCCODE_LASXDIS]		= kvm_handle_lasx_disabled,
 	[EXCCODE_GSPR]			= kvm_handle_gspr,
 	[EXCCODE_HVC]			= kvm_handle_hypercall,
+	[EXCCODE_BTDIS]			= kvm_handle_lbt_disabled,
 };
 
 int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault)
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 382796f1d3e6..8f80d1a2dcbb 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -6,6 +6,7 @@ 
 #include <linux/kvm_host.h>
 #include <linux/entry-kvm.h>
 #include <asm/fpu.h>
+#include <asm/lbt.h>
 #include <asm/loongarch.h>
 #include <asm/setup.h>
 #include <asm/time.h>
@@ -952,12 +953,64 @@  int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 	return 0;
 }
 
+#ifdef CONFIG_CPU_HAS_LBT
+int kvm_own_lbt(struct kvm_vcpu *vcpu)
+{
+	if (!kvm_guest_has_lbt(&vcpu->arch))
+		return -EINVAL;
+
+	preempt_disable();
+	set_csr_euen(CSR_EUEN_LBTEN);
+
+	_restore_lbt(&vcpu->arch.lbt);
+	vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
+	preempt_enable();
+	return 0;
+}
+
+static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
+{
+	preempt_disable();
+	if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
+		_save_lbt(&vcpu->arch.lbt);
+		clear_csr_euen(CSR_EUEN_LBTEN);
+		vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
+	}
+	preempt_enable();
+}
+
+static void kvm_enable_lbt_fpu(struct kvm_vcpu *vcpu, unsigned long fcsr)
+{
+	/*
+	 * If TM is enabled, the TOP register save/restore will
+	 * trigger an LBT exception, so enable LBT in advance here.
+	 */
+	if (fcsr & FPU_CSR_TM)
+		kvm_own_lbt(vcpu);
+}
+
+static void kvm_check_fcsr(struct kvm_vcpu *vcpu)
+{
+	unsigned long fcsr;
+
+	if (vcpu->arch.aux_inuse & KVM_LARCH_FPU)
+		if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) {
+			fcsr = read_fcsr(LOONGARCH_FCSR0);
+			kvm_enable_lbt_fpu(vcpu, fcsr);
+		}
+}
+#endif
+
 /* Enable FPU and restore context */
 void kvm_own_fpu(struct kvm_vcpu *vcpu)
 {
 	preempt_disable();
 
-	/* Enable FPU */
+	/*
+	 * Enable FPU for guest
+	 * We set FR and FRE according to guest context
+	 */
+	kvm_enable_lbt_fpu(vcpu, vcpu->arch.fpu.fcsr);
 	set_csr_euen(CSR_EUEN_FPEN);
 
 	kvm_restore_fpu(&vcpu->arch.fpu);
@@ -977,6 +1030,7 @@  int kvm_own_lsx(struct kvm_vcpu *vcpu)
 	preempt_disable();
 
 	/* Enable LSX for guest */
+	kvm_enable_lbt_fpu(vcpu, vcpu->arch.fpu.fcsr);
 	set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
 	switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
 	case KVM_LARCH_FPU:
@@ -1011,6 +1065,7 @@  int kvm_own_lasx(struct kvm_vcpu *vcpu)
 
 	preempt_disable();
 
+	kvm_enable_lbt_fpu(vcpu, vcpu->arch.fpu.fcsr);
 	set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
 	switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
 	case KVM_LARCH_LSX:
@@ -1042,6 +1097,7 @@  void kvm_lose_fpu(struct kvm_vcpu *vcpu)
 {
 	preempt_disable();
 
+	kvm_check_fcsr(vcpu);
 	if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
 		kvm_save_lasx(&vcpu->arch.fpu);
 		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
@@ -1064,6 +1120,7 @@  void kvm_lose_fpu(struct kvm_vcpu *vcpu)
 		/* Disable FPU */
 		clear_csr_euen(CSR_EUEN_FPEN);
 	}
+	kvm_lose_lbt(vcpu);
 
 	preempt_enable();
 }