@@ -5,6 +5,7 @@
#ifndef _ASM_POWERPC_GUEST_STATE_BUFFER_H
#define _ASM_POWERPC_GUEST_STATE_BUFFER_H
+#include "asm/hvcall.h"
#include <linux/gfp.h>
#include <linux/bitmap.h>
#include <asm/plpar_wrappers.h>
@@ -313,6 +314,8 @@ struct kvmppc_gs_buff *kvmppc_gsb_new(size_t size, unsigned long guest_id,
unsigned long vcpu_id, gfp_t flags);
void kvmppc_gsb_free(struct kvmppc_gs_buff *gsb);
void *kvmppc_gsb_put(struct kvmppc_gs_buff *gsb, size_t size);
+int kvmppc_gsb_send(struct kvmppc_gs_buff *gsb, unsigned long flags);
+int kvmppc_gsb_recv(struct kvmppc_gs_buff *gsb, unsigned long flags);
/**
* kvmppc_gsb_header() - the header of a guest state buffer
@@ -901,4 +904,92 @@ static inline void kvmppc_gsm_reset(struct kvmppc_gs_msg *gsm)
kvmppc_gsbm_zero(&gsm->bitmap);
}
+/**
+ * kvmppc_gsb_receive_data() - flexibly update values from a guest state buffer
+ * @gsb: guest state buffer
+ * @gsm: guest state message
+ *
+ * Requests updated values for the guest state IDs included in the guest state
+ * message. The guest state message is then used to deserialize the returned
+ * guest state buffer.
+ */
+static inline int kvmppc_gsb_receive_data(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm)
+{
+ int rc;
+
+ kvmppc_gsb_reset(gsb);
+ rc = kvmppc_gsm_fill_info(gsm, gsb);
+ if (rc < 0)
+ return rc;
+
+ rc = kvmppc_gsb_recv(gsb, gsm->flags);
+ if (rc < 0)
+ return rc;
+
+ rc = kvmppc_gsm_refresh_info(gsm, gsb);
+ if (rc < 0)
+ return rc;
+ return 0;
+}
+
+/**
+ * kvmppc_gsb_receive_datum() - receive a single guest state ID
+ * @gsb: guest state buffer
+ * @gsm: guest state message
+ * @iden: guest state identity
+ */
+static inline int kvmppc_gsb_receive_datum(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm, u16 iden)
+{
+ int rc;
+
+ kvmppc_gsm_include(gsm, iden);
+ rc = kvmppc_gsb_receive_data(gsb, gsm);
+ if (rc < 0)
+ return rc;
+ kvmppc_gsm_reset(gsm);
+ return 0;
+}
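+
+/*
+ * Illustrative usage sketch only (not part of the API itself): a caller pairs
+ * these helpers with a message and buffer built for the same guest and vcpu,
+ * e.g. to pull a single element back from the L0 hypervisor. The msg_ops,
+ * data, guest_id and vcpu_id names below are placeholders.
+ *
+ * gsm = kvmppc_gsm_new(&msg_ops, data, KVMPPC_GS_FLAGS_WIDE, GFP_KERNEL);
+ * gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), guest_id, vcpu_id, GFP_KERNEL);
+ * rc = kvmppc_gsb_receive_datum(gsb, gsm, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE);
+ */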
+
+/**
+ * kvmppc_gsb_send_data() - flexibly send values from a guest state buffer
+ * @gsb: guest state buffer
+ * @gsm: guest state message
+ *
+ * Sends the guest state values included in the guest state message.
+ */
+static inline int kvmppc_gsb_send_data(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm)
+{
+ int rc;
+
+ kvmppc_gsb_reset(gsb);
+ rc = kvmppc_gsm_fill_info(gsm, gsb);
+ if (rc < 0)
+ return rc;
+ rc = kvmppc_gsb_send(gsb, gsm->flags);
+
+ return rc;
+}
+
+/**
+ * kvmppc_gsb_send_datum() - send a single guest state ID
+ * @gsb: guest state buffer
+ * @gsm: guest state message
+ * @iden: guest state identity
+ */
+static inline int kvmppc_gsb_send_datum(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm, u16 iden)
+{
+ int rc;
+
+ kvmppc_gsm_include(gsm, iden);
+ rc = kvmppc_gsb_send_data(gsb, gsm);
+ if (rc < 0)
+ return rc;
+ kvmppc_gsm_reset(gsm);
+ return 0;
+}
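+
+/*
+ * Illustrative usage sketch only: sending a single element follows the same
+ * shape, e.g. publishing a run input buffer descriptor to the L0 hypervisor
+ * once the message has been configured (gsb and gsm as in the sketch above).
+ *
+ * rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_RUN_INPUT);
+ */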
+
#endif /* _ASM_POWERPC_GUEST_STATE_BUFFER_H */
@@ -100,6 +100,18 @@
#define H_COP_HW -74
#define H_STATE -75
#define H_IN_USE -77
+
+#define H_INVALID_ELEMENT_ID -79
+#define H_INVALID_ELEMENT_SIZE -80
+#define H_INVALID_ELEMENT_VALUE -81
+#define H_INPUT_BUFFER_NOT_DEFINED -82
+#define H_INPUT_BUFFER_TOO_SMALL -83
+#define H_OUTPUT_BUFFER_NOT_DEFINED -84
+#define H_OUTPUT_BUFFER_TOO_SMALL -85
+#define H_PARTITION_PAGE_TABLE_NOT_DEFINED -86
+#define H_GUEST_VCPU_STATE_NOT_HV_OWNED -87
+
#define H_UNSUPPORTED_FLAG_START -256
#define H_UNSUPPORTED_FLAG_END -511
#define H_MULTI_THREADS_ACTIVE -9005
@@ -381,6 +393,15 @@
#define H_ENTER_NESTED 0xF804
#define H_TLB_INVALIDATE 0xF808
#define H_COPY_TOFROM_GUEST 0xF80C
+#define H_GUEST_GET_CAPABILITIES 0x460
+#define H_GUEST_SET_CAPABILITIES 0x464
+#define H_GUEST_CREATE 0x470
+#define H_GUEST_CREATE_VCPU 0x474
+#define H_GUEST_GET_STATE 0x478
+#define H_GUEST_SET_STATE 0x47C
+#define H_GUEST_RUN_VCPU 0x480
+#define H_GUEST_COPY_MEMORY 0x484
+#define H_GUEST_DELETE 0x488
/* Flags for H_SVM_PAGE_IN */
#define H_PAGE_IN_SHARED 0x1
@@ -467,6 +488,15 @@
#define H_RPTI_PAGE_1G 0x08
#define H_RPTI_PAGE_ALL (-1UL)
+/* Flags for H_GUEST_{S,G}ET_STATE */
+#define H_GUEST_FLAGS_WIDE (1UL<<(63-0))
+
+/* Flag values used for H_GUEST_{S,G}ET_CAPABILITIES */
+#define H_GUEST_CAP_COPY_MEM (1UL<<(63-0))
+#define H_GUEST_CAP_POWER9 (1UL<<(63-1))
+#define H_GUEST_CAP_POWER10 (1UL<<(63-2))
+#define H_GUEST_CAP_BITMAP2 (1UL<<(63-63))
+
#ifndef __ASSEMBLY__
#include <linux/types.h>
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>
+#include <asm/guest-state-buffer.h>
struct kvmppc_bat {
u64 raw;
@@ -295,6 +296,7 @@ static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
#endif
+extern unsigned long nested_capabilities;
long kvmhv_nested_init(void);
void kvmhv_nested_exit(void);
void kvmhv_vm_nested_init(struct kvm *kvm);
@@ -316,6 +318,69 @@ long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);
void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+
+extern struct static_key_false __kvmhv_is_nestedv2;
+
+static inline bool kvmhv_is_nestedv2(void)
+{
+ return static_branch_unlikely(&__kvmhv_is_nestedv2);
+}
+
+static inline bool kvmhv_is_nestedv1(void)
+{
+ return !static_branch_likely(&__kvmhv_is_nestedv2);
+}
+
+#else
+
+static inline bool kvmhv_is_nestedv2(void)
+{
+ return false;
+}
+
+static inline bool kvmhv_is_nestedv1(void)
+{
+ return false;
+}
+
+#endif
+
+int __kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs);
+int __kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs);
+int __kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden);
+int __kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden);
+
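+/*
+ * The wrappers below are no-ops unless the nestedv2 API is in use; with
+ * nestedv2, writes to vcpu state are marked dirty for the next flush to the
+ * L0 hypervisor and reads lazily reload state the L0 may have changed.
+ */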
+static inline int kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu,
+ struct pt_regs *regs)
+{
+ if (kvmhv_is_nestedv2())
+ return __kvmhv_nestedv2_reload_ptregs(vcpu, regs);
+ return 0;
+}
+static inline int kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
+ struct pt_regs *regs)
+{
+ if (kvmhv_is_nestedv2())
+ return __kvmhv_nestedv2_mark_dirty_ptregs(vcpu, regs);
+ return 0;
+}
+
+static inline int kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
+{
+ if (kvmhv_is_nestedv2())
+ return __kvmhv_nestedv2_mark_dirty(vcpu, iden);
+ return 0;
+}
+
+static inline int kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
+{
+ if (kvmhv_is_nestedv2())
+ return __kvmhv_nestedv2_cached_reload(vcpu, iden);
+ return 0;
+}
+
extern int kvm_irq_bypass;
static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
@@ -335,60 +400,72 @@ static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
vcpu->arch.regs.gpr[num] = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(num));
}
static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_GPR(num)) < 0);
return vcpu->arch.regs.gpr[num];
}
static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
vcpu->arch.regs.ccr = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CR);
}
static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CR) < 0);
return vcpu->arch.regs.ccr;
}
static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.regs.xer = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_XER);
}
static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_XER) < 0);
return vcpu->arch.regs.xer;
}
static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.regs.ctr = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CTR);
}
static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CTR) < 0);
return vcpu->arch.regs.ctr;
}
static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.regs.link = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LR);
}
static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_LR) < 0);
return vcpu->arch.regs.link;
}
static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.regs.nip = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_NIA);
}
static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_NIA) < 0);
return vcpu->arch.regs.nip;
}
@@ -405,27 +482,32 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0);
return vcpu->arch.fp.fpr[i][TS_FPROFFSET];
}
static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val)
{
vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(i));
}
static inline u64 kvmppc_get_fpscr(struct kvm_vcpu *vcpu)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_FPSCR) < 0);
return vcpu->arch.fp.fpscr;
}
static inline void kvmppc_set_fpscr(struct kvm_vcpu *vcpu, u64 val)
{
vcpu->arch.fp.fpscr = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_FPSCR);
}
static inline u64 kvmppc_get_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0);
return vcpu->arch.fp.fpr[i][j];
}
@@ -433,11 +515,13 @@ static inline void kvmppc_set_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j,
u64 val)
{
vcpu->arch.fp.fpr[i][j] = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(i));
}
#ifdef CONFIG_ALTIVEC
static inline void kvmppc_get_vsx_vr(struct kvm_vcpu *vcpu, int i, vector128 *v)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(32 + i)) < 0);
*v = vcpu->arch.vr.vr[i];
}
@@ -445,75 +529,86 @@ static inline void kvmppc_set_vsx_vr(struct kvm_vcpu *vcpu, int i,
vector128 *val)
{
vcpu->arch.vr.vr[i] = *val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(32 + i));
}
static inline u32 kvmppc_get_vscr(struct kvm_vcpu *vcpu)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSCR) < 0);
return vcpu->arch.vr.vscr.u[3];
}
static inline void kvmppc_set_vscr(struct kvm_vcpu *vcpu, u32 val)
{
vcpu->arch.vr.vscr.u[3] = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSCR);
}
#endif
-#define KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size) \
+#define KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size, iden) \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
{ \
\
vcpu->arch.reg = val; \
+ kvmhv_nestedv2_mark_dirty(vcpu, iden); \
}
-#define KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size) \
+#define KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size, iden) \
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
{ \
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0); \
return vcpu->arch.reg; \
}
-#define KVMPPC_BOOK3S_VCPU_ACCESSOR(reg, size) \
- KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size) \
- KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size) \
+#define KVMPPC_BOOK3S_VCPU_ACCESSOR(reg, size, iden) \
+ KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size, iden) \
+ KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size, iden) \
-KVMPPC_BOOK3S_VCPU_ACCESSOR(pid, 32)
-KVMPPC_BOOK3S_VCPU_ACCESSOR(tar, 64)
-KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbhr, 64)
-KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbrr, 64)
-KVMPPC_BOOK3S_VCPU_ACCESSOR(bescr, 64)
-KVMPPC_BOOK3S_VCPU_ACCESSOR(ic, 64)
-KVMPPC_BOOK3S_VCPU_ACCESSOR(vrsave, 64)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(pid, 32, KVMPPC_GSID_PIDR)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(tar, 64, KVMPPC_GSID_TAR)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbhr, 64, KVMPPC_GSID_EBBHR)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbrr, 64, KVMPPC_GSID_EBBRR)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(bescr, 64, KVMPPC_GSID_BESCR)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(ic, 64, KVMPPC_GSID_IC)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(vrsave, 64, KVMPPC_GSID_VRSAVE)
-#define KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size) \
+#define KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size, iden) \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
{ \
vcpu->arch.vcore->reg = val; \
+ kvmhv_nestedv2_mark_dirty(vcpu, iden); \
}
-#define KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size) \
+#define KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size, iden) \
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
{ \
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0); \
return vcpu->arch.vcore->reg; \
}
-#define KVMPPC_BOOK3S_VCORE_ACCESSOR(reg, size) \
- KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size) \
- KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size) \
+#define KVMPPC_BOOK3S_VCORE_ACCESSOR(reg, size, iden) \
+ KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size, iden) \
+ KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size, iden) \
-KVMPPC_BOOK3S_VCORE_ACCESSOR(vtb, 64)
-KVMPPC_BOOK3S_VCORE_ACCESSOR(tb_offset, 64)
-KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(arch_compat, 32)
-KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(lpcr, 64)
+KVMPPC_BOOK3S_VCORE_ACCESSOR(vtb, 64, KVMPPC_GSID_VTB)
+KVMPPC_BOOK3S_VCORE_ACCESSOR(tb_offset, 64, KVMPPC_GSID_TB_OFFSET)
+KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(arch_compat, 32, KVMPPC_GSID_LOGICAL_PVR)
+KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(lpcr, 64, KVMPPC_GSID_LPCR)
static inline u64 kvmppc_get_dec_expires(struct kvm_vcpu *vcpu)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_TB_OFFSET) < 0);
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB) < 0);
return vcpu->arch.dec_expires;
}
static inline void kvmppc_set_dec_expires(struct kvm_vcpu *vcpu, u64 val)
{
vcpu->arch.dec_expires = val;
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_TB_OFFSET) < 0);
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB);
}
/* Expiry time of vcpu DEC relative to host TB */
@@ -677,6 +677,12 @@ static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
extern pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
unsigned long ea, unsigned *hshift);
+int kvmhv_nestedv2_vcpu_create(struct kvm_vcpu *vcpu, struct kvmhv_nestedv2_io *io);
+void kvmhv_nestedv2_vcpu_free(struct kvm_vcpu *vcpu, struct kvmhv_nestedv2_io *io);
+int kvmhv_nestedv2_flush_vcpu(struct kvm_vcpu *vcpu, u64 time_limit);
+int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1);
+int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu);
+
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#endif /* __ASM_KVM_BOOK3S_64_H__ */
@@ -25,6 +25,7 @@
#include <asm/cacheflush.h>
#include <asm/hvcall.h>
#include <asm/mce.h>
+#include <asm/guest-state-buffer.h>
#define __KVM_HAVE_ARCH_VCPU_DEBUGFS
@@ -509,6 +510,23 @@ union xive_tma_w01 {
__be64 w01;
};
+/* Nestedv2 H_GUEST_RUN_VCPU configuration */
+struct kvmhv_nestedv2_config {
+ struct kvmppc_gs_buff_info vcpu_run_output_cfg;
+ struct kvmppc_gs_buff_info vcpu_run_input_cfg;
+ u64 vcpu_run_output_size;
+};
+
+/* Nestedv2 L1<->L0 communication state */
+struct kvmhv_nestedv2_io {
+ struct kvmhv_nestedv2_config cfg;
+ struct kvmppc_gs_buff *vcpu_run_output;
+ struct kvmppc_gs_buff *vcpu_run_input;
+ struct kvmppc_gs_msg *vcpu_message;
+ struct kvmppc_gs_msg *vcore_message;
+ struct kvmppc_gs_bitmap valids;
+};
+
struct kvm_vcpu_arch {
ulong host_stack;
u32 host_pid;
@@ -829,6 +847,8 @@ struct kvm_vcpu_arch {
u64 nested_hfscr; /* HFSCR that the L1 requested for the nested guest */
u32 nested_vcpu_id;
gpa_t nested_io_gpr;
+ /* For nested APIv2 guests */
+ struct kvmhv_nestedv2_io nestedv2_io;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
@@ -615,6 +615,42 @@ static inline bool kvmhv_on_pseries(void)
{
return false;
}
+
+#endif
+
+#ifndef CONFIG_PPC_BOOK3S
+
+static inline bool kvmhv_is_nestedv2(void)
+{
+ return false;
+}
+
+static inline bool kvmhv_is_nestedv1(void)
+{
+ return false;
+}
+
+static inline int kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu,
+ struct pt_regs *regs)
+{
+ return 0;
+}
+static inline int kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
+ struct pt_regs *regs)
+{
+ return 0;
+}
+
+static inline int kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
+{
+ return 0;
+}
+
+static inline int kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
+{
+ return 0;
+}
+
#endif
#ifdef CONFIG_KVM_XICS
@@ -939,27 +975,32 @@ static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val) \
mtspr(bookehv_spr, val); \
} \
-#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(reg, size) \
+#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(reg, size, iden) \
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
{ \
+ if (iden) \
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0); \
if (kvmppc_shared_big_endian(vcpu)) \
return be##size##_to_cpu(vcpu->arch.shared->reg); \
else \
return le##size##_to_cpu(vcpu->arch.shared->reg); \
} \
-#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR_SET(reg, size) \
+#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR_SET(reg, size, iden) \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
{ \
if (kvmppc_shared_big_endian(vcpu)) \
vcpu->arch.shared->reg = cpu_to_be##size(val); \
else \
vcpu->arch.shared->reg = cpu_to_le##size(val); \
+ \
+ if (iden) \
+ kvmhv_nestedv2_mark_dirty(vcpu, iden); \
} \
-#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR(reg, size) \
- KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(reg, size) \
- KVMPPC_VCPU_SHARED_REGS_ACCESSOR_SET(reg, size) \
+#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR(reg, size, iden) \
+ KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(reg, size, iden) \
+ KVMPPC_VCPU_SHARED_REGS_ACCESSOR_SET(reg, size, iden) \
#define KVMPPC_BOOKE_HV_SPRNG_ACCESSOR(reg, bookehv_spr) \
KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_GET(reg, bookehv_spr) \
@@ -967,39 +1008,40 @@ static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
#ifdef CONFIG_KVM_BOOKE_HV
-#define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr) \
+#define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr, iden) \
KVMPPC_BOOKE_HV_SPRNG_ACCESSOR(reg, bookehv_spr) \
#else
-#define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr) \
- KVMPPC_VCPU_SHARED_REGS_ACCESSOR(reg, size) \
+#define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr, iden) \
+ KVMPPC_VCPU_SHARED_REGS_ACCESSOR(reg, size, iden) \
#endif
-KVMPPC_VCPU_SHARED_REGS_ACCESSOR(critical, 64)
-KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg0, 64, SPRN_GSPRG0)
-KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg1, 64, SPRN_GSPRG1)
-KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg2, 64, SPRN_GSPRG2)
-KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg3, 64, SPRN_GSPRG3)
-KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(srr0, 64, SPRN_GSRR0)
-KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(srr1, 64, SPRN_GSRR1)
-KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(dar, 64, SPRN_GDEAR)
-KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(esr, 64, SPRN_GESR)
-KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(msr, 64)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(critical, 64, 0)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg0, 64, SPRN_GSPRG0, KVMPPC_GSID_SPRG0)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg1, 64, SPRN_GSPRG1, KVMPPC_GSID_SPRG1)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg2, 64, SPRN_GSPRG2, KVMPPC_GSID_SPRG2)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg3, 64, SPRN_GSPRG3, KVMPPC_GSID_SPRG3)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(srr0, 64, SPRN_GSRR0, KVMPPC_GSID_SRR0)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(srr1, 64, SPRN_GSRR1, KVMPPC_GSID_SRR1)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(dar, 64, SPRN_GDEAR, KVMPPC_GSID_DAR)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(esr, 64, SPRN_GESR, 0)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(msr, 64, KVMPPC_GSID_MSR)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
if (kvmppc_shared_big_endian(vcpu))
vcpu->arch.shared->msr = cpu_to_be64(val);
else
vcpu->arch.shared->msr = cpu_to_le64(val);
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_MSR);
}
-KVMPPC_VCPU_SHARED_REGS_ACCESSOR(dsisr, 32)
-KVMPPC_VCPU_SHARED_REGS_ACCESSOR(int_pending, 32)
-KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg4, 64)
-KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg5, 64)
-KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg6, 64)
-KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg7, 64)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(dsisr, 32, KVMPPC_GSID_DSISR)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(int_pending, 32, 0)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg4, 64, 0)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg5, 64, 0)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg6, 64, 0)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg7, 64, 0)
static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
@@ -6,6 +6,7 @@
#include <linux/string.h>
#include <linux/irqflags.h>
+#include <linux/delay.h>
#include <asm/hvcall.h>
#include <asm/paca.h>
@@ -343,6 +344,193 @@ static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
return rc;
}
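+
+/*
+ * The H_GUEST_* wrappers below retry on H_BUSY and, for long-busy return
+ * codes, wait for the interval hinted by the hypervisor before retrying.
+ */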
+static inline long plpar_guest_create(unsigned long flags, unsigned long *guest_id)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ unsigned long token;
+ long rc;
+
+ token = -1UL;
+ do {
+ rc = plpar_hcall(H_GUEST_CREATE, retbuf, flags, token);
+ if (rc == H_SUCCESS)
+ *guest_id = retbuf[0];
+
+ if (rc == H_BUSY) {
+ token = retbuf[0];
+ cond_resched();
+ }
+
+ if (H_IS_LONG_BUSY(rc)) {
+ token = retbuf[0];
+ msleep(get_longbusy_msecs(rc));
+ rc = H_BUSY;
+ }
+
+ } while (rc == H_BUSY);
+
+ return rc;
+}
+
+static inline long plpar_guest_create_vcpu(unsigned long flags,
+ unsigned long guest_id,
+ unsigned long vcpu_id)
+{
+ long rc;
+
+ do {
+ rc = plpar_hcall_norets(H_GUEST_CREATE_VCPU, 0, guest_id, vcpu_id);
+
+ if (rc == H_BUSY)
+ cond_resched();
+
+ if (H_IS_LONG_BUSY(rc)) {
+ msleep(get_longbusy_msecs(rc));
+ rc = H_BUSY;
+ }
+
+ } while (rc == H_BUSY);
+
+ return rc;
+}
+
+static inline long plpar_guest_set_state(unsigned long flags,
+ unsigned long guest_id,
+ unsigned long vcpu_id,
+ unsigned long data_buffer,
+ unsigned long data_size,
+ unsigned long *failed_index)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ while (true) {
+ rc = plpar_hcall(H_GUEST_SET_STATE, retbuf, flags, guest_id,
+ vcpu_id, data_buffer, data_size);
+
+ if (rc == H_BUSY) {
+ cpu_relax();
+ continue;
+ }
+
+ if (H_IS_LONG_BUSY(rc)) {
+ mdelay(get_longbusy_msecs(rc));
+ continue;
+ }
+
+ if (rc == H_INVALID_ELEMENT_ID)
+ *failed_index = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_SIZE)
+ *failed_index = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_VALUE)
+ *failed_index = retbuf[0];
+
+ break;
+ }
+
+ return rc;
+}
+
+static inline long plpar_guest_get_state(unsigned long flags,
+ unsigned long guest_id,
+ unsigned long vcpu_id,
+ unsigned long data_buffer,
+ unsigned long data_size,
+ unsigned long *failed_index)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ while (true) {
+ rc = plpar_hcall(H_GUEST_GET_STATE, retbuf, flags, guest_id,
+ vcpu_id, data_buffer, data_size);
+
+ if (rc == H_BUSY) {
+ cpu_relax();
+ continue;
+ }
+
+ if (H_IS_LONG_BUSY(rc)) {
+ mdelay(get_longbusy_msecs(rc));
+ continue;
+ }
+
+ if (rc == H_INVALID_ELEMENT_ID)
+ *failed_index = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_SIZE)
+ *failed_index = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_VALUE)
+ *failed_index = retbuf[0];
+
+ break;
+ }
+
+ return rc;
+}
+
+static inline long plpar_guest_run_vcpu(unsigned long flags, unsigned long guest_id,
+ unsigned long vcpu_id, int *trap,
+ unsigned long *failed_index)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ rc = plpar_hcall(H_GUEST_RUN_VCPU, retbuf, flags, guest_id, vcpu_id);
+ if (rc == H_SUCCESS)
+ *trap = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_ID)
+ *failed_index = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_SIZE)
+ *failed_index = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_VALUE)
+ *failed_index = retbuf[0];
+
+ return rc;
+}
+
+static inline long plpar_guest_delete(unsigned long flags, u64 guest_id)
+{
+ long rc;
+
+ do {
+ rc = plpar_hcall_norets(H_GUEST_DELETE, flags, guest_id);
+ if (rc == H_BUSY)
+ cond_resched();
+
+ if (H_IS_LONG_BUSY(rc)) {
+ msleep(get_longbusy_msecs(rc));
+ rc = H_BUSY;
+ }
+
+ } while (rc == H_BUSY);
+
+ return rc;
+}
+
+static inline long plpar_guest_set_capabilities(unsigned long flags,
+ unsigned long capabilities)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ rc = plpar_hcall(H_GUEST_SET_CAPABILITIES, retbuf, flags, capabilities);
+
+ return rc;
+}
+
+static inline long plpar_guest_get_capabilities(unsigned long flags,
+ unsigned long *capabilities)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ rc = plpar_hcall(H_GUEST_GET_CAPABILITIES, retbuf, flags);
+ if (rc == H_SUCCESS)
+ *capabilities = retbuf[0];
+
+ return rc;
+}
+
/*
* Wrapper to H_RPT_INVALIDATE hcall that handles return values appropriately
*
@@ -407,6 +595,62 @@ static inline long pseries_rpt_invalidate(u64 pid, u64 target, u64 type,
return 0;
}
+static inline long plpar_guest_create_vcpu(unsigned long flags,
+ unsigned long guest_id,
+ unsigned long vcpu_id)
+{
+ return 0;
+}
+
+static inline long plpar_guest_get_state(unsigned long flags,
+ unsigned long guest_id,
+ unsigned long vcpu_id,
+ unsigned long data_buffer,
+ unsigned long data_size,
+ unsigned long *failed_index)
+{
+ return 0;
+}
+
+static inline long plpar_guest_set_state(unsigned long flags,
+ unsigned long guest_id,
+ unsigned long vcpu_id,
+ unsigned long data_buffer,
+ unsigned long data_size,
+ unsigned long *failed_index)
+{
+ return 0;
+}
+
+static inline long plpar_guest_run_vcpu(unsigned long flags, unsigned long guest_id,
+ unsigned long vcpu_id, int *trap,
+ unsigned long *failed_index)
+{
+ return 0;
+}
+
+static inline long plpar_guest_create(unsigned long flags, unsigned long *guest_id)
+{
+ return 0;
+}
+
+static inline long plpar_guest_delete(unsigned long flags, u64 guest_id)
+{
+ return 0;
+}
+
+static inline long plpar_guest_get_capabilities(unsigned long flags,
+ unsigned long *capabilities)
+{
+ return 0;
+}
+
+static inline long plpar_guest_set_capabilities(unsigned long flags,
+ unsigned long capabilities)
+{
+ return 0;
+}
+
#endif /* CONFIG_PPC_PSERIES */
#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
@@ -87,6 +87,7 @@ kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
book3s_hv_ras.o \
book3s_hv_builtin.o \
book3s_hv_p9_perf.o \
+ book3s_hv_nestedv2.o \
guest-state-buffer.o \
$(kvm-book3s_64-builtin-tm-objs-y) \
$(kvm-book3s_64-builtin-xics-objs-y)
@@ -393,7 +393,7 @@ static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
{
- unsigned long host_pcr_bit = 0, guest_pcr_bit = 0;
+ unsigned long host_pcr_bit = 0, guest_pcr_bit = 0, cap = 0;
struct kvmppc_vcore *vc = vcpu->arch.vcore;
/* We can (emulate) our own architecture version and anything older */
@@ -424,9 +424,11 @@ static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
break;
case PVR_ARCH_300:
guest_pcr_bit = PCR_ARCH_300;
+ cap = H_GUEST_CAP_POWER9;
break;
case PVR_ARCH_31:
guest_pcr_bit = PCR_ARCH_31;
+ cap = H_GUEST_CAP_POWER10;
break;
default:
return -EINVAL;
@@ -437,8 +439,14 @@ static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
if (guest_pcr_bit > host_pcr_bit)
return -EINVAL;
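+
+ /*
+ * A nestedv2 guest also needs the L0 hypervisor to advertise support
+ * for the requested compatibility mode.
+ */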
+ if (kvmhv_on_pseries() && kvmhv_is_nestedv2()) {
+ if (!(cap & nested_capabilities))
+ return -EINVAL;
+ }
+
spin_lock(&vc->lock);
vc->arch_compat = arch_compat;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LOGICAL_PVR);
/*
* Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit
* Also set all reserved PCR bits
@@ -2187,6 +2195,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
}
vc->lpcr = new_lpcr;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LPCR);
spin_unlock(&vc->lock);
}
@@ -2927,8 +2936,14 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
vcpu->arch.shared_big_endian = false;
#endif
#endif
- kvmppc_set_mmcr_hv(vcpu, 0, MMCR0_FC);
+ if (kvmhv_is_nestedv2()) {
+ err = kvmhv_nestedv2_vcpu_create(vcpu, &vcpu->arch.nestedv2_io);
+ if (err < 0)
+ return err;
+ }
+
+ kvmppc_set_mmcr_hv(vcpu, 0, MMCR0_FC);
if (cpu_has_feature(CPU_FTR_ARCH_31)) {
kvmppc_set_mmcr_hv(vcpu, 0, kvmppc_get_mmcr_hv(vcpu, 0) | MMCR0_PMCCEXT);
kvmppc_set_mmcra_hv(vcpu, MMCRA_BHRB_DISABLE);
@@ -3084,6 +3099,8 @@ static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
spin_unlock(&vcpu->arch.vpa_update_lock);
+ if (kvmhv_is_nestedv2())
+ kvmhv_nestedv2_vcpu_free(vcpu, &vcpu->arch.nestedv2_io);
}
static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
@@ -4048,6 +4065,55 @@ static void vcpu_vpa_increment_dispatch(struct kvm_vcpu *vcpu)
}
}
+static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit,
+ unsigned long lpcr, u64 *tb)
+{
+ struct kvmhv_nestedv2_io *io;
+ unsigned long msr, i;
+ int trap;
+ long rc;
+
+ io = &vcpu->arch.nestedv2_io;
+
+ msr = mfmsr();
+ kvmppc_msr_hard_disable_set_facilities(vcpu, msr);
+ if (lazy_irq_pending())
+ return 0;
+
+ rc = kvmhv_nestedv2_flush_vcpu(vcpu, time_limit);
+ if (rc < 0)
+ return -EINVAL;
+
+ accumulate_time(vcpu, &vcpu->arch.in_guest);
+ rc = plpar_guest_run_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id,
+ &trap, &i);
+
+ if (rc != H_SUCCESS) {
+ pr_err("KVM Guest Run VCPU hcall failed\n");
+ if (rc == H_INVALID_ELEMENT_ID)
+ pr_err("KVM: Guest Run VCPU invalid element id at %ld\n", i);
+ else if (rc == H_INVALID_ELEMENT_SIZE)
+ pr_err("KVM: Guest Run VCPU invalid element size at %ld\n", i);
+ else if (rc == H_INVALID_ELEMENT_VALUE)
+ pr_err("KVM: Guest Run VCPU invalid element value at %ld\n", i);
+ return -EINVAL;
+ }
+ accumulate_time(vcpu, &vcpu->arch.guest_exit);
+
+ *tb = mftb();
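+
+ /*
+ * The L0 may have changed any guest state while the vcpu ran, so drop
+ * the dirty and valid tracking and reparse the run vcpu output buffer.
+ */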
+ kvmppc_gsm_reset(io->vcpu_message);
+ kvmppc_gsm_reset(io->vcore_message);
+ kvmppc_gsbm_zero(&io->valids);
+
+ rc = kvmhv_nestedv2_parse_output(vcpu);
+ if (rc < 0)
+ return -EINVAL;
+
+ timer_rearm_host_dec(*tb);
+
+ return trap;
+}
+
/* call our hypervisor to load up HV regs and go */
static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb)
{
@@ -4165,7 +4231,10 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
vcpu_vpa_increment_dispatch(vcpu);
if (kvmhv_on_pseries()) {
- trap = kvmhv_vcpu_entry_p9_nested(vcpu, time_limit, lpcr, tb);
+ if (kvmhv_is_nestedv1())
+ trap = kvmhv_vcpu_entry_p9_nested(vcpu, time_limit, lpcr, tb);
+ else
+ trap = kvmhv_vcpu_entry_nestedv2(vcpu, time_limit, lpcr, tb);
/* H_CEDE has to be handled now, not later */
if (trap == BOOK3S_INTERRUPT_SYSCALL && !nested &&
@@ -5145,6 +5214,14 @@ void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
if (++cores_done >= kvm->arch.online_vcores)
break;
}
+
+ if (kvmhv_is_nestedv2()) {
+ struct kvm_vcpu *vcpu;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LPCR);
+ }
+ }
}
void kvmppc_setup_partition_table(struct kvm *kvm)
@@ -5411,15 +5488,43 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
/* Allocate the guest's logical partition ID */
- lpid = kvmppc_alloc_lpid();
- if ((long)lpid < 0)
- return -ENOMEM;
- kvm->arch.lpid = lpid;
+ if (!kvmhv_is_nestedv2()) {
+ lpid = kvmppc_alloc_lpid();
+ if ((long)lpid < 0)
+ return -ENOMEM;
+ kvm->arch.lpid = lpid;
+ }
kvmppc_alloc_host_rm_ops();
kvmhv_vm_nested_init(kvm);
+ if (kvmhv_is_nestedv2()) {
+ long rc;
+ unsigned long guest_id;
+
+ rc = plpar_guest_create(0, &guest_id);
+
+ if (rc != H_SUCCESS)
+ pr_err("KVM: Create Guest hcall failed, rc=%ld\n", rc);
+
+ switch (rc) {
+ case H_PARAMETER:
+ case H_FUNCTION:
+ case H_STATE:
+ return -EINVAL;
+ case H_NOT_ENOUGH_RESOURCES:
+ case H_ABORTED:
+ return -ENOMEM;
+ case H_AUTHORITY:
+ return -EPERM;
+ case H_NOT_AVAILABLE:
+ return -EBUSY;
+ }
+ kvm->arch.lpid = guest_id;
+ }
+
/*
* Since we don't flush the TLB when tearing down a VM,
* and this lpid might have previously been used,
@@ -5489,7 +5594,10 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
lpcr |= LPCR_HAIL;
ret = kvmppc_init_vm_radix(kvm);
if (ret) {
- kvmppc_free_lpid(kvm->arch.lpid);
+ if (kvmhv_is_nestedv2())
+ plpar_guest_delete(0, kvm->arch.lpid);
+ else
+ kvmppc_free_lpid(kvm->arch.lpid);
return ret;
}
kvmppc_setup_partition_table(kvm);
@@ -5579,10 +5687,14 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
kvm->arch.process_table = 0;
if (kvm->arch.secure_guest)
uv_svm_terminate(kvm->arch.lpid);
- kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0);
+ if (!kvmhv_is_nestedv2())
+ kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0);
}
- kvmppc_free_lpid(kvm->arch.lpid);
+ if (kvmhv_is_nestedv2())
+ plpar_guest_delete(0, kvm->arch.lpid);
+ else
+ kvmppc_free_lpid(kvm->arch.lpid);
kvmppc_free_pimap(kvm);
}
@@ -5994,6 +6106,8 @@ static int kvmhv_enable_nested(struct kvm *kvm)
return -ENODEV;
if (!radix_enabled())
return -ENODEV;
+ if (kvmhv_is_nestedv2())
+ return -ENODEV;
/* kvm == NULL means the caller is testing if the capability exists */
if (kvm)
@@ -3,6 +3,8 @@
/*
* Privileged (non-hypervisor) host registers to save.
*/
+#include "asm/guest-state-buffer.h"
+
struct p9_host_os_sprs {
unsigned long iamr;
unsigned long amr;
@@ -54,67 +56,73 @@ void accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next);
static inline void __kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 val)
{
vcpu->arch.shregs.msr = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_MSR);
}
static inline u64 __kvmppc_get_msr_hv(struct kvm_vcpu *vcpu)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_MSR) < 0);
return vcpu->arch.shregs.msr;
}
-#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size) \
+#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size, iden) \
static inline void kvmppc_set_##reg ##_hv(struct kvm_vcpu *vcpu, u##size val) \
{ \
vcpu->arch.reg = val; \
+ kvmhv_nestedv2_mark_dirty(vcpu, iden); \
}
-#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size) \
+#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size, iden) \
static inline u##size kvmppc_get_##reg ##_hv(struct kvm_vcpu *vcpu) \
{ \
+ kvmhv_nestedv2_cached_reload(vcpu, iden); \
return vcpu->arch.reg; \
}
-#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(reg, size) \
- KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size) \
- KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size) \
+#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(reg, size, iden) \
+ KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size, iden) \
+ KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size, iden) \
-#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size) \
+#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size, iden) \
static inline void kvmppc_set_##reg ##_hv(struct kvm_vcpu *vcpu, int i, u##size val) \
{ \
vcpu->arch.reg[i] = val; \
+ kvmhv_nestedv2_mark_dirty(vcpu, iden(i)); \
}
-#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size) \
+#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size, iden) \
static inline u##size kvmppc_get_##reg ##_hv(struct kvm_vcpu *vcpu, int i) \
{ \
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden(i)) < 0); \
return vcpu->arch.reg[i]; \
}
-#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(reg, size) \
- KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size) \
- KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size) \
-
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(mmcra, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(hfscr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(fscr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dscr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(purr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(spurr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(amr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(uamor, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(siar, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(sdar, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(iamr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr0, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr1, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx0, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx1, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ciabr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(wort, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ppr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ctrl, 64)
-
-KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(mmcr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(sier, 64)
-KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(pmc, 32)
-
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(pspb, 32)
+#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(reg, size, iden) \
+ KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size, iden) \
+ KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size, iden) \
+
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(mmcra, 64, KVMPPC_GSID_MMCRA)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(hfscr, 64, KVMPPC_GSID_HFSCR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(fscr, 64, KVMPPC_GSID_FSCR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dscr, 64, KVMPPC_GSID_DSCR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(purr, 64, KVMPPC_GSID_PURR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(spurr, 64, KVMPPC_GSID_SPURR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(amr, 64, KVMPPC_GSID_AMR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(uamor, 64, KVMPPC_GSID_UAMOR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(siar, 64, KVMPPC_GSID_SIAR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(sdar, 64, KVMPPC_GSID_SDAR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(iamr, 64, KVMPPC_GSID_IAMR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr0, 64, KVMPPC_GSID_DAWR0)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr1, 64, KVMPPC_GSID_DAWR1)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx0, 64, KVMPPC_GSID_DAWRX0)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx1, 64, KVMPPC_GSID_DAWRX1)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ciabr, 64, KVMPPC_GSID_CIABR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(wort, 64, KVMPPC_GSID_WORT)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ppr, 64, KVMPPC_GSID_PPR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ctrl, 64, KVMPPC_GSID_CTRL)
+
+KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(mmcr, 64, KVMPPC_GSID_MMCR)
+KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(sier, 64, KVMPPC_GSID_SIER)
+KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(pmc, 32, KVMPPC_GSID_PMC)
+
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(pspb, 32, KVMPPC_GSID_PSPB)
@@ -428,10 +428,12 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
return vcpu->arch.trap;
}
+unsigned long nested_capabilities;
+
long kvmhv_nested_init(void)
{
long int ptb_order;
- unsigned long ptcr;
+ unsigned long ptcr, host_capabilities;
long rc;
if (!kvmhv_on_pseries())
@@ -439,6 +441,27 @@ long kvmhv_nested_init(void)
if (!radix_enabled())
return -ENODEV;
+ rc = plpar_guest_get_capabilities(0, &host_capabilities);
+ if (rc == H_SUCCESS) {
+ unsigned long capabilities = 0;
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ capabilities |= H_GUEST_CAP_POWER10;
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ capabilities |= H_GUEST_CAP_POWER9;
+
+ nested_capabilities = capabilities & host_capabilities;
+ rc = plpar_guest_set_capabilities(0, nested_capabilities);
+ if (rc != H_SUCCESS) {
+ pr_err("kvm-hv: Could not configure parent hypervisor capabilities (rc=%ld)",
+ rc);
+ return -ENODEV;
+ }
+
+ static_branch_enable(&__kvmhv_is_nestedv2);
+ return 0;
+ }
+
/* Partition table entry is 1<<4 bytes in size, hence the 4. */
ptb_order = KVM_MAX_NESTED_GUESTS_SHIFT + 4;
/* Minimum partition table size is 1<<12 bytes */
@@ -507,10 +530,15 @@ void kvmhv_set_ptbl_entry(u64 lpid, u64 dw0, u64 dw1)
return;
}
- pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
- pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
- /* L0 will do the necessary barriers */
- kvmhv_flush_lpid(lpid);
+ if (kvmhv_is_nestedv1()) {
+ pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
+ pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
+ /* L0 will do the necessary barriers */
+ kvmhv_flush_lpid(lpid);
+ }
+
+ if (kvmhv_is_nestedv2())
+ kvmhv_nestedv2_set_ptbl_entry(lpid, dw0, dw1);
}
static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
new file mode 100644
@@ -0,0 +1,998 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Jordan Niethe, IBM Corp. <jniethe5@gmail.com>
+ *
+ * Authors:
+ * Jordan Niethe <jniethe5@gmail.com>
+ *
+ * Description: KVM functions specific to running on Book 3S
+ * processors as a NESTEDv2 guest.
+ *
+ */
+
+#include "linux/blk-mq.h"
+#include "linux/console.h"
+#include "linux/gfp_types.h"
+#include "linux/signal.h"
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <linux/pgtable.h>
+
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/hvcall.h>
+#include <asm/pgalloc.h>
+#include <asm/reg.h>
+#include <asm/plpar_wrappers.h>
+#include <asm/guest-state-buffer.h>
+#include "trace_hv.h"
+
+struct static_key_false __kvmhv_is_nestedv2 __read_mostly;
+EXPORT_SYMBOL_GPL(__kvmhv_is_nestedv2);
+
+
+static size_t
+gs_msg_ops_kvmhv_nestedv2_config_get_size(struct kvmppc_gs_msg *gsm)
+{
+ u16 ids[] = {
+ KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE,
+ KVMPPC_GSID_RUN_INPUT,
+ KVMPPC_GSID_RUN_OUTPUT,
+ };
+ size_t size = 0;
+
+ for (int i = 0; i < ARRAY_SIZE(ids); i++)
+ size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i]));
+ return size;
+}
+
+static int
+gs_msg_ops_kvmhv_nestedv2_config_fill_info(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm)
+{
+ struct kvmhv_nestedv2_config *cfg;
+ int rc;
+
+ cfg = gsm->data;
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE)) {
+ rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE,
+ cfg->vcpu_run_output_size);
+ if (rc < 0)
+ return rc;
+ }
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_INPUT)) {
+ rc = kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_INPUT,
+ cfg->vcpu_run_input_cfg);
+ if (rc < 0)
+ return rc;
+ }
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT)) {
+ rc = kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_OUTPUT,
+ cfg->vcpu_run_output_cfg);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int
+gs_msg_ops_kvmhv_nestedv2_config_refresh_info(struct kvmppc_gs_msg *gsm,
+ struct kvmppc_gs_buff *gsb)
+{
+ struct kvmhv_nestedv2_config *cfg;
+ struct kvmppc_gs_parser gsp = { 0 };
+ struct kvmppc_gs_elem *gse;
+ int rc;
+
+ cfg = gsm->data;
+
+ rc = kvmppc_gse_parse(&gsp, gsb);
+ if (rc < 0)
+ return rc;
+
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE);
+ if (gse)
+ cfg->vcpu_run_output_size = kvmppc_gse_get_u64(gse);
+ return 0;
+}
+
+static struct kvmppc_gs_msg_ops config_msg_ops = {
+ .get_size = gs_msg_ops_kvmhv_nestedv2_config_get_size,
+ .fill_info = gs_msg_ops_kvmhv_nestedv2_config_fill_info,
+ .refresh_info = gs_msg_ops_kvmhv_nestedv2_config_refresh_info,
+};
+
+static size_t gs_msg_ops_vcpu_get_size(struct kvmppc_gs_msg *gsm)
+{
+ struct kvmppc_gs_bitmap gsbm = { 0 };
+ size_t size = 0;
+ u16 iden;
+
+ kvmppc_gsbm_fill(&gsbm);
+ kvmppc_gsbm_for_each(&gsbm, iden)
+ {
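+ /*
+ * Skip the configuration elements (buffers, tables and sizes); they
+ * are not vcpu register state and are handled separately.
+ */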
+ switch (iden) {
+ case KVMPPC_GSID_HOST_STATE_SIZE:
+ case KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE:
+ case KVMPPC_GSID_PARTITION_TABLE:
+ case KVMPPC_GSID_PROCESS_TABLE:
+ case KVMPPC_GSID_RUN_INPUT:
+ case KVMPPC_GSID_RUN_OUTPUT:
+ break;
+ default:
+ size += kvmppc_gse_total_size(kvmppc_gsid_size(iden));
+ }
+ }
+ return size;
+}
+
+static int gs_msg_ops_vcpu_fill_info(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm)
+{
+ struct kvm_vcpu *vcpu;
+ vector128 v;
+ int rc, i;
+ u16 iden;
+
+ vcpu = gsm->data;
+
+ kvmppc_gsm_for_each(gsm, iden)
+ {
+ rc = 0;
+
+ if ((gsm->flags & KVMPPC_GS_FLAGS_WIDE) !=
+ (kvmppc_gsid_flags(iden) & KVMPPC_GS_FLAGS_WIDE))
+ continue;
+
+ switch (iden) {
+ case KVMPPC_GSID_DSCR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dscr);
+ break;
+ case KVMPPC_GSID_MMCRA:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcra);
+ break;
+ case KVMPPC_GSID_HFSCR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hfscr);
+ break;
+ case KVMPPC_GSID_PURR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.purr);
+ break;
+ case KVMPPC_GSID_SPURR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.spurr);
+ break;
+ case KVMPPC_GSID_AMR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.amr);
+ break;
+ case KVMPPC_GSID_UAMOR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.uamor);
+ break;
+ case KVMPPC_GSID_SIAR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.siar);
+ break;
+ case KVMPPC_GSID_SDAR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sdar);
+ break;
+ case KVMPPC_GSID_IAMR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.iamr);
+ break;
+ case KVMPPC_GSID_DAWR0:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr0);
+ break;
+ case KVMPPC_GSID_DAWR1:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr1);
+ break;
+ case KVMPPC_GSID_DAWRX0:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx0);
+ break;
+ case KVMPPC_GSID_DAWRX1:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx1);
+ break;
+ case KVMPPC_GSID_CIABR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ciabr);
+ break;
+ case KVMPPC_GSID_WORT:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.wort);
+ break;
+ case KVMPPC_GSID_PPR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ppr);
+ break;
+ case KVMPPC_GSID_PSPB:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pspb);
+ break;
+ case KVMPPC_GSID_TAR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.tar);
+ break;
+ case KVMPPC_GSID_FSCR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fscr);
+ break;
+ case KVMPPC_GSID_EBBHR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbhr);
+ break;
+ case KVMPPC_GSID_EBBRR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbrr);
+ break;
+ case KVMPPC_GSID_BESCR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.bescr);
+ break;
+ case KVMPPC_GSID_IC:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ic);
+ break;
+ case KVMPPC_GSID_CTRL:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ctrl);
+ break;
+ case KVMPPC_GSID_PIDR:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pid);
+ break;
+ case KVMPPC_GSID_AMOR: {
+ u64 amor = ~0;
+
+ rc = kvmppc_gse_put_u64(gsb, iden, amor);
+ break;
+ }
+ case KVMPPC_GSID_VRSAVE:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.vrsave);
+ break;
+ case KVMPPC_GSID_MMCR(0)... KVMPPC_GSID_MMCR(3):
+ i = iden - KVMPPC_GSID_MMCR(0);
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcr[i]);
+ break;
+ case KVMPPC_GSID_SIER(0)... KVMPPC_GSID_SIER(2):
+ i = iden - KVMPPC_GSID_SIER(0);
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sier[i]);
+ break;
+ case KVMPPC_GSID_PMC(0)... KVMPPC_GSID_PMC(5):
+ i = iden - KVMPPC_GSID_PMC(0);
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pmc[i]);
+ break;
+ case KVMPPC_GSID_GPR(0)... KVMPPC_GSID_GPR(31):
+ i = iden - KVMPPC_GSID_GPR(0);
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.regs.gpr[i]);
+ break;
+ case KVMPPC_GSID_CR:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.regs.ccr);
+ break;
+ case KVMPPC_GSID_XER:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.xer);
+ break;
+ case KVMPPC_GSID_CTR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.ctr);
+ break;
+ case KVMPPC_GSID_LR:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.regs.link);
+ break;
+ case KVMPPC_GSID_NIA:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.nip);
+ break;
+ case KVMPPC_GSID_SRR0:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.srr0);
+ break;
+ case KVMPPC_GSID_SRR1:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.srr1);
+ break;
+ case KVMPPC_GSID_SPRG0:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.sprg0);
+ break;
+ case KVMPPC_GSID_SPRG1:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.sprg1);
+ break;
+ case KVMPPC_GSID_SPRG2:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.sprg2);
+ break;
+ case KVMPPC_GSID_SPRG3:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.sprg3);
+ break;
+ case KVMPPC_GSID_DAR:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.dar);
+ break;
+ case KVMPPC_GSID_DSISR:
+ rc = kvmppc_gse_put_u32(gsb, iden,
+ vcpu->arch.shregs.dsisr);
+ break;
+ case KVMPPC_GSID_MSR:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.msr);
+ break;
+ case KVMPPC_GSID_VTB:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.vcore->vtb);
+ break;
+ case KVMPPC_GSID_LPCR:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.vcore->lpcr);
+ break;
+ case KVMPPC_GSID_TB_OFFSET:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.vcore->tb_offset);
+ break;
+ case KVMPPC_GSID_FPSCR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fp.fpscr);
+ break;
+ case KVMPPC_GSID_VSRS(0)... KVMPPC_GSID_VSRS(31):
+ i = iden - KVMPPC_GSID_VSRS(0);
+ memcpy(&v, &vcpu->arch.fp.fpr[i],
+ sizeof(vcpu->arch.fp.fpr[i]));
+ rc = kvmppc_gse_put_vector128(gsb, iden, &v);
+ break;
+#ifdef CONFIG_VSX
+ case KVMPPC_GSID_VSCR:
+ rc = kvmppc_gse_put_u32(gsb, iden,
+ vcpu->arch.vr.vscr.u[3]);
+ break;
+ case KVMPPC_GSID_VSRS(32)... KVMPPC_GSID_VSRS(63):
+ i = iden - KVMPPC_GSID_VSRS(32);
+ rc = kvmppc_gse_put_vector128(gsb, iden,
+ &vcpu->arch.vr.vr[i]);
+ break;
+#endif
+ case KVMPPC_GSID_DEC_EXPIRY_TB: {
+ u64 dw;
+
+ dw = vcpu->arch.dec_expires -
+ vcpu->arch.vcore->tb_offset;
+ rc = kvmppc_gse_put_u64(gsb, iden, dw);
+ break;
+ }
+ case KVMPPC_GSID_LOGICAL_PVR:
+ rc = kvmppc_gse_put_u32(gsb, iden,
+ vcpu->arch.vcore->arch_compat);
+ break;
+ }
+
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int gs_msg_ops_vcpu_refresh_info(struct kvmppc_gs_msg *gsm,
+ struct kvmppc_gs_buff *gsb)
+{
+ struct kvmppc_gs_parser gsp = { 0 };
+ struct kvmhv_nestedv2_io *io;
+ struct kvmppc_gs_bitmap *valids;
+ struct kvm_vcpu *vcpu;
+ struct kvmppc_gs_elem *gse;
+ vector128 v;
+ int rc, i;
+ u16 iden;
+
+ vcpu = gsm->data;
+
+ rc = kvmppc_gse_parse(&gsp, gsb);
+ if (rc < 0)
+ return rc;
+
+ io = &vcpu->arch.nestedv2_io;
+ valids = &io->valids;
+
+ kvmppc_gsp_for_each(&gsp, iden, gse)
+ {
+ switch (iden) {
+ case KVMPPC_GSID_DSCR:
+ vcpu->arch.dscr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_MMCRA:
+ vcpu->arch.mmcra = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_HFSCR:
+ vcpu->arch.hfscr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_PURR:
+ vcpu->arch.purr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SPURR:
+ vcpu->arch.spurr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_AMR:
+ vcpu->arch.amr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_UAMOR:
+ vcpu->arch.uamor = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SIAR:
+ vcpu->arch.siar = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SDAR:
+ vcpu->arch.sdar = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_IAMR:
+ vcpu->arch.iamr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_DAWR0:
+ vcpu->arch.dawr0 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_DAWR1:
+ vcpu->arch.dawr1 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_DAWRX0:
+ vcpu->arch.dawrx0 = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_DAWRX1:
+ vcpu->arch.dawrx1 = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_CIABR:
+ vcpu->arch.ciabr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_WORT:
+ vcpu->arch.wort = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_PPR:
+ vcpu->arch.ppr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_PSPB:
+ vcpu->arch.pspb = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_TAR:
+ vcpu->arch.tar = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_FSCR:
+ vcpu->arch.fscr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_EBBHR:
+ vcpu->arch.ebbhr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_EBBRR:
+ vcpu->arch.ebbrr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_BESCR:
+ vcpu->arch.bescr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_IC:
+ vcpu->arch.ic = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_CTRL:
+ vcpu->arch.ctrl = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_PIDR:
+ vcpu->arch.pid = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_AMOR:
+ /* AMOR is always sent as ~0; there is no state to restore */
+ break;
+ case KVMPPC_GSID_VRSAVE:
+ vcpu->arch.vrsave = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_MMCR(0)... KVMPPC_GSID_MMCR(3):
+ i = iden - KVMPPC_GSID_MMCR(0);
+ vcpu->arch.mmcr[i] = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SIER(0)... KVMPPC_GSID_SIER(2):
+ i = iden - KVMPPC_GSID_SIER(0);
+ vcpu->arch.sier[i] = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_PMC(0)... KVMPPC_GSID_PMC(5):
+ i = iden - KVMPPC_GSID_PMC(0);
+ vcpu->arch.pmc[i] = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_GPR(0)... KVMPPC_GSID_GPR(31):
+ i = iden - KVMPPC_GSID_GPR(0);
+ vcpu->arch.regs.gpr[i] = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_CR:
+ vcpu->arch.regs.ccr = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_XER:
+ vcpu->arch.regs.xer = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_CTR:
+ vcpu->arch.regs.ctr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_LR:
+ vcpu->arch.regs.link = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_NIA:
+ vcpu->arch.regs.nip = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SRR0:
+ vcpu->arch.shregs.srr0 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SRR1:
+ vcpu->arch.shregs.srr1 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SPRG0:
+ vcpu->arch.shregs.sprg0 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SPRG1:
+ vcpu->arch.shregs.sprg1 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SPRG2:
+ vcpu->arch.shregs.sprg2 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SPRG3:
+ vcpu->arch.shregs.sprg3 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_DAR:
+ vcpu->arch.shregs.dar = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_DSISR:
+ vcpu->arch.shregs.dsisr = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_MSR:
+ vcpu->arch.shregs.msr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_VTB:
+ vcpu->arch.vcore->vtb = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_LPCR:
+ vcpu->arch.vcore->lpcr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_TB_OFFSET:
+ vcpu->arch.vcore->tb_offset = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_FPSCR:
+ vcpu->arch.fp.fpscr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_VSRS(0)... KVMPPC_GSID_VSRS(31):
+ kvmppc_gse_get_vector128(gse, &v);
+ i = iden - KVMPPC_GSID_VSRS(0);
+ memcpy(&vcpu->arch.fp.fpr[i], &v,
+ sizeof(vcpu->arch.fp.fpr[i]));
+ break;
+#ifdef CONFIG_VSX
+ case KVMPPC_GSID_VSCR:
+ vcpu->arch.vr.vscr.u[3] = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_VSRS(32)... KVMPPC_GSID_VSRS(63):
+ i = iden - KVMPPC_GSID_VSRS(32);
+ kvmppc_gse_get_vector128(gse, &vcpu->arch.vr.vr[i]);
+ break;
+#endif
+ case KVMPPC_GSID_HDAR:
+ vcpu->arch.fault_dar = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_HDSISR:
+ vcpu->arch.fault_dsisr = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_ASDR:
+ vcpu->arch.fault_gpa = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_HEIR:
+ vcpu->arch.emul_inst = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_DEC_EXPIRY_TB: {
+ u64 dw;
+
+ dw = kvmppc_gse_get_u64(gse);
+ vcpu->arch.dec_expires =
+ dw + vcpu->arch.vcore->tb_offset;
+ break;
+ }
+ case KVMPPC_GSID_LOGICAL_PVR:
+ vcpu->arch.vcore->arch_compat = kvmppc_gse_get_u32(gse);
+ break;
+ default:
+ continue;
+ }
+ kvmppc_gsbm_set(valids, iden);
+ }
+
+ return 0;
+}
+
+static struct kvmppc_gs_msg_ops vcpu_message_ops = {
+ .get_size = gs_msg_ops_vcpu_get_size,
+ .fill_info = gs_msg_ops_vcpu_fill_info,
+ .refresh_info = gs_msg_ops_vcpu_refresh_info,
+};
+
+static int kvmhv_nestedv2_host_create(struct kvm_vcpu *vcpu,
+ struct kvmhv_nestedv2_io *io)
+{
+ struct kvmhv_nestedv2_config *cfg;
+ struct kvmppc_gs_buff *gsb, *vcpu_run_output, *vcpu_run_input;
+ unsigned long guest_id, vcpu_id;
+ struct kvmppc_gs_msg *gsm, *vcpu_message, *vcore_message;
+ int rc;
+
+ cfg = &io->cfg;
+ guest_id = vcpu->kvm->arch.lpid;
+ vcpu_id = vcpu->vcpu_id;
+
+ gsm = kvmppc_gsm_new(&config_msg_ops, cfg, KVMPPC_GS_FLAGS_WIDE,
+ GFP_KERNEL);
+ if (!gsm) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), guest_id, vcpu_id,
+ GFP_KERNEL);
+ if (!gsb) {
+ rc = -ENOMEM;
+ goto free_gsm;
+ }
+
+ rc = kvmppc_gsb_receive_datum(gsb, gsm,
+ KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't get vcpu run output buffer minimum size\n");
+ goto free_gsb;
+ }
+
+ vcpu_run_output = kvmppc_gsb_new(cfg->vcpu_run_output_size, guest_id,
+ vcpu_id, GFP_KERNEL);
+ if (!vcpu_run_output) {
+ rc = -ENOMEM;
+ goto free_gsb;
+ }
+
+ cfg->vcpu_run_output_cfg.address = kvmppc_gsb_paddress(vcpu_run_output);
+ cfg->vcpu_run_output_cfg.size = kvmppc_gsb_capacity(vcpu_run_output);
+ io->vcpu_run_output = vcpu_run_output;
+
+ gsm->flags = 0;
+ rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_RUN_OUTPUT);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't set vcpu run output buffer\n");
+ goto free_gs_out;
+ }
+
+ vcpu_message = kvmppc_gsm_new(&vcpu_message_ops, vcpu, 0, GFP_KERNEL);
+ if (!vcpu_message) {
+ rc = -ENOMEM;
+ goto free_gs_out;
+ }
+ kvmppc_gsm_include_all(vcpu_message);
+
+ io->vcpu_message = vcpu_message;
+
+ vcpu_run_input = kvmppc_gsb_new(kvmppc_gsm_size(vcpu_message), guest_id,
+ vcpu_id, GFP_KERNEL);
+ if (!vcpu_run_input) {
+ rc = -ENOMEM;
+ goto free_vcpu_message;
+ }
+
+ io->vcpu_run_input = vcpu_run_input;
+ cfg->vcpu_run_input_cfg.address = kvmppc_gsb_paddress(vcpu_run_input);
+ cfg->vcpu_run_input_cfg.size = kvmppc_gsb_capacity(vcpu_run_input);
+ rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_RUN_INPUT);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't set vcpu run input buffer\n");
+ goto free_vcpu_run_input;
+ }
+
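+	/*
+	 * Guest wide elements are sent with a separate H_GUEST_SET_STATE call
+	 * rather than in the run vcpu input buffer, so track them in their own
+	 * message. The logical PVR is excluded from this message.
+	 */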
+ vcore_message = kvmppc_gsm_new(&vcpu_message_ops, vcpu,
+ KVMPPC_GS_FLAGS_WIDE, GFP_KERNEL);
+ if (!vcore_message) {
+ rc = -ENOMEM;
+ goto free_vcpu_run_input;
+ }
+
+ kvmppc_gsm_include_all(vcore_message);
+ kvmppc_gsbm_clear(&vcore_message->bitmap, KVMPPC_GSID_LOGICAL_PVR);
+ io->vcore_message = vcore_message;
+
+ kvmppc_gsbm_fill(&io->valids);
+ kvmppc_gsm_free(gsm);
+ kvmppc_gsb_free(gsb);
+ return 0;
+
+free_vcpu_run_input:
+ kvmppc_gsb_free(vcpu_run_input);
+free_vcpu_message:
+ kvmppc_gsm_free(vcpu_message);
+free_gs_out:
+ kvmppc_gsb_free(vcpu_run_output);
+free_gsb:
+ kvmppc_gsb_free(gsb);
+free_gsm:
+ kvmppc_gsm_free(gsm);
+err:
+ return rc;
+}
+
+/**
+ * __kvmhv_nestedv2_mark_dirty() - mark a Guest State ID to be sent to the host
+ * @vcpu: vcpu
+ * @iden: guest state ID
+ *
+ * Mark a guest state ID as having been changed by the L1 host and thus
+ * the new value must be sent to the L0 hypervisor. See kvmhv_nestedv2_flush_vcpu().
+ */
+int __kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
+{
+ struct kvmhv_nestedv2_io *io;
+ struct kvmppc_gs_bitmap *valids;
+ struct kvmppc_gs_msg *gsm;
+
+ if (!iden)
+ return 0;
+
+ io = &vcpu->arch.nestedv2_io;
+ valids = &io->valids;
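+	/* Flag the ID in both messages so it is flushed to the L0 and mark the cached copy valid */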
+ gsm = io->vcpu_message;
+ kvmppc_gsm_include(gsm, iden);
+ gsm = io->vcore_message;
+ kvmppc_gsm_include(gsm, iden);
+ kvmppc_gsbm_set(valids, iden);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_mark_dirty);
+
+/**
+ * __kvmhv_nestedv2_cached_reload() - reload a Guest State ID from the host
+ * @vcpu: vcpu
+ * @iden: guest state ID
+ *
+ * Reload the value for the guest state ID from the L0 host into the L1 host.
+ * The value is cached so that an hcall to the L0 host is only made when necessary.
+ */
+int __kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
+{
+ struct kvmhv_nestedv2_io *io;
+ struct kvmppc_gs_bitmap *valids;
+ struct kvmppc_gs_buff *gsb;
+ struct kvmppc_gs_msg gsm;
+ int rc;
+
+ if (!iden)
+ return 0;
+
+ io = &vcpu->arch.nestedv2_io;
+ valids = &io->valids;
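+	/* Nothing to do if the cached value is already valid */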
+ if (kvmppc_gsbm_test(valids, iden))
+ return 0;
+
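+	/* Reuse the vcpu run input buffer to fetch the single element from the L0 */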
+ gsb = io->vcpu_run_input;
+ kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, kvmppc_gsid_flags(iden));
+ rc = kvmppc_gsb_receive_datum(gsb, &gsm, iden);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't get GSID: 0x%x\n", iden);
+ return rc;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_cached_reload);
+
+/**
+ * kvmhv_nestedv2_flush_vcpu() - send modified Guest State IDs to the host
+ * @vcpu: vcpu
+ * @time_limit: hdec expiry tb
+ *
+ * Send the values marked by __kvmhv_nestedv2_mark_dirty() to the L0 host.
+ * Thread wide values are copied to the H_GUEST_RUN_VCPU input buffer. Guest
+ * wide values need to be sent with H_GUEST_SET_STATE first.
+ *
+ * The hdec expiry tb is always sent to the L0 host.
+ */
+int kvmhv_nestedv2_flush_vcpu(struct kvm_vcpu *vcpu, u64 time_limit)
+{
+ struct kvmhv_nestedv2_io *io;
+ struct kvmppc_gs_buff *gsb;
+ struct kvmppc_gs_msg *gsm;
+ int rc;
+
+ io = &vcpu->arch.nestedv2_io;
+ gsb = io->vcpu_run_input;
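+	/* Guest wide elements cannot go in the run vcpu input buffer; set them now */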
+ gsm = io->vcore_message;
+ rc = kvmppc_gsb_send_data(gsb, gsm);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't set guest wide elements\n");
+ return rc;
+ }
+
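+	/* Thread wide elements are serialised into the run vcpu input buffer */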
+ gsm = io->vcpu_message;
+ kvmppc_gsb_reset(gsb);
+ rc = kvmppc_gsm_fill_info(gsm, gsb);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't fill vcpu run input buffer\n");
+ return rc;
+ }
+
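+	/* The hdec expiry is always sent so the L0 can enforce the time limit */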
+ rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_HDEC_EXPIRY_TB, time_limit);
+ if (rc < 0)
+ return rc;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvmhv_nestedv2_flush_vcpu);
+
+/**
+ * kvmhv_nestedv2_set_ptbl_entry() - send partition and process table state to
+ * L0 host
+ * @lpid: guest id
+ * @dw0: partition table double word
+ * @dw1: process table double word
+ */
+int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1)
+{
+ struct kvmppc_gs_part_table patbl;
+ struct kvmppc_gs_proc_table prtbl;
+ struct kvmppc_gs_buff *gsb;
+ size_t size;
+ int rc;
+
+ size = kvmppc_gse_total_size(
+ kvmppc_gsid_size(KVMPPC_GSID_PARTITION_TABLE)) +
+ kvmppc_gse_total_size(
+ kvmppc_gsid_size(KVMPPC_GSID_PROCESS_TABLE)) +
+ sizeof(struct kvmppc_gs_header);
+ gsb = kvmppc_gsb_new(size, lpid, 0, GFP_KERNEL);
+ if (!gsb)
+ return -ENOMEM;
+
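+	/* Unpack dword 0 of the partition table entry: radix base, EA bits and root size */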
+ patbl.address = dw0 & RPDB_MASK;
+ patbl.ea_bits = ((((dw0 & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
+ ((dw0 & RTS2_MASK) >> RTS2_SHIFT)) +
+ 31);
+ patbl.gpd_size = 1ul << ((dw0 & RPDS_MASK) + 3);
+ rc = kvmppc_gse_put_part_table(gsb, KVMPPC_GSID_PARTITION_TABLE, patbl);
+ if (rc < 0)
+ goto free_gsb;
+
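+	/* Unpack dword 1: process table base and size */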
+ prtbl.address = dw1 & PRTB_MASK;
+ prtbl.gpd_size = 1ul << ((dw1 & PRTS_MASK) + 12);
+ rc = kvmppc_gse_put_proc_table(gsb, KVMPPC_GSID_PROCESS_TABLE, prtbl);
+ if (rc < 0)
+ goto free_gsb;
+
+ rc = kvmppc_gsb_send(gsb, KVMPPC_GS_FLAGS_WIDE);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't set the PATE\n");
+ goto free_gsb;
+ }
+
+ kvmppc_gsb_free(gsb);
+ return 0;
+
+free_gsb:
+ kvmppc_gsb_free(gsb);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(kvmhv_nestedv2_set_ptbl_entry);
+
+/**
+ * kvmhv_nestedv2_parse_output() - receive values from H_GUEST_RUN_VCPU output
+ * @vcpu: vcpu
+ *
+ * Parse the output buffer from H_GUEST_RUN_VCPU to update vcpu.
+ */
+int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu)
+{
+ struct kvmhv_nestedv2_io *io;
+ struct kvmppc_gs_buff *gsb;
+ struct kvmppc_gs_msg gsm;
+
+ io = &vcpu->arch.nestedv2_io;
+ gsb = io->vcpu_run_output;
+
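+	/* Reset fault state in case the output buffer does not carry updated values for it */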
+ vcpu->arch.fault_dar = 0;
+ vcpu->arch.fault_dsisr = 0;
+ vcpu->arch.fault_gpa = 0;
+ vcpu->arch.emul_inst = KVM_INST_FETCH_FAILED;
+
+ kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, 0);
+ return kvmppc_gsm_refresh_info(&gsm, gsb);
+}
+EXPORT_SYMBOL_GPL(kvmhv_nestedv2_parse_output);
+
+static void kvmhv_nestedv2_host_free(struct kvm_vcpu *vcpu,
+ struct kvmhv_nestedv2_io *io)
+{
+ kvmppc_gsm_free(io->vcpu_message);
+ kvmppc_gsm_free(io->vcore_message);
+ kvmppc_gsb_free(io->vcpu_run_input);
+ kvmppc_gsb_free(io->vcpu_run_output);
+}
+
+int __kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs)
+{
+ struct kvmhv_nestedv2_io *io;
+ struct kvmppc_gs_bitmap *valids;
+ struct kvmppc_gs_buff *gsb;
+ struct kvmppc_gs_msg gsm;
+ int rc = 0;
+
+ io = &vcpu->arch.nestedv2_io;
+ valids = &io->valids;
+
+ gsb = io->vcpu_run_input;
+ kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, 0);
+
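+	/* Only request the registers whose cached copies are not already valid */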
+ for (int i = 0; i < 32; i++) {
+ if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_GPR(i)))
+ kvmppc_gsm_include(&gsm, KVMPPC_GSID_GPR(i));
+ }
+
+ if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_CR))
+ kvmppc_gsm_include(&gsm, KVMPPC_GSID_CR);
+
+ if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_XER))
+ kvmppc_gsm_include(&gsm, KVMPPC_GSID_XER);
+
+ if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_CTR))
+ kvmppc_gsm_include(&gsm, KVMPPC_GSID_CTR);
+
+ if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_LR))
+ kvmppc_gsm_include(&gsm, KVMPPC_GSID_LR);
+
+ if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_NIA))
+ kvmppc_gsm_include(&gsm, KVMPPC_GSID_NIA);
+
+ rc = kvmppc_gsb_receive_data(gsb, &gsm);
+ if (rc < 0)
+ pr_err("KVM-NESTEDv2: couldn't reload ptregs\n");
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_reload_ptregs);
+
+int __kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
+ struct pt_regs *regs)
+{
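+	/* Flag all pt_regs state as dirty so it is sent to the L0 on the next flush */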
+ for (int i = 0; i < 32; i++)
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(i));
+
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CR);
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_XER);
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CTR);
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LR);
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_NIA);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_mark_dirty_ptregs);
+
+/**
+ * kvmhv_nestedv2_vcpu_create() - create nested vcpu for the NESTEDv2 API
+ * @vcpu: vcpu
+ * @io: NESTEDv2 nested io state
+ *
+ * Create the vcpu in the L0 hypervisor and allocate the guest state buffers
+ * used to communicate with it.
+ */
+int kvmhv_nestedv2_vcpu_create(struct kvm_vcpu *vcpu,
+ struct kvmhv_nestedv2_io *io)
+{
+ long rc;
+
+ rc = plpar_guest_create_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id);
+
+ if (rc != H_SUCCESS) {
+ pr_err("KVM: Create Guest vcpu hcall failed, rc=%ld\n", rc);
+ switch (rc) {
+ case H_NOT_ENOUGH_RESOURCES:
+ case H_ABORTED:
+ return -ENOMEM;
+ case H_AUTHORITY:
+ return -EPERM;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ rc = kvmhv_nestedv2_host_create(vcpu, io);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(kvmhv_nestedv2_vcpu_create);
+
+/**
+ * kvmhv_nestedv2_vcpu_free() - free the NESTEDv2 state
+ * @vcpu: vcpu
+ * @io: NESTEDv2 nested io state
+ */
+void kvmhv_nestedv2_vcpu_free(struct kvm_vcpu *vcpu,
+ struct kvmhv_nestedv2_io *io)
+{
+ kvmhv_nestedv2_host_free(vcpu, io);
+}
+EXPORT_SYMBOL_GPL(kvmhv_nestedv2_vcpu_free);
@@ -92,7 +92,8 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
vcpu->arch.mmio_host_swabbed = 0;
emulated = EMULATE_FAIL;
- vcpu->arch.regs.msr = vcpu->arch.shared->msr;
+ vcpu->arch.regs.msr = kvmppc_get_msr(vcpu);
+ kvmhv_nestedv2_reload_ptregs(vcpu, &vcpu->arch.regs);
if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
int type = op.type & INSTR_TYPE_MASK;
int size = GETSIZE(op.type);
@@ -357,6 +358,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
}
trace_kvm_ppc_instr(ppc_inst_val(inst), kvmppc_get_pc(vcpu), emulated);
+ kvmhv_nestedv2_mark_dirty_ptregs(vcpu, &vcpu->arch.regs);
/* Advance past emulated instruction. */
if (emulated != EMULATE_FAIL)
@@ -569,3 +569,53 @@ int kvmppc_gsm_refresh_info(struct kvmppc_gs_msg *gsm,
return gsm->ops->refresh_info(gsm, gsb);
}
EXPORT_SYMBOL_GPL(kvmppc_gsm_refresh_info);
+
+/**
+ * kvmppc_gsb_send - send all elements in the buffer to the hypervisor.
+ * @gsb: guest state buffer
+ * @flags: guest wide or thread wide
+ *
+ * Performs the H_GUEST_SET_STATE hcall for the guest state buffer.
+ */
+int kvmppc_gsb_send(struct kvmppc_gs_buff *gsb, unsigned long flags)
+{
+ unsigned long hflags = 0;
+ unsigned long i;
+ int rc;
+
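+	/* Nothing to do for an empty buffer */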
+ if (kvmppc_gsb_nelems(gsb) == 0)
+ return 0;
+
+ if (flags & KVMPPC_GS_FLAGS_WIDE)
+ hflags |= H_GUEST_FLAGS_WIDE;
+
+ rc = plpar_guest_set_state(hflags, gsb->guest_id, gsb->vcpu_id,
+ __pa(gsb->hdr), gsb->capacity, &i);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsb_send);
+
+/**
+ * kvmppc_gsb_recv - request updated values for all elements in the buffer
+ * @gsb: guest state buffer
+ * @flags: guest wide or thread wide
+ *
+ * Performs the H_GUEST_GET_STATE hcall for the guest state buffer.
+ * After returning from the hcall the guest state elements that were
+ * present in the buffer will have updated values from the hypervisor.
+ */
+int kvmppc_gsb_recv(struct kvmppc_gs_buff *gsb, unsigned long flags)
+{
+ unsigned long hflags = 0;
+ unsigned long i;
+ int rc;
+
+ if (flags & KVMPPC_GS_FLAGS_WIDE)
+ hflags |= H_GUEST_FLAGS_WIDE;
+
+ rc = plpar_guest_get_state(hflags, gsb->guest_id, gsb->vcpu_id,
+ __pa(gsb->hdr), gsb->capacity, &i);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsb_recv);