| Message ID | 20190204110618.GA938@blackberry (mailing list archive) |
|---|---|
| State | New, archived |
| Series | KVM: PPC: Book3S PR: Add emulation for slbfee. instruction |
On 2/4/19 12:06 PM, Paul Mackerras wrote:
> Recent kernels, since commit e15a4fea4dee ("powerpc/64s/hash: Add
> some SLB debugging tests", 2018-10-03) use the slbfee. instruction,
> which PR KVM currently does not have code to emulate. Consequently
> recent kernels fail to boot under PR KVM. This adds emulation of
> slbfee., enabling these kernels to boot successfully.

I still see a problem with a PR guest which I don't think this patch
is supposed to solve:

[    0.022884] kernel tried to execute exec-protected page (c000000001630afc) - exploit attempt? (uid: 0)
[    0.022984] BUG: Unable to handle kernel instruction fetch
[    0.023023] Faulting instruction address: 0xc000000001630afc
[    0.023080] Oops: Kernel access of bad area, sig: 11 [#1]
[    0.023119] LE SMP NR_CPUS=1024 NUMA pSeries
[    0.023167] Modules linked in:
[    0.023207] CPU: 3 PID: 0 Comm: swapper/3 Not tainted 5.0.0-rc5+ #7
[    0.023255] NIP:  c000000001630afc LR: c00000000000ac8c CTR: c00000003fffbc00
[    0.023311] REGS: c00000003ffbfa70 TRAP: 0400   Not tainted  (5.0.0-rc5+)
[    0.023358] MSR:  a000000010001033 <SF,ME,IR,DR,RI,LE>  CR: 24000288  XER: 00000000
[    0.023422] CFAR: 0000000000000000 IRQMASK: 1
[    0.023422] GPR00: 0000000000000000 c00000003ffbfd00 c000000001580600 c00000003ffbfd70
[    0.023422] GPR04: 0000000000000000 c0000001ff5d16f0 0000000000000000 0000000000000808
[    0.023422] GPR08: 0000000000000000 0000000000000000 0000000000000002 0000000000000900
[    0.023422] GPR12: a000000000009033 c00000003fffbc00 c0000001fc233f90 0000000000000000
[    0.023422] GPR16: 0000000000000000 0000000000000000 0000000000000003 c0000000000503d0
[    0.023422] GPR20: c0000000015b0124 0000000000000400 0000000000000000 c0000000015adc70
[    0.023422] GPR24: c00000000172fe28 0000000000000000 0000000000080000 0000000000000000
[    0.023422] GPR28: c0000000015affb8 0000000000000001 0000000000000008 c0000000015a9c18
[    0.023912] NIP [c000000001630afc] kvm_tmp+0x162c/0x100000
[    0.023956] LR [c00000000000ac8c] soft_nmi_common+0xcc/0xd0

Any idea where that comes from ?

Thanks,

C.
>
> Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
> [... full patch quoted in the original; trimmed here, see the patch below ...]
On Mon, Feb 04, 2019 at 06:07:40PM +0100, Cédric Le Goater wrote:
> On 2/4/19 12:06 PM, Paul Mackerras wrote:
> > Recent kernels, since commit e15a4fea4dee ("powerpc/64s/hash: Add
> > some SLB debugging tests", 2018-10-03) use the slbfee. instruction,
> > which PR KVM currently does not have code to emulate. Consequently
> > recent kernels fail to boot under PR KVM. This adds emulation of
> > slbfee., enabling these kernels to boot successfully.
>
> I still see a problem with a PR guest which I don't think this patch
> is supposed to solve:
>
> [    0.022884] kernel tried to execute exec-protected page (c000000001630afc) - exploit attempt? (uid: 0)
> [    0.022984] BUG: Unable to handle kernel instruction fetch
> [    0.023023] Faulting instruction address: 0xc000000001630afc
> [    0.023080] Oops: Kernel access of bad area, sig: 11 [#1]
> [    0.023119] LE SMP NR_CPUS=1024 NUMA pSeries
> [    0.023167] Modules linked in:
> [    0.023207] CPU: 3 PID: 0 Comm: swapper/3 Not tainted 5.0.0-rc5+ #7
> [    0.023255] NIP:  c000000001630afc LR: c00000000000ac8c CTR: c00000003fffbc00
> [    0.023311] REGS: c00000003ffbfa70 TRAP: 0400   Not tainted  (5.0.0-rc5+)
> [    0.023358] MSR:  a000000010001033 <SF,ME,IR,DR,RI,LE>  CR: 24000288  XER: 00000000
> [    0.023422] CFAR: 0000000000000000 IRQMASK: 1
> [    0.023422] GPR00: 0000000000000000 c00000003ffbfd00 c000000001580600 c00000003ffbfd70
> [    0.023422] GPR04: 0000000000000000 c0000001ff5d16f0 0000000000000000 0000000000000808
> [    0.023422] GPR08: 0000000000000000 0000000000000000 0000000000000002 0000000000000900
> [    0.023422] GPR12: a000000000009033 c00000003fffbc00 c0000001fc233f90 0000000000000000
> [    0.023422] GPR16: 0000000000000000 0000000000000000 0000000000000003 c0000000000503d0
> [    0.023422] GPR20: c0000000015b0124 0000000000000400 0000000000000000 c0000000015adc70
> [    0.023422] GPR24: c00000000172fe28 0000000000000000 0000000000080000 0000000000000000
> [    0.023422] GPR28: c0000000015affb8 0000000000000001 0000000000000008 c0000000015a9c18
> [    0.023912] NIP [c000000001630afc] kvm_tmp+0x162c/0x100000
> [    0.023956] LR [c00000000000ac8c] soft_nmi_common+0xcc/0xd0
>
> Any idea where that comes from ?

So, I know that PR is currently broken because it doesn't support all
the page sizes / segment sizes that qemu wants it to (I sacrificed PR
support for consistency in the environment we present to the guest).
I don't know if the crash above is that or something else, though.

> Thanks,
>
> C.
> >
> > Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
> > [... full patch quoted in the original; trimmed here, see the patch below ...]
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 0f98f00..0914303 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -377,6 +377,7 @@ struct kvmppc_mmu {
 	void (*slbmte)(struct kvm_vcpu *vcpu, u64 rb, u64 rs);
 	u64  (*slbmfee)(struct kvm_vcpu *vcpu, u64 slb_nr);
 	u64  (*slbmfev)(struct kvm_vcpu *vcpu, u64 slb_nr);
+	int  (*slbfee)(struct kvm_vcpu *vcpu, gva_t eaddr, ulong *ret_slb);
 	void (*slbie)(struct kvm_vcpu *vcpu, u64 slb_nr);
 	void (*slbia)(struct kvm_vcpu *vcpu);
 	/* book3s */
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 6121699..6f789f6 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -425,6 +425,7 @@ void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu)
 	mmu->slbmte = NULL;
 	mmu->slbmfee = NULL;
 	mmu->slbmfev = NULL;
+	mmu->slbfee = NULL;
 	mmu->slbie = NULL;
 	mmu->slbia = NULL;
 }
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index c92dd25..d4b967f 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -435,6 +435,19 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
 	kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT);
 }
 
+static int kvmppc_mmu_book3s_64_slbfee(struct kvm_vcpu *vcpu, gva_t eaddr,
+				       ulong *ret_slb)
+{
+	struct kvmppc_slb *slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
+
+	if (slbe) {
+		*ret_slb = slbe->origv;
+		return 0;
+	}
+	*ret_slb = 0;
+	return -ENOENT;
+}
+
 static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
 {
 	struct kvmppc_slb *slbe;
@@ -670,6 +683,7 @@ void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
 	mmu->slbmte = kvmppc_mmu_book3s_64_slbmte;
 	mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee;
 	mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev;
+	mmu->slbfee = kvmppc_mmu_book3s_64_slbfee;
 	mmu->slbie = kvmppc_mmu_book3s_64_slbie;
 	mmu->slbia = kvmppc_mmu_book3s_64_slbia;
 	mmu->xlate = kvmppc_mmu_book3s_64_xlate;
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 8c7e933..6ef7c5f 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -47,6 +47,7 @@
 #define OP_31_XOP_SLBMFEV	851
 #define OP_31_XOP_EIOIO		854
 #define OP_31_XOP_SLBMFEE	915
+#define OP_31_XOP_SLBFEE	979
 
 #define OP_31_XOP_TBEGIN	654
 #define OP_31_XOP_TABORT	910
@@ -416,6 +417,23 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 			vcpu->arch.mmu.slbia(vcpu);
 			break;
+		case OP_31_XOP_SLBFEE:
+			if (!(inst & 1) || !vcpu->arch.mmu.slbfee) {
+				return EMULATE_FAIL;
+			} else {
+				ulong b, t;
+				ulong cr = kvmppc_get_cr(vcpu) & ~CR0_MASK;
+
+				b = kvmppc_get_gpr(vcpu, rb);
+				if (!vcpu->arch.mmu.slbfee(vcpu, b, &t))
+					cr |= 2 << CR0_SHIFT;
+				kvmppc_set_gpr(vcpu, rt, t);
+				/* copy XER[SO] bit to CR0[SO] */
+				cr |= (vcpu->arch.regs.xer & 0x80000000) >>
+					(31 - CR0_SHIFT);
+				kvmppc_set_cr(vcpu, cr);
+			}
+			break;
 		case OP_31_XOP_SLBMFEE:
 			if (!vcpu->arch.mmu.slbmfee) {
 				emulated = EMULATE_FAIL;
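
[Editor's note] The CR0 update in the new OP_31_XOP_SLBFEE case packs the
instruction's result semantics into a few lines: clear CR0, set EQ on an SLB
hit, copy XER[SO] into CR0[SO]. A minimal standalone C sketch of the same
arithmetic, assuming CR0_SHIFT = 28 and CR0_MASK = 0xf << CR0_SHIFT (i.e. CR0
in the top nibble of the 32-bit CR image, as in the kernel headers):

#include <stdint.h>

/* Assumed values: CR0 occupies the top nibble of the CR image, so
 * CR0_SHIFT is 28.  Within the nibble: LT=8, GT=4, EQ=2, SO=1. */
#define CR0_SHIFT 28
#define CR0_MASK  (0xfu << CR0_SHIFT)
#define XER_SO    0x80000000u	/* summary-overflow bit of XER */

static uint32_t slbfee_cr0(uint32_t cr, uint32_t xer, int found)
{
	cr &= ~CR0_MASK;			/* LT = GT = EQ = SO = 0 */
	if (found)
		cr |= 2u << CR0_SHIFT;		/* EQ = 1 on an SLB hit */
	/* copy XER[SO] into CR0[SO], as the patch does */
	cr |= (xer & XER_SO) >> (31 - CR0_SHIFT);
	return cr;
}

With found = 1 and XER[SO] clear this yields CR0 = 0b0010 (only EQ set),
which is what a guest branching on beq after slbfee. expects. Note also that
the case rejects the non-record form: slbfee. is only defined with Rc = 1,
hence the (inst & 1) check.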
Recent kernels, since commit e15a4fea4dee ("powerpc/64s/hash: Add
some SLB debugging tests", 2018-10-03) use the slbfee. instruction,
which PR KVM currently does not have code to emulate. Consequently
recent kernels fail to boot under PR KVM. This adds emulation of
slbfee., enabling these kernels to boot successfully.

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
---
 arch/powerpc/include/asm/kvm_host.h |  1 +
 arch/powerpc/kvm/book3s_32_mmu.c    |  1 +
 arch/powerpc/kvm/book3s_64_mmu.c    | 14 ++++++++++++++
 arch/powerpc/kvm/book3s_emulate.c   | 18 ++++++++++++++++++
 4 files changed, 34 insertions(+)
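
[Editor's note] For context on the guest side this emulation serves: the SLB
debugging tests from e15a4fea4dee probe the SLB with slbfee. via inline asm
(the kernel actually wraps the opcode in a macro for older assemblers). A
hypothetical sketch of that usage, with an illustrative name and shape rather
than a verbatim copy of the kernel code:

/* Hypothetical sketch: probe the SLB for an effective address. */
static inline unsigned long slb_probe(unsigned long ea)
{
	unsigned long entry;

	/* slbfee. RT,RB searches the SLB for the EA in RB; on a hit,
	 * RT receives the matching entry's V doubleword and CR0[EQ]
	 * is set -- which is why the emulation must update CR0 too. */
	asm volatile("slbfee. %0, %1" : "=r" (entry) : "r" (ea) : "cr0");

	return entry;	/* the emulation returns 0 when nothing matched */
}

The patch mirrors those hardware semantics: kvmppc_mmu_book3s_64_slbfee()
returns the entry's origv doubleword on a hit and stores 0 on a miss, and the
emulation sets CR0[EQ] accordingly.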