Message ID | 20201202184122.26046-18-dbrazdil@google.com (mailing list archive) |
---|---|
State | New, archived |
Series | Opt-in always-on nVHE hypervisor |
On Wed, 2020-12-02 at 18:41 +0000, David Brazdil wrote:
> Add a host-initialized constant to KVM nVHE hyp code for converting
> between EL2 linear map virtual addresses and physical addresses.
> Also add `__hyp_pa` macro that performs the conversion.
>
> Signed-off-by: David Brazdil <dbrazdil@google.com>
> ---
>  arch/arm64/kvm/hyp/nvhe/psci-relay.c |  3 +++
>  arch/arm64/kvm/va_layout.c           | 30 +++++++++++++++++++++++++---
>  2 files changed, 30 insertions(+), 3 deletions(-)
>
> diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
> index 61375d4571c2..70b42f433449 100644
> --- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c
> +++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
> @@ -18,6 +18,9 @@
>  /* Config options set by the host. */
>  __ro_after_init u32 kvm_host_psci_version;
>  __ro_after_init struct psci_0_1_function_ids kvm_host_psci_0_1_function_ids;
> +__ro_after_init s64 hyp_physvirt_offset;
> +
> +#define __hyp_pa(x) ((phys_addr_t)((x)) + hyp_physvirt_offset)
>
>  static u64 get_psci_func_id(struct kvm_cpu_context *host_ctxt)
>  {
> diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
> index 4130b72e6891..d8cc51bd60bf 100644
> --- a/arch/arm64/kvm/va_layout.c
> +++ b/arch/arm64/kvm/va_layout.c
> @@ -23,6 +23,30 @@ static u8 tag_lsb;
>  static u64 tag_val;
>  static u64 va_mask;
>
> +/*
> + * Compute HYP VA by using the same computation as kern_hyp_va().
> + */
> +static u64 __early_kern_hyp_va(u64 addr)
> +{
> +        addr &= va_mask;
> +        addr |= tag_val << tag_lsb;
> +        return addr;
> +}
> +
> +/*
> + * Store a hyp VA <-> PA offset into a hyp-owned variable.
> + */
> +static void init_hyp_physvirt_offset(void)
> +{
> +        extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
> +        u64 kern_va, hyp_va;
> +
> +        /* Compute the offset from the hyp VA and PA of a random symbol. */
> +        kern_va = (u64)kvm_ksym_ref(__hyp_text_start);
> +        hyp_va = __early_kern_hyp_va(kern_va);
> +        CHOOSE_NVHE_SYM(hyp_physvirt_offset) = (s64)__pa(kern_va) - (s64)hyp_va;

The code here introduced a warning on TX2 from today's linux-next.

.config: https://cailca.coding.net/public/linux/mm/git/files/master/arm64.config

[ 29.356963] CPU255: Booted secondary processor 0x0000011f03 [0x431f0af1]
[ 29.358301] smp: Brought up 2 nodes, 256 CPUs
[ 29.364962] SMP: Total of 256 processors activated.
[ 29.364985] CPU features: detected: Privileged Access Never
[ 29.365003] CPU features: detected: LSE atomic instructions
[ 29.365023] CPU features: detected: CRC32 instructions
[ 29.431660] CPU: All CPU(s) started at EL2
[ 29.431685] ------------[ cut here ]------------
[ 29.431713] virt_to_phys used for non-linear address: (____ptrval____) (__hyp_idmap_text_end+0x0/0x534)
[ 29.431744] WARNING: CPU: 0 PID: 1 at arch/arm64/mm/physaddr.c:15 __virt_to_phys+0x80/0xc0
[ 29.431759] Modules linked in:
[ 29.431787] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.10.0-rc6-next-20201207+ #2
[ 29.431804] pstate: 10400009 (nzcV daif +PAN -UAO -TCO BTYPE=--)
[ 29.431819] pc : __virt_to_phys+0x80/0xc0
[ 29.431834] lr : __virt_to_phys+0x80/0xc0
[ 29.431848] sp : ffff000005fefc90
[ 29.431862] x29: ffff000005fefc90 x28: ffff8000191c9010
[ 29.431891] x27: ffff000005f21228 x26: b14e19fe279ae3eb
[ 29.431920] x25: ffff8000191c9010 x24: ffff8000191c9000
[ 29.431948] x23: ffff8000191c9000 x22: 000f800011235acc
[ 29.431975] x21: 0000000000000001 x20: 000f800000000000
[ 29.432003] x19: ffff800011235acc x18: ffff6001cedcc336
[ 29.432031] x17: 0000000000001308 x16: 0000000000000002
[ 29.432058] x15: 0000000000000000 x14: 7261656e696c2d6e
[ 29.432086] x13: ffff600000bfdee7 x12: 1fffe00000bfdee6
[ 29.432113] x11: 1fffe00000bfdee6 x10: ffff600000bfdee6
[ 29.432141] x9 : ffff80001020a928 x8 : ffff000005fef737
[ 29.432169] x7 : 0000000000000001 x6 : ffff600000bfdee7
[ 29.432196] x5 : ffff600000bfdee7 x4 : 1fffe00000bfdedc
[ 29.432223] x3 : 1fffe00000be4009 x2 : ffff600000bfdf5c
[ 29.432251] x1 : 8fd448c3d76ca800 x0 : 0000000000000000
[ 29.432279] Call trace:
[ 29.432294]  __virt_to_phys+0x80/0xc0
[ 29.432312]  kvm_compute_layout+0x21c/0x264
init_hyp_physvirt_offset at arch/arm64/kvm/va_layout.c:47
(inlined by) kvm_compute_layout at arch/arm64/kvm/va_layout.c:82
[ 29.432327]  smp_cpus_done+0x164/0x17c
[ 29.432342]  smp_init+0xc4/0xd8
[ 29.432358]  kernel_init_freeable+0x4ec/0x734
[ 29.432375]  kernel_init+0x18/0x12c
[ 29.432391]  ret_from_fork+0x10/0x1c
[ 29.432405] irq event stamp: 490612
[ 29.432424] hardirqs last enabled at (490611): [<ffff800010207f60>] console_unlock+0x8e0/0xca0
[ 29.432440] hardirqs last disabled at (490612): [<ffff800011212f44>] el1_dbg+0x24/0x50
[ 29.432455] softirqs last enabled at (487946): [<ffff800010010a98>] _stext+0xa98/0x113c
[ 29.432473] softirqs last disabled at (487939): [<ffff8000100ec5e8>] irq_exit+0x500/0x5e0
[ 29.432492] ---[ end trace 96247b4cbbdf9333 ]---

> +}
> +
>  /*
>   * We want to generate a hyp VA with the following format (with V ==
>   * vabits_actual):
> @@ -54,6 +78,8 @@ __init void kvm_compute_layout(void)
>                  tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
>          }
>          tag_val >>= tag_lsb;
> +
> +        init_hyp_physvirt_offset();
>  }
>
>  static u32 compute_instruction(int n, u32 rd, u32 rn)
> @@ -146,9 +172,7 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
>          /*
>           * Compute HYP VA by using the same computation as kern_hyp_va()
>           */
> -        addr = (uintptr_t)kvm_ksym_ref(__kvm_hyp_vector);
> -        addr &= va_mask;
> -        addr |= tag_val << tag_lsb;
> +        addr = __early_kern_hyp_va((u64)kvm_ksym_ref(__kvm_hyp_vector));
>
>          /* Use PC[10:7] to branch to the same vector in KVM */
>          addr |= ((u64)origptr & GENMASK_ULL(10, 7));
Hi Qian,

> > +/*
> > + * Store a hyp VA <-> PA offset into a hyp-owned variable.
> > + */
> > +static void init_hyp_physvirt_offset(void)
> > +{
> > +        extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
> > +        u64 kern_va, hyp_va;
> > +
> > +        /* Compute the offset from the hyp VA and PA of a random symbol. */
> > +        kern_va = (u64)kvm_ksym_ref(__hyp_text_start);
> > +        hyp_va = __early_kern_hyp_va(kern_va);
> > +        CHOOSE_NVHE_SYM(hyp_physvirt_offset) = (s64)__pa(kern_va) - (s64)hyp_va;
>
> The code here introduced a warning on TX2 from today's linux-next.
>
> .config: https://cailca.coding.net/public/linux/mm/git/files/master/arm64.config
>
> [ 29.431660] CPU: All CPU(s) started at EL2
> [ 29.431685] ------------[ cut here ]------------
> [ 29.431713] virt_to_phys used for non-linear address: (____ptrval____) (__hyp_idmap_text_end+0x0/0x534)
> [ 29.431744] WARNING: CPU: 0 PID: 1 at arch/arm64/mm/physaddr.c:15 __virt_to_phys+0x80/0xc0
> [ 29.431759] Modules linked in:
> [ 29.431787] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.10.0-rc6-next-20201207+ #2
> [...]
> [ 29.432279] Call trace:
> [ 29.432294]  __virt_to_phys+0x80/0xc0
> [ 29.432312]  kvm_compute_layout+0x21c/0x264
> init_hyp_physvirt_offset at arch/arm64/kvm/va_layout.c:47
> (inlined by) kvm_compute_layout at arch/arm64/kvm/va_layout.c:82
> [ 29.432327]  smp_cpus_done+0x164/0x17c
> [ 29.432342]  smp_init+0xc4/0xd8
> [ 29.432358]  kernel_init_freeable+0x4ec/0x734
> [ 29.432375]  kernel_init+0x18/0x12c
> [ 29.432391]  ret_from_fork+0x10/0x1c
> [...]
> [ 29.432492] ---[ end trace 96247b4cbbdf9333 ]---

Thanks for the report, I've been able to reproduce with CONFIG_DEBUG_VIRTUAL=y
and the kernel running in EL2.
It doesn't pose any functional issue, as the result of the computation is
never used. I'll post a fix later today.

David
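For context: with CONFIG_DEBUG_VIRTUAL enabled, __virt_to_phys() warns whenever
it is handed an address outside the linear map, and when the kernel itself runs
at EL2, kvm_ksym_ref() returns the kernel-image address of the symbol rather
than its linear-map alias, which is exactly the case the check catches. Below is
a minimal sketch of one way to sidestep that, assuming the symbol's linear-map
alias is taken explicitly with lm_alias(); this is only an illustration, not
necessarily the fix that was actually posted.

/*
 * Illustrative sketch (assumption, not the posted fix): take the
 * linear-map alias of the symbol so that __pa() only ever sees a
 * linear-map address and the CONFIG_DEBUG_VIRTUAL check stays quiet,
 * including when the kernel runs at EL2.
 */
static void init_hyp_physvirt_offset(void)
{
        extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
        u64 kern_va, hyp_va;

        /* Compute the offset from the hyp VA and PA of a random symbol. */
        kern_va = (u64)lm_alias(__hyp_text_start);
        hyp_va = __early_kern_hyp_va(kern_va);
        CHOOSE_NVHE_SYM(hyp_physvirt_offset) = (s64)__pa(kern_va) - (s64)hyp_va;
}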
diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
index 61375d4571c2..70b42f433449 100644
--- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c
+++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
@@ -18,6 +18,9 @@
 /* Config options set by the host. */
 __ro_after_init u32 kvm_host_psci_version;
 __ro_after_init struct psci_0_1_function_ids kvm_host_psci_0_1_function_ids;
+__ro_after_init s64 hyp_physvirt_offset;
+
+#define __hyp_pa(x) ((phys_addr_t)((x)) + hyp_physvirt_offset)
 
 static u64 get_psci_func_id(struct kvm_cpu_context *host_ctxt)
 {
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index 4130b72e6891..d8cc51bd60bf 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -23,6 +23,30 @@ static u8 tag_lsb;
 static u64 tag_val;
 static u64 va_mask;
 
+/*
+ * Compute HYP VA by using the same computation as kern_hyp_va().
+ */
+static u64 __early_kern_hyp_va(u64 addr)
+{
+        addr &= va_mask;
+        addr |= tag_val << tag_lsb;
+        return addr;
+}
+
+/*
+ * Store a hyp VA <-> PA offset into a hyp-owned variable.
+ */
+static void init_hyp_physvirt_offset(void)
+{
+        extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
+        u64 kern_va, hyp_va;
+
+        /* Compute the offset from the hyp VA and PA of a random symbol. */
+        kern_va = (u64)kvm_ksym_ref(__hyp_text_start);
+        hyp_va = __early_kern_hyp_va(kern_va);
+        CHOOSE_NVHE_SYM(hyp_physvirt_offset) = (s64)__pa(kern_va) - (s64)hyp_va;
+}
+
 /*
  * We want to generate a hyp VA with the following format (with V ==
  * vabits_actual):
@@ -54,6 +78,8 @@ __init void kvm_compute_layout(void)
                 tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
         }
         tag_val >>= tag_lsb;
+
+        init_hyp_physvirt_offset();
 }
 
 static u32 compute_instruction(int n, u32 rd, u32 rn)
@@ -146,9 +172,7 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
         /*
          * Compute HYP VA by using the same computation as kern_hyp_va()
          */
-        addr = (uintptr_t)kvm_ksym_ref(__kvm_hyp_vector);
-        addr &= va_mask;
-        addr |= tag_val << tag_lsb;
+        addr = __early_kern_hyp_va((u64)kvm_ksym_ref(__kvm_hyp_vector));
 
         /* Use PC[10:7] to branch to the same vector in KVM */
         addr |= ((u64)origptr & GENMASK_ULL(10, 7));
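As a usage illustration (an assumption about how later patches in the series
might use the macro, not something this patch adds): hyp code that needs to
hand a physical address to firmware, for instance a PSCI CPU_ON entry point,
can derive it from one of its own addresses with __hyp_pa(). Only __hyp_pa()
and hyp_physvirt_offset come from the patch; the other names below are made up.

/* Hypothetical example: only __hyp_pa() is introduced by this patch. */
void example_hyp_entry(void);   /* made-up hyp entry point */

static phys_addr_t example_boot_entry_pa(void)
{
        /* __hyp_pa() adds the host-initialized hyp VA -> PA offset. */
        return __hyp_pa((unsigned long)hyp_symbol_addr(example_hyp_entry));
}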
Add a host-initialized constant to KVM nVHE hyp code for converting
between EL2 linear map virtual addresses and physical addresses.
Also add `__hyp_pa` macro that performs the conversion.

Signed-off-by: David Brazdil <dbrazdil@google.com>
---
 arch/arm64/kvm/hyp/nvhe/psci-relay.c |  3 +++
 arch/arm64/kvm/va_layout.c           | 30 +++++++++++++++++++++++++---
 2 files changed, 30 insertions(+), 3 deletions(-)
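The "host-initialized constant" pattern the commit message refers to is that
the variable is owned by the hyp object and written once by the host, through
its mangled kvm_nvhe_sym()/CHOOSE_NVHE_SYM() name, before the hypervisor uses
it. A condensed sketch of that split is below; the declarations restate what is
already in the diff, and host_init_offset() is a hypothetical helper added only
for illustration.

/* hyp side (arch/arm64/kvm/hyp/nvhe/): owned by hyp, filled in by the host. */
__ro_after_init s64 hyp_physvirt_offset;
#define __hyp_pa(x) ((phys_addr_t)((x)) + hyp_physvirt_offset)

/* host side: the hyp copy of the symbol is reached via its mangled name. */
extern s64 kvm_nvhe_sym(hyp_physvirt_offset);

static void host_init_offset(s64 offset)        /* hypothetical helper */
{
        CHOOSE_NVHE_SYM(hyp_physvirt_offset) = offset;
}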