
[WIP,14/14] x86/nestedsvm: Note some places for improvement

Message ID 20240626133853.4150731-15-george.dunlap@cloud.com
State New, archived
Series AMD Nested Virt Preparation

Commit Message

George Dunlap June 26, 2024, 1:38 p.m. UTC
Signed-off-by: George Dunlap <george.dunlap@cloud.com>
---
 xen/arch/x86/hvm/svm/nestedsvm.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

Patch

diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index 35a2cbfd7d..dca06f2a6c 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -1349,8 +1349,21 @@  nestedsvm_check_intercepts(struct vcpu *v, struct cpu_user_regs *regs,
         return NESTEDHVM_VMEXIT_INJECT;
     case VMEXIT_VMMCALL:
         /* Always let the guest handle VMMCALL/VMCALL */
+        /*
+         * FIXME: This is wrong; if the L1 hasn't set the VMMCALL
+         * intercept and the L2 executes a VMMCALL, it should result
+         * in a #UD, not a VMMCALL VMEXIT being injected into the L1.
+         */
         return NESTEDHVM_VMEXIT_INJECT;
     default:
+        /*
+         * FIXME: It's not true that any VMEXIT not intercepted by L1
+         * can safely be handled by L0; the VMMCALL case above is one
+         * such example, but there may be more.  We either need to
+         * combine this switch statement into the one in
+         * nsvm_vmcb_guest_intercepts_exitcode(), or explicitly list
+         * known-safe exits here.
+         */
         break;
     }
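
For illustration, one possible shape for the VMMCALL fix flagged in the first
FIXME: forward the exit to the L1 only when it actually has the intercept set
in its (virtual) VMCB, and otherwise reflect the architectural #UD into the
L2.  This is only an untested sketch.  It assumes the result of
nsvm_vmcb_guest_intercepts_exitcode() can simply be consulted at this point,
that hvm_inject_hw_exception() queues the fault for the still-loaded L2
context, and that the caller treats NESTEDHVM_VMEXIT_DONE as "handled,
nothing further to do"; it also ignores the case of an L1 that intercepts
#UD itself.

    case VMEXIT_VMMCALL:
        /* Forward the exit only if the L1 has the VMMCALL intercept set. */
        if ( nsvm_vmcb_guest_intercepts_exitcode(v, regs, exitcode) )
            return NESTEDHVM_VMEXIT_INJECT;

        /*
         * Otherwise emulate what hardware would do for the L2: VMMCALL
         * with the intercept clear raises #UD.
         */
        hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
        return NESTEDHVM_VMEXIT_DONE; /* Assumed to mean "handled". */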
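
Likewise, a rough illustration of the "explicitly list known-safe exits"
option mentioned in the second FIXME.  The exits named below are placeholders
chosen only to show the structure, not an audited list, and the error path
(a message plus NESTEDHVM_VMEXIT_FATALERROR) is just one possible policy.

    default:
        /*
         * Only fall through for exits explicitly known to be safe for
         * L0 to handle on the L1's behalf.  Placeholder entries only;
         * the real list would need auditing.
         */
        switch ( exitcode )
        {
        case VMEXIT_PAUSE:
        case VMEXIT_IOIO:
            break;

        default:
            gprintk(XENLOG_ERR, "Unhandled nested vmexit %#"PRIx64"\n",
                    exitcode);
            return NESTEDHVM_VMEXIT_FATALERROR;
        }
        break;
    }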