@@ -370,16 +370,23 @@ static int fuzz_cmpxchg(
return maybe_fail(ctxt, "cmpxchg", true);
}
-static int fuzz_invlpg(
- enum x86_segment seg,
- unsigned long offset,
+static int fuzz_tlb_op(
+ enum x86emul_tlb_op op,
+ unsigned long addr,
+ unsigned long aux,
struct x86_emulate_ctxt *ctxt)
{
- /* invlpg(), unlike all other hooks, may be called with x86_seg_none. */
- assert(is_x86_user_segment(seg) || seg == x86_seg_none);
- assert(ctxt->addr_size == 64 || !(offset >> 32));
+ switch ( op )
+ {
+ case x86emul_invlpg:
+ assert(is_x86_user_segment(aux));
+ /* fall through */
+ case x86emul_invlpga:
+ assert(ctxt->addr_size == 64 || !(addr >> 32));
+ break;
+ }
- return maybe_fail(ctxt, "invlpg", false);
+ return maybe_fail(ctxt, "TLB-management", false);
}
static int fuzz_cache_op(
@@ -624,7 +631,7 @@ static const struct x86_emulate_ops all_
SET(read_msr),
SET(write_msr),
SET(cache_op),
- SET(invlpg),
+ SET(tlb_op),
.get_fpu = emul_test_get_fpu,
.put_fpu = emul_test_put_fpu,
.cpuid = emul_test_cpuid,
@@ -733,12 +740,12 @@ enum {
HOOK_read_msr,
HOOK_write_msr,
HOOK_cache_op,
+ HOOK_tlb_op,
HOOK_cpuid,
HOOK_inject_hw_exception,
HOOK_inject_sw_interrupt,
HOOK_get_fpu,
HOOK_put_fpu,
- HOOK_invlpg,
HOOK_vmfunc,
CANONICALIZE_rip,
CANONICALIZE_rsp,
@@ -777,9 +784,9 @@ static void disable_hooks(struct x86_emu
MAYBE_DISABLE_HOOK(read_msr);
MAYBE_DISABLE_HOOK(write_msr);
MAYBE_DISABLE_HOOK(cache_op);
+ MAYBE_DISABLE_HOOK(tlb_op);
MAYBE_DISABLE_HOOK(cpuid);
MAYBE_DISABLE_HOOK(get_fpu);
- MAYBE_DISABLE_HOOK(invlpg);
}
/*
@@ -2339,36 +2339,53 @@ static void hvmemul_put_fpu(
}
}
-static int hvmemul_invlpg(
- enum x86_segment seg,
- unsigned long offset,
+static int hvmemul_tlb_op(
+ enum x86emul_tlb_op op,
+ unsigned long addr,
+ unsigned long aux,
struct x86_emulate_ctxt *ctxt)
{
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
- unsigned long addr, reps = 1;
- int rc;
-
- rc = hvmemul_virtual_to_linear(
- seg, offset, 1, &reps, hvm_access_none, hvmemul_ctxt, &addr);
+ unsigned long reps = 1;
+ int rc = X86EMUL_OKAY;
- if ( rc == X86EMUL_EXCEPTION )
+ switch ( op )
{
- /*
- * `invlpg` takes segment bases into account, but is not subject to
- * faults from segment type/limit checks, and is specified as a NOP
- * when issued on non-canonical addresses.
- *
- * hvmemul_virtual_to_linear() raises exceptions for type/limit
- * violations, so squash them.
- */
- x86_emul_reset_event(ctxt);
- rc = X86EMUL_OKAY;
+ case x86emul_invlpg:
+ rc = hvmemul_virtual_to_linear(aux, addr, 1, &reps, hvm_access_none,
+ hvmemul_ctxt, &addr);
+
+ if ( rc == X86EMUL_EXCEPTION )
+ {
+ /*
+ * `invlpg` takes segment bases into account, but is not subject
+ * to faults from segment type/limit checks, and is specified as
+ * a NOP when issued on non-canonical addresses.
+ *
+ * hvmemul_virtual_to_linear() raises exceptions for type/limit
+ * violations, so squash them.
+ */
+ x86_emul_reset_event(ctxt);
+ rc = X86EMUL_OKAY;
+ }
+
+ if ( rc == X86EMUL_OKAY )
+ paging_invlpg(current, addr);
+ break;
+
+ case x86emul_invlpga:
+ /* TODO: Support ASIDs. */
+ if ( !aux )
+ paging_invlpg(current, addr);
+ else
+ {
+ x86_emul_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC, ctxt);
+ rc = X86EMUL_EXCEPTION;
+ }
+ break;
}
- if ( rc == X86EMUL_OKAY )
- paging_invlpg(current, addr);
-
return rc;
}
@@ -2408,10 +2425,10 @@ static const struct x86_emulate_ops hvm_
.read_msr = hvmemul_read_msr,
.write_msr = hvmemul_write_msr,
.cache_op = hvmemul_cache_op,
+ .tlb_op = hvmemul_tlb_op,
.cpuid = x86emul_cpuid,
.get_fpu = hvmemul_get_fpu,
.put_fpu = hvmemul_put_fpu,
- .invlpg = hvmemul_invlpg,
.vmfunc = hvmemul_vmfunc,
};
@@ -2435,10 +2452,10 @@ static const struct x86_emulate_ops hvm_
.read_msr = hvmemul_read_msr,
.write_msr = hvmemul_write_msr_discard,
.cache_op = hvmemul_cache_op_discard,
+ .tlb_op = hvmemul_tlb_op,
.cpuid = x86emul_cpuid,
.get_fpu = hvmemul_get_fpu,
.put_fpu = hvmemul_put_fpu,
- .invlpg = hvmemul_invlpg,
.vmfunc = hvmemul_vmfunc,
};
@@ -5590,10 +5590,9 @@ x86_emulate(
generate_exception_if(!(msr_val & EFER_SVME) ||
!in_protmode(ctxt, ops), EXC_UD);
generate_exception_if(!mode_ring0(), EXC_GP, 0);
- generate_exception_if(_regs.ecx, EXC_UD); /* TODO: Support ASIDs. */
- fail_if(ops->invlpg == NULL);
- if ( (rc = ops->invlpg(x86_seg_none, truncate_ea(_regs.r(ax)),
- ctxt)) )
+ fail_if(!ops->tlb_op);
+ if ( (rc = ops->tlb_op(x86emul_invlpga, truncate_ea(_regs.r(ax)),
+ _regs.ecx, ctxt)) != X86EMUL_OKAY )
goto done;
break;
@@ -5747,8 +5746,9 @@ x86_emulate(
case GRP7_MEM(7): /* invlpg */
ASSERT(ea.type == OP_MEM);
generate_exception_if(!mode_ring0(), EXC_GP, 0);
- fail_if(ops->invlpg == NULL);
- if ( (rc = ops->invlpg(ea.mem.seg, ea.mem.off, ctxt)) )
+ fail_if(!ops->tlb_op);
+ if ( (rc = ops->tlb_op(x86emul_invlpg, ea.mem.off, ea.mem.seg,
+ ctxt)) != X86EMUL_OKAY )
goto done;
break;
@@ -185,6 +185,11 @@ enum x86emul_cache_op {
x86emul_wbnoinvd,
};
+enum x86emul_tlb_op {
+ x86emul_invlpg,
+ x86emul_invlpga,
+};
+
struct x86_emulate_state;
/*
@@ -472,6 +477,19 @@ struct x86_emulate_ops
unsigned long offset,
struct x86_emulate_ctxt *ctxt);
+ /*
+ * tlb_op: Invalidate paging structures which map addressed byte.
+ *
+ * @addr and @aux have @op-specific meaning:
+ * - INVLPG: @aux:@addr represent seg:offset
+ * - INVLPGA: @addr is the linear address, @aux the ASID
+ */
+ int (*tlb_op)(
+ enum x86emul_tlb_op op,
+ unsigned long addr,
+ unsigned long aux,
+ struct x86_emulate_ctxt *ctxt);
+
/* cpuid: Emulate CPUID via given set of EAX-EDX inputs/outputs. */
int (*cpuid)(
uint32_t leaf,
@@ -499,12 +517,6 @@ struct x86_emulate_ops
enum x86_emulate_fpu_type backout,
const struct x86_emul_fpu_aux *aux);
- /* invlpg: Invalidate paging structures which map addressed byte. */
- int (*invlpg)(
- enum x86_segment seg,
- unsigned long offset,
- struct x86_emulate_ctxt *ctxt);
-
/* vmfunc: Emulate VMFUNC via given set of EAX ECX inputs */
int (*vmfunc)(
struct x86_emulate_ctxt *ctxt);
The hook is already in use for INVLPGA as well. Rename the hook and add
parameters. For the moment INVLPGA with a non-zero ASID remains
unsupported, but the TODO item gets pushed into the actual hook handler.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: New.