@@ -703,6 +703,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
*max_size = 0;
switch (mode) {
case X86EMUL_MODE_PROT64:
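+ /*
+ * Strip any LAM metadata bits before the canonical check below; the
+ * hook honors the X86EMUL_F_* flags (e.g. X86EMUL_F_SKIPLAM) so that
+ * accesses LAM doesn't apply to are left untouched.
+ */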
+ ctxt->ops->untag_addr(ctxt, (u64 *)&la, flags);
*linear = la;
va_bits = ctxt_virt_addr_bits(ctxt);
if (!__is_canonical_address(la, va_bits))
@@ -774,7 +775,11 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
unsigned max_size;
struct segmented_address addr = { .seg = VCPU_SREG_CS,
.ea = dst };
- u32 flags = X86EMUL_F_FETCH;
+ /*
+ * LAM doesn't apply to addresses that specify the targets of jump and
+ * call instructions.
+ */
+ u32 flags = X86EMUL_F_FETCH | X86EMUL_F_SKIPLAM;
if (ctxt->op_bytes != sizeof(unsigned long))
addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
@@ -903,7 +908,8 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
int cur_size = ctxt->fetch.end - ctxt->fetch.data;
struct segmented_address addr = { .seg = VCPU_SREG_CS,
.ea = ctxt->eip + cur_size };
- u32 flags = X86EMUL_F_FETCH;
+ /* LAM doesn't apply to addresses used for instruction fetches */
+ u32 flags = X86EMUL_F_FETCH | X86EMUL_F_SKIPLAM;
/*
* We do not know exactly how many bytes will be needed, and
@@ -3448,8 +3454,11 @@ static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
int rc;
ulong linear;
+ unsigned max_size;
- rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
+ /* LAM doesn't apply to invlpg's address operand */
+ rc = __linearize(ctxt, ctxt->src.addr.mem, &max_size, 1,
+ X86EMUL_F_SKIPLAM, ctxt->mode, &linear);
if (rc == X86EMUL_CONTINUE)
ctxt->ops->invlpg(ctxt, linear);
/* Disable writeback. */
@@ -230,6 +230,8 @@ struct x86_emulate_ops {
int (*leave_smm)(struct x86_emulate_ctxt *ctxt);
void (*triple_fault)(struct x86_emulate_ctxt *ctxt);
int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr);
+
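+ /*
+ * Untag @addr according to the vCPU's LAM (Linear Address Masking)
+ * configuration; @flags (X86EMUL_F_*) mark accesses LAM doesn't apply to.
+ */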
+ void (*untag_addr)(struct x86_emulate_ctxt *ctxt, u64 *addr, u32 flags);
};
/* Type, address-of, and value of an instruction's operand. */
@@ -4981,6 +4981,7 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
else
*ret = off;
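+ /* Strip LAM metadata bits, if any, before the non-canonical check below */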
+ vmx_untag_addr(vcpu, (u64 *)ret, 0);
/* Long mode: #GP(0)/#SS(0) if the memory address is in a
* non-canonical form. This is the only check on the memory
* destination for long mode!
@@ -5798,6 +5799,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
vpid02 = nested_get_vpid02(vcpu);
switch (type) {
case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
+ /* LAM doesn't apply to the address in the invvpid descriptor */
if (!operand.vpid ||
is_noncanonical_address(operand.gla, vcpu))
return nested_vmx_fail(vcpu,
@@ -37,6 +37,7 @@ static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset,
if (!IS_ALIGNED(*gva, alignment)) {
fault = true;
} else if (likely(is_64_bit_mode(vcpu))) {
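+ /* Untag the ENCLS memory operand before the canonical address check */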
+ vmx_untag_addr(vcpu, (u64 *)gva, 0);
fault = is_noncanonical_address(*gva, vcpu);
} else {
*gva &= 0xffffffff;
@@ -8283,6 +8283,11 @@ static void emulator_vm_bugged(struct x86_emulate_ctxt *ctxt)
kvm_vm_bugged(kvm);
}
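+/* Emulator callback that forwards address untagging to the vendor hook */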
+static void emulator_untag_addr(struct x86_emulate_ctxt *ctxt, u64 *addr, u32 flags)
+{
+ static_call(kvm_x86_untag_addr)(emul_to_vcpu(ctxt), addr, flags);
+}
+
static const struct x86_emulate_ops emulate_ops = {
.vm_bugged = emulator_vm_bugged,
.read_gpr = emulator_read_gpr,
@@ -8328,6 +8333,7 @@ static const struct x86_emulate_ops emulate_ops = {
.leave_smm = emulator_leave_smm,
.triple_fault = emulator_triple_fault,
.set_xcr = emulator_set_xcr,
+ .untag_addr = emulator_untag_addr,
};
static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
@@ -13354,6 +13360,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
switch (type) {
case INVPCID_TYPE_INDIV_ADDR:
+ /* LAM doesn't apply to the address in the invpcid descriptor */
if ((!pcid_enabled && (operand.pcid != 0)) ||
is_noncanonical_address(operand.gla, vcpu)) {
kvm_inject_gp(vcpu, 0);