@@ -230,6 +230,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops);
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
- u16 tss_selector, int reason);
+ u16 tss_selector, int reason,
+ bool has_error_code, u32 error_code);
#endif /* _ASM_X86_KVM_X86_EMULATE_H */
@@ -596,7 +596,8 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
-int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
+int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
+ bool has_error_code, u32 error_code);
void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
@@ -242,6 +242,7 @@ struct __attribute__ ((__packed__)) vmcb {
#define SVM_EXITINFOSHIFT_TS_REASON_IRET 36
#define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
+#define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44
#define SVM_EXIT_READ_CR0 0x000
#define SVM_EXIT_READ_CR3 0x003
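The new bit extends the task-switch encoding of exit_info_2 alongside the IRET/JMP reason shifts above: bit 44 says whether a pending exception error code accompanies the switch, and (per the svm.c hunk further down) the code itself sits in the low 32 bits. A hedged decoding sketch; the helper names are hypothetical and not part of the patch:

/* Illustrative only: task-switch fields of EXITINFO2 as used by this
 * patch -- reason in bits 36/38, error-code-valid in bit 44, error
 * code in the low 32 bits. */
static inline bool svm_ts_has_error_code(u64 exit_info_2)
{
        return exit_info_2 & (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE);
}

static inline u32 svm_ts_error_code(u64 exit_info_2)
{
        return (u32)exit_info_2;
}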
@@ -2344,8 +2344,9 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
}
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
- u16 tss_selector, int reason)
+ struct x86_emulate_ops *ops,
+ u16 tss_selector, int reason,
+ bool has_error_code, u32 error_code)
{
struct desc_struct curr_tss_desc, next_tss_desc;
int ret;
@@ -2418,12 +2419,22 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
ops->set_cached_descriptor(&next_tss_desc, VCPU_SREG_TR, ctxt->vcpu);
ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);
+ if (has_error_code) {
+ struct decode_cache *c = &ctxt->decode;
+
+ c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
+ c->lock_prefix = 0;
+ c->src.val = (unsigned long) error_code;
+ emulate_push(ctxt);
+ }
+
return ret;
}
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
- u16 tss_selector, int reason)
+ u16 tss_selector, int reason,
+ bool has_error_code, u32 error_code)
{
struct decode_cache *c = &ctxt->decode;
int rc;
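The width of the pushed error code is derived from the incoming TSS descriptor: bit 3 of the type field is set for the 32-bit TSS types (9 and 11) and clear for the 16-bit ones (1 and 3), which is what the (next_tss_desc.type & 8) ? 4 : 2 expression above selects between. A minimal sketch of that choice, with a hypothetical helper name:

/* Hypothetical helper mirroring the width choice above: a 32-bit TSS
 * (descriptor type 9/11, bit 3 set) gets a 4-byte error code push, a
 * 16-bit TSS (type 1/3) a 2-byte one. */
static inline unsigned int tss_error_code_bytes(const struct desc_struct *tss)
{
        return (tss->type & 8) ? 4 : 2;
}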
@@ -2431,12 +2442,15 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
memset(c, 0, sizeof(struct decode_cache));
c->eip = ctxt->eip;
memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
+ c->dst.type = OP_NONE;
- rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason);
+ rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
+ has_error_code, error_code);
if (rc == X86EMUL_CONTINUE) {
memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
kvm_rip_write(ctxt->vcpu, c->eip);
+ rc = writeback(ctxt, ops);
}
return rc;
@@ -2277,6 +2277,8 @@ static int task_switch_interception(struct vcpu_svm *svm)
svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
uint32_t idt_v =
svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
+ bool has_error_code = false;
+ u32 error_code = 0;
tss_selector = (u16)svm->vmcb->control.exit_info_1;
@@ -2297,6 +2299,12 @@ static int task_switch_interception(struct vcpu_svm *svm)
svm->vcpu.arch.nmi_injected = false;
break;
case SVM_EXITINTINFO_TYPE_EXEPT:
+ if (svm->vmcb->control.exit_info_2 &
+ (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
+ has_error_code = true;
+ error_code =
+ (u32)svm->vmcb->control.exit_info_2;
+ }
kvm_clear_exception_queue(&svm->vcpu);
break;
case SVM_EXITINTINFO_TYPE_INTR:
@@ -2313,7 +2321,8 @@ static int task_switch_interception(struct vcpu_svm *svm)
(int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
skip_emulated_instruction(&svm->vcpu);
- return kvm_task_switch(&svm->vcpu, tss_selector, reason);
+ return kvm_task_switch(&svm->vcpu, tss_selector, reason,
+ has_error_code, error_code);
}
static int cpuid_interception(struct vcpu_svm *svm)
@@ -3327,6 +3327,8 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long exit_qualification;
+ bool has_error_code = false;
+ u32 error_code = 0;
u16 tss_selector;
int reason, type, idt_v;
@@ -3349,6 +3351,13 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
kvm_clear_interrupt_queue(vcpu);
break;
case INTR_TYPE_HARD_EXCEPTION:
+ if (vmx->idt_vectoring_info &
+ VECTORING_INFO_DELIVER_CODE_MASK) {
+ has_error_code = true;
+ error_code =
+ vmcs_read32(IDT_VECTORING_ERROR_CODE);
+ }
+ /* fall through */
case INTR_TYPE_SOFT_EXCEPTION:
kvm_clear_exception_queue(vcpu);
break;
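On the VMX side the equivalent information comes from the IDT-vectoring info field: VECTORING_INFO_DELIVER_CODE_MASK flags that an error code was being delivered (bit 11 in the SDM layout, with the vector in bits 7:0 and the event type in bits 10:8), and the code itself lives in the separate IDT_VECTORING_ERROR_CODE VMCS field, as read above. A small hedged sketch of that split, with hypothetical helper names:

/* Illustrative only: the deliver-error-code test used above, plus the
 * separate VMCS field the code is read from when that bit is set. */
static inline bool idt_vectoring_has_error_code(u32 idt_vectoring_info)
{
        return idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK;
}

static inline u32 idt_vectoring_error_code(void)
{
        return vmcs_read32(IDT_VECTORING_ERROR_CODE);
}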
@@ -3363,7 +3372,8 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
type != INTR_TYPE_NMI_INTR))
skip_emulated_instruction(vcpu);
- if (!kvm_task_switch(vcpu, tss_selector, reason))
+ if (!kvm_task_switch(vcpu, tss_selector, reason, has_error_code,
+ error_code))
return 0;
/* clear all local breakpoint enable flags */
@@ -4717,7 +4717,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
return 0;
}
-int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
+int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
+ bool has_error_code, u32 error_code)
{
int cs_db, cs_l, ret;
cache_all_regs(vcpu);
@@ -4735,7 +4736,8 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
ret = emulator_task_switch(&vcpu->arch.emulate_ctxt, &emulate_ops,
- tss_selector, reason);
+ tss_selector, reason, has_error_code,
+ error_code);
if (ret == X86EMUL_CONTINUE)
kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
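Taken together, the error code now travels from the vendor exit handlers all the way to the stack push done by the emulator; writeback() is what commits the staged push to the new task's stack, hence the new c->dst.type = OP_NONE initialization so the commit is a no-op when nothing was pushed. A summary of the call chain introduced by the hunks above (comment form only, names as in the patch):

/*
 * task_switch_interception() [SVM] / handle_task_switch() [VMX]
 *   -> kvm_task_switch(vcpu, tss_selector, reason,
 *                      has_error_code, error_code)
 *     -> emulator_task_switch(ctxt, ops, ...)
 *       -> emulator_do_task_switch(ctxt, ops, ...)
 *            emulate_push(ctxt)     stages error_code in the decode cache
 *       -> writeback(ctxt, ops)     commits the staged push to guest memory
 */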