@@ -2010,6 +2010,8 @@ int xc_monitor_mov_to_msr(xc_interface *xch, domid_t domain_id, uint32_t msr,
int xc_monitor_singlestep(xc_interface *xch, domid_t domain_id, bool enable);
int xc_monitor_software_breakpoint(xc_interface *xch, domid_t domain_id,
bool enable);
+int xc_monitor_descriptor_access(xc_interface *xch, domid_t domain_id,
+ bool enable);
int xc_monitor_guest_request(xc_interface *xch, domid_t domain_id,
bool enable, bool sync);
int xc_monitor_debug_exceptions(xc_interface *xch, domid_t domain_id,
@@ -129,6 +129,20 @@ int xc_monitor_singlestep(xc_interface *xch, domid_t domain_id,
return do_domctl(xch, &domctl);
}
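+/*
+ * Enable or disable monitoring of descriptor table register accesses
+ * (IDTR/GDTR/LDTR/TR) for a domain.  Accesses are delivered on the
+ * monitor ring as VM_EVENT_REASON_DESCRIPTOR_ACCESS events.
+ */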
+int xc_monitor_descriptor_access(xc_interface *xch, domid_t domain_id,
+ bool enable)
+{
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_monitor_op;
+ domctl.domain = domain_id;
+ domctl.u.monitor_op.op = enable ? XEN_DOMCTL_MONITOR_OP_ENABLE
+ : XEN_DOMCTL_MONITOR_OP_DISABLE;
+ domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_DESC_ACCESS;
+
+ return do_domctl(xch, &domctl);
+}
+
int xc_monitor_guest_request(xc_interface *xch, domid_t domain_id, bool enable,
bool sync)
{
@@ -337,7 +337,7 @@ void usage(char* progname)
{
fprintf(stderr, "Usage: %s [-m] <domain_id> write|exec", progname);
#if defined(__i386__) || defined(__x86_64__)
- fprintf(stderr, "|breakpoint|altp2m_write|altp2m_exec|debug|cpuid");
+ fprintf(stderr, "|breakpoint|altp2m_write|altp2m_exec|debug|cpuid|desc_access");
#elif defined(__arm__) || defined(__aarch64__)
fprintf(stderr, "|privcall");
#endif
@@ -368,6 +368,7 @@ int main(int argc, char *argv[])
int altp2m = 0;
int debug = 0;
int cpuid = 0;
+ int desc_access = 0;
uint16_t altp2m_view_id = 0;
char* progname = argv[0];
@@ -434,6 +435,10 @@ int main(int argc, char *argv[])
{
cpuid = 1;
}
+ else if ( !strcmp(argv[0], "desc_access") )
+ {
+ desc_access = 1;
+ }
#elif defined(__arm__) || defined(__aarch64__)
else if ( !strcmp(argv[0], "privcall") )
{
@@ -571,6 +576,16 @@ int main(int argc, char *argv[])
}
}
+ if ( desc_access )
+ {
+ rc = xc_monitor_descriptor_access(xch, domain_id, 1);
+ if ( rc < 0 )
+ {
+ ERROR("Error %d setting descriptor access listener with vm_event\n", rc);
+ goto exit;
+ }
+ }
+
if ( privcall )
{
rc = xc_monitor_privileged_call(xch, domain_id, 1);
@@ -595,6 +610,8 @@ int main(int argc, char *argv[])
rc = xc_monitor_debug_exceptions(xch, domain_id, 0, 0);
if ( cpuid )
rc = xc_monitor_cpuid(xch, domain_id, 0);
+ if ( desc_access )
+ rc = xc_monitor_descriptor_access(xch, domain_id, 0);
if ( privcall )
rc = xc_monitor_privileged_call(xch, domain_id, 0);
@@ -779,6 +796,16 @@ int main(int argc, char *argv[])
rsp.data = req.data;
rsp.data.regs.x86.rip += req.u.cpuid.insn_length;
break;
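+ /*
+ * A descriptor table register access was intercepted.  Replying with
+ * VM_EVENT_FLAG_EMULATE asks Xen to emulate the intercepted
+ * instruction on resume, since re-executing it unmodified would
+ * simply re-trigger the same intercept.
+ */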
+ case VM_EVENT_REASON_DESCRIPTOR_ACCESS:
+ printf("Descriptor access: rip=%016"PRIx64", vcpu %d: "\
+ "VMExit info=0x%"PRIx32", descriptor=%d, is write=%d\n",
+ req.data.regs.x86.rip,
+ req.vcpu_id,
+ req.u.desc_access.arch.vmx.instr_info,
+ req.u.desc_access.descriptor,
+ req.u.desc_access.is_write);
+ rsp.flags |= VM_EVENT_FLAG_EMULATE;
+ break;
default:
fprintf(stderr, "UNKNOWN REASON CODE %d\n", req.reason);
}
@@ -3589,6 +3589,41 @@ gp_fault:
return X86EMUL_EXCEPTION;
}
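+/*
+ * Common handler for descriptor table access exits.  With a monitor
+ * subscriber present the access is forwarded as a vm_event; otherwise
+ * (e.g. if the intercept fired while monitoring was being disabled)
+ * the instruction is emulated locally, as the VM exit itself does not
+ * perform the access.
+ */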
+int hvm_descriptor_access_intercept(uint64_t exit_info,
+ uint64_t vmx_exit_qualification,
+ unsigned int descriptor, bool is_write)
+{
+ struct vcpu *curr = current;
+ struct domain *currd = curr->domain;
+
+ if ( currd->arch.monitor.descriptor_access_enabled )
+ {
+ ASSERT(curr->arch.vm_event);
+ hvm_monitor_descriptor_access(exit_info, vmx_exit_qualification,
+ descriptor, is_write);
+ }
+ else
+ {
+ struct hvm_emulate_ctxt ctxt = {};
+
+ hvm_emulate_init_once(&ctxt, NULL, guest_cpu_user_regs());
+ switch ( hvm_emulate_one(&ctxt) )
+ {
+ case X86EMUL_UNHANDLEABLE:
+ domain_crash(currd);
+ return X86EMUL_UNHANDLEABLE;
+ case X86EMUL_EXCEPTION:
+ hvm_inject_event(&ctxt.ctxt.event);
+ /* fall through */
+ default:
+ hvm_emulate_writeback(&ctxt);
+ break;
+ }
+ }
+
+ return X86EMUL_OKAY;
+}
+
static bool is_cross_vendor(const struct x86_emulate_state *state,
const struct x86_emulate_ctxt *ctxt)
{
@@ -72,6 +72,30 @@ void hvm_monitor_msr(unsigned int msr, uint64_t value)
}
}
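+/*
+ * Build and deliver a descriptor-access vm_event.  Which arm of the
+ * arch union is filled in depends on whether we are running on VMX or
+ * SVM, matching struct vm_event_desc_access in the public interface.
+ */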
+void hvm_monitor_descriptor_access(uint64_t exit_info,
+ uint64_t vmx_exit_qualification,
+ uint8_t descriptor, bool is_write)
+{
+ struct vcpu *curr = current;
+ vm_event_request_t req = {
+ .reason = VM_EVENT_REASON_DESCRIPTOR_ACCESS,
+ .u.desc_access.descriptor = descriptor,
+ .u.desc_access.is_write = is_write,
+ };
+
+ if ( cpu_has_vmx )
+ {
+ req.u.desc_access.arch.vmx.instr_info = exit_info;
+ req.u.desc_access.arch.vmx.exit_qualification = vmx_exit_qualification;
+ }
+ else
+ {
+ req.u.desc_access.arch.svm.exitinfo = exit_info;
+ }
+
+ monitor_traps(curr, true, &req);
+}
+
static inline unsigned long gfn_of_rip(unsigned long rip)
{
struct vcpu *curr = current;
@@ -856,6 +856,23 @@ static void svm_set_rdtsc_exiting(struct vcpu *v, bool_t enable)
vmcb_set_general2_intercepts(vmcb, general2_intercepts);
}
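+/*
+ * The monitor interface exposes descriptor accesses as a single event
+ * type, so all eight intercepts (read and write of each of IDTR, GDTR,
+ * LDTR and TR) are toggled together.
+ */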
+static void svm_set_descriptor_access_exiting(struct vcpu *v, bool enable)
+{
+ struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
+ u32 mask = GENERAL1_INTERCEPT_IDTR_READ | GENERAL1_INTERCEPT_GDTR_READ
+ | GENERAL1_INTERCEPT_LDTR_READ | GENERAL1_INTERCEPT_TR_READ
+ | GENERAL1_INTERCEPT_IDTR_WRITE | GENERAL1_INTERCEPT_GDTR_WRITE
+ | GENERAL1_INTERCEPT_LDTR_WRITE | GENERAL1_INTERCEPT_TR_WRITE;
+
+ if ( enable )
+ general1_intercepts |= mask;
+ else
+ general1_intercepts &= ~mask;
+
+ vmcb_set_general1_intercepts(vmcb, general1_intercepts);
+}
+
static unsigned int svm_get_insn_bytes(struct vcpu *v, uint8_t *buf)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -2358,6 +2375,7 @@ static struct hvm_function_table __initdata svm_function_table = {
.msr_read_intercept = svm_msr_read_intercept,
.msr_write_intercept = svm_msr_write_intercept,
.set_rdtsc_exiting = svm_set_rdtsc_exiting,
+ .set_descriptor_access_exiting = svm_set_descriptor_access_exiting,
.get_insn_bytes = svm_get_insn_bytes,
.nhvm_vcpu_initialise = nsvm_vcpu_initialise,
@@ -2777,6 +2795,30 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
svm_vmexit_do_pause(regs);
break;
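+ /*
+ * SVM has no equivalent of the VMX exit qualification for these
+ * intercepts, so 0 is passed in its place; the raw EXITINTINFO is
+ * forwarded in the event's svm.exitinfo field, and subscribers are
+ * expected to request emulation of the access.
+ */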
+ case VMEXIT_IDTR_READ:
+ case VMEXIT_IDTR_WRITE:
+ hvm_descriptor_access_intercept(vmcb->exitintinfo.bytes, 0,
+ VM_EVENT_DESC_IDTR, exit_reason == VMEXIT_IDTR_WRITE);
+ break;
+
+ case VMEXIT_GDTR_READ:
+ case VMEXIT_GDTR_WRITE:
+ hvm_descriptor_access_intercept(vmcb->exitintinfo.bytes, 0,
+ VM_EVENT_DESC_GDTR, exit_reason == VMEXIT_GDTR_WRITE);
+ break;
+
+ case VMEXIT_LDTR_READ:
+ case VMEXIT_LDTR_WRITE:
+ hvm_descriptor_access_intercept(vmcb->exitintinfo.bytes, 0,
+ VM_EVENT_DESC_LDTR, exit_reason == VMEXIT_LDTR_WRITE);
+ break;
+
+ case VMEXIT_TR_READ:
+ case VMEXIT_TR_WRITE:
+ hvm_descriptor_access_intercept(vmcb->exitintinfo.bytes, 0,
+ VM_EVENT_DESC_TR, exit_reason == VMEXIT_TR_WRITE);
+ break;
+
default:
unexpected_exit_type:
gdprintk(XENLOG_ERR, "unexpected VMEXIT: exit reason = %#"PRIx64", "
@@ -1343,6 +1343,20 @@ static void vmx_set_rdtsc_exiting(struct vcpu *v, bool_t enable)
vmx_vmcs_exit(v);
}
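+/*
+ * A single secondary execution control covers exiting on all of
+ * LGDT/LIDT/LLDT/LTR and SGDT/SIDT/SLDT/STR.
+ */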
+static void vmx_set_descriptor_access_exiting(struct vcpu *v, bool enable)
+{
+ if ( enable )
+ v->arch.hvm_vmx.secondary_exec_control |=
+ SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING;
+ else
+ v->arch.hvm_vmx.secondary_exec_control &=
+ ~SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING;
+
+ vmx_vmcs_enter(v);
+ vmx_update_secondary_exec_control(v);
+ vmx_vmcs_exit(v);
+}
+
static void vmx_init_hypercall_page(struct domain *d, void *hypercall_page)
{
char *p;
@@ -2230,6 +2244,7 @@ static struct hvm_function_table __initdata vmx_function_table = {
.handle_cd = vmx_handle_cd,
.set_info_guest = vmx_set_info_guest,
.set_rdtsc_exiting = vmx_set_rdtsc_exiting,
+ .set_descriptor_access_exiting = vmx_set_descriptor_access_exiting,
.nhvm_vcpu_initialise = nvmx_vcpu_initialise,
.nhvm_vcpu_destroy = nvmx_vcpu_destroy,
.nhvm_vcpu_reset = nvmx_vcpu_reset,
@@ -3361,6 +3376,33 @@ static void vmx_handle_xrstors(void)
domain_crash(current->domain);
}
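+/*
+ * Decode the VM-exit instruction-information field to work out which
+ * descriptor table register was accessed and whether the instruction
+ * was a load (write) or a store (read), then defer to the common
+ * intercept handler.
+ */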
+static void vmx_handle_descriptor_access(uint32_t exit_reason)
+{
+ uint64_t instr_info;
+ uint64_t exit_qualification;
+ unsigned int desc;
+
+ __vmread(EXIT_QUALIFICATION, &exit_qualification);
+ __vmread(VMX_INSTRUCTION_INFO, &instr_info);
+
+ if ( exit_reason == EXIT_REASON_ACCESS_GDTR_OR_IDTR )
+ {
+ idt_or_gdt_instr_info_t info;
+
+ info.raw = instr_info;
+ desc = info.instr_identity ? VM_EVENT_DESC_IDTR : VM_EVENT_DESC_GDTR;
+ hvm_descriptor_access_intercept(info.raw, exit_qualification, desc,
+ info.instr_write);
+ }
+ else
+ {
+ ldt_or_tr_instr_info_t info;
+
+ info.raw = instr_info;
+ desc = info.instr_identity ? VM_EVENT_DESC_TR : VM_EVENT_DESC_LDTR;
+ hvm_descriptor_access_intercept(info.raw, exit_qualification, desc,
+ info.instr_write);
+ }
+}
+
static int vmx_handle_apic_write(void)
{
unsigned long exit_qualification;
@@ -3972,6 +4014,9 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
case EXIT_REASON_ACCESS_GDTR_OR_IDTR:
case EXIT_REASON_ACCESS_LDTR_OR_TR:
+ vmx_handle_descriptor_access(exit_reason);
+ break;
+
case EXIT_REASON_VMX_PREEMPTION_TIMER_EXPIRED:
case EXIT_REASON_INVPCID:
/* fall through */
@@ -211,6 +211,24 @@ int arch_monitor_domctl_event(struct domain *d,
break;
}
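+ /*
+ * Descriptor access exiting is a per-vCPU control; pause the domain
+ * so every vCPU is updated consistently with the domain-wide flag.
+ */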
+ case XEN_DOMCTL_MONITOR_EVENT_DESC_ACCESS:
+ {
+ bool old_status = ad->monitor.descriptor_access_enabled;
+ struct vcpu *v;
+
+ if ( unlikely(old_status == requested_status) )
+ return -EEXIST;
+
+ domain_pause(d);
+ ad->monitor.descriptor_access_enabled = requested_status;
+
+ for_each_vcpu ( d, v )
+ hvm_funcs.set_descriptor_access_exiting(v, requested_status);
+
+ domain_unpause(d);
+ break;
+ }
+
case XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT:
{
bool_t old_status = ad->monitor.software_breakpoint_enabled;
@@ -229,6 +229,12 @@ void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp)
}
break;
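+ /*
+ * As with mem_access responses, the subscriber may request emulation
+ * of the intercepted instruction, optionally supplying the data to
+ * be read.
+ */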
+ case VM_EVENT_REASON_DESCRIPTOR_ACCESS:
+ if ( rsp->flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA )
+ v->arch.vm_event->emul.read = rsp->data.emul.read;
+ v->arch.vm_event->emulate_flags = rsp->flags;
+ break;
+
default:
break;
};
@@ -404,6 +404,7 @@ struct arch_domain
unsigned int debug_exception_enabled : 1;
unsigned int debug_exception_sync : 1;
unsigned int cpuid_enabled : 1;
+ unsigned int descriptor_access_enabled : 1;
struct monitor_msr_bitmap *msr_bitmap;
} monitor;
@@ -173,6 +173,7 @@ struct hvm_function_table {
void (*handle_cd)(struct vcpu *v, unsigned long value);
void (*set_info_guest)(struct vcpu *v);
void (*set_rdtsc_exiting)(struct vcpu *v, bool_t);
+ void (*set_descriptor_access_exiting)(struct vcpu *v, bool);
/* Nested HVM */
int (*nhvm_vcpu_initialise)(struct vcpu *v);
@@ -38,6 +38,9 @@ bool_t hvm_monitor_cr(unsigned int index, unsigned long value,
#define hvm_monitor_crX(cr, new, old) \
hvm_monitor_cr(VM_EVENT_X86_##cr, new, old)
void hvm_monitor_msr(unsigned int msr, uint64_t value);
+void hvm_monitor_descriptor_access(uint64_t exit_info,
+ uint64_t vmx_exit_qualification,
+ uint8_t descriptor, bool is_write);
int hvm_monitor_debug(unsigned long rip, enum hvm_monitor_debug_type type,
unsigned long trap_type, unsigned long insn_length);
int hvm_monitor_cpuid(unsigned long insn_length, unsigned int leaf,
@@ -128,6 +128,9 @@ int hvm_set_efer(uint64_t value);
int hvm_set_cr0(unsigned long value, bool_t may_defer);
int hvm_set_cr3(unsigned long value, bool_t may_defer);
int hvm_set_cr4(unsigned long value, bool_t may_defer);
+int hvm_descriptor_access_intercept(uint64_t exit_info,
+ uint64_t vmx_exit_qualification,
+ unsigned int descriptor, bool is_write);
int hvm_mov_to_cr(unsigned int cr, unsigned int gpr);
int hvm_mov_from_cr(unsigned int cr, unsigned int gpr);
void hvm_ud_intercept(struct cpu_user_regs *);
@@ -628,4 +628,48 @@ typedef struct {
u16 eptp_index;
} ve_info_t;
+/* VM-Exit instruction info for LIDT, LGDT, SIDT, SGDT */
+typedef union idt_or_gdt_instr_info {
+ unsigned long raw;
+ struct {
+ unsigned long scaling :2, /* bits 1:0 - Scaling */
+ :5, /* bits 6:2 - Undefined */
+ addr_size :3, /* bits 9:7 - Address size */
+ :1, /* bit 10 - Cleared to 0 */
+ operand_size :1, /* bit 11 - Operand size */
+ :3, /* bits 14:12 - Undefined */
+ segment_reg :3, /* bits 17:15 - Segment register */
+ index_reg :4, /* bits 21:18 - Index register */
+ index_reg_invalid :1, /* bit 22 - Index register invalid */
+ base_reg :4, /* bits 26:23 - Base register */
+ base_reg_invalid :1, /* bit 27 - Base register invalid */
+ instr_identity :1, /* bit 28 - 0:GDT, 1:IDT */
+ instr_write :1, /* bit 29 - 0:store, 1:load */
+ :2, /* bits 31:30 - Undefined */
+ :32; /* bits 32:63 - Undefined */
+ };
+} idt_or_gdt_instr_info_t;
+
+/* VM-Exit instruction info for LLDT, LTR, SLDT, STR */
+typedef union ldt_or_tr_instr_info {
+ unsigned long raw;
+ struct {
+ unsigned long scaling :2, /* bits 1:0 - Scaling */
+ :1, /* bit 2 - Undefined */
+ reg1 :4, /* bits 6:3 - Reg1 */
+ addr_size :3, /* bits 9:7 - Address size */
+ mem_reg :1, /* bit 10 - Mem/Reg */
+ :4, /* bits 14:11 - Undefined */
+ segment_reg :3, /* bits 17:15 - Segment register */
+ index_reg :4, /* bits 21:18 - Index register */
+ index_reg_invalid :1, /* bit 22 - Index register invalid */
+ base_reg :4, /* bits 26:23 - Base register */
+ base_reg_invalid :1, /* bit 27 - Base register invalid */
+ instr_identity :1, /* bit 28 - 0:LDT, 1:TR */
+ instr_write :1, /* bit 29 - 0:store, 1:load */
+ :2, /* bits 31:30 - Undefined */
+ :32; /* bits 32:63 - Undefined */
+ };
+} ldt_or_tr_instr_info_t;
+
#endif /* __ASM_X86_HVM_VMX_VMX_H__ */
@@ -77,7 +77,8 @@ static inline uint32_t arch_monitor_get_capabilities(struct domain *d)
(1U << XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST) |
(1U << XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION) |
(1U << XEN_DOMCTL_MONITOR_EVENT_CPUID) |
- (1U << XEN_DOMCTL_MONITOR_EVENT_INTERRUPT);
+ (1U << XEN_DOMCTL_MONITOR_EVENT_INTERRUPT) |
+ (1U << XEN_DOMCTL_MONITOR_EVENT_DESC_ACCESS);
/* Since we know this is on VMX, we can just call the hvm func */
if ( hvm_is_singlestep_supported() )
@@ -1081,6 +1081,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_psr_cmt_op_t);
#define XEN_DOMCTL_MONITOR_EVENT_CPUID 6
#define XEN_DOMCTL_MONITOR_EVENT_PRIVILEGED_CALL 7
#define XEN_DOMCTL_MONITOR_EVENT_INTERRUPT 8
+#define XEN_DOMCTL_MONITOR_EVENT_DESC_ACCESS 9
struct xen_domctl_monitor_op {
uint32_t op; /* XEN_DOMCTL_MONITOR_OP_* */
@@ -146,6 +146,8 @@
#define VM_EVENT_REASON_PRIVILEGED_CALL 11
/* An interrupt has been delivered. */
#define VM_EVENT_REASON_INTERRUPT 12
+/* A descriptor table register was accessed. */
+#define VM_EVENT_REASON_DESCRIPTOR_ACCESS 13
/* Supported values for the vm_event_write_ctrlreg index. */
#define VM_EVENT_X86_CR0 0
@@ -259,6 +261,28 @@ struct vm_event_mov_to_msr {
uint64_t value;
};
+#define VM_EVENT_DESC_IDTR 1
+#define VM_EVENT_DESC_GDTR 2
+#define VM_EVENT_DESC_LDTR 3
+#define VM_EVENT_DESC_TR 4
+
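+/*
+ * Both arms of the arch union below are 16 bytes wide (the VMX arm by
+ * way of _pad1, the SVM arm by way of _pad2), keeping the structure's
+ * layout identical on both vendors.
+ */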
+struct vm_event_desc_access {
+ union {
+ struct {
+ uint32_t instr_info; /* VMX: VMCS Instruction-Information */
+ uint32_t _pad1;
+ uint64_t exit_qualification; /* VMX: VMCS Exit Qualification */
+ } vmx;
+ struct {
+ uint64_t exitinfo; /* SVM: VMCB EXITINFO */
+ uint64_t _pad2;
+ } svm;
+ } arch;
+ uint8_t descriptor; /* VM_EVENT_DESC_* */
+ uint8_t is_write;
+ uint8_t _pad[6];
+};
+
struct vm_event_cpuid {
uint32_t insn_length;
uint32_t leaf;
@@ -313,6 +337,7 @@ typedef struct vm_event_st {
struct vm_event_mem_access mem_access;
struct vm_event_write_ctrlreg write_ctrlreg;
struct vm_event_mov_to_msr mov_to_msr;
+ struct vm_event_desc_access desc_access;
struct vm_event_singlestep singlestep;
struct vm_event_debug software_breakpoint;
struct vm_event_debug debug_exception;