@@ -238,7 +238,7 @@ static int __init pvh_setup_vmx_realmode_helpers(struct domain *d)
if ( !pvh_steal_ram(d, HVM_VM86_TSS_SIZE, 128, GB(4), &gaddr) )
{
if ( hvm_copy_to_guest_phys(gaddr, NULL, HVM_VM86_TSS_SIZE, v) !=
- HVMCOPY_okay )
+ HVMTRANS_okay )
printk("Unable to zero VM86 TSS area\n");
d->arch.hvm_domain.params[HVM_PARAM_VM86_TSS_SIZED] =
VM86_TSS_UPDATED | ((uint64_t)HVM_VM86_TSS_SIZE << 32) | gaddr;
@@ -100,7 +100,7 @@ static int ioreq_server_read(const struct hvm_io_handler *io_handler,
uint32_t size,
uint64_t *data)
{
- if ( hvm_copy_from_guest_phys(data, addr, size) != HVMCOPY_okay )
+ if ( hvm_copy_from_guest_phys(data, addr, size) != HVMTRANS_okay )
return X86EMUL_UNHANDLEABLE;

return X86EMUL_OKAY;
@@ -892,18 +892,18 @@ static int __hvmemul_read(
switch ( rc )
{
- case HVMCOPY_okay:
+ case HVMTRANS_okay:
break;
- case HVMCOPY_bad_gva_to_gfn:
+ case HVMTRANS_bad_linear_to_gfn:
x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
return X86EMUL_EXCEPTION;
- case HVMCOPY_bad_gfn_to_mfn:
+ case HVMTRANS_bad_gfn_to_mfn:
if ( access_type == hvm_access_insn_fetch )
return X86EMUL_UNHANDLEABLE;

return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 0);
- case HVMCOPY_gfn_paged_out:
- case HVMCOPY_gfn_shared:
+ case HVMTRANS_gfn_paged_out:
+ case HVMTRANS_gfn_shared:
return X86EMUL_RETRY;
default:
return X86EMUL_UNHANDLEABLE;
}
@@ -1011,15 +1011,15 @@ static int hvmemul_write(
switch ( rc )
{
- case HVMCOPY_okay:
+ case HVMTRANS_okay:
break;
- case HVMCOPY_bad_gva_to_gfn:
+ case HVMTRANS_bad_linear_to_gfn:
x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
return X86EMUL_EXCEPTION;
- case HVMCOPY_bad_gfn_to_mfn:
+ case HVMTRANS_bad_gfn_to_mfn:
return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 0);
- case HVMCOPY_gfn_paged_out:
- case HVMCOPY_gfn_shared:
+ case HVMTRANS_gfn_paged_out:
+ case HVMTRANS_gfn_shared:
return X86EMUL_RETRY;
default:
return X86EMUL_UNHANDLEABLE;
}
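The switches above in __hvmemul_read() and hvmemul_write() (and the further ones below) all map translation results onto emulator return codes in the same way. Purely as an illustration of that mapping — hvmtrans_to_x86emul() is a hypothetical helper sketched here, not something this patch introduces — the common cases reduce to:

static int hvmtrans_to_x86emul(enum hvm_translation_result rc)
{
    switch ( rc )
    {
    case HVMTRANS_okay:
        return X86EMUL_OKAY;
    case HVMTRANS_bad_linear_to_gfn:
        /* Call sites first reflect a #PF to the guest from pfinfo. */
        return X86EMUL_EXCEPTION;
    case HVMTRANS_gfn_paged_out:
    case HVMTRANS_gfn_shared:
        /* Transient conditions; the emulator should retry. */
        return X86EMUL_RETRY;
    case HVMTRANS_bad_gfn_to_mfn:
        /* Not uniform in practice: the read/write paths divert this to MMIO. */
    default:
        return X86EMUL_UNHANDLEABLE;
    }
}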
@@ -1383,7 +1383,7 @@ static int hvmemul_rep_movs(
return rc;
}

- rc = HVMCOPY_okay;
+ rc = HVMTRANS_okay;
}
else
/*
@@ -1393,16 +1393,16 @@ static int hvmemul_rep_movs(
*/
rc = hvm_copy_from_guest_phys(buf, sgpa, bytes);

- if ( rc == HVMCOPY_okay )
+ if ( rc == HVMTRANS_okay )
rc = hvm_copy_to_guest_phys(dgpa, buf, bytes, current);

xfree(buf);

- if ( rc == HVMCOPY_gfn_paged_out )
+ if ( rc == HVMTRANS_gfn_paged_out )
return X86EMUL_RETRY;
- if ( rc == HVMCOPY_gfn_shared )
+ if ( rc == HVMTRANS_gfn_shared )
return X86EMUL_RETRY;
- if ( rc != HVMCOPY_okay )
+ if ( rc != HVMTRANS_okay )
{
gdprintk(XENLOG_WARNING, "Failed memory-to-memory REP MOVS: sgpa=%"
PRIpaddr" dgpa=%"PRIpaddr" reps=%lu bytes_per_rep=%u\n",
@@ -1512,10 +1512,10 @@ static int hvmemul_rep_stos(
switch ( rc )
{
- case HVMCOPY_gfn_paged_out:
- case HVMCOPY_gfn_shared:
+ case HVMTRANS_gfn_paged_out:
+ case HVMTRANS_gfn_shared:
return X86EMUL_RETRY;
- case HVMCOPY_okay:
+ case HVMTRANS_okay:
return X86EMUL_OKAY;
}
@@ -2171,7 +2171,7 @@ void hvm_emulate_init_per_insn(
&addr) &&
hvm_fetch_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
sizeof(hvmemul_ctxt->insn_buf),
- pfec, NULL) == HVMCOPY_okay) ?
+ pfec, NULL) == HVMTRANS_okay) ?
sizeof(hvmemul_ctxt->insn_buf) : 0;
}
else
@@ -2915,9 +2915,9 @@ void hvm_task_switch(
rc = hvm_copy_from_guest_linear(
&tss, prev_tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
- if ( rc == HVMCOPY_bad_gva_to_gfn )
+ if ( rc == HVMTRANS_bad_linear_to_gfn )
hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
- if ( rc != HVMCOPY_okay )
+ if ( rc != HVMTRANS_okay )
goto out;

eflags = regs->eflags;
@@ -2955,20 +2955,20 @@ void hvm_task_switch(
offsetof(typeof(tss), trace) -
offsetof(typeof(tss), eip),
PFEC_page_present, &pfinfo);
- if ( rc == HVMCOPY_bad_gva_to_gfn )
+ if ( rc == HVMTRANS_bad_linear_to_gfn )
hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
- if ( rc != HVMCOPY_okay )
+ if ( rc != HVMTRANS_okay )
goto out;

rc = hvm_copy_from_guest_linear(
&tss, tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
- if ( rc == HVMCOPY_bad_gva_to_gfn )
+ if ( rc == HVMTRANS_bad_linear_to_gfn )
hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
/*
- * Note: The HVMCOPY_gfn_shared case could be optimised, if the callee
+ * Note: The HVMTRANS_gfn_shared case could be optimised, if the callee
* functions knew we want RO access.
*/
- if ( rc != HVMCOPY_okay )
+ if ( rc != HVMTRANS_okay )
goto out;

new_cpl = tss.eflags & X86_EFLAGS_VM ? 3 : tss.cs & 3;
@@ -3010,12 +3010,12 @@ void hvm_task_switch(
rc = hvm_copy_to_guest_linear(tr.base + offsetof(typeof(tss), back_link),
&tss.back_link, sizeof(tss.back_link), 0,
&pfinfo);
- if ( rc == HVMCOPY_bad_gva_to_gfn )
+ if ( rc == HVMTRANS_bad_linear_to_gfn )
{
hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
exn_raised = 1;
}
- else if ( rc != HVMCOPY_okay )
+ else if ( rc != HVMTRANS_okay )
goto out;
}
@@ -3051,12 +3051,12 @@ void hvm_task_switch(
{
rc = hvm_copy_to_guest_linear(linear_addr, &errcode, opsz, 0,
&pfinfo);
- if ( rc == HVMCOPY_bad_gva_to_gfn )
+ if ( rc == HVMTRANS_bad_linear_to_gfn )
{
hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
exn_raised = 1;
}
- else if ( rc != HVMCOPY_okay )
+ else if ( rc != HVMTRANS_okay )
goto out;
}
}
@@ -3073,7 +3073,7 @@ void hvm_task_switch(
#define HVMCOPY_to_guest (1u<<0)
#define HVMCOPY_phys (0u<<2)
#define HVMCOPY_linear (1u<<2)
-static enum hvm_copy_result __hvm_copy(
+static enum hvm_translation_result __hvm_copy(
void *buf, paddr_t addr, int size, struct vcpu *v, unsigned int flags,
uint32_t pfec, pagefault_info_t *pfinfo)
{
@@ -3098,7 +3098,7 @@ static enum hvm_copy_result __hvm_copy(
* Hence we bail immediately if called from atomic context.
*/
if ( in_atomic() )
- return HVMCOPY_unhandleable;
+ return HVMTRANS_unhandleable;
#endif

while ( todo > 0 )
@@ -3113,15 +3113,15 @@ static enum hvm_copy_result __hvm_copy(
if ( gfn == gfn_x(INVALID_GFN) )
{
if ( pfec & PFEC_page_paged )
- return HVMCOPY_gfn_paged_out;
+ return HVMTRANS_gfn_paged_out;
if ( pfec & PFEC_page_shared )
- return HVMCOPY_gfn_shared;
+ return HVMTRANS_gfn_shared;
if ( pfinfo )
{
pfinfo->linear = addr;
pfinfo->ec = pfec & ~PFEC_implicit;
}
- return HVMCOPY_bad_gva_to_gfn;
+ return HVMTRANS_bad_linear_to_gfn;
}
gpa |= (paddr_t)gfn << PAGE_SHIFT;
}
@@ -3139,28 +3139,28 @@ static enum hvm_copy_result __hvm_copy(
if ( v == current
&& !nestedhvm_vcpu_in_guestmode(v)
&& hvm_mmio_internal(gpa) )
- return HVMCOPY_bad_gfn_to_mfn;
+ return HVMTRANS_bad_gfn_to_mfn;

page = get_page_from_gfn(v->domain, gfn, &p2mt, P2M_UNSHARE);
if ( !page )
- return HVMCOPY_bad_gfn_to_mfn;
+ return HVMTRANS_bad_gfn_to_mfn;

if ( p2m_is_paging(p2mt) )
{
put_page(page);
p2m_mem_paging_populate(v->domain, gfn);
- return HVMCOPY_gfn_paged_out;
+ return HVMTRANS_gfn_paged_out;
}
if ( p2m_is_shared(p2mt) )
{
put_page(page);
- return HVMCOPY_gfn_shared;
+ return HVMTRANS_gfn_shared;
}
if ( p2m_is_grant(p2mt) )
{
put_page(page);
- return HVMCOPY_unhandleable;
+ return HVMTRANS_unhandleable;
}

p = (char *)__map_domain_page(page) + (addr & ~PAGE_MASK);
@@ -3198,24 +3198,24 @@ static enum hvm_copy_result __hvm_copy(
put_page(page);
}

- return HVMCOPY_okay;
+ return HVMTRANS_okay;
}

-enum hvm_copy_result hvm_copy_to_guest_phys(
+enum hvm_translation_result hvm_copy_to_guest_phys(
paddr_t paddr, void *buf, int size, struct vcpu *v)
{
return __hvm_copy(buf, paddr, size, v,
HVMCOPY_to_guest | HVMCOPY_phys, 0, NULL);
}

-enum hvm_copy_result hvm_copy_from_guest_phys(
+enum hvm_translation_result hvm_copy_from_guest_phys(
void *buf, paddr_t paddr, int size)
{
return __hvm_copy(buf, paddr, size, current,
HVMCOPY_from_guest | HVMCOPY_phys, 0, NULL);
}

-enum hvm_copy_result hvm_copy_to_guest_linear(
+enum hvm_translation_result hvm_copy_to_guest_linear(
unsigned long addr, void *buf, int size, uint32_t pfec,
pagefault_info_t *pfinfo)
{
@@ -3224,7 +3224,7 @@ enum hvm_copy_result hvm_copy_to_guest_linear(
PFEC_page_present | PFEC_write_access | pfec, pfinfo);
}

-enum hvm_copy_result hvm_copy_from_guest_linear(
+enum hvm_translation_result hvm_copy_from_guest_linear(
void *buf, unsigned long addr, int size, uint32_t pfec,
pagefault_info_t *pfinfo)
{
@@ -3233,7 +3233,7 @@ enum hvm_copy_result hvm_copy_from_guest_linear(
PFEC_page_present | pfec, pfinfo);
}

-enum hvm_copy_result hvm_fetch_from_guest_linear(
+enum hvm_translation_result hvm_fetch_from_guest_linear(
void *buf, unsigned long addr, int size, uint32_t pfec,
pagefault_info_t *pfinfo)
{
@@ -3670,7 +3670,7 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
sizeof(sig), hvm_access_insn_fetch,
cs, &addr) &&
(hvm_fetch_from_guest_linear(sig, addr, sizeof(sig),
- walk, NULL) == HVMCOPY_okay) &&
+ walk, NULL) == HVMTRANS_okay) &&
(memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
{
regs->rip += sizeof(sig);
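(An illustrative aside, not part of the patch: the five bytes compared here are Xen's forced-emulation prefix, the two-byte ud2 opcode followed by the ASCII bytes "xen". A C hex escape ends at the first character that is not a hex digit, so the literal decodes as below; `fep_sig` is an arbitrary name for the sketch.)

/* Illustrative only: what "\xf\xbxen" denotes. */
const char fep_sig[5] = { 0x0f, 0x0b, 'x', 'e', 'n' };  /* ud2; .ascii "xen" */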
@@ -136,14 +136,14 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
switch ( hvm_copy_to_guest_phys(p->data + step * i,
&data, p->size, current) )
{
- case HVMCOPY_okay:
+ case HVMTRANS_okay:
break;
- case HVMCOPY_bad_gfn_to_mfn:
+ case HVMTRANS_bad_gfn_to_mfn:
/* Drop the write as real hardware would. */
continue;
- case HVMCOPY_bad_gva_to_gfn:
- case HVMCOPY_gfn_paged_out:
- case HVMCOPY_gfn_shared:
+ case HVMTRANS_bad_linear_to_gfn:
+ case HVMTRANS_gfn_paged_out:
+ case HVMTRANS_gfn_shared:
ASSERT_UNREACHABLE();
/* fall through */
default:
@@ -164,14 +164,14 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
switch ( hvm_copy_from_guest_phys(&data, p->data + step * i,
p->size) )
{
- case HVMCOPY_okay:
+ case HVMTRANS_okay:
break;
- case HVMCOPY_bad_gfn_to_mfn:
+ case HVMTRANS_bad_gfn_to_mfn:
data = ~0;
break;
- case HVMCOPY_bad_gva_to_gfn:
- case HVMCOPY_gfn_paged_out:
- case HVMCOPY_gfn_shared:
+ case HVMTRANS_bad_linear_to_gfn:
+ case HVMTRANS_gfn_paged_out:
+ case HVMTRANS_gfn_shared:
ASSERT_UNREACHABLE();
/* fall through */
default:
@@ -357,7 +357,7 @@ static int nsvm_vmrun_permissionmap(struct vcpu *v, bool_t viopm)
struct vmcb_struct *host_vmcb = arch_svm->vmcb;
unsigned long *ns_msrpm_ptr;
unsigned int i;
- enum hvm_copy_result ret;
+ enum hvm_translation_result ret;
unsigned long *ns_viomap;
bool_t ioport_80 = 1, ioport_ed = 1;
@@ -365,7 +365,8 @@ static int nsvm_vmrun_permissionmap(struct vcpu *v, bool_t viopm)
ret = hvm_copy_from_guest_phys(svm->ns_cached_msrpm,
ns_vmcb->_msrpm_base_pa, MSRPM_SIZE);
- if (ret != HVMCOPY_okay) {
+ if ( ret != HVMTRANS_okay )
+ {
gdprintk(XENLOG_ERR, "hvm_copy_from_guest_phys msrpm %u\n", ret);
return 1;
}
@@ -1266,7 +1266,7 @@ static void svm_emul_swint_injection(struct x86_event *event)
PFEC_implicit, &pfinfo);
if ( rc )
{
- if ( rc == HVMCOPY_bad_gva_to_gfn )
+ if ( rc == HVMTRANS_bad_linear_to_gfn )
{
fault = TRAP_page_fault;
ec = pfinfo.ec;
@@ -914,7 +914,7 @@ int viridian_hypercall(struct cpu_user_regs *regs)
/* Get input parameters. */
if ( hvm_copy_from_guest_phys(&input_params, input_params_gpa,
- sizeof(input_params)) != HVMCOPY_okay )
+ sizeof(input_params)) != HVMTRANS_okay )
break;

/*
@@ -609,7 +609,7 @@ void msix_write_completion(struct vcpu *v)
if ( desc &&
hvm_copy_from_guest_phys(&data,
v->arch.hvm_vcpu.hvm_io.msix_snoop_gpa,
- sizeof(data)) == HVMCOPY_okay &&
+ sizeof(data)) == HVMTRANS_okay &&
!(data & PCI_MSIX_VECTOR_BITMASK) )
ctrl_address = snoop_addr;
}
@@ -40,7 +40,7 @@ static void realmode_deliver_exception(
last_byte = (vector * 4) + 3;
if ( idtr->limit < last_byte ||
hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4) !=
- HVMCOPY_okay )
+ HVMTRANS_okay )
{
/* Software interrupt? */
if ( insn_len != 0 )
@@ -481,9 +481,9 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
int rc = hvm_copy_from_guest_linear(poperandS, base, size,
0, &pfinfo);
- if ( rc == HVMCOPY_bad_gva_to_gfn )
+ if ( rc == HVMTRANS_bad_linear_to_gfn )
hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
- if ( rc != HVMCOPY_okay )
+ if ( rc != HVMTRANS_okay )
return X86EMUL_EXCEPTION;
}
decode->mem = base;
@@ -1468,7 +1468,7 @@ int nvmx_handle_vmxon(struct cpu_user_regs *regs)
}

rc = hvm_copy_from_guest_phys(&nvmcs_revid, gpa, sizeof(nvmcs_revid));
- if ( rc != HVMCOPY_okay ||
+ if ( rc != HVMTRANS_okay ||
(nvmcs_revid & ~VMX_BASIC_REVISION_MASK) ||
((nvmcs_revid ^ vmx_basic_msr) & VMX_BASIC_REVISION_MASK) )
{
@@ -1746,9 +1746,9 @@ int nvmx_handle_vmptrst(struct cpu_user_regs *regs)
gpa = nvcpu->nv_vvmcxaddr;

rc = hvm_copy_to_guest_linear(decode.mem, &gpa, decode.len, 0, &pfinfo);
- if ( rc == HVMCOPY_bad_gva_to_gfn )
+ if ( rc == HVMTRANS_bad_linear_to_gfn )
hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
- if ( rc != HVMCOPY_okay )
+ if ( rc != HVMTRANS_okay )
return X86EMUL_EXCEPTION;

vmsucceed(regs);
@@ -1835,9 +1835,9 @@ int nvmx_handle_vmread(struct cpu_user_regs *regs)
switch ( decode.type ) {
case VMX_INST_MEMREG_TYPE_MEMORY:
rc = hvm_copy_to_guest_linear(decode.mem, &value, decode.len, 0, &pfinfo);
- if ( rc == HVMCOPY_bad_gva_to_gfn )
+ if ( rc == HVMTRANS_bad_linear_to_gfn )
hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
- if ( rc != HVMCOPY_okay )
+ if ( rc != HVMTRANS_okay )
return X86EMUL_EXCEPTION;
break;
case VMX_INST_MEMREG_TYPE_REG:
@@ -196,16 +196,16 @@ hvm_read(enum x86_segment seg,
switch ( rc )
{
- case HVMCOPY_okay:
+ case HVMTRANS_okay:
return X86EMUL_OKAY;
- case HVMCOPY_bad_gva_to_gfn:
+ case HVMTRANS_bad_linear_to_gfn:
x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &sh_ctxt->ctxt);
return X86EMUL_EXCEPTION;
- case HVMCOPY_bad_gfn_to_mfn:
- case HVMCOPY_unhandleable:
+ case HVMTRANS_bad_gfn_to_mfn:
+ case HVMTRANS_unhandleable:
return X86EMUL_UNHANDLEABLE;
- case HVMCOPY_gfn_paged_out:
- case HVMCOPY_gfn_shared:
+ case HVMTRANS_gfn_paged_out:
+ case HVMTRANS_gfn_shared:
return X86EMUL_RETRY;
}
@@ -154,10 +154,10 @@ static elf_errorstatus elf_memcpy(struct vcpu *v, void *dst, void *src,
#ifdef CONFIG_X86
if ( is_hvm_vcpu(v) )
{
- enum hvm_copy_result rc;
+ enum hvm_translation_result rc;

rc = hvm_copy_to_guest_phys((paddr_t)dst, src, size, v);
- return rc != HVMCOPY_okay ? -1 : 0;
+ return rc != HVMTRANS_okay ? -1 : 0;
}
#endif
@@ -53,23 +53,23 @@ extern unsigned int opt_hvm_debug_level;
extern unsigned long hvm_io_bitmap[];

-enum hvm_copy_result {
- HVMCOPY_okay = 0,
- HVMCOPY_bad_gva_to_gfn,
- HVMCOPY_bad_gfn_to_mfn,
- HVMCOPY_unhandleable,
- HVMCOPY_gfn_paged_out,
- HVMCOPY_gfn_shared,
+enum hvm_translation_result {
+ HVMTRANS_okay,
+ HVMTRANS_bad_linear_to_gfn,
+ HVMTRANS_bad_gfn_to_mfn,
+ HVMTRANS_unhandleable,
+ HVMTRANS_gfn_paged_out,
+ HVMTRANS_gfn_shared,
};

/*
* Copy to/from a guest physical address.
- * Returns HVMCOPY_okay, else HVMCOPY_bad_gfn_to_mfn if the given physical
+ * Returns HVMTRANS_okay, else HVMTRANS_bad_gfn_to_mfn if the given physical
* address range does not map entirely onto ordinary machine memory.
*/
-enum hvm_copy_result hvm_copy_to_guest_phys(
+enum hvm_translation_result hvm_copy_to_guest_phys(
paddr_t paddr, void *buf, int size, struct vcpu *v);
-enum hvm_copy_result hvm_copy_from_guest_phys(
+enum hvm_translation_result hvm_copy_from_guest_phys(
void *buf, paddr_t paddr, int size);

/*
@@ -79,13 +79,13 @@ enum hvm_copy_result hvm_copy_from_guest_phys(
* to set them.
*
* Returns:
- * HVMCOPY_okay: Copy was entirely successful.
- * HVMCOPY_bad_gfn_to_mfn: Some guest physical address did not map to
- * ordinary machine memory.
- * HVMCOPY_bad_gva_to_gfn: Some guest virtual address did not have a valid
- * mapping to a guest physical address. The
- * pagefault_info_t structure will be filled in if
- * provided.
+ * HVMTRANS_okay: Copy was entirely successful.
+ * HVMTRANS_bad_gfn_to_mfn: Some guest physical address did not map to
+ * ordinary machine memory.
+ * HVMTRANS_bad_linear_to_gfn: Some guest linear address did not have a
+ * valid mapping to a guest physical address.
+ * The pagefault_info_t structure will be filled
+ * in if provided.
*/
typedef struct pagefault_info
{
@@ -93,13 +93,13 @@ typedef struct pagefault_info
unsigned long linear;
int ec;
} pagefault_info_t;

-enum hvm_copy_result hvm_copy_to_guest_linear(
+enum hvm_translation_result hvm_copy_to_guest_linear(
unsigned long addr, void *buf, int size, uint32_t pfec,
pagefault_info_t *pfinfo);
-enum hvm_copy_result hvm_copy_from_guest_linear(
+enum hvm_translation_result hvm_copy_from_guest_linear(
void *buf, unsigned long addr, int size, uint32_t pfec,
pagefault_info_t *pfinfo);
-enum hvm_copy_result hvm_fetch_from_guest_linear(
+enum hvm_translation_result hvm_fetch_from_guest_linear(
void *buf, unsigned long addr, int size, uint32_t pfec,
pagefault_info_t *pfinfo);
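Finally, a usage sketch under the new names, mirroring the hvm_task_switch() and decode_vmx_inst() call sites above (the enclosing function and the `base` and `val` variables are assumptions for illustration, not part of the patch):

pagefault_info_t pfinfo;
uint32_t val;
enum hvm_translation_result rc;

rc = hvm_copy_from_guest_linear(&val, base, sizeof(val),
                                PFEC_page_present, &pfinfo);
if ( rc == HVMTRANS_bad_linear_to_gfn )
    /* The failed walk is reflected back to the guest as a #PF. */
    hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
if ( rc != HVMTRANS_okay )
    return X86EMUL_EXCEPTION;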