@@ -793,6 +793,7 @@ void kvm_set_phys_mem(target_phys_addr_t start_addr,
}
}
+#endif
int kvm_ioctl(KVMState *s, int type, ...)
{
int ret;
@@ -809,7 +810,6 @@ int kvm_ioctl(KVMState *s, int type, ...)
return ret;
}
-#endif
int kvm_vm_ioctl(KVMState *s, int type, ...)
{
diff --git a/qemu-kvm-x86.c b/qemu-kvm-x86.c
--- a/qemu-kvm-x86.c
+++ b/qemu-kvm-x86.c
@@ -38,7 +38,7 @@ int kvm_set_tss_addr(kvm_context_t kvm, unsigned long addr)
#ifdef KVM_CAP_SET_TSS_ADDR
int r;
- r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
+ r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
if (r > 0) {
r = kvm_vm_ioctl(kvm_state, KVM_SET_TSS_ADDR, addr);
if (r == -1) {
@@ -56,7 +56,7 @@ static int kvm_init_tss(kvm_context_t kvm)
#ifdef KVM_CAP_SET_TSS_ADDR
int r;
- r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
+ r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
if (r > 0) {
/*
* this address is 3 pages before the bios, and the bios should present
@@ -80,7 +80,7 @@ static int kvm_create_pit(kvm_context_t kvm)
kvm->pit_in_kernel = 0;
if (!kvm->no_pit_creation) {
- r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_PIT);
+ r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_PIT);
if (r > 0) {
r = kvm_vm_ioctl(kvm_state, KVM_CREATE_PIT);
if (r >= 0)
@@ -384,7 +384,7 @@ struct kvm_msr_list *kvm_get_msr_list(kvm_context_t kvm)
int r, e;
sizer.nmsrs = 0;
- r = ioctl(kvm->fd, KVM_GET_MSR_INDEX_LIST, &sizer);
+ r = kvm_ioctl(kvm_state, KVM_GET_MSR_INDEX_LIST, &sizer);
if (r == -1 && errno != E2BIG)
return NULL;
/* Old kernel modules had a bug and could write beyond the provided
@@ -393,7 +393,7 @@ struct kvm_msr_list *kvm_get_msr_list(kvm_context_t kvm)
sizer.nmsrs * sizeof(*msrs->indices)));
msrs->nmsrs = sizer.nmsrs;
- r = ioctl(kvm->fd, KVM_GET_MSR_INDEX_LIST, msrs);
+ r = kvm_ioctl(kvm_state, KVM_GET_MSR_INDEX_LIST, msrs);
if (r == -1) {
e = errno;
free(msrs);
@@ -546,7 +546,7 @@ int kvm_set_shadow_pages(kvm_context_t kvm, unsigned int nrshadow_pages)
#ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
int r;
- r = ioctl(kvm->fd, KVM_CHECK_EXTENSION,
+ r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION,
KVM_CAP_MMU_SHADOW_CACHE_CONTROL);
if (r > 0) {
r = kvm_vm_ioctl(kvm_state, KVM_SET_NR_MMU_PAGES, nrshadow_pages);
@@ -565,7 +565,7 @@ int kvm_get_shadow_pages(kvm_context_t kvm, unsigned int *nrshadow_pages)
#ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
int r;
- r = ioctl(kvm->fd, KVM_CHECK_EXTENSION,
+ r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION,
KVM_CAP_MMU_SHADOW_CACHE_CONTROL);
if (r > 0) {
*nrshadow_pages = kvm_vm_ioctl(kvm_state, KVM_GET_NR_MMU_PAGES);
@@ -584,7 +584,7 @@ static int tpr_access_reporting(kvm_vcpu_context_t vcpu, int enabled)
.enabled = enabled,
};
- r = ioctl(vcpu->kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_VAPIC);
+ r = ioctl(kvm_state->fd, KVM_CHECK_EXTENSION, KVM_CAP_VAPIC);
if (r == -1 || r == 0)
return -ENOSYS;
r = ioctl(vcpu->fd, KVM_TPR_ACCESS_REPORTING, &tac);
@@ -618,7 +618,7 @@ static struct kvm_cpuid2 *try_get_cpuid(kvm_context_t kvm, int max)
size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
cpuid = qemu_malloc(size);
cpuid->nent = max;
- r = ioctl(kvm->fd, KVM_GET_SUPPORTED_CPUID, cpuid);
+ r = kvm_ioctl(kvm_state, KVM_GET_SUPPORTED_CPUID, cpuid);
if (r == -1)
r = -errno;
else if (r == 0 && cpuid->nent >= max)
diff --git a/qemu-kvm.c b/qemu-kvm.c
--- a/qemu-kvm.c
+++ b/qemu-kvm.c
@@ -227,7 +227,7 @@ static int get_free_slot(kvm_context_t kvm)
int tss_ext;
#if defined(KVM_CAP_SET_TSS_ADDR) && !defined(__s390__)
- tss_ext = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
+ tss_ext = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
#else
tss_ext = 0;
#endif
@@ -451,7 +451,7 @@ int kvm_init(int smp_cpus)
kvm_state = qemu_mallocz(sizeof(*kvm_state));
kvm_context = &kvm_state->kvm_context;
- kvm_context->fd = fd;
+ kvm_state->fd = fd;
kvm_state->vmfd = -1;
kvm_context->opaque = cpu_single_env;
kvm_context->dirty_pages_log_all = 0;
@@ -492,7 +492,7 @@ static void kvm_finalize(KVMState *s)
if (kvm->vm_fd != -1)
close(kvm->vm_fd);
*/
- close(s->kvm_context.fd);
+ close(s->fd);
free(s);
}
@@ -526,7 +526,7 @@ kvm_vcpu_context_t kvm_create_vcpu(CPUState *env, int id)
env->kvm_fd = r;
env->kvm_state = kvm_state;
- mmap_size = ioctl(kvm->fd, KVM_GET_VCPU_MMAP_SIZE, 0);
+ mmap_size = kvm_ioctl(kvm_state, KVM_GET_VCPU_MMAP_SIZE, 0);
if (mmap_size == -1) {
fprintf(stderr, "get vcpu mmap size: %m\n");
goto err_fd;
@@ -548,7 +548,7 @@ err:
static int kvm_set_boot_vcpu_id(kvm_context_t kvm, uint32_t id)
{
#ifdef KVM_CAP_SET_BOOT_CPU_ID
- int r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_SET_BOOT_CPU_ID);
+ int r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SET_BOOT_CPU_ID);
if (r > 0)
return kvm_vm_ioctl(kvm_state, KVM_SET_BOOT_CPU_ID, id);
return -ENOSYS;
@@ -559,7 +559,7 @@ static int kvm_set_boot_vcpu_id(kvm_context_t kvm, uint32_t id)
int kvm_create_vm(kvm_context_t kvm)
{
- int fd = kvm->fd;
+ int fd = kvm_state->fd;
#ifdef KVM_CAP_IRQ_ROUTING
kvm->irq_routes = qemu_mallocz(sizeof(*kvm->irq_routes));
@@ -580,7 +580,7 @@ static int kvm_create_default_phys_mem(kvm_context_t kvm,
void **vm_mem)
{
#ifdef KVM_CAP_USER_MEMORY
- int r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);
+ int r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);
if (r > 0)
return 0;
fprintf(stderr, "Hypervisor too old: KVM_CAP_USER_MEMORY extension not supported\n");
@@ -607,13 +607,13 @@ void kvm_create_irqchip(kvm_context_t kvm)
kvm->irqchip_in_kernel = 0;
#ifdef KVM_CAP_IRQCHIP
if (!kvm->no_irqchip_creation) {
- r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP);
+ r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP);
if (r > 0) { /* kernel irqchip supported */
r = kvm_vm_ioctl(kvm_state, KVM_CREATE_IRQCHIP);
if (r >= 0) {
kvm->irqchip_inject_ioctl = KVM_IRQ_LINE;
#if defined(KVM_CAP_IRQ_INJECT_STATUS) && defined(KVM_IRQ_LINE_STATUS)
- r = ioctl(kvm->fd, KVM_CHECK_EXTENSION,
+ r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION,
KVM_CAP_IRQ_INJECT_STATUS);
if (r > 0)
kvm->irqchip_inject_ioctl = KVM_IRQ_LINE_STATUS;
@@ -944,7 +944,7 @@ int kvm_get_mpstate(kvm_vcpu_context_t vcpu, struct kvm_mp_state *mp_state)
{
int r;
- r = ioctl(vcpu->kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE);
+ r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE);
if (r > 0)
return ioctl(vcpu->fd, KVM_GET_MP_STATE, mp_state);
return -ENOSYS;
@@ -954,7 +954,7 @@ int kvm_set_mpstate(kvm_vcpu_context_t vcpu, struct kvm_mp_state *mp_state)
{
int r;
- r = ioctl(vcpu->kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE);
+ r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE);
if (r > 0)
return ioctl(vcpu->fd, KVM_SET_MP_STATE, mp_state);
return -ENOSYS;
@@ -1190,7 +1190,7 @@ int kvm_has_sync_mmu(void)
{
int r = 0;
#ifdef KVM_CAP_SYNC_MMU
- r = ioctl(kvm_context->fd, KVM_CHECK_EXTENSION, KVM_CAP_SYNC_MMU);
+ r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SYNC_MMU);
#endif
return r;
}
@@ -1209,7 +1209,7 @@ int kvm_init_coalesced_mmio(kvm_context_t kvm)
int r = 0;
kvm->coalesced_mmio = 0;
#ifdef KVM_CAP_COALESCED_MMIO
- r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
+ r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
if (r > 0) {
kvm->coalesced_mmio = r;
return 0;
@@ -1296,7 +1296,7 @@ int kvm_assign_irq(kvm_context_t kvm,
{
int ret;
- ret = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_ASSIGN_DEV_IRQ);
+ ret = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_ASSIGN_DEV_IRQ);
if (ret > 0) {
ret = kvm_vm_ioctl(kvm_state, KVM_ASSIGN_DEV_IRQ, assigned_irq);
if (ret < 0)
@@ -1346,7 +1346,7 @@ int kvm_destroy_memory_region_works(kvm_context_t kvm)
int ret = 0;
#ifdef KVM_CAP_DESTROY_MEMORY_REGION_WORKS
- ret = ioctl(kvm->fd, KVM_CHECK_EXTENSION,
+ ret = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION,
KVM_CAP_DESTROY_MEMORY_REGION_WORKS);
if (ret <= 0)
ret = 0;
@@ -1362,7 +1362,7 @@ int kvm_reinject_control(kvm_context_t kvm, int pit_reinject)
control.pit_reinject = pit_reinject;
- r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_REINJECT_CONTROL);
+ r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_REINJECT_CONTROL);
if (r > 0) {
r = kvm_vm_ioctl(kvm_state, KVM_REINJECT_CONTROL, &control);
if (r == -1)
diff --git a/qemu-kvm.h b/qemu-kvm.h
--- a/qemu-kvm.h
+++ b/qemu-kvm.h
@@ -52,8 +52,6 @@ extern int kvm_abi;
*/
struct kvm_context {
- /// Filedescriptor to /dev/kvm
- int fd;
void *opaque;
/// is dirty pages logging enabled for all regions or not
int dirty_pages_log_all;
@@ -1179,6 +1177,7 @@ typedef struct KVMState
extern KVMState *kvm_state;
+int kvm_ioctl(KVMState *s, int type, ...);
int kvm_vm_ioctl(KVMState *s, int type, ...);
#endif

Start using kvm_ioctl's code.

For type safety, delete fd from kvm_context entirely, so the compiler can
help us catch any errors I might have made.

Signed-off-by: Glauber Costa <glommer@redhat.com>
---
 kvm-all.c      |    2 +-
 qemu-kvm-x86.c |   18 +++++++++---------
 qemu-kvm.c     |   32 ++++++++++++++++----------------
 qemu-kvm.h     |    3 +--
 4 files changed, 27 insertions(+), 28 deletions(-)
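
For reviewers unfamiliar with the helper this series switches to, here is a
minimal, illustrative sketch of the wrapper shape being adopted. It is not the
actual kvm-all.c implementation (only the signature and tail of kvm_ioctl are
visible in the first hunk, and the real error-return convention may differ);
the KVMState stand-in and the name kvm_ioctl_sketch below are placeholders,
shown only to illustrate how the /dev/kvm fd moves out of kvm_context and into
the state object:

/*
 * Illustrative sketch only -- not the kvm-all.c implementation.
 */
#include <stdarg.h>
#include <sys/ioctl.h>

/* Minimal stand-in for the KVMState used above; the real struct
 * (see qemu-kvm.h / kvm-all.c) has many more fields. */
typedef struct KVMState {
    int fd;     /* file descriptor for /dev/kvm */
    int vmfd;   /* file descriptor for the VM; unused in this sketch */
} KVMState;

/* Varargs ioctl wrapper in the shape of kvm_ioctl(KVMState *, int, ...). */
int kvm_ioctl_sketch(KVMState *s, int type, ...)
{
    void *arg;
    va_list ap;

    /* KVM ioctls take at most one extra argument; capability numbers from
     * the KVM_CHECK_EXTENSION probes above also travel through this slot. */
    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    /* Issue the ioctl against the /dev/kvm fd held in the state object,
     * instead of the kvm_context fd that this patch deletes. */
    return ioctl(s->fd, type, arg);
}

With this shape, the capability probes above become calls such as
kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_PIT), leaving no
per-context fd around to get out of sync with the real one.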