@@ -786,7 +786,7 @@ long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
goto out_free_irq_routing;
r = kvm_set_irq_routing(kvm, entries, routing.nr,
routing.flags);
- out_free_irq_routing:
+out_free_irq_routing:
vfree(entries);
break;
}
@@ -25,9 +25,9 @@ struct kvm_coalesced_mmio_dev {
int kvm_coalesced_mmio_init(struct kvm *kvm);
void kvm_coalesced_mmio_free(struct kvm *kvm);
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
- struct kvm_coalesced_mmio_zone *zone);
+ struct kvm_coalesced_mmio_zone *zone);
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
- struct kvm_coalesced_mmio_zone *zone);
+ struct kvm_coalesced_mmio_zone *zone);

#else
@@ -43,7 +43,7 @@
#include "irq.h"

#if 0
-#define ioapic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg)
+#define ioapic_debug(fmt, arg...) printk(KERN_WARNING fmt, ##arg)
#else
#define ioapic_debug(fmt, arg...)
#endif
@@ -326,7 +326,7 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
return -EOPNOTSUPP;

ioapic_debug("ioapic_mmio_write addr=%p len=%d val=%p\n",
- (void*)addr, len, val);
+ (void *)addr, len, val);
ASSERT(!(addr & 0xf)); /* check alignment */
if (len == 4 || len == 8)
@@ -50,7 +50,7 @@ struct kvm_ioapic {
};

#ifdef DEBUG
-#define ASSERT(x) \
+#define ASSERT(x) \
do { \
if (!(x)) { \
printk(KERN_EMERG "assertion failed %s: %d: %s\n", \
@@ -162,7 +162,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
irq_set[i++] = *e;
rcu_read_unlock();

- while(i--) {
+ while (i--) {
int r;
r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level);
if (r < 0)
@@ -64,7 +64,7 @@ MODULE_LICENSE("GPL");
/*
* Ordering of locks:
*
- * kvm->lock --> kvm->slots_lock --> kvm->irq_lock
+ * kvm->lock --> kvm->slots_lock --> kvm->irq_lock
*/

DEFINE_SPINLOCK(kvm_lock);
@@ -681,8 +681,8 @@ skip_lpage:
* memslot will be created.
*
* validation of sp->gfn happens in:
- * - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
- * - kvm_is_visible_gfn (mmu_check_roots)
+ * - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
+ * - kvm_is_visible_gfn (mmu_check_roots)
*/
kvm_arch_flush_shadow(kvm);
kfree(old_memslots);
@@ -918,7 +918,7 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
slot = gfn_to_memslot_unaliased(kvm, gfn);
if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
return bad_hva();
- return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
+ return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(gfn_to_hva);
@@ -970,7 +970,7 @@ EXPORT_SYMBOL_GPL(gfn_to_pfn);
static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
- return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
+ return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
@@ -1166,7 +1166,7 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
int offset = offset_in_page(gpa);
int ret;

- while ((seg = next_segment(len, offset)) != 0) {
+ while ((seg = next_segment(len, offset)) != 0) {
ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
if (ret < 0)
return ret;