
[8/8,v2] Change irq_lock from mutex to spinlock.

Message ID 1249993895-11119-9-git-send-email-gleb@redhat.com (mailing list archive)
State New, archived

Commit Message

Gleb Natapov Aug. 11, 2009, 12:31 p.m. UTC
Change irq_lock from mutex to spinlock. We do not sleep while holding
it.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
---
 include/linux/kvm_host.h |    2 +-
 virt/kvm/irq_comm.c      |   28 ++++++++++++++--------------
 virt/kvm/kvm_main.c      |    2 +-
 3 files changed, 16 insertions(+), 16 deletions(-)

Comments

Avi Kivity Aug. 12, 2009, 8:29 a.m. UTC | #1
On 08/11/2009 03:31 PM, Gleb Natapov wrote:
> Change irq_lock from mutex to spinlock. We do not sleep while holding
> it.
>    

But why change?

The only motivation I can see is to allow injection from irqfd and 
interrupt contexts without requiring a tasklet/work.  But that needs 
spin_lock_irqsave(), not spin_lock().
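
For illustration, a minimal hypothetical sketch (not part of this patch; all demo_* names are made up) of the problem being pointed at: a lock that can also be taken from interrupt context must be taken with interrupts disabled in process context, otherwise the handler can deadlock against its own CPU.

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(demo_lock);		/* hypothetical lock */

/* Process-context path, e.g. an ioctl handler. */
static void demo_update(void)
{
	unsigned long flags;

	/*
	 * Plain spin_lock() would leave local interrupts enabled; if
	 * demo_irq_handler() then fired on this CPU while the lock is
	 * held, it would spin forever on a lock its own CPU owns.
	 * spin_lock_irqsave() closes that window.
	 */
	spin_lock_irqsave(&demo_lock, flags);
	/* ... modify state shared with the interrupt handler ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}

/* Interrupt-context path, e.g. injection driven from an IRQ handler. */
static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
	spin_lock(&demo_lock);	/* already in IRQ context, no irqsave needed */
	/* ... inject the interrupt ... */
	spin_unlock(&demo_lock);
	return IRQ_HANDLED;
}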
Gleb Natapov Aug. 12, 2009, 9:11 a.m. UTC | #2
On Wed, Aug 12, 2009 at 11:29:00AM +0300, Avi Kivity wrote:
> On 08/11/2009 03:31 PM, Gleb Natapov wrote:
>> Change irq_lock from mutex to spinlock. We do not sleep while holding
>> it.
>>    
>
> But why change?
>
Isn't it more lightweight? For the remaining use of the lock it doesn't
really matter, but if I see a mutex used somewhere I assume there are
users that sleep.

> The only motivation I can see is to allow injection from irqfd and  
> interrupt contexts without requiring a tasklet/work.  But that needs  
> spin_lock_irqsave(), not spin_lock().
>
After this series the lock is used only to protect modification of irq
table, add/delete of ack notifiers and irq source id allocator. None of
this affects injection from irqfd.
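
For reference, a sketch of the split this series ends up with (the demo_* names are hypothetical; kvm->irq_lock and kvm->irq_routing are the real fields): irq_lock only serializes updaters, while the injection path reads the routing table under RCU and never takes the lock at all.

#include <linux/kvm_host.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Updater side, modeled on kvm_set_irq_routing() below: only the pointer
 * swap is serialized by irq_lock (a spinlock per this patch; a mutex works
 * just as well for this use). */
static void demo_replace_routing(struct kvm *kvm,
				 struct kvm_irq_routing_table *new)
{
	struct kvm_irq_routing_table *old;

	spin_lock(&kvm->irq_lock);
	old = kvm->irq_routing;
	rcu_assign_pointer(kvm->irq_routing, new);
	spin_unlock(&kvm->irq_lock);
	synchronize_rcu();	/* no reader can still be using 'old' */
	kfree(old);
}

/* Reader side (the injection path): no irq_lock involved. */
static bool demo_route_present(struct kvm *kvm, unsigned gsi)
{
	struct kvm_irq_routing_table *table;
	bool present;

	rcu_read_lock();
	table = rcu_dereference(kvm->irq_routing);
	present = table != NULL;	/* real code walks 'table' for 'gsi' */
	rcu_read_unlock();
	return present;
}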

--
			Gleb.
Avi Kivity Aug. 12, 2009, 9:22 a.m. UTC | #3
On 08/12/2009 12:11 PM, Gleb Natapov wrote:
> On Wed, Aug 12, 2009 at 11:29:00AM +0300, Avi Kivity wrote:
>    
>> On 08/11/2009 03:31 PM, Gleb Natapov wrote:
>>      
>>> Change irq_lock from mutex to spinlock. We do not sleep while holding
>>> it.
>>>
>>>        
>> But why change?
>>
>>      
> Isn't it more lightweight? For the remaining use of the lock it doesn't
> really matter, but if I see a mutex used somewhere I assume there are
> users that sleep.
>    

Before the recent change, a mutex was more expensive if there was 
contention (waiter would schedule out).  Recently the mutex code was 
changed to spin while the holder was running.
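
The change referred to here is mutex owner spinning (CONFIG_MUTEX_SPIN_ON_OWNER): a contending task spins as long as the lock owner is still running on another CPU and only sleeps once the owner is scheduled out. A very simplified conceptual sketch, not the actual kernel code, with all demo_* names hypothetical:

#include <linux/types.h>
#include <asm/processor.h>	/* cpu_relax() */

struct demo_mutex;					/* hypothetical lock type */
bool demo_try_acquire(struct demo_mutex *m);		/* hypothetical helpers */
bool demo_owner_running(struct demo_mutex *m);
void demo_sleep_until_released(struct demo_mutex *m);

static void demo_adaptive_lock(struct demo_mutex *m)
{
	for (;;) {
		if (demo_try_acquire(m))
			return;		/* got the lock without sleeping */
		/*
		 * Optimistic spinning: while the owner is running on another
		 * CPU it is likely to release the lock soon, so spinning is
		 * cheaper than a schedule-out/schedule-in round trip.
		 */
		if (!demo_owner_running(m))
			break;		/* owner scheduled out; stop spinning */
		cpu_relax();
	}
	demo_sleep_until_released(m);	/* classic mutex slow path */
	/* ... then retry the acquire, as the real mutex code does ... */
}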

>> The only motivation I can see is to allow injection from irqfd and
>> interrupt contexts without requiring a tasklet/work.  But that needs
>> spin_lock_irqsave(), not spin_lock().
>>
>>      
> After this series the lock is used only to protect modification of irq
> table, add/delete of ack notifiers and irq source id allocator. None of
> this affects injection from irqfd.
>
>    

Then it can definitely be left as a mutex.
Gleb Natapov Aug. 12, 2009, 9:47 a.m. UTC | #4
On Wed, Aug 12, 2009 at 12:22:34PM +0300, Avi Kivity wrote:
> On 08/12/2009 12:11 PM, Gleb Natapov wrote:
>> On Wed, Aug 12, 2009 at 11:29:00AM +0300, Avi Kivity wrote:
>>    
>>> On 08/11/2009 03:31 PM, Gleb Natapov wrote:
>>>      
>>>> Change irq_lock from mutex to spinlock. We do not sleep while holding
>>>> it.
>>>>
>>>>        
>>> But why change?
>>>
>>>      
>> Isn't it more lightweight? For the remaining use of the lock it doesn't
>> really matter, but if I see a mutex used somewhere I assume there are
>> users that sleep.
>>    
>
> Before the recent change, a mutex was more expensive if there was  
> contention (waiter would schedule out).  Recently the mutex code was  
> changed to spin while the holder was running.
>
>>> The only motivation I can see is to allow injection from irqfd and
>>> interrupt contexts without requiring a tasklet/work.  But that needs
>>> spin_lock_irqsave(), not spin_lock().
>>>
>>>      
>> After this series the lock is used only to protect modification of irq
>> table, add/delete of ack notifiers and irq source id allocator. None of
>> this affects injection from irqfd.
>>
>>    
>
> Then it can definitely be left as a mutex.
>
OK.

--
			Gleb.

Patch

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 56b2a12..ccc054a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -176,7 +176,7 @@  struct kvm {
 	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
 #endif
 
-	struct mutex irq_lock;
+	spinlock_t irq_lock;
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
 	struct kvm_irq_routing_table *irq_routing;
 	struct hlist_head mask_notifier_list;
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 4b36917..15eab15 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -184,17 +184,17 @@  void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
 				   struct kvm_irq_ack_notifier *kian)
 {
-	mutex_lock(&kvm->irq_lock);
+	spin_lock(&kvm->irq_lock);
 	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
-	mutex_unlock(&kvm->irq_lock);
+	spin_unlock(&kvm->irq_lock);
 }
 
 void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
 				    struct kvm_irq_ack_notifier *kian)
 {
-	mutex_lock(&kvm->irq_lock);
+	spin_lock(&kvm->irq_lock);
 	hlist_del_init_rcu(&kian->link);
-	mutex_unlock(&kvm->irq_lock);
+	spin_unlock(&kvm->irq_lock);
 	synchronize_rcu();
 }
 
@@ -203,7 +203,7 @@  int kvm_request_irq_source_id(struct kvm *kvm)
 	unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
 	int irq_source_id;
 
-	mutex_lock(&kvm->irq_lock);
+	spin_lock(&kvm->irq_lock);
 	irq_source_id = find_first_zero_bit(bitmap,
 				sizeof(kvm->arch.irq_sources_bitmap));
 
@@ -214,7 +214,7 @@  int kvm_request_irq_source_id(struct kvm *kvm)
 
 	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
 	set_bit(irq_source_id, bitmap);
-	mutex_unlock(&kvm->irq_lock);
+	spin_unlock(&kvm->irq_lock);
 
 	return irq_source_id;
 }
@@ -225,7 +225,7 @@  void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
 
 	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
 
-	mutex_lock(&kvm->irq_lock);
+	spin_lock(&kvm->irq_lock);
 	if (irq_source_id < 0 ||
 	    irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
 		printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
@@ -234,24 +234,24 @@  void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
 	for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
 		clear_bit(irq_source_id, &kvm->arch.irq_states[i]);
 	clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
-	mutex_unlock(&kvm->irq_lock);
+	spin_unlock(&kvm->irq_lock);
 }
 
 void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
 				    struct kvm_irq_mask_notifier *kimn)
 {
-	mutex_lock(&kvm->irq_lock);
+	spin_lock(&kvm->irq_lock);
 	kimn->irq = irq;
 	hlist_add_head_rcu(&kimn->link, &kvm->mask_notifier_list);
-	mutex_unlock(&kvm->irq_lock);
+	spin_unlock(&kvm->irq_lock);
 }
 
 void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
 				      struct kvm_irq_mask_notifier *kimn)
 {
-	mutex_lock(&kvm->irq_lock);
+	spin_lock(&kvm->irq_lock);
 	hlist_del_rcu(&kimn->link);
-	mutex_unlock(&kvm->irq_lock);
+	spin_unlock(&kvm->irq_lock);
 	synchronize_rcu();
 }
 
@@ -364,10 +364,10 @@  int kvm_set_irq_routing(struct kvm *kvm,
 		++ue;
 	}
 
-	mutex_lock(&kvm->irq_lock);
+	spin_lock(&kvm->irq_lock);
 	old = kvm->irq_routing;
 	rcu_assign_pointer(kvm->irq_routing, new);
-	mutex_unlock(&kvm->irq_lock);
+	spin_unlock(&kvm->irq_lock);
 	synchronize_rcu();
 
 	new = old;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index ef96c04..8b60039 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -978,7 +978,7 @@  static struct kvm *kvm_create_vm(void)
 	kvm_io_bus_init(&kvm->pio_bus);
 	kvm_eventfd_init(kvm);
 	mutex_init(&kvm->lock);
-	mutex_init(&kvm->irq_lock);
+	spin_lock_init(&kvm->irq_lock);
 	kvm_io_bus_init(&kvm->mmio_bus);
 	init_rwsem(&kvm->slots_lock);
 	atomic_set(&kvm->users_count, 1);