
[v4,1/2] arm: read/write rank->vcpu atomically

Message ID 1486778723-25586-1-git-send-email-sstabellini@kernel.org
State New, archived

Commit Message

Stefano Stabellini Feb. 11, 2017, 2:05 a.m. UTC
Access rank->vcpu atomically so that vgic_get_target_vcpu no longer
needs to take the rank lock. This solves the following lock inversion
bug: the rank lock must be taken first, then the vgic lock. However,
gic_update_one_lr is called with the vgic lock held, and it calls
vgic_get_target_vcpu, which tries to obtain the rank lock.

Coverity-ID: 1381855
Coverity-ID: 1381853

Signed-off-by: Stefano Stabellini <sstabellini@kernel.org>
---
 xen/arch/arm/vgic-v2.c |  6 +++---
 xen/arch/arm/vgic-v3.c |  6 +++---
 xen/arch/arm/vgic.c    | 27 +++++----------------------
 3 files changed, 11 insertions(+), 28 deletions(-)
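
To make the inversion concrete, the following is a minimal, self-contained
sketch of the AB-BA pattern described above. It is generic pthreads code,
not Xen code: mmio_path and lr_path are illustrative stand-ins for the
register-emulation and gic_update_one_lr call paths, and the two mutexes
stand in for the rank and vgic locks.

    /* Build with: gcc -pthread deadlock.c */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t rank_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t vgic_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Correct order: rank lock first, then vgic lock. */
    static void *mmio_path(void *arg)
    {
        pthread_mutex_lock(&rank_lock);
        pthread_mutex_lock(&vgic_lock);
        pthread_mutex_unlock(&vgic_lock);
        pthread_mutex_unlock(&rank_lock);
        return NULL;
    }

    /* Inverted order, as in the old code: the vgic lock is already held
     * when the rank lock is requested. */
    static void *lr_path(void *arg)
    {
        pthread_mutex_lock(&vgic_lock);
        pthread_mutex_lock(&rank_lock); /* can deadlock against mmio_path() */
        pthread_mutex_unlock(&rank_lock);
        pthread_mutex_unlock(&vgic_lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;

        /* With unlucky timing, each thread ends up holding one lock and
         * waiting forever for the other. */
        pthread_create(&a, NULL, mmio_path, NULL);
        pthread_create(&b, NULL, lr_path, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        puts("no deadlock this time");
        return 0;
    }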

Comments

Julien Grall Feb. 16, 2017, 6:55 p.m. UTC | #1
Hi Stefano,

On 11/02/17 02:05, Stefano Stabellini wrote:
> Access rank->vcpu atomically so that vgic_get_target_vcpu no longer
> needs to take the rank lock. This solves the following lock inversion
> bug: the rank lock must be taken first, then the vgic lock. However,
> gic_update_one_lr is called with the vgic lock held, and it calls
> vgic_get_target_vcpu, which tries to obtain the rank lock.
>
> Coverity-ID: 1381855
> Coverity-ID: 1381853
>
> Signed-off-by: Stefano Stabellini <sstabellini@kernel.org>
> ---
>  xen/arch/arm/vgic-v2.c |  6 +++---
>  xen/arch/arm/vgic-v3.c |  6 +++---
>  xen/arch/arm/vgic.c    | 27 +++++----------------------
>  3 files changed, 11 insertions(+), 28 deletions(-)
>
> diff --git a/xen/arch/arm/vgic-v2.c b/xen/arch/arm/vgic-v2.c
> index 3dbcfe8..b30379e 100644
> --- a/xen/arch/arm/vgic-v2.c
> +++ b/xen/arch/arm/vgic-v2.c
> @@ -79,7 +79,7 @@ static uint32_t vgic_fetch_itargetsr(struct vgic_irq_rank *rank,
>      offset &= ~(NR_TARGETS_PER_ITARGETSR - 1);
>
>      for ( i = 0; i < NR_TARGETS_PER_ITARGETSR; i++, offset++ )
> -        reg |= (1 << rank->vcpu[offset]) << (i * NR_BITS_PER_TARGET);
> +        reg |= (1 << read_atomic(&rank->vcpu[offset])) << (i * NR_BITS_PER_TARGET);

I was about to suggest turning vcpu into an atomic_t to catch
potential misuse. But unfortunately atomic_t is an int.

So I would probably add a comment on top of the vcpu field in
vgic_irq_rank explaining that it should be read and written using
atomic operations.

With that:

Reviewed-by: Julien Grall <julien.grall@arm.com>

Cheers,
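
For context on the exchange above: Xen's read_atomic()/write_atomic() are
type-generic macros that perform single-copy-atomic accesses, which is why
they can be used on a uint8_t field where atomic_t (an int) cannot. A
minimal sketch of the byte-sized case, assuming a naturally aligned
pointer (the helper names here are illustrative, not Xen's):

    #include <stdint.h>

    /* A volatile access of a naturally aligned byte compiles to a single
     * load/store, so concurrent readers never observe a torn value. This
     * guarantees atomicity of the access itself, not any memory ordering. */
    static inline uint8_t read_u8_atomic(const uint8_t *p)
    {
        return *(const volatile uint8_t *)p;
    }

    static inline void write_u8_atomic(uint8_t *p, uint8_t val)
    {
        *(volatile uint8_t *)p = val;
    }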
Stefano Stabellini Feb. 16, 2017, 7:59 p.m. UTC | #2
On Thu, 16 Feb 2017, Julien Grall wrote:
> Hi Stefano,
> 
> On 11/02/17 02:05, Stefano Stabellini wrote:
> > Access rank->vcpu atomically so that vgic_get_target_vcpu no longer
> > needs to take the rank lock. This solves the following lock inversion
> > bug: the rank lock must be taken first, then the vgic lock. However,
> > gic_update_one_lr is called with the vgic lock held, and it calls
> > vgic_get_target_vcpu, which tries to obtain the rank lock.
> > 
> > Coverity-ID: 1381855
> > Coverity-ID: 1381853
> > 
> > Signed-off-by: Stefano Stabellini <sstabellini@kernel.org>
> > ---
> >  xen/arch/arm/vgic-v2.c |  6 +++---
> >  xen/arch/arm/vgic-v3.c |  6 +++---
> >  xen/arch/arm/vgic.c    | 27 +++++----------------------
> >  3 files changed, 11 insertions(+), 28 deletions(-)
> > 
> > diff --git a/xen/arch/arm/vgic-v2.c b/xen/arch/arm/vgic-v2.c
> > index 3dbcfe8..b30379e 100644
> > --- a/xen/arch/arm/vgic-v2.c
> > +++ b/xen/arch/arm/vgic-v2.c
> > @@ -79,7 +79,7 @@ static uint32_t vgic_fetch_itargetsr(struct vgic_irq_rank *rank,
> >      offset &= ~(NR_TARGETS_PER_ITARGETSR - 1);
> > 
> >      for ( i = 0; i < NR_TARGETS_PER_ITARGETSR; i++, offset++ )
> > -        reg |= (1 << rank->vcpu[offset]) << (i * NR_BITS_PER_TARGET);
> > +        reg |= (1 << read_atomic(&rank->vcpu[offset])) << (i * NR_BITS_PER_TARGET);
> 
> I was about to suggest turning vcpu into an atomic_t to catch
> potential misuse. But unfortunately atomic_t is an int.

Indeed


> So I would probably add a comment on top of the vcpu field in
> vgic_irq_rank explaining that it should be read and written using
> atomic operations.
>
> With that:
> 
> Reviewed-by: Julien Grall <julien.grall@arm.com>

Thank you, I added:

+     * Use atomic operations to read/write the vcpu fields to avoid
+     * taking the rank lock.

and committed.
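
In context, the field and its new comment look roughly like this in
struct vgic_irq_rank (a sketch based on the committed change, with the
other rank members elided):

    struct vgic_irq_rank {
        spinlock_t lock; /* Covers access to all other members of this struct */

        /* ... index, ienable, icfg, priority fields elided ... */

        /*
         * It's more convenient to store a target VCPU per vIRQ
         * than the register ITARGETSR/IROUTER itself.
         * Use atomic operations to read/write the vcpu fields to avoid
         * taking the rank lock.
         */
        uint8_t vcpu[32];
    };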

Patch

diff --git a/xen/arch/arm/vgic-v2.c b/xen/arch/arm/vgic-v2.c
index 3dbcfe8..b30379e 100644
--- a/xen/arch/arm/vgic-v2.c
+++ b/xen/arch/arm/vgic-v2.c
@@ -79,7 +79,7 @@  static uint32_t vgic_fetch_itargetsr(struct vgic_irq_rank *rank,
     offset &= ~(NR_TARGETS_PER_ITARGETSR - 1);
 
     for ( i = 0; i < NR_TARGETS_PER_ITARGETSR; i++, offset++ )
-        reg |= (1 << rank->vcpu[offset]) << (i * NR_BITS_PER_TARGET);
+        reg |= (1 << read_atomic(&rank->vcpu[offset])) << (i * NR_BITS_PER_TARGET);
 
     return reg;
 }
@@ -152,7 +152,7 @@  static void vgic_store_itargetsr(struct domain *d, struct vgic_irq_rank *rank,
         /* The vCPU ID always starts from 0 */
         new_target--;
 
-        old_target = rank->vcpu[offset];
+        old_target = read_atomic(&rank->vcpu[offset]);
 
         /* Only migrate the vIRQ if the target vCPU has changed */
         if ( new_target != old_target )
@@ -162,7 +162,7 @@  static void vgic_store_itargetsr(struct domain *d, struct vgic_irq_rank *rank,
                              virq);
         }
 
-        rank->vcpu[offset] = new_target;
+        write_atomic(&rank->vcpu[offset], new_target);
     }
 }
 
diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
index d61479d..7dc9b6f 100644
--- a/xen/arch/arm/vgic-v3.c
+++ b/xen/arch/arm/vgic-v3.c
@@ -108,7 +108,7 @@  static uint64_t vgic_fetch_irouter(struct vgic_irq_rank *rank,
     /* Get the index in the rank */
     offset &= INTERRUPT_RANK_MASK;
 
-    return vcpuid_to_vaffinity(rank->vcpu[offset]);
+    return vcpuid_to_vaffinity(read_atomic(&rank->vcpu[offset]));
 }
 
 /*
@@ -136,7 +136,7 @@  static void vgic_store_irouter(struct domain *d, struct vgic_irq_rank *rank,
     offset &= virq & INTERRUPT_RANK_MASK;
 
     new_vcpu = vgic_v3_irouter_to_vcpu(d, irouter);
-    old_vcpu = d->vcpu[rank->vcpu[offset]];
+    old_vcpu = d->vcpu[read_atomic(&rank->vcpu[offset])];
 
     /*
      * From the spec (see 8.9.13 in IHI 0069A), any write with an
@@ -154,7 +154,7 @@  static void vgic_store_irouter(struct domain *d, struct vgic_irq_rank *rank,
     if ( new_vcpu != old_vcpu )
         vgic_migrate_irq(old_vcpu, new_vcpu, virq);
 
-    rank->vcpu[offset] = new_vcpu->vcpu_id;
+    write_atomic(&rank->vcpu[offset], new_vcpu->vcpu_id);
 }
 
 static inline bool vgic_reg64_check_access(struct hsr_dabt dabt)
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 364d5f0..3dd9044 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -85,7 +85,7 @@  static void vgic_rank_init(struct vgic_irq_rank *rank, uint8_t index,
     rank->index = index;
 
     for ( i = 0; i < NR_INTERRUPT_PER_RANK; i++ )
-        rank->vcpu[i] = vcpu;
+        write_atomic(&rank->vcpu[i], vcpu);
 }
 
 int domain_vgic_register(struct domain *d, int *mmio_count)
@@ -218,28 +218,11 @@  int vcpu_vgic_free(struct vcpu *v)
     return 0;
 }
 
-/* The function should be called by rank lock taken. */
-static struct vcpu *__vgic_get_target_vcpu(struct vcpu *v, unsigned int virq)
-{
-    struct vgic_irq_rank *rank = vgic_rank_irq(v, virq);
-
-    ASSERT(spin_is_locked(&rank->lock));
-
-    return v->domain->vcpu[rank->vcpu[virq & INTERRUPT_RANK_MASK]];
-}
-
-/* takes the rank lock */
 struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq)
 {
-    struct vcpu *v_target;
     struct vgic_irq_rank *rank = vgic_rank_irq(v, virq);
-    unsigned long flags;
-
-    vgic_lock_rank(v, rank, flags);
-    v_target = __vgic_get_target_vcpu(v, virq);
-    vgic_unlock_rank(v, rank, flags);
-
-    return v_target;
+    int target = read_atomic(&rank->vcpu[virq & INTERRUPT_RANK_MASK]);
+    return v->domain->vcpu[target];
 }
 
 static int vgic_get_virq_priority(struct vcpu *v, unsigned int virq)
@@ -326,7 +309,7 @@  void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n)
 
     while ( (i = find_next_bit(&mask, 32, i)) < 32 ) {
         irq = i + (32 * n);
-        v_target = __vgic_get_target_vcpu(v, irq);
+        v_target = vgic_get_target_vcpu(v, irq);
         p = irq_to_pending(v_target, irq);
         clear_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
         gic_remove_from_queues(v_target, irq);
@@ -368,7 +351,7 @@  void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n)
 
     while ( (i = find_next_bit(&mask, 32, i)) < 32 ) {
         irq = i + (32 * n);
-        v_target = __vgic_get_target_vcpu(v, irq);
+        v_target = vgic_get_target_vcpu(v, irq);
         p = irq_to_pending(v_target, irq);
         set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
         spin_lock_irqsave(&v_target->arch.vgic.lock, flags);