Message ID | 20190103100806.9039-12-frankja@linux.ibm.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | s390x: Add cross hypervisor and disk boot | expand |
On 03.01.19 11:08, Janosch Frank wrote: > With smp CPUs have to compete for sclp. Let's add some locking, so > they execute sclp calls in an orderly fashion. > > Signed-off-by: Janosch Frank <frankja@linux.ibm.com> > --- > lib/s390x/sclp.c | 16 ++++++++++++++++ > 1 file changed, 16 insertions(+) > > diff --git a/lib/s390x/sclp.c b/lib/s390x/sclp.c > index 817c692..947a533 100644 > --- a/lib/s390x/sclp.c > +++ b/lib/s390x/sclp.c > @@ -15,6 +15,7 @@ > #include <asm/arch_def.h> > #include <asm/interrupt.h> > #include <asm/barrier.h> > +#include <asm/spinlock.h> > #include "sclp.h" > #include <alloc_phys.h> > > @@ -26,6 +27,7 @@ static uint64_t ram_size; > > char _sccb[PAGE_SIZE] __attribute__((__aligned__(4096))); > static volatile bool sclp_busy; > +static struct spinlock sclp_lock; > > static void mem_init(phys_addr_t mem_end) > { > @@ -48,7 +50,9 @@ static void sclp_setup_int(void) > void sclp_handle_ext(void) > { > ctl_clear_bit(0, 9); > + spin_lock(&sclp_lock); > sclp_busy = false; > + spin_unlock(&sclp_lock); > } > > void sclp_wait_busy(void) > @@ -59,7 +63,19 @@ void sclp_wait_busy(void) > > void sclp_mark_busy(void) > { > + /* > + * With multiple CPUs we might need to wait for another CPU's > + * request before grabbing the busy indication. > + */ > +retry_wait: > + sclp_wait_busy(); > + spin_lock(&sclp_lock); > + if (sclp_busy) { > + spin_unlock(&sclp_lock); > + goto retry_wait; > + } > sclp_busy = true; > + spin_unlock(&sclp_lock); > } > > static void sclp_read_scp_info(ReadInfo *ri, int length) > Reviewed-by: David Hildenbrand <david@redhat.com>
diff --git a/lib/s390x/sclp.c b/lib/s390x/sclp.c index 817c692..947a533 100644 --- a/lib/s390x/sclp.c +++ b/lib/s390x/sclp.c @@ -15,6 +15,7 @@ #include <asm/arch_def.h> #include <asm/interrupt.h> #include <asm/barrier.h> +#include <asm/spinlock.h> #include "sclp.h" #include <alloc_phys.h> @@ -26,6 +27,7 @@ static uint64_t ram_size; char _sccb[PAGE_SIZE] __attribute__((__aligned__(4096))); static volatile bool sclp_busy; +static struct spinlock sclp_lock; static void mem_init(phys_addr_t mem_end) { @@ -48,7 +50,9 @@ static void sclp_setup_int(void) void sclp_handle_ext(void) { ctl_clear_bit(0, 9); + spin_lock(&sclp_lock); sclp_busy = false; + spin_unlock(&sclp_lock); } void sclp_wait_busy(void) @@ -59,7 +63,19 @@ void sclp_wait_busy(void) void sclp_mark_busy(void) { + /* + * With multiple CPUs we might need to wait for another CPU's + * request before grabbing the busy indication. + */ +retry_wait: + sclp_wait_busy(); + spin_lock(&sclp_lock); + if (sclp_busy) { + spin_unlock(&sclp_lock); + goto retry_wait; + } sclp_busy = true; + spin_unlock(&sclp_lock); } static void sclp_read_scp_info(ReadInfo *ri, int length)
With SMP, CPUs have to compete for the SCLP. Let's add some locking, so they execute SCLP calls in an orderly fashion. Signed-off-by: Janosch Frank <frankja@linux.ibm.com> --- lib/s390x/sclp.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+)