@@ -524,6 +524,10 @@ int spi_register_master(struct spi_master *master)
dynamic = 1;
}
+ spin_lock_init(&master->bus_lock_spinlock);
+ mutex_init(&master->bus_lock_mutex);
+ master->bus_lock_chipselect = SPI_MASTER_BUS_UNLOCKED;
+
/* register the device, then userspace will see it.
* registration fails if the bus ID is in use.
*/
@@ -692,7 +696,7 @@ EXPORT_SYMBOL_GPL(spi_setup);
* (This rule applies equally to all the synchronous transfer calls,
* which are wrappers around this core asynchronous primitive.)
*/
-int spi_async(struct spi_device *spi, struct spi_message *message)
+static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
struct spi_master *master = spi->master;
@@ -720,7 +724,6 @@ int spi_async(struct spi_device *spi, struct spi_message *message)
message->status = -EINPROGRESS;
return master->transfer(spi, message);
}
-EXPORT_SYMBOL_GPL(spi_async);
/*-------------------------------------------------------------------------*/
@@ -756,14 +759,50 @@ static void spi_complete(void *arg)
*
* It returns zero on success, else a negative error code.
*/
+
+/**
+ * spi_sync_locked - synchronous SPI data transfer on a locked bus
+ * @spi: device with which data will be exchanged
+ * @message: describes the data transfers
+ * Context: can sleep
+ *
+ * Like spi_sync(), but skips the bus mutex: intended for callers that
+ * already hold the bus lock taken via spi_bus_lock().
+ * It returns zero on success, else a negative error code.
+ */
+int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
+{
+	DECLARE_COMPLETION_ONSTACK(done);
+	int status;
+	struct spi_master *master = spi->master;
+	unsigned long flags;
+
+	message->complete = spi_complete;
+	message->context = &done;
+
+	/* irqsave to match spi_async(), which takes this spinlock with
+	 * interrupts disabled; taking it here without disabling IRQs
+	 * risks a deadlock against an interrupt-context spi_async().
+	 */
+	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+
+	status = __spi_async(spi, message);
+
+	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+
+	if (status == 0) {
+		wait_for_completion(&done);
+		status = message->status;
+	}
+
+	message->context = NULL;
+
+	return status;
+}
+EXPORT_SYMBOL_GPL(spi_sync_locked);
+
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
DECLARE_COMPLETION_ONSTACK(done);
int status;
+ struct spi_master *master = spi->master;
message->complete = spi_complete;
message->context = &done;
- status = spi_async(spi, message);
+
+ mutex_lock(&master->bus_lock_mutex);
+
+ spin_lock(&master->bus_lock_spinlock);
+ status = __spi_async(spi, message);
+ spin_unlock(&master->bus_lock_spinlock);
+
+ mutex_unlock(&master->bus_lock_mutex);
+
if (status == 0) {
wait_for_completion(&done);
status = message->status;
@@ -773,6 +812,82 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
}
EXPORT_SYMBOL_GPL(spi_sync);
+/**
+ * spi_async - asynchronous SPI transfer
+ * @spi: device with which data will be exchanged
+ * @message: describes the data transfers, with completion callback
+ * Context: any (the spinlock is taken irq-safely, so this may be
+ *	called with interrupts disabled)
+ *
+ * Fails with -EBUSY when another device currently holds the bus lock.
+ */
+int spi_async(struct spi_device *spi, struct spi_message *message)
+{
+	struct spi_master *master = spi->master;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+
+	/* NOTE(review): a device whose chip_select happens to equal
+	 * SPI_MASTER_BUS_UNLOCKED (0xFF) is indistinguishable from the
+	 * unlocked state here -- confirm no such device can exist.
+	 */
+	if (master->bus_lock_chipselect == SPI_MASTER_BUS_UNLOCKED
+	    || master->bus_lock_chipselect == spi->chip_select) {
+		ret = __spi_async(spi, message);
+	} else {
+		/* REVISIT:
+		 * if the bus is locked to another device, message transfer
+		 * fails. Maybe messages should be queued in the SPI layer
+		 * and be transmitted after the lock is released.
+		 */
+		ret = -EBUSY;
+	}
+
+	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(spi_async);
+
+/**
+ * spi_async_locked - asynchronous SPI transfer on a locked bus
+ * @spi: device with which data will be exchanged
+ * @message: describes the data transfers, with completion callback
+ *
+ * The caller must already hold the bus lock for @spi (spi_bus_lock);
+ * otherwise -EINVAL is returned.
+ */
+int spi_async_locked(struct spi_device *spi, struct spi_message *message)
+{
+	struct spi_master *master = spi->master;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+
+	if (master->bus_lock_chipselect != spi->chip_select) {
+		/* API error: the SPI bus lock is not held by the caller */
+		ret = -EINVAL;
+	} else {
+		ret = __spi_async(spi, message);
+	}
+
+	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(spi_async_locked);
+
+/**
+ * spi_bus_lock - lock the SPI bus for exclusive use by one device
+ * @spi: device that will own the bus
+ * Context: can sleep
+ *
+ * Blocks until any in-progress spi_sync() / bus lock is released.
+ */
+int spi_bus_lock(struct spi_device *spi)
+{
+	struct spi_master *master = spi->master;
+	unsigned long flags;
+
+	/* Take the mutex before the spinlock: mutex_lock() may sleep,
+	 * and sleeping while holding a spinlock is forbidden.  It also
+	 * avoids deadlocking spi_bus_unlock(), which needs the same
+	 * spinlock to release the bus.
+	 */
+	mutex_lock(&master->bus_lock_mutex);
+
+	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+	master->bus_lock_chipselect = spi->chip_select;
+	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+
+	/* mutex remains locked until spi_bus_unlock is called */
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(spi_bus_lock);
+
+/**
+ * spi_bus_unlock - release the bus lock taken by spi_bus_lock
+ * @spi: device that currently owns the bus
+ * Context: can sleep
+ */
+int spi_bus_unlock(struct spi_device *spi)
+{
+	struct spi_master *master = spi->master;
+	unsigned long flags;
+
+	/* Clear the owner while still holding the spinlock, BEFORE the
+	 * mutex is released: otherwise a waiter in spi_bus_lock() could
+	 * grab the mutex and set its own chipselect, only to have it
+	 * clobbered by this late store.
+	 */
+	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+	master->bus_lock_chipselect = SPI_MASTER_BUS_UNLOCKED;
+	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+
+	/* Release the mutex outside the spinlock: mutex operations in
+	 * atomic context are not allowed, and this mirrors the lock
+	 * ordering used in spi_bus_lock().
+	 */
+	mutex_unlock(&master->bus_lock_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(spi_bus_unlock);
+
/* portable code must never pass more than 32 bytes */
#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
@@ -261,6 +261,14 @@ struct spi_master {
#define SPI_MASTER_NO_RX BIT(1) /* can't do buffer read */
#define SPI_MASTER_NO_TX BIT(2) /* can't do buffer write */
+ /* lock and mutex for SPI bus locking */
+ spinlock_t bus_lock_spinlock;
+ struct mutex bus_lock_mutex;
+
+ /* chipselect of the spi device that locked the bus for exclusive use */
+ u8 bus_lock_chipselect;
+#define SPI_MASTER_BUS_UNLOCKED 0xFF /* value to indicate unlocked */
+
/* Setup mode and clock, etc (spi driver may call many times).
*
* IMPORTANT: this may be called when transfers to another
@@ -541,6 +549,8 @@ static inline void spi_message_free(struct spi_message *m)
extern int spi_setup(struct spi_device *spi);
extern int spi_async(struct spi_device *spi, struct spi_message *message);
+extern int
+spi_async_locked(struct spi_device *spi, struct spi_message *message);
/*---------------------------------------------------------------------------*/
@@ -550,6 +560,9 @@ extern int spi_async(struct spi_device *spi, struct spi_message *message);
*/
extern int spi_sync(struct spi_device *spi, struct spi_message *message);
+extern int spi_sync_locked(struct spi_device *spi, struct spi_message *message);
+extern int spi_bus_lock(struct spi_device *spi);
+extern int spi_bus_unlock(struct spi_device *spi);
/**
* spi_write - SPI synchronous write