@@ -17,6 +17,7 @@
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
+#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
@@ -1371,6 +1372,12 @@ static void iio_buffer_unregister_legacy_sysfs_groups(struct iio_dev *indio_dev)
kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
}
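+/*
+ * Release all blocks allocated through the mmap interface. Called both from
+ * the IIO_BUFFER_BLOCK_FREE_IOCTL handler and when the buffer file descriptor
+ * is released, so blocks are not leaked if userspace exits without freeing
+ * them explicitly.
+ */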
+static void iio_buffer_free_blocks(struct iio_buffer *buffer)
+{
+ if (buffer->access->free_blocks)
+ buffer->access->free_blocks(buffer);
+}
+
static int iio_buffer_chrdev_release(struct inode *inode, struct file *filep)
{
struct iio_dev_buffer_pair *ib = filep->private_data;
@@ -1381,16 +1388,23 @@ static int iio_buffer_chrdev_release(struct inode *inode, struct file *filep)
kfree(ib);
clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
+ iio_buffer_free_blocks(buffer);
iio_device_put(indio_dev);
return 0;
}
+static long iio_buffer_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
+static int iio_buffer_mmap(struct file *filep, struct vm_area_struct *vma);
+
static const struct file_operations iio_buffer_chrdev_fileops = {
.owner = THIS_MODULE,
.llseek = noop_llseek,
.read = iio_buffer_read,
.poll = iio_buffer_poll,
+ .unlocked_ioctl = iio_buffer_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .mmap = iio_buffer_mmap,
.release = iio_buffer_chrdev_release,
};
@@ -1768,6 +1782,150 @@ void iio_buffer_put(struct iio_buffer *buffer)
}
EXPORT_SYMBOL_GPL(iio_buffer_put);
+static int iio_buffer_query_block(struct iio_buffer *buffer,
+ struct iio_buffer_block __user *user_block)
+{
+ struct iio_buffer_block block;
+ int ret;
+
+ if (!buffer->access->query_block)
+ return -ENOSYS;
+
+ if (copy_from_user(&block, user_block, sizeof(block)))
+ return -EFAULT;
+
+ ret = buffer->access->query_block(buffer, &block);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(user_block, &block, sizeof(block)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int iio_buffer_dequeue_block(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer,
+ struct iio_buffer_block __user *user_block,
+ bool non_blocking)
+{
+ struct iio_buffer_block block;
+ int ret;
+
+ if (!buffer->access->dequeue_block)
+ return -ENOSYS;
+
+ do {
+ if (!iio_buffer_data_available(buffer)) {
+ if (non_blocking)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(buffer->pollq,
+ iio_buffer_data_available(buffer) ||
+ indio_dev->info == NULL);
+ if (ret)
+ return ret;
+ if (indio_dev->info == NULL)
+ return -ENODEV;
+ }
+
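+ /*
+ * Even after the availability check, dequeue_block() may still find no
+ * completed block (e.g. it was claimed in the meantime). Retry in
+ * blocking mode, or report -EAGAIN in non-blocking mode.
+ */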
+ ret = buffer->access->dequeue_block(buffer, &block);
+ if (ret == -EAGAIN && non_blocking)
+ return -EAGAIN;
+ } while (ret == -EAGAIN);
+
+ if (ret)
+ return ret;
+
+ if (copy_to_user(user_block, &block, sizeof(block)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int iio_buffer_enqueue_block(struct iio_buffer *buffer,
+ struct iio_buffer_block __user *user_block)
+{
+ struct iio_buffer_block block;
+
+ if (!buffer->access->enqueue_block)
+ return -ENOSYS;
+
+ if (copy_from_user(&block, user_block, sizeof(block)))
+ return -EFAULT;
+
+ return buffer->access->enqueue_block(buffer, &block);
+}
+
+static int iio_buffer_alloc_blocks(struct iio_buffer *buffer,
+ struct iio_buffer_block_alloc_req __user *user_req)
+{
+ struct iio_buffer_block_alloc_req req;
+ int ret;
+
+ if (!buffer->access->alloc_blocks)
+ return -ENOSYS;
+
+ if (copy_from_user(&req, user_req, sizeof(req)))
+ return -EFAULT;
+
+ ret = buffer->access->alloc_blocks(buffer, &req);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(user_req, &req, sizeof(req)))
+ return -EFAULT;
+
+ return 0;
+}
+
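+/*
+ * ioctl() handler for the per-buffer character device. The block ioctls are
+ * validated here and forwarded to the buffer implementation's callbacks;
+ * callbacks not provided by the implementation result in -ENOSYS.
+ */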
+static long iio_buffer_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+ bool non_blocking = filep->f_flags & O_NONBLOCK;
+ struct iio_dev_buffer_pair *ib = filep->private_data;
+ struct iio_dev *indio_dev = ib->indio_dev;
+ struct iio_buffer *buffer = ib->buffer;
+
+ if (!buffer || !buffer->access)
+ return -ENODEV;
+
+ switch (cmd) {
+ case IIO_BUFFER_BLOCK_ALLOC_IOCTL:
+ return iio_buffer_alloc_blocks(buffer,
+ (struct iio_buffer_block_alloc_req __user *)arg);
+ case IIO_BUFFER_BLOCK_FREE_IOCTL:
+ iio_buffer_free_blocks(buffer);
+ return 0;
+ case IIO_BUFFER_BLOCK_QUERY_IOCTL:
+ return iio_buffer_query_block(buffer,
+ (struct iio_buffer_block __user *)arg);
+ case IIO_BUFFER_BLOCK_ENQUEUE_IOCTL:
+ return iio_buffer_enqueue_block(buffer,
+ (struct iio_buffer_block __user *)arg);
+ case IIO_BUFFER_BLOCK_DEQUEUE_IOCTL:
+ return iio_buffer_dequeue_block(indio_dev, buffer,
+ (struct iio_buffer_block __user *)arg, non_blocking);
+ }
+ return -EINVAL;
+}
+
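+/*
+ * mmap() handler for the per-buffer character device. Only shared, readable
+ * mappings are accepted; setting up the actual mapping of the block memory
+ * is delegated to the buffer implementation's mmap() callback.
+ */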
+static int iio_buffer_mmap(struct file *filep, struct vm_area_struct *vma)
+{
+ struct iio_dev_buffer_pair *ib = filep->private_data;
+ struct iio_buffer *buffer = ib->buffer;
+
+ if (!buffer->access || !buffer->access->mmap)
+ return -ENODEV;
+
+ if (!(vma->vm_flags & VM_SHARED))
+ return -EINVAL;
+
+ if (!(vma->vm_flags & VM_READ))
+ return -EINVAL;
+
+ return buffer->access->mmap(buffer, vma);
+}
+
/**
* iio_device_attach_buffer - Attach a buffer to a IIO device
* @indio_dev: The device the buffer should be attached to
@@ -17,11 +17,6 @@ struct iio_dma_buffer_queue;
struct iio_dma_buffer_ops;
struct device;
-struct iio_buffer_block {
- u32 size;
- u32 bytes_used;
-};
-
/**
* enum iio_block_state - State of a struct iio_dma_buffer_block
* @IIO_BLOCK_STATE_DEQUEUED: Block is not queued
@@ -34,6 +34,18 @@ struct iio_buffer;
* device stops sampling. Calls are balanced with @enable.
* @release: called when the last reference to the buffer is dropped,
* should free all resources allocated by the buffer.
+ * @alloc_blocks: called in response to a userspace ioctl to allocate the
+ * blocks that will be exposed via the mmap interface.
+ * @free_blocks: called in response to a userspace ioctl to free all blocks
+ * allocated for this buffer.
+ * @enqueue_block: called in response to a userspace ioctl to queue a block
+ * onto this buffer. Requires a valid block id.
+ * @dequeue_block: called in response to a userspace ioctl to dequeue a block
+ * from this buffer. Requires a valid block id.
+ * @query_block: called in response to a userspace ioctl to query the
+ * attributes of a block. Requires a valid block id.
+ * @mmap: mmap hook for this buffer. Userspace mmap() calls on the buffer's
+ * file descriptor are routed to this callback.
* @modes: Supported operating modes by this buffer type
* @flags: A bitmask combination of INDIO_BUFFER_FLAG_*
*
@@ -60,6 +72,17 @@ struct iio_buffer_access_funcs {
void (*release)(struct iio_buffer *buffer);
+ int (*alloc_blocks)(struct iio_buffer *buffer,
+ struct iio_buffer_block_alloc_req *req);
+ int (*free_blocks)(struct iio_buffer *buffer);
+ int (*enqueue_block)(struct iio_buffer *buffer,
+ struct iio_buffer_block *block);
+ int (*dequeue_block)(struct iio_buffer *buffer,
+ struct iio_buffer_block *block);
+ int (*query_block)(struct iio_buffer *buffer,
+ struct iio_buffer_block *block);
+ int (*mmap)(struct iio_buffer *buffer, struct vm_area_struct *vma);
+
unsigned int modes;
unsigned int flags;
};
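+/*
+ * Example (callback names are hypothetical placeholders): a buffer
+ * implementation that supports the mmap interface might provide the block
+ * callbacks alongside its existing ops:
+ *
+ *	static const struct iio_buffer_access_funcs example_dma_buffer_ops = {
+ *		.read = example_dma_buffer_read,
+ *		.data_available = example_dma_buffer_data_available,
+ *		.request_update = example_dma_buffer_request_update,
+ *		.enable = example_dma_buffer_enable,
+ *		.disable = example_dma_buffer_disable,
+ *		.release = example_dma_buffer_release,
+ *		.alloc_blocks = example_dma_buffer_alloc_blocks,
+ *		.free_blocks = example_dma_buffer_free_blocks,
+ *		.query_block = example_dma_buffer_query_block,
+ *		.enqueue_block = example_dma_buffer_enqueue_block,
+ *		.dequeue_block = example_dma_buffer_dequeue_block,
+ *		.mmap = example_dma_buffer_mmap,
+ *		.modes = INDIO_BUFFER_HARDWARE,
+ *	};
+ */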
@@ -5,6 +5,52 @@
#ifndef _UAPI_IIO_BUFFER_H_
#define _UAPI_IIO_BUFFER_H_
+#include <linux/types.h>
+
+/**
+ * struct iio_buffer_block_alloc_req - Descriptor for allocating IIO buffer blocks
+ * @type: type of block(s) to allocate (currently unused, reserved)
+ * @size: the size of a single block
+ * @count: the number of blocks to allocate
+ * @id: returned by the request, the number of blocks allocated
+ */
+struct iio_buffer_block_alloc_req {
+ __u32 type;
+ __u32 size;
+ __u32 count;
+ __u32 id;
+};
+
+/* BIT(0) is reserved for now; its meaning will be assigned later */
+#define IIO_BUFFER_BLOCK_FLAG_RESERVED (1 << 0)
+
+/**
+ * struct iio_buffer_block - Descriptor for a single IIO block
+ * @id: identifier of the block
+ * @size: size of the block
+ * @bytes_used: number of bytes used in this block by a data transfer
+ * @type: type of this block (currently unused, reserved)
+ * @flags: flags for this block, set when enqueuing it
+ * @offset: data offset of this block in a larger memory segment
+ * @timestamp: timestamp for this block
+ */
+struct iio_buffer_block {
+ __u32 id;
+ __u32 size;
+ __u32 bytes_used;
+ __u32 type;
+ __u32 flags;
+ union {
+ __u32 offset;
+ } data;
+ __u64 timestamp;
+};
+
#define IIO_BUFFER_GET_FD_IOCTL _IOWR('i', 0x91, int)
+#define IIO_BUFFER_BLOCK_ALLOC_IOCTL _IOWR('i', 0x92, struct iio_buffer_block_alloc_req)
+#define IIO_BUFFER_BLOCK_FREE_IOCTL _IO('i', 0x93)
+#define IIO_BUFFER_BLOCK_QUERY_IOCTL _IOWR('i', 0x93, struct iio_buffer_block)
+#define IIO_BUFFER_BLOCK_ENQUEUE_IOCTL _IOWR('i', 0x94, struct iio_buffer_block)
+#define IIO_BUFFER_BLOCK_DEQUEUE_IOCTL _IOWR('i', 0x95, struct iio_buffer_block)
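+/*
+ * Typical userspace flow with the block ioctls above (illustrative sketch
+ * only; the exact PROT_ and MAP_ flags depend on the buffer direction and on
+ * what the underlying buffer implementation supports):
+ *
+ * 1. obtain a per-buffer fd via IIO_BUFFER_GET_FD_IOCTL
+ * 2. IIO_BUFFER_BLOCK_ALLOC_IOCTL with the desired block size and count
+ * 3. for each block: IIO_BUFFER_BLOCK_QUERY_IOCTL, then
+ *    mmap(NULL, block.size, PROT_READ, MAP_SHARED, fd, block.data.offset)
+ * 4. enqueue the blocks with IIO_BUFFER_BLOCK_ENQUEUE_IOCTL and enable the
+ *    buffer (e.g. via the sysfs 'enable' attribute)
+ * 5. poll() the fd, dequeue a completed block with
+ *    IIO_BUFFER_BLOCK_DEQUEUE_IOCTL, process block.bytes_used bytes and
+ *    enqueue the block again
+ * 6. IIO_BUFFER_BLOCK_FREE_IOCTL when done (also performed implicitly when
+ *    the fd is closed)
+ */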
#endif /* _UAPI_IIO_BUFFER_H_ */