@@ -114,7 +114,7 @@ static int apu_rpmsg_callback(struct rpmsg_device *rpdev, void *data, int count,
}
static struct apu_buffer *apu_device_memory_map(struct rpmsg_apu *apu,
- uint32_t fd, struct rpmsg_request *rpmsg_req)
+ uint32_t fd)
{
struct rpmsg_device *rpdev = apu->rpdev;
struct apu_buffer *buffer;
@@ -129,10 +129,6 @@ static struct apu_buffer *apu_device_memory_map(struct rpmsg_apu *apu,
list_for_each_entry(buffer, &apu->buffers, node) {
if (buffer->fd == fd) {
kref_get(&buffer->refcount);
- if (rpmsg_req)
- list_add(&buffer->req_node,
- &rpmsg_req->buffers);
-
return buffer;
}
}
@@ -230,6 +226,44 @@ static void apu_device_memory_unmap(struct kref *ref)
kfree(buffer);
}
+/*
+ * Map the dma-buf identified by @argp->fd into the APU IOMMU and return
+ * the resulting device address to userspace in @argp->da.
+ * Takes one reference on the buffer; released by APU_IOMMU_MUNMAP.
+ */
+static int apu_iommu_mmap_ioctl(struct rpmsg_apu *apu, void __user *argp)
+{
+	struct apu_iommu_mmap apu_iommu_mmap;
+	struct apu_buffer *buffer;
+
+	if (copy_from_user(&apu_iommu_mmap, argp, sizeof(apu_iommu_mmap)))
+		return -EFAULT;
+
+	/* Returns ERR_PTR() on failure (see apu_send_request call site). */
+	buffer = apu_device_memory_map(apu, apu_iommu_mmap.fd);
+	if (IS_ERR(buffer))
+		return PTR_ERR(buffer);
+
+	apu_iommu_mmap.da = buffer->iova;
+	if (copy_to_user(argp, &apu_iommu_mmap, sizeof(apu_iommu_mmap))) {
+		/* Don't leak the mapping reference on a failed copy-out. */
+		kref_put(&buffer->refcount, apu_device_memory_unmap);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+/*
+ * Drop one IOMMU mapping reference on the buffer matching the fd read
+ * from @argp. Returns -EINVAL if no such buffer is currently mapped.
+ * NOTE(review): traversal of apu->buffers appears unlocked — confirm
+ * against the locking used elsewhere for this list.
+ */
+static int apu_iommu_munmap_ioctl(struct rpmsg_apu *apu, void __user *argp)
+{
+	struct apu_buffer *cur, *next;
+	u32 fd;
+
+	if (copy_from_user(&fd, argp, sizeof(fd)))
+		return -EFAULT;
+
+	list_for_each_entry_safe(cur, next, &apu->buffers, node) {
+		if (cur->fd != fd)
+			continue;
+		kref_put(&cur->refcount, apu_device_memory_unmap);
+		return 0;
+	}
+	return -EINVAL;
+}
+
static int apu_send_request(struct rpmsg_apu *apu,
struct apu_request *req)
{
@@ -266,7 +300,7 @@ static int apu_send_request(struct rpmsg_apu *apu,
INIT_LIST_HEAD(&rpmsg_req->buffers);
for (i = 0; i < req->count; i++) {
- buffer = apu_device_memory_map(apu, fd[i], rpmsg_req);
+ buffer = apu_device_memory_map(apu, fd[i]);
if (IS_ERR(buffer)) {
ret = PTR_ERR(buffer);
goto err_free_memory;
@@ -417,6 +451,12 @@ static long rpmsg_eptdev_ioctl(struct file *fp, unsigned int cmd,
}
spin_unlock_irqrestore(&apu->ctx_lock, flags);
+ break;
+ case APU_IOMMU_MMAP:
+ ret = apu_iommu_mmap_ioctl(apu, argp);
+ break;
+ case APU_IOMMU_MUNMAP:
+ ret = apu_iommu_munmap_ioctl(apu, argp);
break;
default:
ret = -EINVAL;
@@ -31,10 +31,17 @@ struct apu_request {
__u8 data[0];
};
+struct apu_iommu_mmap {
+	__u32 fd;	/* in: dma-buf file descriptor to map */
+	__u32 da;	/* out: device (IOMMU) address of the mapping */
+};
+
/* Send synchronous request to an APU */
#define APU_SEND_REQ_IOCTL _IOW(0xb7, 0x2, struct apu_request)
#define APU_GET_NEXT_AVAILABLE_IOCTL _IOR(0xb7, 0x3, __u16)
#define APU_GET_RESP _IOWR(0xb7, 0x4, struct apu_request)
+#define APU_IOMMU_MMAP _IOWR(0xb7, 0x5, struct apu_iommu_mmap)
+/* munmap only copies the fd in and writes nothing back: _IOW, not _IOWR */
+#define APU_IOMMU_MUNMAP _IOW(0xb7, 0x6, __u32)
#endif
Currently, the kernel is automatically doing an IOMMU memory mapping. But we want to do it manually for two reasons: - to reduce the overhead of each APU operation - to get the device address and use it as input for an operation This adds 2 IOCTLs to manually IOMMU map and unmap memory. Signed-off-by: Alexandre Bailon <abailon@baylibre.com> --- drivers/rpmsg/apu_rpmsg.c | 52 ++++++++++++++++++++++++++++++---- include/uapi/linux/apu_rpmsg.h | 7 +++++ 2 files changed, 53 insertions(+), 6 deletions(-)