@@ -179,6 +179,14 @@ int panthor_device_init(struct panthor_device *ptdev)
if (ret)
return ret;
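+
+	/*
+	 * Allocate a zeroed page to back doorbell mappings while the owning
+	 * group is not resident on the HW: doorbell writes then land in
+	 * plain RAM and have no effect. The page is released through the
+	 * drmm action registered below.
+	 */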
+ ptdev->dummy_doorbell_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!ptdev->dummy_doorbell_page)
+ return -ENOMEM;
+
+ ret = drmm_add_action_or_reset(&ptdev->base, panthor_device_free_page,
+ page_address(ptdev->dummy_doorbell_page));
+ if (ret)
+ return ret;
/*
* Set the dummy page holding the latest flush to 1. This will cause the
* flush to be avoided as we know it isn't necessary if the submission
@@ -343,41 +351,58 @@ const char *panthor_exception_name(struct panthor_device *ptdev, u32 exception_c
static vm_fault_t panthor_mmio_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
- struct panthor_device *ptdev = vma->vm_private_data;
+ struct panthor_file *pfile = vma->vm_private_data;
+ struct panthor_device *ptdev = pfile->ptdev;
u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
unsigned long pfn;
pgprot_t pgprot;
vm_fault_t ret;
bool active;
int cookie;
+ u32 group_handle;
+ u8 doorbell_id;
if (!drm_dev_enter(&ptdev->base, &cookie))
return VM_FAULT_SIGBUS;
- mutex_lock(&ptdev->pm.mmio_lock);
- active = atomic_read(&ptdev->pm.state) == PANTHOR_DEVICE_PM_STATE_ACTIVE;
-
switch (offset) {
case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET:
- if (active)
+ mutex_lock(&ptdev->pm.mmio_lock);
+
+ pgprot = vma->vm_page_prot;
+
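+		/*
+		 * While the GPU is active, expose the real LATEST_FLUSH_ID
+		 * register (non-cached); otherwise back the mapping with the
+		 * dummy page seeded at init time.
+		 */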
+ active = atomic_read(&ptdev->pm.state) == PANTHOR_DEVICE_PM_STATE_ACTIVE;
+ if (active) {
pfn = __phys_to_pfn(ptdev->phys_addr + CSF_GPU_LATEST_FLUSH_ID);
- else
+ pgprot = pgprot_noncached(pgprot);
+ } else {
pfn = page_to_pfn(ptdev->pm.dummy_latest_flush);
+ }
+
+ ret = vmf_insert_pfn_prot(vma, vmf->address, pfn, pgprot);
+
+ mutex_unlock(&ptdev->pm.mmio_lock);
+
+ break;
+
+ case PANTHOR_DOORBELL_OFFSET_START ... (PANTHOR_DOORBELL_OFFSET_END - 1):
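+		/*
+		 * Doorbell offsets are indexed from 0 but group handles start
+		 * at 1, hence the +1.
+		 */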
+ group_handle = PANTHOR_DOORBELL_GROUP_FROM_OFFSET(offset) + 1;
+
+ doorbell_id = panthor_sched_doorbell_id(pfile, group_handle);
+ if (doorbell_id != (u8)-1)
+ pfn = PHYS_PFN(ptdev->phys_addr + CSF_DOORBELL(doorbell_id));
+ else
+ pfn = page_to_pfn(ptdev->dummy_doorbell_page);
+
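+		/*
+		 * Both the real doorbell and the dummy page are mapped with
+		 * device attributes, so the mapping attributes stay the same
+		 * whichever page currently backs the fault.
+		 */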
+ ret = vmf_insert_pfn_prot(vma, vmf->address, pfn,
+ pgprot_device(vma->vm_page_prot));
+
break;
default:
ret = VM_FAULT_SIGBUS;
-		goto out_unlock;
+		break;
	}
- pgprot = vma->vm_page_prot;
- if (active)
- pgprot = pgprot_noncached(pgprot);
-
- ret = vmf_insert_pfn_prot(vma, vmf->address, pfn, pgprot);
-
-out_unlock:
- mutex_unlock(&ptdev->pm.mmio_lock);
drm_dev_exit(cookie);
return ret;
}
@@ -386,7 +411,7 @@ static const struct vm_operations_struct panthor_mmio_vm_ops = {
.fault = panthor_mmio_vm_fault,
};
-int panthor_device_mmap_io(struct panthor_device *ptdev, struct vm_area_struct *vma)
+int panthor_device_mmap_io(struct panthor_file *pfile, struct vm_area_struct *vma)
{
u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
@@ -398,12 +423,19 @@ int panthor_device_mmap_io(struct panthor_device *ptdev, struct vm_area_struct *
break;
+ case PANTHOR_DOORBELL_OFFSET_START ... (PANTHOR_DOORBELL_OFFSET_END - 1):
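+		/* Doorbell mappings must be write-only and exactly one page. */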
+ if (vma->vm_end - vma->vm_start != PAGE_SIZE ||
+ (vma->vm_flags & (VM_READ | VM_EXEC)))
+ return -EINVAL;
+
+ break;
+
default:
return -EINVAL;
}
/* Defer actual mapping to the fault handler. */
- vma->vm_private_data = ptdev;
+ vma->vm_private_data = pfile;
vma->vm_ops = &panthor_mmio_vm_ops;
vm_flags_set(vma,
VM_IO | VM_DONTCOPY | VM_DONTEXPAND |
@@ -162,6 +162,9 @@ struct panthor_device {
*/
struct page *dummy_latest_flush;
} pm;
+
+	/**
+	 * @dummy_doorbell_page: Dummy page backing doorbell mappings while
+	 * the owning group is not resident on the HW.
+	 */
+ struct page *dummy_doorbell_page;
};
/**
@@ -204,7 +207,7 @@ static inline bool panthor_device_reset_is_pending(struct panthor_device *ptdev)
return atomic_read(&ptdev->reset.pending) != 0;
}
-int panthor_device_mmap_io(struct panthor_device *ptdev,
+int panthor_device_mmap_io(struct panthor_file *pfile,
struct vm_area_struct *vma);
int panthor_device_resume(struct device *dev);
@@ -376,6 +379,8 @@ static int panthor_request_ ## __name ## _irq(struct panthor_device *ptdev, \
((group) << DRM_PANTHOR_MAX_PAGE_SHIFT))
#define PANTHOR_DOORBELL_OFFSET_START PANTHOR_DOORBELL_OFFSET(0)
#define PANTHOR_DOORBELL_OFFSET_END PANTHOR_DOORBELL_OFFSET(MAX_GROUPS_PER_POOL)
+#define PANTHOR_DOORBELL_GROUP_FROM_OFFSET(offset) \
+	(((offset) - PANTHOR_PRIVATE_MMIO_OFFSET) >> DRM_PANTHOR_MAX_PAGE_SHIFT)
extern struct workqueue_struct *panthor_cleanup_wq;
@@ -1381,7 +1381,6 @@ static int panthor_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file = filp->private_data;
struct panthor_file *pfile = file->driver_priv;
- struct panthor_device *ptdev = pfile->ptdev;
u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
int ret, cookie;
@@ -1404,7 +1403,7 @@ static int panthor_mmap(struct file *filp, struct vm_area_struct *vma)
#endif
if (offset >= DRM_PANTHOR_USER_MMIO_OFFSET)
- ret = panthor_device_mmap_io(ptdev, vma);
+ ret = panthor_device_mmap_io(pfile, vma);
else
ret = drm_gem_mmap(filp, vma);
@@ -444,7 +444,7 @@ panthor_fw_alloc_queue_iface_mem(struct panthor_device *ptdev,
int ret;
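+	/*
+	 * No DRM_PANTHOR_BO_NO_MMAP: userspace is now allowed to map the
+	 * queue interface memory.
+	 */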
mem = panthor_kernel_bo_create(ptdev, ptdev->fw->vm, SZ_8K,
- DRM_PANTHOR_BO_NO_MMAP,
+ 0,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
PANTHOR_VM_KERNEL_AUTO_VA);
@@ -954,6 +954,10 @@ group_bind_locked(struct panthor_group *group, u32 csg_id)
for (u32 i = 0; i < group->queue_count; i++)
group->queues[i]->doorbell_id = csg_id + 1;
+	/*
+	 * Zap any dummy doorbell mapping for this group so the next fault
+	 * installs the real doorbell page.
+	 */
+	unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
+			    PANTHOR_DOORBELL_OFFSET(group->handle - 1),
+			    1ull << DRM_PANTHOR_MAX_PAGE_SHIFT, 1);
+
csg_slot->group = group;
return 0;
@@ -990,6 +994,10 @@ group_unbind_locked(struct panthor_group *group)
for (u32 i = 0; i < group->queue_count; i++)
group->queues[i]->doorbell_id = -1;
+	/*
+	 * Zap the real doorbell mapping for this group so the next fault
+	 * falls back to the dummy page.
+	 */
+	unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
+			    PANTHOR_DOORBELL_OFFSET(group->handle - 1),
+			    1ull << DRM_PANTHOR_MAX_PAGE_SHIFT, 1);
+
slot->group = NULL;
group_put(group);
@@ -1726,6 +1734,41 @@ void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events)
sched_queue_work(ptdev->scheduler, fw_events);
}
+/**
+ * panthor_sched_doorbell_id() - Get the doorbell ID for a given group.
+ * @pfile: Panthor file owning the group.
+ * @group_handle: Handle of the group to query.
+ *
+ * Return: the doorbell ID assigned to the group if it is bound to a CSG
+ * slot, (u8)-1 otherwise.
+ */
+u8 panthor_sched_doorbell_id(struct panthor_file *pfile, u32 group_handle)
+{
+ struct panthor_group_pool *gpool = pfile->groups;
+ struct panthor_scheduler *sched = pfile->ptdev->scheduler;
+	struct panthor_group *group;
+	u8 doorbell_id;
+
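+	/* group_get() returns NULL when xa_load() found no group. */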
+ group = group_get(xa_load(&gpool->xa, group_handle));
+ if (!group)
+ return -1;
+
+ if (!group->queue_count) {
+		doorbell_id = (u8)-1;
+		goto out_put_group;
+ }
+
+ mutex_lock(&sched->lock);
+
+	/*
+	 * In the current implementation, all queues of a group share the
+	 * same doorbell page.
+	 */
+ doorbell_id = group->queues[0]->doorbell_id;
+
+ mutex_unlock(&sched->lock);
+
+out_put_group:
+ group_put(group);
+ return doorbell_id;
+}
+
static const char *fence_get_driver_name(struct dma_fence *fence)
{
return "panthor";
@@ -3057,6 +3100,7 @@ group_create_queue(struct panthor_group *group,
if (!queue)
return ERR_PTR(-ENOMEM);
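+	/* No doorbell assigned until the group is bound to a CSG slot. */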
+ queue->doorbell_id = -1;
queue->fence_ctx.id = dma_fence_context_alloc(1);
spin_lock_init(&queue->fence_ctx.lock);
INIT_LIST_HEAD(&queue->fence_ctx.in_flight_jobs);
@@ -3065,7 +3109,7 @@ group_create_queue(struct panthor_group *group,
queue->ringbuf = panthor_kernel_bo_create(group->ptdev, group->vm,
args->ringbuf_size,
- DRM_PANTHOR_BO_NO_MMAP,
+ 0,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
PANTHOR_VM_KERNEL_AUTO_VA);
@@ -47,4 +47,6 @@ void panthor_sched_resume(struct panthor_device *ptdev);
void panthor_sched_report_mmu_fault(struct panthor_device *ptdev);
void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events);
+u8 panthor_sched_doorbell_id(struct panthor_file *pfile, u32 group_handle);
+
#endif