@@ -126,25 +126,25 @@ int amdgpu_userqueue_create_object(struct amdgpu_userq_mgr *uq_mgr,
r = amdgpu_bo_create(adev, &bp, &userq_obj->obj);
if (r) {
- DRM_ERROR("Failed to allocate BO for userqueue (%d)", r);
+ drm_file_err(uq_mgr->file, "Failed to allocate BO for userqueue (%d)", r);
return r;
}
r = amdgpu_bo_reserve(userq_obj->obj, true);
if (r) {
- DRM_ERROR("Failed to reserve BO to map (%d)", r);
+ drm_file_err(uq_mgr->file, "Failed to reserve BO to map (%d)", r);
goto free_obj;
}
r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo);
if (r) {
- DRM_ERROR("Failed to alloc GART for userqueue object (%d)", r);
+ drm_file_err(uq_mgr->file, "Failed to alloc GART for userqueue object (%d)", r);
goto unresv;
}
r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr);
if (r) {
- DRM_ERROR("Failed to map BO for userqueue (%d)", r);
+ drm_file_err(uq_mgr->file, "Failed to map BO for userqueue (%d)", r);
goto unresv;
}
@@ -180,7 +180,7 @@ amdgpu_userqueue_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle);
if (gobj == NULL) {
- DRM_ERROR("Can't find GEM object for doorbell\n");
+ drm_file_err(uq_mgr->file, "Can't find GEM object for doorbell\n");
return -EINVAL;
}
@@ -190,13 +190,15 @@ amdgpu_userqueue_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
/* Pin the BO before generating the index, unpin in queue destroy */
r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
if (r) {
- DRM_ERROR("[Usermode queues] Failed to pin doorbell object\n");
+ drm_file_err(uq_mgr->file,
+ "[Usermode queues] Failed to pin doorbell object\n");
goto unref_bo;
}
r = amdgpu_bo_reserve(db_obj->obj, true);
if (r) {
- DRM_ERROR("[Usermode queues] Failed to pin doorbell object\n");
+ drm_file_err(uq_mgr->file,
+ "[Usermode queues] Failed to pin doorbell object\n");
goto unpin_bo;
}
@@ -218,7 +220,8 @@ amdgpu_userqueue_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
break;
default:
- DRM_ERROR("[Usermode queues] IP %d not support\n", db_info->queue_type);
+ drm_file_err(uq_mgr->file,
+ "[Usermode queues] IP %d not support\n", db_info->queue_type);
r = -EINVAL;
goto unpin_bo;
}
@@ -304,7 +307,8 @@ amdgpu_userqueue_create(struct drm_file *filp, union drm_amdgpu_userq *args)
if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
args->in.ip_type != AMDGPU_HW_IP_DMA &&
args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
- DRM_ERROR("Usermode queue doesn't support IP type %u\n", args->in.ip_type);
+ drm_file_err(uq_mgr->file,
+ "Usermode queue doesn't support IP type %u\n", args->in.ip_type);
return -EINVAL;
}
@@ -330,14 +334,16 @@ amdgpu_userqueue_create(struct drm_file *filp, union drm_amdgpu_userq *args)
uq_funcs = adev->userq_funcs[args->in.ip_type];
if (!uq_funcs) {
- DRM_ERROR("Usermode queue is not supported for this IP (%u)\n", args->in.ip_type);
+ drm_file_err(uq_mgr->file,
+ "Usermode queue is not supported for this IP (%u)\n",
+ args->in.ip_type);
r = -EINVAL;
goto unlock;
}
queue = kzalloc(sizeof(struct amdgpu_usermode_queue), GFP_KERNEL);
if (!queue) {
- DRM_ERROR("Failed to allocate memory for queue\n");
+ drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n");
r = -ENOMEM;
goto unlock;
}
@@ -354,7 +360,7 @@ amdgpu_userqueue_create(struct drm_file *filp, union drm_amdgpu_userq *args)
/* Convert relative doorbell offset into absolute doorbell index */
index = amdgpu_userqueue_get_doorbell_index(uq_mgr, &db_info, filp);
if (index == (uint64_t)-EINVAL) {
- DRM_ERROR("Failed to get doorbell for queue\n");
+ drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
kfree(queue);
goto unlock;
}
@@ -363,13 +369,13 @@ amdgpu_userqueue_create(struct drm_file *filp, union drm_amdgpu_userq *args)
xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC);
r = amdgpu_userq_fence_driver_alloc(adev, queue);
if (r) {
- DRM_ERROR("Failed to alloc fence driver\n");
+ drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
goto unlock;
}
r = uq_funcs->mqd_create(uq_mgr, &args->in, queue);
if (r) {
- DRM_ERROR("Failed to create Queue\n");
+ drm_file_err(uq_mgr->file, "Failed to create Queue\n");
amdgpu_userq_fence_driver_free(queue);
kfree(queue);
goto unlock;
@@ -378,7 +384,7 @@ amdgpu_userqueue_create(struct drm_file *filp, union drm_amdgpu_userq *args)
qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL);
if (qid < 0) {
- DRM_ERROR("Failed to allocate a queue id\n");
+ drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
amdgpu_userq_fence_driver_free(queue);
uq_funcs->mqd_destroy(uq_mgr, queue);
kfree(queue);
@@ -398,7 +404,7 @@ amdgpu_userqueue_create(struct drm_file *filp, union drm_amdgpu_userq *args)
r = uq_funcs->map(uq_mgr, queue);
if (r) {
mutex_unlock(&adev->userq_mutex);
- DRM_ERROR("Failed to map Queue\n");
+ drm_file_err(uq_mgr->file, "Failed to map Queue\n");
idr_remove(&uq_mgr->userq_idr, qid);
amdgpu_userq_fence_driver_free(queue);
uq_funcs->mqd_destroy(uq_mgr, queue);
@@ -429,7 +435,7 @@ int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
r = amdgpu_userqueue_create(filp, args);
if (r)
- DRM_ERROR("Failed to create usermode queue\n");
+ drm_file_err(filp, "Failed to create usermode queue\n");
break;
case AMDGPU_USERQ_OP_FREE:
@@ -447,7 +453,7 @@ int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
r = amdgpu_userqueue_destroy(filp, args->in.queue_id);
if (r)
- DRM_ERROR("Failed to destroy usermode queue\n");
+ drm_file_err(filp, "Failed to destroy usermode queue\n");
break;
default:
@@ -520,7 +526,7 @@ amdgpu_userqueue_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
ret = amdgpu_vm_lock_pd(vm, &exec, 2);
drm_exec_retry_on_contention(&exec);
if (unlikely(ret)) {
- DRM_ERROR("Failed to lock PD\n");
+ drm_file_err(uq_mgr->file, "Failed to lock PD\n");
goto unlock_all;
}
@@ -560,7 +566,7 @@ amdgpu_userqueue_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
bo = bo_va->base.bo;
ret = amdgpu_userqueue_validate_vm_bo(NULL, bo);
if (ret) {
- DRM_ERROR("Failed to validate BO\n");
+ drm_file_err(uq_mgr->file, "Failed to validate BO\n");
goto unlock_all;
}
@@ -591,7 +597,7 @@ amdgpu_userqueue_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
if (ret)
- DRM_ERROR("Failed to replace eviction fence\n");
+ drm_file_err(uq_mgr->file, "Failed to replace eviction fence\n");
unlock_all:
drm_exec_fini(&exec);
@@ -610,13 +616,13 @@ static void amdgpu_userqueue_resume_worker(struct work_struct *work)
ret = amdgpu_userqueue_validate_bos(uq_mgr);
if (ret) {
- DRM_ERROR("Failed to validate BOs to restore\n");
+ drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n");
goto unlock;
}
ret = amdgpu_userqueue_resume_all(uq_mgr);
if (ret) {
- DRM_ERROR("Failed to resume all queues\n");
+ drm_file_err(uq_mgr->file, "Failed to resume all queues\n");
goto unlock;
}