@@ -44,8 +44,9 @@ amdgpu_userqueue_cleanup(struct amdgpu_userq_mgr *uq_mgr,
if (f && !dma_fence_is_signaled(f)) {
ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
if (ret <= 0) {
- DRM_ERROR("Timed out waiting for fence=%llu:%llu\n",
- f->context, f->seqno);
+ drm_file_err(uq_mgr->file,
+ "Timed out waiting for fence: context=%llu seqno:%llu\n",
+ f->context, f->seqno);
return;
}
}
@@ -480,7 +481,8 @@ amdgpu_userqueue_resume_all(struct amdgpu_userq_mgr *uq_mgr)
}
if (ret)
- DRM_ERROR("Failed to map all the queues\n");
+ drm_file_err(uq_mgr->file, "Failed to map all the queues\n");
+
return ret;
}
@@ -638,7 +640,8 @@ amdgpu_userqueue_suspend_all(struct amdgpu_userq_mgr *uq_mgr)
}
if (ret)
- DRM_ERROR("Couldn't unmap all the queues\n");
+ drm_file_err(uq_mgr->file, "Couldn't unmap all the queues\n");
+
return ret;
}
@@ -655,8 +658,9 @@ amdgpu_userqueue_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
continue;
ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
if (ret <= 0) {
- DRM_ERROR("Timed out waiting for fence=%llu:%llu\n",
- f->context, f->seqno);
+ drm_file_err(uq_mgr->file,
+ "Timed out waiting for fence: context=%llu seqno:%llu\n",
+ f->context, f->seqno);
return -ETIMEDOUT;
}
}
@@ -675,13 +679,13 @@ amdgpu_userqueue_suspend(struct amdgpu_userq_mgr *uq_mgr,
/* Wait for any pending userqueue fence work to finish */
ret = amdgpu_userqueue_wait_for_signal(uq_mgr);
if (ret) {
- DRM_ERROR("Not suspending userqueue, timeout waiting for work\n");
+ drm_file_err(uq_mgr->file, "Not suspending userqueue, timeout waiting\n");
return;
}
ret = amdgpu_userqueue_suspend_all(uq_mgr);
if (ret) {
- DRM_ERROR("Failed to evict userqueue\n");
+ drm_file_err(uq_mgr->file, "Failed to evict userqueue\n");
return;
}
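
Every hunk applies the same pattern: replace an anonymous DRM_ERROR() with drm_file_err(), threading the owning struct drm_file through uq_mgr->file so the log line identifies which client's queue misbehaved. Below is a minimal sketch of that pattern, assuming a kernel where drm_file_err() is available (drm/drm_print.h); the helper name example_wait_fence() is hypothetical and only mirrors the fence-wait hunks above, it is not the upstream function:

	#include <linux/dma-fence.h>
	#include <linux/jiffies.h>
	#include <drm/drm_file.h>
	#include <drm/drm_print.h>

	/* Hypothetical helper mirroring the fence-wait hunks in this patch. */
	static int example_wait_fence(struct drm_file *file, struct dma_fence *f)
	{
		long ret;

		if (!f || dma_fence_is_signaled(f))
			return 0;

		/* Interruptible wait, bounded at 100 ms as in the hunks above. */
		ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
		if (ret <= 0) {
			/*
			 * Unlike DRM_ERROR(), drm_file_err() prefixes the
			 * message with the owning client's details, so the
			 * log points at the process whose queue is stuck.
			 */
			drm_file_err(file,
				     "Timed out waiting for fence: context=%llu seqno=%llu\n",
				     f->context, f->seqno);
			return -ETIMEDOUT;
		}

		return 0;
	}

Note that dma_fence_wait_timeout() returns the remaining jiffies when the fence signals, 0 on timeout, or a negative errno if the wait was interrupted, which is why the hunks above treat ret <= 0 as failure.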