@@ -72,7 +72,8 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
static int amdgpu_ctx_init(struct amdgpu_device *adev,
enum drm_sched_priority priority,
struct drm_file *filp,
- struct amdgpu_ctx *ctx)
+ struct amdgpu_ctx *ctx,
+ uint32_t flags)
{
unsigned num_entities = amdgpu_ctx_total_num_entities();
unsigned i, j, k;
@@ -121,6 +122,9 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
ctx->init_priority = priority;
ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
+ if (flags & AMDGPU_CTX_ALLOC_FLAGS_SECURE)
+ ctx->is_secure = true;
+
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
@@ -253,7 +257,7 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv,
struct drm_file *filp,
enum drm_sched_priority priority,
- uint32_t *id)
+ uint32_t *id, uint32_t flags)
{
struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
struct amdgpu_ctx *ctx;
@@ -272,7 +276,7 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
}
*id = (uint32_t)r;
- r = amdgpu_ctx_init(adev, priority, filp, ctx);
+ r = amdgpu_ctx_init(adev, priority, filp, ctx, flags);
if (r) {
idr_remove(&mgr->ctx_handles, *id);
*id = 0;
@@ -407,6 +411,12 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ if (!adev->tmz.enabled &&
+ (args->in.flags & AMDGPU_CTX_ALLOC_FLAGS_SECURE)) {
+ DRM_ERROR("Cannot allocate secure context while tmz is disabled\n");
+ return -EINVAL;
+ }
+
r = 0;
id = args->in.ctx_id;
priority = amdgpu_to_sched_priority(args->in.priority);
@@ -418,7 +428,8 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
switch (args->in.op) {
case AMDGPU_CTX_OP_ALLOC_CTX:
- r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
+ r = amdgpu_ctx_alloc(adev, fpriv, filp, priority,
+ &id, args->in.flags);
args->out.alloc.ctx_id = id;
break;
case AMDGPU_CTX_OP_FREE_CTX:
@@ -45,6 +45,7 @@ struct amdgpu_ctx {
struct dma_fence **fences;
struct amdgpu_ctx_entity *entities[AMDGPU_HW_IP_NUM];
bool preamble_presented;
+ bool is_secure;
enum drm_sched_priority init_priority;
enum drm_sched_priority override_priority;
struct mutex lock;