@@ -227,6 +227,7 @@ extern int amdgpu_noretry;
extern int amdgpu_force_asic_type;
extern int amdgpu_smartshift_bias;
extern int amdgpu_use_xgmi_p2p;
+extern int amdgpu_ring_id_schedule;
extern int amdgpu_mtype_local;
extern bool enforce_isolation;
#ifdef CONFIG_HSA_AMD
@@ -1276,6 +1276,7 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
+ struct drm_amdgpu_cs_chunk_ib *chunk_ib = p->chunks[0].kdata;
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_job *leader = p->gang_leader;
struct amdgpu_bo_list_entry *e;
@@ -1285,8 +1286,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
uint64_t seq;
int r;
- for (i = 0; i < p->gang_size; ++i)
- drm_sched_job_arm(&p->jobs[i]->base, -1);
+ for (i = 0; i < p->gang_size; ++i) {
+ if (amdgpu_ring_id_schedule)
+ drm_sched_job_arm(&p->jobs[i]->base, chunk_ib->ring);
+ else
+ drm_sched_job_arm(&p->jobs[i]->base, -1);
+ }
for (i = 0; i < p->gang_size; ++i) {
struct dma_fence *fence;
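
The ring index consumed above comes from the first IB chunk supplied by userspace. As a minimal userspace-side sketch of where that value originates (assuming the standard struct drm_amdgpu_cs_chunk_ib UAPI layout; the header path and the fill_ib_chunk() helper name are illustrative only, not part of this patch):

#include <string.h>
#include <stdint.h>
#include <drm/amdgpu_drm.h>	/* UAPI definition of struct drm_amdgpu_cs_chunk_ib */

/* Illustrative helper: populate the IB chunk whose ring field
 * amdgpu_cs_submit() reads when ring_id_schedule is enabled. */
static void fill_ib_chunk(struct drm_amdgpu_cs_chunk_ib *ib,
			  uint64_t ib_va, uint32_t ib_bytes, uint32_t ring)
{
	memset(ib, 0, sizeof(*ib));
	ib->ip_type = AMDGPU_HW_IP_GFX;		/* target IP block */
	ib->ip_instance = 0;
	ib->ring = ring;			/* ring index forwarded to drm_sched_job_arm() */
	ib->va_start = ib_va;
	ib->ib_bytes = ib_bytes;
}
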
@@ -221,6 +221,7 @@ int amdgpu_reset_method = -1; /* auto */
int amdgpu_num_kcq = -1;
int amdgpu_smartshift_bias;
int amdgpu_use_xgmi_p2p = 1;
+int amdgpu_ring_id_schedule;
int amdgpu_vcnfw_log;
int amdgpu_sg_display = -1; /* auto */
int amdgpu_user_partt_mode = AMDGPU_AUTO_COMPUTE_PARTITION_MODE;
@@ -740,6 +741,13 @@ MODULE_PARM_DESC(use_xgmi_p2p,
"Enable XGMI P2P interface (0 = disable; 1 = enable (default))");
module_param_named(use_xgmi_p2p, amdgpu_use_xgmi_p2p, int, 0444);
+/**
+ * DOC: ring_id_schedule (int)
+ * Enables/disables the ring id schedule interface (0 = disable (default), 1 = enable)
+ */
+MODULE_PARM_DESC(ring_id_schedule,
+ "Enable ring id schedule interface(0 = disable, 1 = enable, -1 auto (default))");
+module_param_named(ring_id_schedule, amdgpu_ring_id_schedule, int, 0644);
#ifdef CONFIG_HSA_AMD
/**
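
For reference, the arm-with-ring-id pattern introduced in amdgpu_cs_submit() could be captured in one place. The helper below is only an illustrative sketch (amdgpu_sched_arm_with_ring() is a hypothetical name, not part of this patch) and assumes the two-argument drm_sched_job_arm() form used above plus the amdgpu_ring_id_schedule declaration this patch adds:

/* Illustrative only: honor the user-supplied ring id when the module
 * parameter is set, otherwise fall back to the scheduler's own choice. */
static inline void amdgpu_sched_arm_with_ring(struct drm_sched_job *job,
					      uint32_t ring)
{
	if (amdgpu_ring_id_schedule)
		drm_sched_job_arm(job, ring);	/* user-selected ring index */
	else
		drm_sched_job_arm(job, -1);	/* -1: let the scheduler decide */
}
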