Message ID | Zn4BupVa65CVayqQ@slm.duckdns.org (mailing list archive)
---|---
State | Not Applicable
Series | [sched_ext/for-6.11,1/2] sched_ext: Implement DSQ iterator
On Thu, Jun 27, 2024 at 5:20 PM Tejun Heo <tj@kernel.org> wrote:
>
> DSQs are very opaque in the consumption path. The BPF scheduler has no way
> of knowing which tasks are being considered and which is picked. This patch
> adds BPF DSQ iterator.
>
> - Allows iterating tasks queued on a DSQ in the dispatch order or reverse
>   from anywhere using bpf_for_each(scx_dsq) or calling the iterator kfuncs
>   directly.
>
> - Has ordering guarantee where only tasks which were already queued when the
>   iteration started are visible and consumable during the iteration.
>
> scx_qmap is updated to implement periodic dumping of the shared DSQ.
>
> v2: - scx_bpf_consume_task() is separated out into a separate patch.
>
>     - DSQ seq and iter flags don't need to be u64. Use u32.
>
> Signed-off-by: Tejun Heo <tj@kernel.org>
> Reviewed-by: David Vernet <dvernet@meta.com>
> Cc: Alexei Starovoitov <ast@kernel.org>
> Cc: bpf@vger.kernel.org
> ---
> Hello, Alexei.
>
> These two patches implement inline iterator for a task queue data structure
> that's used by sched_ext. The first one implements the iterator itself. It's
> pretty straightforward and seems to work fine. The second one implements a
> kfunc which consumes a task while iterating. This one is a bit nasty
> unfortunately. I'll continue on the second patch.
>
> Thanks.
>
>  include/linux/sched/ext.h                | 4
>  kernel/sched/ext.c                       | 182 ++++++++++++++++++++++++++++++-
>  tools/sched_ext/include/scx/common.bpf.h | 3
>  tools/sched_ext/scx_qmap.bpf.c           | 25 ++++
>  tools/sched_ext/scx_qmap.c               | 8 +
>  5 files changed, 218 insertions(+), 4 deletions(-)
>
> --- a/include/linux/sched/ext.h
> +++ b/include/linux/sched/ext.h
> @@ -61,6 +61,7 @@ struct scx_dispatch_q {
>  	struct list_head list;	/* tasks in dispatch order */
>  	struct rb_root priq;	/* used to order by p->scx.dsq_vtime */
>  	u32 nr;
> +	u32 seq;		/* used by BPF iter */
>  	u64 id;
>  	struct rhash_head hash_node;
>  	struct llist_node free_node;
> @@ -94,6 +95,8 @@ enum scx_task_state {
>  /* scx_entity.dsq_flags */
>  enum scx_ent_dsq_flags {
>  	SCX_TASK_DSQ_ON_PRIQ = 1 << 0,	/* task is queued on the priority queue of a dsq */
> +
> +	SCX_TASK_DSQ_CURSOR = 1 << 31,	/* iteration cursor, not a task */
>  };
>
>  /*
> @@ -134,6 +137,7 @@ struct scx_dsq_node {
>  struct sched_ext_entity {
>  	struct scx_dispatch_q *dsq;
>  	struct scx_dsq_node dsq_node;	/* protected by dsq lock */
> +	u32 dsq_seq;
>  	u32 flags;			/* protected by rq lock */
>  	u32 weight;
>  	s32 sticky_cpu;
> --- a/kernel/sched/ext.c
> +++ b/kernel/sched/ext.c
> @@ -1066,6 +1066,72 @@ static __always_inline bool scx_kf_allow
>  	return true;
>  }
>
> +/**
> + * nldsq_next_task - Iterate to the next task in a non-local DSQ
> + * @dsq: user dsq being iterated
> + * @cur: current position, %NULL to start iteration
> + * @rev: walk backwards
> + *
> + * Returns %NULL when iteration is finished.
> + */
> +static struct task_struct *nldsq_next_task(struct scx_dispatch_q *dsq,
> +					   struct task_struct *cur, bool rev)
> +{
> +	struct list_head *list_node;
> +	struct scx_dsq_node *dsq_node;
> +
> +	lockdep_assert_held(&dsq->lock);
> +
> +	if (cur)
> +		list_node = &cur->scx.dsq_node.list;
> +	else
> +		list_node = &dsq->list;
> +
> +	/* find the next task, need to skip BPF iteration cursors */
> +	do {
> +		if (rev)
> +			list_node = list_node->prev;
> +		else
> +			list_node = list_node->next;
> +
> +		if (list_node == &dsq->list)
> +			return NULL;
> +
> +		dsq_node = container_of(list_node, struct scx_dsq_node, list);
> +	} while (dsq_node->flags & SCX_TASK_DSQ_CURSOR);
> +
> +	return container_of(dsq_node, struct task_struct, scx.dsq_node);
> +}
> +
> +#define nldsq_for_each_task(p, dsq)					\
> +	for ((p) = nldsq_next_task((dsq), NULL, false); (p);		\
> +	     (p) = nldsq_next_task((dsq), (p), false))
> +
> +
> +/*
> + * BPF DSQ iterator. Tasks in a non-local DSQ can be iterated in [reverse]
> + * dispatch order. BPF-visible iterator is opaque and larger to allow future
> + * changes without breaking backward compatibility. Can be used with
> + * bpf_for_each(). See bpf_iter_scx_dsq_*().
> + */
> +enum scx_dsq_iter_flags {
> +	/* iterate in the reverse dispatch order */
> +	SCX_DSQ_ITER_REV = 1U << 0,
> +
> +	__SCX_DSQ_ITER_ALL_FLAGS = SCX_DSQ_ITER_REV,
> +};
> +
> +struct bpf_iter_scx_dsq_kern {
> +	struct scx_dsq_node cursor;
> +	struct scx_dispatch_q *dsq;
> +	u32 dsq_seq;
> +	u32 flags;
> +} __attribute__((aligned(8)));
> +
> +struct bpf_iter_scx_dsq {
> +	u64 __opaque[12];
> +} __attribute__((aligned(8)));

I think this is a bit too much to put on the prog stack.
Folks are working on increasing this limit and moving
the stack into "divided stack", so it won't be an issue eventually,
but let's find a way to reduce it.

It seems to me scx_dsq_node has a bunch of fields,
but if I'm reading the code correctly this patch is
only using the cursor.list part of it?

Another alternative is to use bpf_mem_alloc() like we do
in bpf_iter_css_task and others?

> +
>
>  /*
>   * SCX task iterator.
> @@ -1415,7 +1481,7 @@ static void dispatch_enqueue(struct scx_
>  	 * tested easily when adding the first task.
>  	 */
>  	if (unlikely(RB_EMPTY_ROOT(&dsq->priq) &&
> -		     !list_empty(&dsq->list)))
> +		     nldsq_next_task(dsq, NULL, false)))
>  		scx_ops_error("DSQ ID 0x%016llx already had FIFO-enqueued tasks",
>  			      dsq->id);
>
> @@ -1447,6 +1513,10 @@ static void dispatch_enqueue(struct scx_
>  		list_add_tail(&p->scx.dsq_node.list, &dsq->list);
>  	}
>
> +	/* seq records the order tasks are queued, used by BPF DSQ iterator */
> +	dsq->seq++;
> +	p->scx.dsq_seq = dsq->seq;
> +
>  	dsq_mod_nr(dsq, 1);
>  	p->scx.dsq = dsq;
>
> @@ -2109,7 +2179,7 @@ retry:
>
>  	raw_spin_lock(&dsq->lock);
>
> -	list_for_each_entry(p, &dsq->list, scx.dsq_node.list) {
> +	nldsq_for_each_task(p, dsq) {
>  		struct rq *task_rq = task_rq(p);
>
>  		if (rq == task_rq) {
> @@ -5697,6 +5767,111 @@ __bpf_kfunc void scx_bpf_destroy_dsq(u64
>  	destroy_dsq(dsq_id);
>  }
>
> +/**
> + * bpf_iter_scx_dsq_new - Create a DSQ iterator
> + * @it: iterator to initialize
> + * @dsq_id: DSQ to iterate
> + * @flags: %SCX_DSQ_ITER_*
> + *
> + * Initialize BPF iterator @it which can be used with bpf_for_each() to walk
> + * tasks in the DSQ specified by @dsq_id. Iteration using @it only includes
> + * tasks which are already queued when this function is invoked.
> + */
> +__bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
> +				     u64 flags)
> +{
> +	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
> +
> +	BUILD_BUG_ON(sizeof(struct bpf_iter_scx_dsq_kern) >
> +		     sizeof(struct bpf_iter_scx_dsq));
> +	BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) !=
> +		     __alignof__(struct bpf_iter_scx_dsq));
> +
> +	if (flags & ~__SCX_DSQ_ITER_ALL_FLAGS)
> +		return -EINVAL;
> +
> +	kit->dsq = find_non_local_dsq(dsq_id);
> +	if (!kit->dsq)
> +		return -ENOENT;
> +
> +	INIT_LIST_HEAD(&kit->cursor.list);
> +	RB_CLEAR_NODE(&kit->cursor.priq);
> +	kit->cursor.flags = SCX_TASK_DSQ_CURSOR;

Are these two assignments really necessary?
Something inside nldsq_next_task() is using that?

> +	kit->dsq_seq = READ_ONCE(kit->dsq->seq);
> +	kit->flags = flags;
> +
> +	return 0;
> +}
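For a rough sense of the numbers under discussion: on an LP64 build, dropping the rbtree node from the cursor alone shrinks it from 48 to 24 bytes. The structs below are simplified stand-ins for the kernel's list_head/rb_node/scx_dsq_node, used only to illustrate the arithmetic; this is not code from the patch.

#include <stdio.h>

/* Simplified stand-ins for the kernel types (illustration only). */
struct list_head { struct list_head *next, *prev; };
struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right, *rb_left; };

/* Cursor as in the patch: a full scx_dsq_node, including the rbtree node. */
struct cursor_full {
	struct list_head list;
	struct rb_node priq;
	unsigned int flags;
};

/* Hypothetical trimmed cursor: only what nldsq_next_task() actually walks. */
struct cursor_trimmed {
	struct list_head list;
	unsigned int flags;
};

int main(void)
{
	/* On x86-64: 16 + 24 + 4 (+pad) = 48 vs 16 + 4 (+pad) = 24 bytes. */
	printf("full cursor:    %zu bytes\n", sizeof(struct cursor_full));
	printf("trimmed cursor: %zu bytes\n", sizeof(struct cursor_trimmed));
	return 0;
}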
Hello,

On Thu, Jun 27, 2024 at 06:11:48PM -0700, Alexei Starovoitov wrote:
> > +struct bpf_iter_scx_dsq_kern {
> > +	struct scx_dsq_node cursor;
> > +	struct scx_dispatch_q *dsq;
> > +	u32 dsq_seq;
> > +	u32 flags;
> > +} __attribute__((aligned(8)));
> > +
> > +struct bpf_iter_scx_dsq {
> > +	u64 __opaque[12];
> > +} __attribute__((aligned(8)));
>
> I think this is a bit too much to put on the prog stack.
> Folks are working on increasing this limit and moving
> the stack into "divided stack", so it won't be an issue eventually,
> but let's find a way to reduce it.

Yeah, it is kinda big. Do you have some idea of where the boundary between
okay and too big would fall?

> It seems to me scx_dsq_node has a bunch of fields,
> but if I'm reading the code correctly this patch is
> only using cursor.list part of it ?

Great point. Cursors used to have to go on the rbtrees too but that's no
longer the case, so I should be able to drop the rbnode, which should help
reduce the size substantially. I'll see what I can do.

> Another alternative is to use bpf_mem_alloc() like we do
> in bpf_iter_css_task and others?

This might be okay, but given that this can be used pretty frequently (e.g.
every scheduling event) and it *seems* possible to reduce its size
substantially, I'd like to keep it on stack if possible.

> > +__bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
> > +				     u64 flags)
> > +{
> > +	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
> > +
> > +	BUILD_BUG_ON(sizeof(struct bpf_iter_scx_dsq_kern) >
> > +		     sizeof(struct bpf_iter_scx_dsq));
> > +	BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) !=
> > +		     __alignof__(struct bpf_iter_scx_dsq));
> > +
> > +	if (flags & ~__SCX_DSQ_ITER_ALL_FLAGS)
> > +		return -EINVAL;
> > +
> > +	kit->dsq = find_non_local_dsq(dsq_id);
> > +	if (!kit->dsq)
> > +		return -ENOENT;
> > +
> > +	INIT_LIST_HEAD(&kit->cursor.list);
> > +	RB_CLEAR_NODE(&kit->cursor.priq);
> > +	kit->cursor.flags = SCX_TASK_DSQ_CURSOR;
>
> Are these two assignments really necessary?
> Something inside nldsq_next_task() is using that?
>
> > +	kit->dsq_seq = READ_ONCE(kit->dsq->seq);
> > +	kit->flags = flags;

I'm a bit confused whether you're referring to the statements above or
below, but AFAICS, they're all used except for kit->cursor.priq:

- SCX_TASK_DSQ_CURSOR assignment is what tells nldsq_next_task() that the
  node is a cursor, not a real task, and thus should be skipped for
  internal iterations.

- kit->dsq_seq is used by bpf_iter_scx_dsq_next() to ignore tasks that are
  queued after the iteration has started. This, among other things,
  guarantees that p->scx.dsq_vtime increases monotonically throughout
  iteration.

- kit->flags carries SCX_DSQ_ITER_REV which tells bpf_iter_scx_dsq_next()
  the direction of the iteration.

Thanks.
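The dsq_seq check Tejun describes is the (s32)(p->scx.dsq_seq - kit->dsq_seq) > 0 test in bpf_iter_scx_dsq_next(). A standalone sketch of the same signed-difference idiom, in plain userspace C with made-up names, shows why the "queued after the snapshot" test stays correct across u32 wraparound:

#include <stdint.h>
#include <stdio.h>

/*
 * The iterator skips tasks enqueued after bpf_iter_scx_dsq_new() by
 * comparing each task's dsq_seq against the sequence snapshot taken at
 * init time. Casting the unsigned difference to a signed type keeps the
 * comparison correct even after the u32 counter wraps.
 */
static int queued_after_snapshot(uint32_t task_seq, uint32_t snap_seq)
{
	return (int32_t)(task_seq - snap_seq) > 0;
}

int main(void)
{
	uint32_t snap = 0xfffffffe;	/* snapshot taken just before wraparound */

	printf("%d\n", queued_after_snapshot(0xfffffffd, snap)); /* 0: queued before */
	printf("%d\n", queued_after_snapshot(0xfffffffe, snap)); /* 0: at snapshot   */
	printf("%d\n", queued_after_snapshot(0x00000001, snap)); /* 1: queued after  */
	return 0;
}

Tasks for which this test is true are skipped, which is what bounds the iteration to tasks that were already queued when bpf_iter_scx_dsq_new() ran.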
--- a/include/linux/sched/ext.h
+++ b/include/linux/sched/ext.h
@@ -61,6 +61,7 @@ struct scx_dispatch_q {
 	struct list_head list;	/* tasks in dispatch order */
 	struct rb_root priq;	/* used to order by p->scx.dsq_vtime */
 	u32 nr;
+	u32 seq;		/* used by BPF iter */
 	u64 id;
 	struct rhash_head hash_node;
 	struct llist_node free_node;
@@ -94,6 +95,8 @@ enum scx_task_state {
 /* scx_entity.dsq_flags */
 enum scx_ent_dsq_flags {
 	SCX_TASK_DSQ_ON_PRIQ = 1 << 0,	/* task is queued on the priority queue of a dsq */
+
+	SCX_TASK_DSQ_CURSOR = 1 << 31,	/* iteration cursor, not a task */
 };
 
 /*
@@ -134,6 +137,7 @@ struct scx_dsq_node {
 struct sched_ext_entity {
 	struct scx_dispatch_q *dsq;
 	struct scx_dsq_node dsq_node;	/* protected by dsq lock */
+	u32 dsq_seq;
 	u32 flags;			/* protected by rq lock */
 	u32 weight;
 	s32 sticky_cpu;
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1066,6 +1066,72 @@ static __always_inline bool scx_kf_allow
 	return true;
 }
 
+/**
+ * nldsq_next_task - Iterate to the next task in a non-local DSQ
+ * @dsq: user dsq being iterated
+ * @cur: current position, %NULL to start iteration
+ * @rev: walk backwards
+ *
+ * Returns %NULL when iteration is finished.
+ */
+static struct task_struct *nldsq_next_task(struct scx_dispatch_q *dsq,
+					   struct task_struct *cur, bool rev)
+{
+	struct list_head *list_node;
+	struct scx_dsq_node *dsq_node;
+
+	lockdep_assert_held(&dsq->lock);
+
+	if (cur)
+		list_node = &cur->scx.dsq_node.list;
+	else
+		list_node = &dsq->list;
+
+	/* find the next task, need to skip BPF iteration cursors */
+	do {
+		if (rev)
+			list_node = list_node->prev;
+		else
+			list_node = list_node->next;
+
+		if (list_node == &dsq->list)
+			return NULL;
+
+		dsq_node = container_of(list_node, struct scx_dsq_node, list);
+	} while (dsq_node->flags & SCX_TASK_DSQ_CURSOR);
+
+	return container_of(dsq_node, struct task_struct, scx.dsq_node);
+}
+
+#define nldsq_for_each_task(p, dsq)					\
+	for ((p) = nldsq_next_task((dsq), NULL, false); (p);		\
+	     (p) = nldsq_next_task((dsq), (p), false))
+
+
+/*
+ * BPF DSQ iterator. Tasks in a non-local DSQ can be iterated in [reverse]
+ * dispatch order. BPF-visible iterator is opaque and larger to allow future
+ * changes without breaking backward compatibility. Can be used with
+ * bpf_for_each(). See bpf_iter_scx_dsq_*().
+ */
+enum scx_dsq_iter_flags {
+	/* iterate in the reverse dispatch order */
+	SCX_DSQ_ITER_REV = 1U << 0,
+
+	__SCX_DSQ_ITER_ALL_FLAGS = SCX_DSQ_ITER_REV,
+};
+
+struct bpf_iter_scx_dsq_kern {
+	struct scx_dsq_node cursor;
+	struct scx_dispatch_q *dsq;
+	u32 dsq_seq;
+	u32 flags;
+} __attribute__((aligned(8)));
+
+struct bpf_iter_scx_dsq {
+	u64 __opaque[12];
+} __attribute__((aligned(8)));
+
 
 /*
  * SCX task iterator.
@@ -1415,7 +1481,7 @@ static void dispatch_enqueue(struct scx_
 	 * tested easily when adding the first task.
 	 */
 	if (unlikely(RB_EMPTY_ROOT(&dsq->priq) &&
-		     !list_empty(&dsq->list)))
+		     nldsq_next_task(dsq, NULL, false)))
 		scx_ops_error("DSQ ID 0x%016llx already had FIFO-enqueued tasks",
 			      dsq->id);
 
@@ -1447,6 +1513,10 @@ static void dispatch_enqueue(struct scx_
 		list_add_tail(&p->scx.dsq_node.list, &dsq->list);
 	}
 
+	/* seq records the order tasks are queued, used by BPF DSQ iterator */
+	dsq->seq++;
+	p->scx.dsq_seq = dsq->seq;
+
 	dsq_mod_nr(dsq, 1);
 	p->scx.dsq = dsq;
 
@@ -2109,7 +2179,7 @@ retry:
 
 	raw_spin_lock(&dsq->lock);
 
-	list_for_each_entry(p, &dsq->list, scx.dsq_node.list) {
+	nldsq_for_each_task(p, dsq) {
 		struct rq *task_rq = task_rq(p);
 
 		if (rq == task_rq) {
@@ -5697,6 +5767,111 @@ __bpf_kfunc void scx_bpf_destroy_dsq(u64
 	destroy_dsq(dsq_id);
 }
 
+/**
+ * bpf_iter_scx_dsq_new - Create a DSQ iterator
+ * @it: iterator to initialize
+ * @dsq_id: DSQ to iterate
+ * @flags: %SCX_DSQ_ITER_*
+ *
+ * Initialize BPF iterator @it which can be used with bpf_for_each() to walk
+ * tasks in the DSQ specified by @dsq_id. Iteration using @it only includes
+ * tasks which are already queued when this function is invoked.
+ */
+__bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
+				     u64 flags)
+{
+	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
+
+	BUILD_BUG_ON(sizeof(struct bpf_iter_scx_dsq_kern) >
+		     sizeof(struct bpf_iter_scx_dsq));
+	BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) !=
+		     __alignof__(struct bpf_iter_scx_dsq));
+
+	if (flags & ~__SCX_DSQ_ITER_ALL_FLAGS)
+		return -EINVAL;
+
+	kit->dsq = find_non_local_dsq(dsq_id);
+	if (!kit->dsq)
+		return -ENOENT;
+
+	INIT_LIST_HEAD(&kit->cursor.list);
+	RB_CLEAR_NODE(&kit->cursor.priq);
+	kit->cursor.flags = SCX_TASK_DSQ_CURSOR;
+	kit->dsq_seq = READ_ONCE(kit->dsq->seq);
+	kit->flags = flags;
+
+	return 0;
+}
+
+/**
+ * bpf_iter_scx_dsq_next - Progress a DSQ iterator
+ * @it: iterator to progress
+ *
+ * Return the next task. See bpf_iter_scx_dsq_new().
+ */
+__bpf_kfunc struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it)
+{
+	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
+	bool rev = kit->flags & SCX_DSQ_ITER_REV;
+	struct task_struct *p;
+	unsigned long flags;
+
+	if (!kit->dsq)
+		return NULL;
+
+	raw_spin_lock_irqsave(&kit->dsq->lock, flags);
+
+	if (list_empty(&kit->cursor.list))
+		p = NULL;
+	else
+		p = container_of(&kit->cursor, struct task_struct, scx.dsq_node);
+
+	/*
+	 * Only tasks which were queued before the iteration started are
+	 * visible. This bounds BPF iterations and guarantees that vtime never
+	 * jumps in the other direction while iterating.
+	 */
+	do {
+		p = nldsq_next_task(kit->dsq, p, rev);
+	} while (p && unlikely((s32)(p->scx.dsq_seq - kit->dsq_seq) > 0));
+
+	if (p) {
+		if (rev)
+			list_move_tail(&kit->cursor.list, &p->scx.dsq_node.list);
+		else
+			list_move(&kit->cursor.list, &p->scx.dsq_node.list);
+	} else {
+		list_del_init(&kit->cursor.list);
+	}
+
+	raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
+
+	return p;
+}
+
+/**
+ * bpf_iter_scx_dsq_destroy - Destroy a DSQ iterator
+ * @it: iterator to destroy
+ *
+ * Undo bpf_iter_scx_dsq_new().
+ */
+__bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it)
+{
+	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
+
+	if (!kit->dsq)
+		return;
+
+	if (!list_empty(&kit->cursor.list)) {
+		unsigned long flags;
+
+		raw_spin_lock_irqsave(&kit->dsq->lock, flags);
+		list_del_init(&kit->cursor.list);
+		raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
+	}
+	kit->dsq = NULL;
+}
+
 __bpf_kfunc_end_defs();
 
 static s32 __bstr_format(u64 *data_buf, char *line_buf, size_t line_size,
@@ -6118,6 +6293,9 @@ BTF_KFUNCS_START(scx_kfunc_ids_any)
 BTF_ID_FLAGS(func, scx_bpf_kick_cpu)
 BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued)
 BTF_ID_FLAGS(func, scx_bpf_destroy_dsq)
+BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_ITER_NEW | KF_RCU_PROTECTED)
+BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY)
 BTF_ID_FLAGS(func, scx_bpf_exit_bstr, KF_TRUSTED_ARGS)
 BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_TRUSTED_ARGS)
 BTF_ID_FLAGS(func, scx_bpf_dump_bstr, KF_TRUSTED_ARGS)
--- a/tools/sched_ext/include/scx/common.bpf.h
+++ b/tools/sched_ext/include/scx/common.bpf.h
@@ -39,6 +39,9 @@ u32 scx_bpf_reenqueue_local(void) __ksym
 void scx_bpf_kick_cpu(s32 cpu, u64 flags) __ksym;
 s32 scx_bpf_dsq_nr_queued(u64 dsq_id) __ksym;
 void scx_bpf_destroy_dsq(u64 dsq_id) __ksym;
+int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id, bool rev) __ksym __weak;
+struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it) __ksym __weak;
+void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it) __ksym __weak;
 void scx_bpf_exit_bstr(s64 exit_code, char *fmt, unsigned long long *data, u32 data__sz) __ksym __weak;
 void scx_bpf_error_bstr(char *fmt, unsigned long long *data, u32 data_len) __ksym;
 void scx_bpf_dump_bstr(char *fmt, unsigned long long *data, u32 data_len) __ksym __weak;
--- a/tools/sched_ext/scx_qmap.bpf.c
+++ b/tools/sched_ext/scx_qmap.bpf.c
@@ -36,6 +36,7 @@ const volatile u32 stall_user_nth;
 const volatile u32 stall_kernel_nth;
 const volatile u32 dsp_inf_loop_after;
 const volatile u32 dsp_batch;
+const volatile bool print_shared_dsq;
 const volatile s32 disallow_tgid;
 const volatile bool suppress_dump;
 
@@ -604,10 +605,34 @@ out:
 	scx_bpf_put_cpumask(online);
 }
 
+/*
+ * Dump the currently queued tasks in the shared DSQ to demonstrate the usage of
+ * scx_bpf_dsq_nr_queued() and DSQ iterator. Raise the dispatch batch count to
+ * see meaningful dumps in the trace pipe.
+ */
+static void dump_shared_dsq(void)
+{
+	struct task_struct *p;
+	s32 nr;
+
+	if (!(nr = scx_bpf_dsq_nr_queued(SHARED_DSQ)))
+		return;
+
+	bpf_printk("Dumping %d tasks in SHARED_DSQ in reverse order", nr);
+
+	bpf_rcu_read_lock();
+	bpf_for_each(scx_dsq, p, SHARED_DSQ, SCX_DSQ_ITER_REV)
+		bpf_printk("%s[%d]", p->comm, p->pid);
+	bpf_rcu_read_unlock();
+}
+
 static int monitor_timerfn(void *map, int *key, struct bpf_timer *timer)
 {
 	monitor_cpuperf();
 
+	if (print_shared_dsq)
+		dump_shared_dsq();
+
 	bpf_timer_start(timer, ONE_SEC_IN_NS, 0);
 	return 0;
 }
--- a/tools/sched_ext/scx_qmap.c
+++ b/tools/sched_ext/scx_qmap.c
@@ -20,7 +20,7 @@ const char help_fmt[] =
 "See the top-level comment in .bpf.c for more details.\n"
 "\n"
 "Usage: %s [-s SLICE_US] [-e COUNT] [-t COUNT] [-T COUNT] [-l COUNT] [-b COUNT]\n"
-"       [-d PID] [-D LEN] [-p] [-v]\n"
+"       [-P] [-d PID] [-D LEN] [-p] [-v]\n"
 "\n"
 "  -s SLICE_US   Override slice duration\n"
 "  -e COUNT      Trigger scx_bpf_error() after COUNT enqueues\n"
@@ -28,6 +28,7 @@ const char help_fmt[] =
 "  -T COUNT      Stall every COUNT'th kernel thread\n"
 "  -l COUNT      Trigger dispatch infinite looping after COUNT dispatches\n"
 "  -b COUNT      Dispatch upto COUNT tasks together\n"
+"  -P            Print out DSQ content to trace_pipe every second, use with -b\n"
 "  -d PID        Disallow a process from switching into SCHED_EXT (-1 for self)\n"
 "  -D LEN        Set scx_exit_info.dump buffer length\n"
 "  -S            Suppress qmap-specific debug dump\n"
@@ -62,7 +63,7 @@ int main(int argc, char **argv)
 
 	skel = SCX_OPS_OPEN(qmap_ops, scx_qmap);
 
-	while ((opt = getopt(argc, argv, "s:e:t:T:l:b:d:D:Spvh")) != -1) {
+	while ((opt = getopt(argc, argv, "s:e:t:T:l:b:Pd:D:Spvh")) != -1) {
 		switch (opt) {
 		case 's':
 			skel->rodata->slice_ns = strtoull(optarg, NULL, 0) * 1000;
@@ -82,6 +83,9 @@ int main(int argc, char **argv)
 		case 'b':
 			skel->rodata->dsp_batch = strtoul(optarg, NULL, 0);
 			break;
+		case 'P':
+			skel->rodata->print_shared_dsq = true;
+			break;
 		case 'd':
 			skel->rodata->disallow_tgid = strtol(optarg, NULL, 0);
 			if (skel->rodata->disallow_tgid < 0)
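Beyond scx_qmap's reverse-order dump above, here is a minimal forward-iteration sketch for a scheduler's .bpf.c, assuming the usual common.bpf.h environment; the helper, its name, and its use of p->tgid are illustrative and not part of the patch.

/*
 * Illustrative only: count how many tasks queued on @dsq_id belong to
 * @tgid. Forward (dispatch-order) iteration, flags == 0. The iterator
 * kfuncs are registered with KF_RCU_PROTECTED, hence the explicit
 * bpf_rcu_read_lock()/unlock() pair, mirroring dump_shared_dsq() above.
 */
static u32 count_queued_in_tgid(u64 dsq_id, s32 tgid)
{
	struct task_struct *p;
	u32 cnt = 0;

	bpf_rcu_read_lock();
	bpf_for_each(scx_dsq, p, dsq_id, 0) {
		if (p->tgid == tgid)
			cnt++;
	}
	bpf_rcu_read_unlock();

	return cnt;
}

Per the changelog's ordering guarantee, only tasks that were already queued when the iterator was initialized are visited; tasks enqueued while the loop runs are skipped.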