@@ -515,6 +515,7 @@ struct dept_task {
extern void dept_sched_enter(void);
extern void dept_sched_exit(void);
extern void dept_kernel_enter(void);
+extern void dept_work_enter(void);
static inline void dept_ecxt_enter_nokeep(struct dept_map *m)
{
@@ -565,6 +566,7 @@ static inline void dept_ecxt_enter_nokeep(struct dept_map *m)
#define dept_sched_enter() do { } while (0)
#define dept_sched_exit() do { } while (0)
#define dept_kernel_enter() do { } while (0)
+#define dept_work_enter() do { } while (0)
#define dept_ecxt_enter_nokeep(m) do { } while (0)
#define dept_key_init(k) do { (void)(k); } while (0)
#define dept_key_destroy(k) do { (void)(k); } while (0)
@@ -1954,6 +1954,16 @@ void dept_hardirqs_off(unsigned long ip)
}
EXPORT_SYMBOL_GPL(dept_hardirqs_off);
+/*
+ * Assign a different context id to each work.
+ */
+void dept_work_enter(void)
+{
+ struct dept_task *dt = dept_task();
+
+ dt->cxt_id[DEPT_CXT_PROCESS] += 1UL << DEPT_CXTS_NR;
+}
+
void dept_kernel_enter(void)
{
struct dept_task *dt = dept_task();
@@ -51,6 +51,7 @@
#include <linux/sched/isolation.h>
#include <linux/nmi.h>
#include <linux/kvm_para.h>
+#include <linux/dept.h>
#include "workqueue_internal.h"
@@ -2199,6 +2200,8 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
lockdep_copy_map(&lockdep_map, &work->lockdep_map);
#endif
+ dept_work_enter();
+
/* ensure we're on the correct CPU */
WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
raw_smp_processor_id() != pool->cpu);
Workqueue already provides concurrency control. Because of that, any
wait in a work doesn't prevent events in other works with the control
enabled. Thus, each work is better considered a separate context. So
let Dept assign a different context id to each work.

Signed-off-by: Byungchul Park <max.byungchul.park@gmail.com>
---
 include/linux/dept.h     |  2 ++
 kernel/dependency/dept.c | 10 ++++++++++
 kernel/workqueue.c       |  3 +++
 3 files changed, 15 insertions(+)
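
For reference (not part of the patch), here is a minimal standalone
sketch of the idea: bumping the process-context counter by
(1UL << DEPT_CXTS_NR) at the start of every work gives each work a
distinct context id, so a wait in one work is not tied to events in
another. The struct layout and the values of DEPT_CXT_PROCESS and
DEPT_CXTS_NR below are illustrative assumptions and do not reproduce
the kernel's dept.h.

#include <stdio.h>

/* Assumed values for the sketch only, not the kernel definitions. */
enum dept_cxt { DEPT_CXT_PROCESS = 0, DEPT_CXTS_NR = 3 };

struct dept_task {
	unsigned long cxt_id[DEPT_CXTS_NR];
};

/* Start a new context id generation for the work about to run. */
static void dept_work_enter(struct dept_task *dt)
{
	/* Keep the low DEPT_CXTS_NR bits free for the context type. */
	dt->cxt_id[DEPT_CXT_PROCESS] += 1UL << DEPT_CXTS_NR;
}

int main(void)
{
	struct dept_task dt = { .cxt_id = { 0 } };

	/* Two works run by the same worker get distinct context ids. */
	dept_work_enter(&dt);
	printf("work #1 cxt_id: %lu\n", dt.cxt_id[DEPT_CXT_PROCESS]);

	dept_work_enter(&dt);
	printf("work #2 cxt_id: %lu\n", dt.cxt_id[DEPT_CXT_PROCESS]);

	return 0;
}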