@@ -464,9 +464,11 @@ void mem_cgroup_handle_over_high(void);
unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg);
-void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
+void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
struct task_struct *p);
+void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
+
static inline void mem_cgroup_oom_enable(void)
{
WARN_ON(current->memcg_may_oom);
@@ -859,7 +861,13 @@ static inline unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
}
static inline void
-mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
+mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
+ struct task_struct *p)
+{
+}
+
+static inline void
+mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}
@@ -1118,33 +1118,39 @@ static const char *const memcg1_stat_names[] = {
};
#define K(x) ((x) << (PAGE_SHIFT-10))
+
/**
- * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
- * @memcg: The memory cgroup that went over limit
- * @p: Task that is going to be killed
+ * mem_cgroup_print_oom_context: Print OOM context information including allocation
+ * constraint, nodemask, origin memcg that has reached its limit, kill memcg that
+ * contains the killed process, killed process's command, pid and uid.
*
- * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
- * enabled
+ * @memcg: The origin memory cgroup that has reached its limit
+ * @p: Task that is going to be killed
*/
-void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
+void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
- struct mem_cgroup *iter;
- unsigned int i;
-
rcu_read_lock();
-
+ pr_cont("origin_memcg=");
+ if (memcg)
+ pr_cont_cgroup_path(memcg->css.cgroup);
if (p) {
- pr_info("Task in ");
+ pr_cont(" kill_memcg=");
pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
- pr_cont(" killed as a result of limit of ");
- } else {
- pr_info("Memory limit reached of cgroup ");
+ pr_cont(" task=%s pid=%5d uid=%5d\n", p->comm, p->pid,
+ from_kuid(&init_user_ns, task_uid(p)));
}
-
- pr_cont_cgroup_path(memcg->css.cgroup);
- pr_cont("\n");
-
rcu_read_unlock();
+}
+
+/**
+ * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
+ * memory controller.
+ * @memcg: The memory cgroup that went over limit
+ */
+void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
+{
+ struct mem_cgroup *iter;
+ unsigned int i;
pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
K((u64)page_counter_read(&memcg->memory)),
@@ -421,6 +421,8 @@ static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
static void dump_header(struct oom_control *oc, struct task_struct *p)
{
+ enum oom_constraint constraint = constrained_alloc(oc);
+
pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=%*pbl, order=%d, oom_score_adj=%hd\n",
current->comm, oc->gfp_mask, &oc->gfp_mask,
nodemask_pr_args(oc->nodemask), oc->order,
@@ -430,8 +432,26 @@ static void dump_header(struct oom_control *oc, struct task_struct *p)
cpuset_print_current_mems_allowed();
dump_stack();
+ pr_info("oom-kill: constrain=CONSTRAINT_");
+ switch (constraint) {
+ case CONSTRAINT_NONE:
+ pr_cont("NONE ");
+ break;
+ case CONSTRAINT_CPUSET:
+ pr_cont("CPUSET ");
+ break;
+ case CONSTRAINT_MEMORY_POLICY:
+ pr_cont("MEMORY_POLICY ");
+ break;
+ default:
+ pr_cont("MEMCG ");
+ break;
+ }
+ pr_cont("nodemask=%*pbl ", nodemask_pr_args(oc->nodemask));
+ mem_cgroup_print_oom_context(oc->memcg, p);
+ pr_cont("\n");
if (is_memcg_oom(oc))
- mem_cgroup_print_oom_info(oc->memcg, p);
+ mem_cgroup_print_oom_meminfo(oc->memcg);
else {
show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
if (is_dump_unreclaim_slabs())