@@ -355,8 +355,10 @@ static struct sysrq_key_op sysrq_term_op = {
static void moom_callback(struct work_struct *ignored)
{
- out_of_memory(node_zonelist(first_memory_node, GFP_KERNEL), GFP_KERNEL,
- 0, NULL, true);
+ if (!out_of_memory(node_zonelist(first_memory_node, GFP_KERNEL),
+ GFP_KERNEL, 0, NULL, true)) {
+ printk(KERN_INFO "OOM killer disabled\n");
+ }
}
static DECLARE_WORK(moom_work, moom_callback);
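
[Note, not part of the diff: out_of_memory() now reports whether the OOM
killer actually ran, so callers can tell "invoked" apart from "disabled by
oom_killer_disable()". A hypothetical caller outside this series would
follow the same pattern as moom_callback() above; trigger_oom_example()
below is illustrative only, not a real kernel function:

	static void trigger_oom_example(void)
	{
		/* force_kill=true: insist on killing a task; a false
		 * return means the OOM killer is currently disabled. */
		if (!out_of_memory(node_zonelist(first_memory_node, GFP_KERNEL),
				   GFP_KERNEL, 0, NULL, true))
			printk(KERN_INFO "OOM killer disabled, nothing killed\n");
	}
]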
@@ -68,7 +68,7 @@ extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
unsigned long totalpages, const nodemask_t *nodemask,
bool force_kill);
-extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
+extern bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
int order, nodemask_t *mask, bool force_kill);
extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);
@@ -85,21 +85,6 @@ extern void oom_killer_disable(void);
*/
extern void oom_killer_enable(void);
-/**
- * oom_killer_allowed_start - start OOM killer section
- *
- * Synchronise with oom_killer_{disable,enable} sections.
- * Returns 1 if oom_killer is allowed.
- */
-extern int oom_killer_allowed_start(void);
-
-/**
- * oom_killer_allowed_end - end OOM killer section
- *
- * previously started by oom_killer_allowed_end.
- */
-extern void oom_killer_allowed_end(void);
-
static inline bool oom_gfp_allowed(gfp_t gfp_mask)
{
return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY);
@@ -610,18 +610,8 @@ void oom_killer_enable(void)
up_write(&oom_sem);
}
-int oom_killer_allowed_start(void)
-{
- return down_read_trylock(&oom_sem);
-}
-
-void oom_killer_allowed_end(void)
-{
- up_read(&oom_sem);
-}
-
/**
- * out_of_memory - kill the "best" process when we run out of memory
+ * __out_of_memory - kill the "best" process when we run out of memory
* @zonelist: zonelist pointer
* @gfp_mask: memory allocation flags
* @order: amount of memory being requested as a power of 2
@@ -633,7 +623,7 @@ void oom_killer_allowed_end(void)
* OR try to be smart about which process to kill. Note that we
* don't have to be perfect here, we just have to be good.
*/
-void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
+static void __out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
int order, nodemask_t *nodemask, bool force_kill)
{
const nodemask_t *mpol_mask;
@@ -698,6 +688,28 @@ out:
schedule_timeout_killable(1);
}
+/**
+ * out_of_memory - tries to invoke the OOM killer
+ * @zonelist: zonelist pointer
+ * @gfp_mask: memory allocation flags
+ * @order: amount of memory being requested as a power of 2
+ * @nodemask: nodemask passed to page allocator
+ * @force_kill: true if a task must be killed, even if others are exiting
+ *
+ * Invokes __out_of_memory and returns true if the OOM killer is not
+ * disabled by oom_killer_disable(); otherwise returns false.
+ */
+bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
+ int order, nodemask_t *nodemask, bool force_kill)
+{
+ if (!down_read_trylock(&oom_sem))
+ return false;
+ __out_of_memory(zonelist, gfp_mask, order, nodemask, force_kill);
+ up_read(&oom_sem);
+
+ return true;
+}
+
/*
* The pagefault handler calls here because it is out of memory, so kill a
* memory-hogging task. If any populated zone has ZONE_OOM_LOCKED set, a
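
[Locking note, not part of the diff: out_of_memory() takes oom_sem for
reading with down_read_trylock(), and oom_killer_enable() above releases
the write side with up_write(&oom_sem). A minimal sketch of the
disable/enable pair this implies -- inferred from this diff only, the
real bodies in mm/oom_kill.c may do more:

	void oom_killer_disable(void)
	{
		/* Held for writing, so out_of_memory()'s read trylock
		 * fails until oom_killer_enable() is called. */
		down_write(&oom_sem);
	}

	void oom_killer_enable(void)
	{
		/* Readers may invoke the OOM killer again. */
		up_write(&oom_sem);
	}

Using a trylock on the read side keeps the allocation path non-blocking:
while the killer is disabled (e.g. by the PM freezer), out_of_memory()
fails fast and the allocator can fail the allocation instead of sleeping.]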
@@ -712,7 +723,7 @@ void pagefault_out_of_memory(void)
zonelist = node_zonelist(first_memory_node, GFP_KERNEL);
if (oom_zonelist_trylock(zonelist, GFP_KERNEL)) {
- out_of_memory(NULL, 0, 0, NULL, false);
+ __out_of_memory(NULL, 0, 0, NULL, false);
oom_zonelist_unlock(zonelist, GFP_KERNEL);
}
}
@@ -2239,10 +2239,11 @@ static inline struct page *
__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, struct zone *preferred_zone,
- int classzone_idx, int migratetype)
+ int classzone_idx, int migratetype, bool *oom_failed)
{
struct page *page;
+ *oom_failed = false;
/* Acquire the per-zone oom lock for each zone */
if (!oom_zonelist_trylock(zonelist, gfp_mask)) {
schedule_timeout_uninterruptible(1);
@@ -2279,8 +2280,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
goto out;
}
/* Exhausted what can be done so it's blamo time */
- out_of_memory(zonelist, gfp_mask, order, nodemask, false);
-
+ if (!out_of_memory(zonelist, gfp_mask, order, nodemask, false))
+ *oom_failed = true;
out:
oom_zonelist_unlock(zonelist, gfp_mask);
return page;
@@ -2706,26 +2707,28 @@ rebalance:
*/
if (!did_some_progress) {
if (oom_gfp_allowed(gfp_mask)) {
+ bool oom_failed;
+
/* Coredumps can quickly deplete all memory reserves */
if ((current->flags & PF_DUMPCORE) &&
!(gfp_mask & __GFP_NOFAIL))
goto nopage;
- /*
- * Just make sure that we cannot race with oom_killer
- * disabling e.g. PM freezer needs to make sure that
- * no OOM happens after all tasks are frozen.
- */
- if (!oom_killer_allowed_start())
- goto nopage;
page = __alloc_pages_may_oom(gfp_mask, order,
zonelist, high_zoneidx,
nodemask, preferred_zone,
- classzone_idx, migratetype);
- oom_killer_allowed_end();
+ classzone_idx, migratetype,
+ &oom_failed);
if (page)
goto got_pg;
+ /*
+ * The OOM killer might be disabled (e.g. by the PM
+ * freezer); in that case we must fail the allocation.
+ */
+ if (oom_failed)
+ goto nopage;
+
if (!(gfp_mask & __GFP_NOFAIL)) {
/*
* The oom killer is not called for high-order