@@ -290,7 +290,7 @@ static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
if (unlikely(!nreg))
- nreg = kmalloc(sizeof(*nreg), GFP_NOIO | __GFP_NOFAIL);
+ nreg = kmalloc_nofail(sizeof(*nreg), GFP_NOIO);
nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
DM_RH_CLEAN : DM_RH_NOSYNC;
@@ -1967,7 +1967,7 @@ void btrfs_add_delayed_iput(struct inode *inode)
if (atomic_add_unless(&inode->i_count, -1, 1))
return;
- delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
+ delayed = kmalloc_nofail(sizeof(*delayed), GFP_NOFS);
delayed->inode = inode;
spin_lock(&fs_info->delayed_iput_lock);
@@ -709,7 +709,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
}
trace_gfs2_log_flush(sdp, 1);
- ai = kzalloc(sizeof(struct gfs2_ail), GFP_NOFS | __GFP_NOFAIL);
+ ai = kzalloc_nofail(sizeof(struct gfs2_ail), GFP_NOFS);
INIT_LIST_HEAD(&ai->ai_ail1_list);
INIT_LIST_HEAD(&ai->ai_ail2_list);
@@ -1440,8 +1440,8 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
rgrp_blk++;
if (!bi->bi_clone) {
- bi->bi_clone = kmalloc(bi->bi_bh->b_size,
- GFP_NOFS | __GFP_NOFAIL);
+ bi->bi_clone = kmalloc_nofail(bi->bi_bh->b_size,
+ GFP_NOFS);
memcpy(bi->bi_clone + bi->bi_offset,
bi->bi_bh->b_data + bi->bi_offset,
bi->bi_len);
@@ -1759,9 +1759,6 @@ fail:
* @block: the block
*
* Figure out what RG a block belongs to and add that RG to the list
- *
- * FIXME: Don't use NOFAIL
- *
*/
void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist,
@@ -1789,8 +1786,8 @@ void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist,
if (rlist->rl_rgrps == rlist->rl_space) {
new_space = rlist->rl_space + 10;
- tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
- GFP_NOFS | __GFP_NOFAIL);
+ tmp = kcalloc_nofail(new_space, sizeof(struct gfs2_rgrpd *),
+ GFP_NOFS);
if (rlist->rl_rgd) {
memcpy(tmp, rlist->rl_rgd,
@@ -1811,17 +1808,14 @@ void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist,
* @rlist: the list of resource groups
* @state: the lock state to acquire the RG lock in
* @flags: the modifier flags for the holder structures
- *
- * FIXME: Don't use NOFAIL
- *
*/
void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
{
unsigned int x;
- rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
- GFP_NOFS | __GFP_NOFAIL);
+ rlist->rl_ghs = kcalloc_nofail(rlist->rl_rgrps,
+ sizeof(struct gfs2_holder), GFP_NOFS);
for (x = 0; x < rlist->rl_rgrps; x++)
gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
state, 0,
@@ -98,14 +98,9 @@ static int start_this_handle(journal_t *journal, handle_t *handle)
}
alloc_transaction:
- if (!journal->j_running_transaction) {
- new_transaction = kzalloc(sizeof(*new_transaction),
- GFP_NOFS|__GFP_NOFAIL);
- if (!new_transaction) {
- ret = -ENOMEM;
- goto out;
- }
- }
+ if (!journal->j_running_transaction)
+ new_transaction = kzalloc_nofail(sizeof(*new_transaction),
+ GFP_NOFS);
jbd_debug(3, "New handle %p going live.\n", handle);
@@ -2593,8 +2593,7 @@ static int journal_read(struct super_block *sb)
static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s)
{
struct reiserfs_journal_list *jl;
- jl = kzalloc(sizeof(struct reiserfs_journal_list),
- GFP_NOFS | __GFP_NOFAIL);
+ jl = kzalloc_nofail(sizeof(struct reiserfs_journal_list), GFP_NOFS);
INIT_LIST_HEAD(&jl->j_list);
INIT_LIST_HEAD(&jl->j_working_list);
INIT_LIST_HEAD(&jl->j_tail_bh_list);
@@ -334,6 +334,68 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
return kmalloc_node(size, flags | __GFP_ZERO, node);
}
+/**
+ * kmalloc_nofail - infinitely loop until kmalloc() succeeds.
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate (see kmalloc).
+ *
+ * NOTE: no new callers of this function should be implemented!
+ * All memory allocations should be failable whenever possible.
+ */
+static inline void *kmalloc_nofail(size_t size, gfp_t flags)
+{
+ void *ret;
+
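+ /*
+ * Retry in a loop: each kmalloc() attempt performs reclaim and may
+ * sleep as @flags allow, so the loop makes forward progress once
+ * memory is freed elsewhere.
+ */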
+ for (;;) {
+ ret = kmalloc(size, flags);
+ if (ret)
+ return ret;
+ WARN_ONCE(1, "Out of memory, no fallback implemented "
+ "(size=%zu flags=0x%x)\n",
+ size, flags);
+ }
+}
+
+/**
+ * kcalloc_nofail - infinitely loop until kcalloc() succeeds.
+ * @n: number of elements.
+ * @size: element size.
+ * @flags: the type of memory to allocate (see kcalloc).
+ *
+ * NOTE: no new callers of this function should be implemented!
+ * All memory allocations should be failable whenever possible.
+ */
+static inline void *kcalloc_nofail(size_t n, size_t size, gfp_t flags)
+{
+ void *ret;
+
+ for (;;) {
+ ret = kcalloc(n, size, flags);
+ if (ret)
+ return ret;
+ WARN_ONCE(1, "Out of memory, no fallback implemented "
+ "(n=%zu size=%zu flags=0x%x)\n",
+ n, size, flags);
+ }
+}
+
+/**
+ * kzalloc_nofail - infinitely loop until kzalloc() succeeds.
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate (see kzalloc).
+ *
+ * NOTE: no new callers of this function should be implemented!
+ * All memory allocations should be failable whenever possible.
+ */
+static inline void *kzalloc_nofail(size_t size, gfp_t flags)
+{
+ return kmalloc_nofail(size, flags | __GFP_ZERO);
+}
+
void __init kmem_cache_init_late(void);
#endif /* _LINUX_SLAB_H */
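
The helpers above make the retry policy explicit at the call site instead
of burying it in the page allocator behind __GFP_NOFAIL. As a rough,
illustrative sketch of what a conversion looks like for a hypothetical
caller (the names below are made up, not from the patch):

	struct foo *ptr;

	/* before: the page allocator loops internally */
	ptr = kmalloc(sizeof(*ptr), GFP_NOFS | __GFP_NOFAIL);

	/* after: the loop lives in one audited wrapper that WARNs once */
	ptr = kmalloc_nofail(sizeof(*ptr), GFP_NOFS);

Neither form can return NULL, so NULL checks that followed the old calls
(as in the start_this_handle() hunk above) can be dropped.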