[12/19] mm: memcontrol: convert anon and file-thp to new mem_cgroup_charge() API

Message ID 20200508183105.225460-13-hannes@cmpxchg.org (mailing list archive)
State New, archived
Series mm: memcontrol: charge swapin pages on instantiation

Commit Message

Johannes Weiner May 8, 2020, 6:30 p.m. UTC
With the page->mapping requirement gone from memcg, we can charge anon
and file-thp pages in a single step, right after they're allocated.

This removes two out of three API calls - especially the tricky commit
step that needed to happen at just the right time between when the
page is "set up" and when it's "published" - somewhat vague and fluid
concepts that varied by page type. All we need is a freshly allocated
page and a memcg context to charge.
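
To illustrate, the old three-call pattern versus the new one - a
sketch distilled from the conversions below, not verbatim from any one
call site; the fourth mem_cgroup_charge() argument is the lrucare flag
at this point in the series:

	struct mem_cgroup *memcg;

	/* before: three calls; commit had to land between "set up" and "published" */
	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg))
		goto err;
	/* ... install rmap, page tables, LRU ... */
	mem_cgroup_commit_charge(page, memcg, false);
	/* on any failure in between: mem_cgroup_cancel_charge(page, memcg) */

	/* after: one call, right after allocation */
	if (mem_cgroup_charge(page, mm, GFP_KERNEL, false))
		goto err;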

v2: prevent double charges on pre-allocated hugepages in khugepaged

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
---
 include/linux/mm.h      |  4 +---
 kernel/events/uprobes.c | 11 +++--------
 mm/filemap.c            |  2 +-
 mm/huge_memory.c        |  9 +++------
 mm/khugepaged.c         | 35 ++++++++++-------------------------
 mm/memory.c             | 36 ++++++++++--------------------------
 mm/migrate.c            |  5 +----
 mm/swapfile.c           |  6 +-----
 mm/userfaultfd.c        |  5 +----
 9 files changed, 31 insertions(+), 82 deletions(-)

Comments

Qian Cai May 12, 2020, 2:38 p.m. UTC | #1
> On May 8, 2020, at 2:30 PM, Johannes Weiner <hannes@cmpxchg.org> wrote:
> 
[]
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> 
> @@ -1198,10 +1193,11 @@ static void collapse_huge_page(struct mm_struct *mm,
> out_up_write:
> 	up_write(&mm->mmap_sem);
> out_nolock:
> +	if (*hpage)
> +		mem_cgroup_uncharge(*hpage);
> 	trace_mm_collapse_huge_page(mm, isolated, result);
> 	return;
> out:
> -	mem_cgroup_cancel_charge(new_page, memcg);
> 	goto out_up_write;
> }
[]

Some memory pressure will crash this new code. It looks somewhat racy.

The crash is at the check,

if (!page->mem_cgroup)

in mem_cgroup_uncharge(), where page == NULL.
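
For context, that dereference is the pre-check at the top of
mem_cgroup_uncharge() - roughly the following, simplified from
mm/memcontrol.c at the time:

void mem_cgroup_uncharge(struct page *page)
{
	if (mem_cgroup_disabled())
		return;

	/* Don't touch page->lru of any random page, pre-check: */
	if (!page->mem_cgroup)	/* faults here if page is a bogus pointer */
		return;
	/* ... uncharge proper ... */
}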

[ 2244.414421][  T726] BUG: Kernel NULL pointer dereference on read at 0x0000002c
[ 2244.414454][  T726] Faulting instruction address: 0xc0000000004f7e44
[ 2244.414467][  T726] Oops: Kernel access of bad area, sig: 11 [#1]
[ 2244.414488][  T726] LE PAGE_SIZE=64K MMU=Radix SMP NR_CPUS=256 DEBUG_PAGEALLOC NUMA PowerNV
[ 2244.414501][  T726] Modules linked in: brd ext4 crc16 mbcache jbd2 loop kvm_hv kvm ip_tables x_tables xfs sd_mod bnx2x ahci tg3 libahci libphy mdio libata firmware_class dm_mirror dm_region_hash dm_log dm_mod
[ 2244.414556][  T726] CPU: 11 PID: 726 Comm: khugepaged Not tainted 5.7.0-rc5-next-20200512+ #8
[ 2244.414579][  T726] NIP:  c0000000004f7e44 LR: c0000000004df95c CTR: c0000000001c1400
[ 2244.414600][  T726] REGS: c000001a2398f6e0 TRAP: 0300   Not tainted  (5.7.0-rc5-next-20200512+)
[ 2244.414630][  T726] MSR:  9000000000009033 <SF,HV,EE,ME,IR,DR,RI,LE>  CR: 24000244  XER: 20040000
[ 2244.414656][  T726] CFAR: c0000000004df958 DAR: 000000000000002c DSISR: 40000000 IRQMASK: 0 
[ 2244.414656][  T726] GPR00: c0000000004df95c c000001a2398f970 c00000000168a700 fffffffffffffff4 
[ 2244.414656][  T726] GPR04: ffffffffffffffff c000000000bd0980 0000000000000005 0000000000000080 
[ 2244.414656][  T726] GPR08: 0000001ffc030000 0000000000000001 0000000000000000 c00000000152bb58 
[ 2244.414656][  T726] GPR12: 0000000024000222 c000001fffff5680 c0000001d818ce00 c0000001d818cd00 
[ 2244.414656][  T726] GPR16: 0000000000000000 c000001a2398fce0 fe7fffffffffefff fffffffffffffe7f 
[ 2244.414656][  T726] GPR20: c000201320aa53c8 000000000000001e 0000000000000017 c00020047636b868 
[ 2244.414656][  T726] GPR24: 0000000000000000 0000000000000000 c000000001756080 c000001a2398fce0 
[ 2244.414656][  T726] GPR28: c000001a2398fa20 00007ffeeda00000 c000200f28547928 c000200f28547880 
[ 2244.414865][  T726] NIP [c0000000004f7e44] mem_cgroup_uncharge+0x34/0xb0
mem_cgroup_uncharge at mm/memcontrol.c:6563
[ 2244.414895][  T726] LR [c0000000004df95c] collapse_huge_page+0x24c/0x1000
collapse_huge_page at mm/khugepaged.c:1197
[ 2244.414924][  T726] Call Trace:
[ 2244.414940][  T726] [c000001a2398f970] [0000000000000001] 0x1 (unreliable)
[ 2244.414970][  T726] [c000001a2398f9c0] [c0000000004df814] collapse_huge_page+0x104/0x1000
collapse_huge_page at mm/khugepaged.c:1064 (discriminator 10)
[ 2244.414991][  T726] [c000001a2398faf0] [c0000000004e0f84] khugepaged_scan_pmd+0x874/0xc70
[ 2244.415021][  T726] [c000001a2398fbf0] [c0000000004e2a90] khugepaged+0x900/0x1920
[ 2244.415043][  T726] [c000001a2398fdb0] [c000000000155aa4] kthread+0x1c4/0x1d0
[ 2244.415075][  T726] [c000001a2398fe20] [c00000000000cb28] ret_from_kernel_thread+0x5c/0x74
[ 2244.415095][  T726] Instruction dump:
[ 2244.415113][  T726] 384228f0 7c0802a6 60000000 f821ffb1 e92d0c70 f9210048 39200000 3d22ffec 
[ 2244.415146][  T726] 3929f9f4 81290000 2f890000 409d0048 <e9230038> 2fa90000 419e003c 7c0802a6 
[ 2244.415181][  T726] ---[ end trace 3488eb8818913a26 ]---
Qian Cai May 12, 2020, 5:11 p.m. UTC | #2
> On May 12, 2020, at 10:38 AM, Qian Cai <cai@lca.pw> wrote:
> 
> []
> 
> Some memory pressure will crash this new code. It looks somewhat racy.

Reverting the whole series fixed the crash, i.e.,

git revert --no-edit 6070efb8e52b..c986ddf58a95

There is a minor conflict while reverting, due to another linux-next commit,

2a6b525f0de1 ("khugepaged: do not stop collapse if less than half PTEs are referenced")

which is trivial to resolve,

--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@@ -1091,8 -1000,8 +1093,9 @@@ static void collapse_huge_page(struct m
         * If it fails, we release mmap_sem and jump out_nolock.
         * Continuing to collapse causes inconsistency.
         */
 -      if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
 +      if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
 +                                                   pmd, referenced)) {
+               mem_cgroup_cancel_charge(new_page, memcg, true);
                up_read(&mm->mmap_sem);
                goto out_nolock;
        }


Johannes Weiner May 12, 2020, 9:58 p.m. UTC | #3
On Tue, May 12, 2020 at 10:38:54AM -0400, Qian Cai wrote:
> > []
> 
> Some memory pressure will crash this new code. It looks somewhat racy.
> 
> The crash is at the check,
> 
> if (!page->mem_cgroup)
> 
> in mem_cgroup_uncharge(), where page == NULL.

Thanks for the report, sorry about the inconvenience.

Hm, the page is exclusive at this point; nobody else should be
touching it. After all, khugepaged might reuse the preallocated page
for another pmd if this one fails to collapse.

Looking at the code, I think it's the page pointer itself that's
garbage, not page->mem_cgroup changing. If you have CONFIG_NUMA and
the allocation fails, *hpage could contain an ERR_PTR instead of
being NULL.
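
For reference, this is roughly the NUMA allocation path that leaves
the ERR_PTR behind - simplified from khugepaged_alloc_page() under
CONFIG_NUMA, not verbatim. Note that ERR_PTR(-ENOMEM) is (void *)-12,
so, assuming the usual 0x38 offset of page->mem_cgroup, the load wraps
around to 0x2c - which lines up with the DAR in the oops above:

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		/* not NULL: a plain "if (*hpage)" check still passes */
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}
	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}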

I think we need the following fixlet:

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index f2e0a5e5cfbb..f6161e17da26 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1193,7 +1193,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 out_up_write:
 	up_write(&mm->mmap_sem);
 out_nolock:
-	if (*hpage)
+	if (!IS_ERR_OR_NULL(*hpage))
 		mem_cgroup_uncharge(*hpage);
 	trace_mm_collapse_huge_page(mm, isolated, result);
 	return;
@@ -1928,7 +1928,7 @@ static void collapse_file(struct mm_struct *mm,
 	unlock_page(new_page);
 out:
 	VM_BUG_ON(!list_empty(&pagelist));
-	if (*hpage)
+	if (!IS_ERR_OR_NULL(*hpage))
 		mem_cgroup_uncharge(*hpage);
 	/* TODO: tracepoints */
 }
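
For completeness, IS_ERR_OR_NULL() covers both failure encodings in a
single test - simplified from include/linux/err.h:

static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr)
{
	return unlikely(!ptr) || IS_ERR_VALUE((unsigned long)ptr);
}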
Qian Cai May 12, 2020, 11:58 p.m. UTC | #4
> On May 12, 2020, at 5:58 PM, Johannes Weiner <hannes@cmpxchg.org> wrote:
> 
> []
> 
> Looking at the code, I think it's the page pointer itself that's
> garbage, not page->mem_cgroup changing. If you have CONFIG_NUMA and
> the allocation fails, *hpage could contain an ERR_PTR instead of
> being NULL.

Yes, I have NUMA here.

Stephen, can you pick this up first, before Andrew has a chance to push out the next mmotm, which will hopefully contain this fix?

https://lore.kernel.org/lkml/20200512215813.GA487759@cmpxchg.org/

Patch

diff --git a/include/linux/mm.h b/include/linux/mm.h
index bb8d3716bfe4..87a2c2b66d05 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -501,7 +501,6 @@  struct vm_fault {
 	pte_t orig_pte;			/* Value of PTE at the time of fault */
 
 	struct page *cow_page;		/* Page handler may use for COW fault */
-	struct mem_cgroup *memcg;	/* Cgroup cow_page belongs to */
 	struct page *page;		/* ->fault handlers should return a
 					 * page here, unless VM_FAULT_NOPAGE
 					 * is set (which is also implied by
@@ -935,8 +934,7 @@  static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 	return pte;
 }
 
-vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
-		struct page *page);
+vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page);
 vm_fault_t finish_fault(struct vm_fault *vmf);
 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
 #endif
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 89ef81b65bcb..4253c153e985 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -162,14 +162,13 @@  static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 	};
 	int err;
 	struct mmu_notifier_range range;
-	struct mem_cgroup *memcg;
 
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
 				addr + PAGE_SIZE);
 
 	if (new_page) {
-		err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL,
-					    &memcg);
+		err = mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL,
+					false);
 		if (err)
 			return err;
 	}
@@ -179,16 +178,12 @@  static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 
 	mmu_notifier_invalidate_range_start(&range);
 	err = -EAGAIN;
-	if (!page_vma_mapped_walk(&pvmw)) {
-		if (new_page)
-			mem_cgroup_cancel_charge(new_page, memcg);
+	if (!page_vma_mapped_walk(&pvmw))
 		goto unlock;
-	}
 	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
 
 	if (new_page) {
 		get_page(new_page);
-		mem_cgroup_commit_charge(new_page, memcg, false);
 		page_add_new_anon_rmap(new_page, vma, addr, false);
 		lru_cache_add_active_or_unevictable(new_page, vma);
 	} else
diff --git a/mm/filemap.c b/mm/filemap.c
index d5b6e3d7d402..fa47f160e1cc 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2638,7 +2638,7 @@  void filemap_map_pages(struct vm_fault *vmf,
 		if (vmf->pte)
 			vmf->pte += xas.xa_index - last_pgoff;
 		last_pgoff = xas.xa_index;
-		if (alloc_set_pte(vmf, NULL, page))
+		if (alloc_set_pte(vmf, page))
 			goto unlock;
 		unlock_page(page);
 		goto next;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 74f8b4013203..d0f1e8cee93c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -587,19 +587,19 @@  static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 			struct page *page, gfp_t gfp)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct mem_cgroup *memcg;
 	pgtable_t pgtable;
 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 	vm_fault_t ret = 0;
 
 	VM_BUG_ON_PAGE(!PageCompound(page), page);
 
-	if (mem_cgroup_try_charge_delay(page, vma->vm_mm, gfp, &memcg)) {
+	if (mem_cgroup_charge(page, vma->vm_mm, gfp, false)) {
 		put_page(page);
 		count_vm_event(THP_FAULT_FALLBACK);
 		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
 		return VM_FAULT_FALLBACK;
 	}
+	cgroup_throttle_swaprate(page, gfp);
 
 	pgtable = pte_alloc_one(vma->vm_mm);
 	if (unlikely(!pgtable)) {
@@ -630,7 +630,6 @@  static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 			vm_fault_t ret2;
 
 			spin_unlock(vmf->ptl);
-			mem_cgroup_cancel_charge(page, memcg);
 			put_page(page);
 			pte_free(vma->vm_mm, pgtable);
 			ret2 = handle_userfault(vmf, VM_UFFD_MISSING);
@@ -640,7 +639,6 @@  static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 
 		entry = mk_huge_pmd(page, vma->vm_page_prot);
 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-		mem_cgroup_commit_charge(page, memcg, false);
 		page_add_new_anon_rmap(page, vma, haddr, true);
 		lru_cache_add_active_or_unevictable(page, vma);
 		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
@@ -649,7 +647,7 @@  static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 		mm_inc_nr_ptes(vma->vm_mm);
 		spin_unlock(vmf->ptl);
 		count_vm_event(THP_FAULT_ALLOC);
-		count_memcg_events(memcg, THP_FAULT_ALLOC, 1);
+		count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
 	}
 
 	return 0;
@@ -658,7 +656,6 @@  static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 release:
 	if (pgtable)
 		pte_free(vma->vm_mm, pgtable);
-	mem_cgroup_cancel_charge(page, memcg);
 	put_page(page);
 	return ret;
 
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index be67ebe8a120..34731e7c9a67 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1044,7 +1044,6 @@  static void collapse_huge_page(struct mm_struct *mm,
 	struct page *new_page;
 	spinlock_t *pmd_ptl, *pte_ptl;
 	int isolated = 0, result = 0;
-	struct mem_cgroup *memcg;
 	struct vm_area_struct *vma;
 	struct mmu_notifier_range range;
 	gfp_t gfp;
@@ -1067,15 +1066,15 @@  static void collapse_huge_page(struct mm_struct *mm,
 		goto out_nolock;
 	}
 
-	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg))) {
+	if (unlikely(mem_cgroup_charge(new_page, mm, gfp, false))) {
 		result = SCAN_CGROUP_CHARGE_FAIL;
 		goto out_nolock;
 	}
+	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
 
 	down_read(&mm->mmap_sem);
 	result = hugepage_vma_revalidate(mm, address, &vma);
 	if (result) {
-		mem_cgroup_cancel_charge(new_page, memcg);
 		up_read(&mm->mmap_sem);
 		goto out_nolock;
 	}
@@ -1083,7 +1082,6 @@  static void collapse_huge_page(struct mm_struct *mm,
 	pmd = mm_find_pmd(mm, address);
 	if (!pmd) {
 		result = SCAN_PMD_NULL;
-		mem_cgroup_cancel_charge(new_page, memcg);
 		up_read(&mm->mmap_sem);
 		goto out_nolock;
 	}
@@ -1095,7 +1093,6 @@  static void collapse_huge_page(struct mm_struct *mm,
 	 */
 	if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
 				pmd, referenced)) {
-		mem_cgroup_cancel_charge(new_page, memcg);
 		up_read(&mm->mmap_sem);
 		goto out_nolock;
 	}
@@ -1182,9 +1179,7 @@  static void collapse_huge_page(struct mm_struct *mm,
 
 	spin_lock(pmd_ptl);
 	BUG_ON(!pmd_none(*pmd));
-	mem_cgroup_commit_charge(new_page, memcg, false);
 	page_add_new_anon_rmap(new_page, vma, address, true);
-	count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
 	lru_cache_add_active_or_unevictable(new_page, vma);
 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
 	set_pmd_at(mm, address, pmd, _pmd);
@@ -1198,10 +1193,11 @@  static void collapse_huge_page(struct mm_struct *mm,
 out_up_write:
 	up_write(&mm->mmap_sem);
 out_nolock:
+	if (*hpage)
+		mem_cgroup_uncharge(*hpage);
 	trace_mm_collapse_huge_page(mm, isolated, result);
 	return;
 out:
-	mem_cgroup_cancel_charge(new_page, memcg);
 	goto out_up_write;
 }
 
@@ -1609,7 +1605,6 @@  static void collapse_file(struct mm_struct *mm,
 	struct address_space *mapping = file->f_mapping;
 	gfp_t gfp;
 	struct page *new_page;
-	struct mem_cgroup *memcg;
 	pgoff_t index, end = start + HPAGE_PMD_NR;
 	LIST_HEAD(pagelist);
 	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
@@ -1628,10 +1623,11 @@  static void collapse_file(struct mm_struct *mm,
 		goto out;
 	}
 
-	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg))) {
+	if (unlikely(mem_cgroup_charge(new_page, mm, gfp, false))) {
 		result = SCAN_CGROUP_CHARGE_FAIL;
 		goto out;
 	}
+	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
 
 	/* This will be less messy when we use multi-index entries */
 	do {
@@ -1641,7 +1637,6 @@  static void collapse_file(struct mm_struct *mm,
 			break;
 		xas_unlock_irq(&xas);
 		if (!xas_nomem(&xas, GFP_KERNEL)) {
-			mem_cgroup_cancel_charge(new_page, memcg);
 			result = SCAN_FAIL;
 			goto out;
 		}
@@ -1834,18 +1829,9 @@  static void collapse_file(struct mm_struct *mm,
 	}
 
 	if (nr_none) {
-		struct lruvec *lruvec;
-		/*
-		 * XXX: We have started try_charge and pinned the
-		 * memcg, but the page isn't committed yet so we
-		 * cannot use mod_lruvec_page_state(). This hackery
-		 * will be cleaned up when remove the page->mapping
-		 * dependency from memcg and fully charge above.
-		 */
-		lruvec = mem_cgroup_lruvec(memcg, page_pgdat(new_page));
-		__mod_lruvec_state(lruvec, NR_FILE_PAGES, nr_none);
+		__mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
 		if (is_shmem)
-			__mod_lruvec_state(lruvec, NR_SHMEM, nr_none);
+			__mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
 	}
 
 xa_locked:
@@ -1883,7 +1869,6 @@  static void collapse_file(struct mm_struct *mm,
 
 		SetPageUptodate(new_page);
 		page_ref_add(new_page, HPAGE_PMD_NR - 1);
-		mem_cgroup_commit_charge(new_page, memcg, false);
 
 		if (is_shmem) {
 			set_page_dirty(new_page);
@@ -1891,7 +1876,6 @@  static void collapse_file(struct mm_struct *mm,
 		} else {
 			lru_cache_add_file(new_page);
 		}
-		count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
 
 		/*
 		 * Remove pte page tables, so we can re-fault the page as huge.
@@ -1938,13 +1922,14 @@  static void collapse_file(struct mm_struct *mm,
 		VM_BUG_ON(nr_none);
 		xas_unlock_irq(&xas);
 
-		mem_cgroup_cancel_charge(new_page, memcg);
 		new_page->mapping = NULL;
 	}
 
 	unlock_page(new_page);
 out:
 	VM_BUG_ON(!list_empty(&pagelist));
+	if (*hpage)
+		mem_cgroup_uncharge(*hpage);
 	/* TODO: tracepoints */
 }
 
diff --git a/mm/memory.c b/mm/memory.c
index 46c3e5dc918d..832ee914cbcf 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2645,7 +2645,6 @@  static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 	struct page *new_page = NULL;
 	pte_t entry;
 	int page_copied = 0;
-	struct mem_cgroup *memcg;
 	struct mmu_notifier_range range;
 
 	if (unlikely(anon_vma_prepare(vma)))
@@ -2676,8 +2675,9 @@  static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 		}
 	}
 
-	if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg))
+	if (mem_cgroup_charge(new_page, mm, GFP_KERNEL, false))
 		goto oom_free_new;
+	cgroup_throttle_swaprate(new_page, GFP_KERNEL);
 
 	__SetPageUptodate(new_page);
 
@@ -2710,7 +2710,6 @@  static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 		 * thread doing COW.
 		 */
 		ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
-		mem_cgroup_commit_charge(new_page, memcg, false);
 		page_add_new_anon_rmap(new_page, vma, vmf->address, false);
 		lru_cache_add_active_or_unevictable(new_page, vma);
 		/*
@@ -2749,8 +2748,6 @@  static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 		/* Free the old page.. */
 		new_page = old_page;
 		page_copied = 1;
-	} else {
-		mem_cgroup_cancel_charge(new_page, memcg);
 	}
 
 	if (new_page)
@@ -3088,7 +3085,6 @@  vm_fault_t do_swap_page(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct page *page = NULL, *swapcache;
-	struct mem_cgroup *memcg;
 	swp_entry_t entry;
 	pte_t pte;
 	int locked;
@@ -3193,10 +3189,11 @@  vm_fault_t do_swap_page(struct vm_fault *vmf)
 		goto out_page;
 	}
 
-	if (mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL, &memcg)) {
+	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL, true)) {
 		ret = VM_FAULT_OOM;
 		goto out_page;
 	}
+	cgroup_throttle_swaprate(page, GFP_KERNEL);
 
 	/*
 	 * Back out if somebody else already faulted in this pte.
@@ -3243,11 +3240,9 @@  vm_fault_t do_swap_page(struct vm_fault *vmf)
 
 	/* ksm created a completely new copy */
 	if (unlikely(page != swapcache && swapcache)) {
-		mem_cgroup_commit_charge(page, memcg, false);
 		page_add_new_anon_rmap(page, vma, vmf->address, false);
 		lru_cache_add_active_or_unevictable(page, vma);
 	} else {
-		mem_cgroup_commit_charge(page, memcg, true);
 		do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
 		activate_page(page);
 	}
@@ -3284,7 +3279,6 @@  vm_fault_t do_swap_page(struct vm_fault *vmf)
 out:
 	return ret;
 out_nomap:
-	mem_cgroup_cancel_charge(page, memcg);
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 out_page:
 	unlock_page(page);
@@ -3305,7 +3299,6 @@  vm_fault_t do_swap_page(struct vm_fault *vmf)
 static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct mem_cgroup *memcg;
 	struct page *page;
 	vm_fault_t ret = 0;
 	pte_t entry;
@@ -3358,8 +3351,9 @@  static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	if (!page)
 		goto oom;
 
-	if (mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL, &memcg))
+	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL, false))
 		goto oom_free_page;
+	cgroup_throttle_swaprate(page, GFP_KERNEL);
 
 	/*
 	 * The memory barrier inside __SetPageUptodate makes sure that
@@ -3384,13 +3378,11 @@  static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	/* Deliver the page fault to userland, check inside PT lock */
 	if (userfaultfd_missing(vma)) {
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
-		mem_cgroup_cancel_charge(page, memcg);
 		put_page(page);
 		return handle_userfault(vmf, VM_UFFD_MISSING);
 	}
 
 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
-	mem_cgroup_commit_charge(page, memcg, false);
 	page_add_new_anon_rmap(page, vma, vmf->address, false);
 	lru_cache_add_active_or_unevictable(page, vma);
 setpte:
@@ -3402,7 +3394,6 @@  static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 	return ret;
 release:
-	mem_cgroup_cancel_charge(page, memcg);
 	put_page(page);
 	goto unlock;
 oom_free_page:
@@ -3607,7 +3598,6 @@  static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
  * mapping. If needed, the fucntion allocates page table or use pre-allocated.
  *
  * @vmf: fault environment
- * @memcg: memcg to charge page (only for private mappings)
  * @page: page to map
  *
  * Caller must take care of unlocking vmf->ptl, if vmf->pte is non-NULL on
@@ -3618,8 +3608,7 @@  static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
  *
  * Return: %0 on success, %VM_FAULT_ code in case of error.
  */
-vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
-		struct page *page)
+vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	bool write = vmf->flags & FAULT_FLAG_WRITE;
@@ -3627,9 +3616,6 @@  vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
 	vm_fault_t ret;
 
 	if (pmd_none(*vmf->pmd) && PageTransCompound(page)) {
-		/* THP on COW? */
-		VM_BUG_ON_PAGE(memcg, page);
-
 		ret = do_set_pmd(vmf, page);
 		if (ret != VM_FAULT_FALLBACK)
 			return ret;
@@ -3652,7 +3638,6 @@  vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
 	/* copy-on-write page */
 	if (write && !(vma->vm_flags & VM_SHARED)) {
 		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
-		mem_cgroup_commit_charge(page, memcg, false);
 		page_add_new_anon_rmap(page, vma, vmf->address, false);
 		lru_cache_add_active_or_unevictable(page, vma);
 	} else {
@@ -3702,7 +3687,7 @@  vm_fault_t finish_fault(struct vm_fault *vmf)
 	if (!(vmf->vma->vm_flags & VM_SHARED))
 		ret = check_stable_address_space(vmf->vma->vm_mm);
 	if (!ret)
-		ret = alloc_set_pte(vmf, vmf->memcg, page);
+		ret = alloc_set_pte(vmf, page);
 	if (vmf->pte)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 	return ret;
@@ -3862,11 +3847,11 @@  static vm_fault_t do_cow_fault(struct vm_fault *vmf)
 	if (!vmf->cow_page)
 		return VM_FAULT_OOM;
 
-	if (mem_cgroup_try_charge_delay(vmf->cow_page, vma->vm_mm,
-					GFP_KERNEL, &vmf->memcg)) {
+	if (mem_cgroup_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL, false)) {
 		put_page(vmf->cow_page);
 		return VM_FAULT_OOM;
 	}
+	cgroup_throttle_swaprate(vmf->cow_page, GFP_KERNEL);
 
 	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
@@ -3884,7 +3869,6 @@  static vm_fault_t do_cow_fault(struct vm_fault *vmf)
 		goto uncharge_out;
 	return ret;
 uncharge_out:
-	mem_cgroup_cancel_charge(vmf->cow_page, vmf->memcg);
 	put_page(vmf->cow_page);
 	return ret;
 }
diff --git a/mm/migrate.c b/mm/migrate.c
index e84fb5b87a85..2028f08e3e8d 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2746,7 +2746,6 @@  static void migrate_vma_insert_page(struct migrate_vma *migrate,
 {
 	struct vm_area_struct *vma = migrate->vma;
 	struct mm_struct *mm = vma->vm_mm;
-	struct mem_cgroup *memcg;
 	bool flush = false;
 	spinlock_t *ptl;
 	pte_t entry;
@@ -2793,7 +2792,7 @@  static void migrate_vma_insert_page(struct migrate_vma *migrate,
 
 	if (unlikely(anon_vma_prepare(vma)))
 		goto abort;
-	if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg))
+	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL, false))
 		goto abort;
 
 	/*
@@ -2838,7 +2837,6 @@  static void migrate_vma_insert_page(struct migrate_vma *migrate,
 		goto unlock_abort;
 
 	inc_mm_counter(mm, MM_ANONPAGES);
-	mem_cgroup_commit_charge(page, memcg, false);
 	page_add_new_anon_rmap(page, vma, addr, false);
 	if (!is_zone_device_page(page))
 		lru_cache_add_active_or_unevictable(page, vma);
@@ -2861,7 +2859,6 @@  static void migrate_vma_insert_page(struct migrate_vma *migrate,
 
 unlock_abort:
 	pte_unmap_unlock(ptep, ptl);
-	mem_cgroup_cancel_charge(page, memcg);
 abort:
 	*src &= ~MIGRATE_PFN_MIGRATE;
 }
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 45b937b924f5..8c9b6767013b 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1858,7 +1858,6 @@  static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, swp_entry_t entry, struct page *page)
 {
 	struct page *swapcache;
-	struct mem_cgroup *memcg;
 	spinlock_t *ptl;
 	pte_t *pte;
 	int ret = 1;
@@ -1868,14 +1867,13 @@  static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	if (unlikely(!page))
 		return -ENOMEM;
 
-	if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg)) {
+	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL, true)) {
 		ret = -ENOMEM;
 		goto out_nolock;
 	}
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
-		mem_cgroup_cancel_charge(page, memcg);
 		ret = 0;
 		goto out;
 	}
@@ -1886,10 +1884,8 @@  static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	set_pte_at(vma->vm_mm, addr, pte,
 		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
 	if (page == swapcache) {
-		mem_cgroup_commit_charge(page, memcg, true);
 		page_add_anon_rmap(page, vma, addr, false);
 	} else { /* ksm created a completely new copy */
-		mem_cgroup_commit_charge(page, memcg, false);
 		page_add_new_anon_rmap(page, vma, addr, false);
 		lru_cache_add_active_or_unevictable(page, vma);
 	}
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 3dea268d2850..2745489415cc 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -56,7 +56,6 @@  static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 			    struct page **pagep,
 			    bool wp_copy)
 {
-	struct mem_cgroup *memcg;
 	pte_t _dst_pte, *dst_pte;
 	spinlock_t *ptl;
 	void *page_kaddr;
@@ -97,7 +96,7 @@  static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 	__SetPageUptodate(page);
 
 	ret = -ENOMEM;
-	if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg))
+	if (mem_cgroup_charge(page, dst_mm, GFP_KERNEL, false))
 		goto out_release;
 
 	_dst_pte = pte_mkdirty(mk_pte(page, dst_vma->vm_page_prot));
@@ -123,7 +122,6 @@  static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 		goto out_release_uncharge_unlock;
 
 	inc_mm_counter(dst_mm, MM_ANONPAGES);
-	mem_cgroup_commit_charge(page, memcg, false);
 	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
 	lru_cache_add_active_or_unevictable(page, dst_vma);
 
@@ -138,7 +136,6 @@  static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 	return ret;
 out_release_uncharge_unlock:
 	pte_unmap_unlock(dst_pte, ptl);
-	mem_cgroup_cancel_charge(page, memcg);
 out_release:
 	put_page(page);
 	goto out;