The tlb_delay_rmap() function indicates to the TLB gather infrastructure that the rmap removal of a particular page should be delayed until after the TLB flush. Yet, the function name and prototype refer to the TLB gather state as a whole. Rename tlb_delay_rmap() to tlb_delay_page_rmap(), along with the delay_rmap local variable, to avoid this ambiguity. Although unlikely to ever be used, add a 'struct page' argument to the renamed function to emphasize that it is a particular page whose rmap removal is being delayed.

Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
---
 include/asm-generic/tlb.h | 14 +++++++-------
 mm/memory.c               | 12 ++++++------
 2 files changed, 13 insertions(+), 13 deletions(-)

--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -261,8 +261,8 @@ extern bool __tlb_remove_page_size(struct mmu_gather *tlb,
int page_size);
#ifdef CONFIG_SMP
-#define tlb_delay_rmap tlb_delay_rmap
-static inline bool tlb_delay_rmap(struct mmu_gather *tlb);
+#define tlb_delay_page_rmap tlb_delay_page_rmap
+static inline bool tlb_delay_page_rmap(struct mmu_gather *tlb, struct page *page);
static inline void tlb_reset_delay_rmap(struct mmu_gather *tlb);
static inline bool tlb_rmap_delayed(struct mmu_gather *tlb);
extern void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma);
@@ -300,7 +300,7 @@ struct mmu_gather {
*/
unsigned int freed_tables : 1;
-#ifdef tlb_delay_rmap
+#ifdef tlb_delay_page_rmap
/*
* Do we have pending delayed rmap removals?
*/
unsigned int delayed_rmap : 1;
@@ -335,9 +335,9 @@ struct mmu_gather {
#endif
};
-#ifdef tlb_delay_rmap
-static inline bool tlb_delay_rmap(struct mmu_gather *tlb)
+#ifdef tlb_delay_page_rmap
+static inline bool tlb_delay_page_rmap(struct mmu_gather *tlb, struct page *page)
{
tlb->delayed_rmap = 1;
return true;
}
@@ -363,8 +363,8 @@ static inline bool tlb_rmap_delayed(struct mmu_gather *tlb)
* remote TLBs to flush and is not preemptible due to this
* all happening under the page table lock.
*/
-#define tlb_delay_rmap tlb_delay_rmap
-static inline bool tlb_delay_rmap(struct mmu_gather *tlb)
+#define tlb_delay_page_rmap tlb_delay_page_rmap
+static inline bool tlb_delay_page_rmap(struct mmu_gather *tlb, struct page *page)
{
return false;
}
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1374,7 +1374,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
break;
if (pte_present(ptent)) {
- unsigned int delay_rmap;
+ unsigned int delay_page_rmap;
page = vm_normal_page(vma, addr, ptent);
if (unlikely(!should_zap_page(details, page)))
continue;
@@ -1387,12 +1387,12 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
if (unlikely(!page))
continue;
- delay_rmap = 0;
+ delay_page_rmap = 0;
if (!PageAnon(page)) {
if (pte_dirty(ptent)) {
set_page_dirty(page);
- if (tlb_delay_rmap(tlb)) {
- delay_rmap = 1;
+ if (tlb_delay_page_rmap(tlb, page)) {
+ delay_page_rmap = 1;
force_flush = 1;
}
}
@@ -1401,12 +1401,12 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
mark_page_accessed(page);
}
rss[mm_counter(page)]--;
- if (!delay_rmap) {
+ if (!delay_page_rmap) {
page_remove_rmap(page, vma, false);
if (unlikely(page_mapcount(page) < 0))
print_bad_pte(vma, addr, ptent, page);
}
- if (unlikely(__tlb_remove_page(tlb, page, delay_rmap))) {
+ if (unlikely(__tlb_remove_page(tlb, page, delay_page_rmap))) {
force_flush = 1;
addr += PAGE_SIZE;
break;
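For reference, the consumer side of the delayed rmap machinery, which this patch leaves unchanged, reads roughly as follows in zap_pte_range(): the TLB is flushed first, and only then does tlb_flush_rmaps() (declared in the first tlb.h hunk above) perform the deferred page_remove_rmap() calls. A condensed sketch of that surrounding code, not part of this diff (tlb_flush_mmu_tlbonly() is the generic mmu_gather flush helper, assumed from the upstream code):

	if (force_flush) {
		/* No remote TLB may still hold a stale, writable translation... */
		tlb_flush_mmu_tlbonly(tlb);
		/* ...only then is it safe to run the deferred page_remove_rmap() calls */
		tlb_flush_rmaps(tlb, vma);
	}

This ordering is why delaying the rmap removal for dirty file-backed pages matters: removing the rmap before the flush would let the page appear unmapped while another CPU could still dirty it through a stale TLB entry.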