@@ -72,15 +72,22 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
if (changed) {
set_pte_at(vma->vm_mm, addr, ptep, pte);
- /*
- * There could be some standard sized pages in there,
- * get them all.
- */
- flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
}
return changed;
}
+#define update_mmu_cache_huge update_mmu_cache_huge
+static inline void update_mmu_cache_huge(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ /*
+ * There could be some standard sized pages in there,
+ * so the address parameter must be the page-faulting address
+ * rather than the start address of the huge page
+ */
+ local_flush_tlb_page(vma, address);
+ update_mmu_cache(vma, address & huge_page_mask(hstate_vma(vma)), ptep);
+}
#include <asm-generic/hugetlb.h>
#endif /* __ASM_HUGETLB_H */
@@ -746,6 +746,15 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
}
#endif
+#ifndef update_mmu_cache_huge
+#define update_mmu_cache_huge update_mmu_cache_huge
+static inline void update_mmu_cache_huge(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ update_mmu_cache(vma, address & huge_page_mask(hstate_vma(vma)), ptep);
+}
+#endif
+
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
@@ -3757,10 +3757,12 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
pte_t entry;
+ struct hstate *h = hstate_vma(vma);
+ unsigned long haddr = address & huge_page_mask(h);
entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
- if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
- update_mmu_cache(vma, address, ptep);
+ if (huge_ptep_set_access_flags(vma, haddr, ptep, entry, 1))
+ update_mmu_cache_huge(vma, address, ptep);
}
bool is_hugetlb_entry_migration(pte_t pte)
@@ -4128,7 +4130,7 @@ static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
* and just make the page writable */
if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
page_move_anon_rmap(old_page, vma);
- set_huge_ptep_writable(vma, haddr, ptep);
+ set_huge_ptep_writable(vma, address, ptep);
return 0;
}
@@ -4630,7 +4632,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
entry = pte_mkyoung(entry);
if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
flags & FAULT_FLAG_WRITE))
- update_mmu_cache(vma, haddr, ptep);
+ update_mmu_cache_huge(vma, address, ptep);
out_put_page:
if (page != pagecache_page)
unlock_page(page);
@@ -4770,7 +4772,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
hugetlb_count_add(pages_per_huge_page(h), dst_mm);
/* No need to invalidate - it was non-present before */
- update_mmu_cache(dst_vma, dst_addr, dst_pte);
+ update_mmu_cache_huge(dst_vma, dst_addr, dst_pte);
spin_unlock(ptl);
set_page_huge_active(page);