@@ -14,6 +14,8 @@
#include <linux/bitops.h>
#include <asm/processor.h>
#include <asm/cache.h>
+struct vm_area_struct;
+extern void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
/*
* kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
@@ -456,7 +457,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
return old_pte;
}
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline void ptep_set_wrprotect(struct vm_area_struct *vma, struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
unsigned long new, old;
@@ -467,6 +468,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
} while (cmpxchg((unsigned long *) ptep, old, new) != old);
#else
pte_t old_pte = *ptep;
+ if (atomic_read(&mm->mm_users) > 1)
+ flush_cache_page(vma, addr, pte_pfn(old_pte));
set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
#endif
}
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -616,7 +616,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
* in the parent and the child
*/
if (is_cow_mapping(vm_flags)) {
- ptep_set_wrprotect(src_mm, addr, src_pte);
+ ptep_set_wrprotect(vma, src_mm, addr, src_pte);
pte = pte_wrprotect(pte);
}