@@ -276,10 +278,10 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
void flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
- struct vm_area_struct *mpnt;
+ struct vm_area_struct *mpnt, *mpnt_wrt, *mpnt_nwrt;
struct prio_tree_iter iter;
unsigned long offset;
- unsigned long addr, old_addr = 0;
+ unsigned long addr, addr_wrt, addr_nwrt;
pgoff_t pgoff;
if (mapping && !mapping_mapped(mapping)) {
@@ -292,26 +294,69 @@ void flush_dcache_page(struct page *page)
if (!mapping)
return;
+ addr_wrt = 0;
+ mpnt_wrt = NULL;
+ addr_nwrt = 0;
+ mpnt_nwrt = NULL;
pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
/* We have carefully arranged in arch_get_unmapped_area() that
- * *any* mappings of a file are always congruently mapped (whether
- * declared as MAP_PRIVATE or MAP_SHARED), so we only need
- * to flush one address here for them all to become coherent */
+ * all shared mappings of a file are congruently mapped, so we
+ * only need to flush one address here for them all to become
+ * coherent. However, non-shared fixed mappings of executables
+ * are not congruently mapped on the boundary page between text
+ * and data. Further, the data segment sometimes precedes the
+ * text segment. While it is unlikely that a dirty cache line
+ * would result from accesses through the text mapping, it can
+ * happen because binutils doesn't ensure that the data segment
+ * starts on a page boundary. */
flush_dcache_mmap_lock(mapping);
+
+ /* Scan for inequivalent aliases. */
vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
addr = mpnt->vm_start + offset;
- if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
- __flush_cache_page(mpnt, addr, page_to_phys(page));
- if (old_addr)
- printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
- old_addr = addr;
+ if (mpnt->vm_flags & VM_WRITE) {
+ if (addr_wrt == 0) {
+ addr_wrt = addr;
+ mpnt_wrt = mpnt;
+ }
+ else if ((addr_wrt & (SHMLBA - 1)) != (addr & (SHMLBA - 1)))
+ goto flush_inequivalent;
}
+ else {
+ if (addr_nwrt == 0) {
+ addr_nwrt = addr;
+ mpnt_nwrt = mpnt;
+ }
+ else if ((addr_nwrt & (SHMLBA - 1)) != (addr & (SHMLBA - 1)))
+ goto flush_inequivalent;
+ }
+ }
+ flush_dcache_mmap_unlock(mapping);
+
+ /* Common case: all writeable aliases are equivalent and all
+ * non-writeable aliases are equivalent. */
+ if (addr_wrt)
+ __flush_cache_page(mpnt_wrt, addr_wrt, page_to_phys(page));
+ if (addr_nwrt &&
+ (!addr_wrt || (addr_nwrt & (SHMLBA - 1)) != (addr_wrt & (SHMLBA - 1))))
+ __flush_cache_page(mpnt_nwrt, addr_nwrt, page_to_phys(page));
+ return;
+
+flush_inequivalent:
+ vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
+ offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
+ addr = mpnt->vm_start + offset;
+
+ printk(KERN_ERR "INEQUIVALENT ALIASES: page 0x%lx addr 0x%lx in file %s with flags 0x%lx\n", (unsigned long) page, addr, mpnt->vm_file ? (char *) mpnt->vm_file->f_path.dentry->d_name.name : "(null)", mpnt->vm_flags);
+
+ __flush_cache_page(mpnt, addr, page_to_phys(page));
}
flush_dcache_mmap_unlock(mapping);
+ return;
}
EXPORT_SYMBOL(flush_dcache_page);
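
Side note: the flushing policy above boils down to two steps: remember one representative address per congruence class (writeable and non-writeable), and fall back to flushing every mapping only when an inequivalent alias is found. The standalone userspace sketch below only illustrates that policy; the SHMLBA value (0x00400000), the addresses and the struct mapping type are made-up assumptions for the example, not the real i_mmap tree or <asm/shmparam.h>.

/* Standalone userspace sketch (not kernel code) of the policy above.
 * SHMLBA, the addresses and the mapping list are assumed values for
 * illustration only; on parisc the real SHMLBA comes from
 * <asm/shmparam.h> and the mappings come from the i_mmap tree. */
#include <stdio.h>
#include <stdbool.h>

#define SHMLBA 0x00400000UL	/* assumed alias boundary for this sketch */

struct mapping {
	unsigned long addr;	/* user virtual address of the page */
	bool writeable;		/* stand-in for VM_WRITE */
};

/* Two aliases hit the same cache lines only when their offsets within
 * an SHMLBA window are equal -- the same test the patch uses. */
static bool congruent(unsigned long a, unsigned long b)
{
	return (a & (SHMLBA - 1)) == (b & (SHMLBA - 1));
}

int main(void)
{
	struct mapping maps[] = {
		{ 0x40001000UL, true  },	/* writeable mapping */
		{ 0x40401000UL, false },	/* congruent read-only alias */
		{ 0x40801000UL, false },	/* another congruent alias */
	};
	unsigned long addr_wrt = 0, addr_nwrt = 0;
	bool inequivalent = false;
	unsigned int i;

	/* First pass: remember one representative per class and detect
	 * inequivalent aliases, mirroring the scan loop in the patch. */
	for (i = 0; i < sizeof(maps) / sizeof(maps[0]); i++) {
		unsigned long *rep = maps[i].writeable ? &addr_wrt : &addr_nwrt;

		if (*rep == 0)
			*rep = maps[i].addr;
		else if (!congruent(*rep, maps[i].addr))
			inequivalent = true;
	}

	if (inequivalent) {
		/* Slow path: the kernel flushes every mapping here. */
		printf("inequivalent aliases: flush all mappings\n");
		return 0;
	}

	/* Common case: one flush per congruence class is enough, and the
	 * non-writeable flush is skipped when it is congruent with the
	 * writeable one. */
	if (addr_wrt)
		printf("flush writeable representative 0x%lx\n", addr_wrt);
	if (addr_nwrt && (!addr_wrt || !congruent(addr_nwrt, addr_wrt)))
		printf("flush non-writeable representative 0x%lx\n", addr_nwrt);
	return 0;
}

Changing the third address to, say, 0x40802000UL makes it inequivalent and takes the slow path, which corresponds to the flush_inequivalent label in the patch.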