@@ -807,6 +807,7 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
extern int vmtruncate(struct inode * inode, loff_t offset);
extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
+extern void remap_file_mappings(struct file *file, struct vm_operations_struct *vm_ops);
#ifdef CONFIG_MMU
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -2325,6 +2325,111 @@ void unmap_mapping_range(struct address_space *mapping,
}
EXPORT_SYMBOL(unmap_mapping_range);
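+
+/*
+ * Switch a single vma that maps @file over to @vm_ops.  Called with
+ * mapping->i_mmap_lock held; the lock is dropped and retaken so the
+ * vma can be modified under mm->mmap_sem.
+ */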
+static void remap_vma(struct vm_area_struct *vma,
+ struct vm_operations_struct *vm_ops)
+{
+ struct file *file = vma->vm_file;
+ struct address_space *mapping = file->f_mapping;
+ unsigned long start_addr, end_addr, size;
+ struct mm_struct *mm;
+
+ start_addr = vma->vm_start;
+ end_addr = vma->vm_end;
+
+ /* Has this vma already been processed? */
+ if (vma->vm_ops == vm_ops)
+ return;
+
+	/* Switch locks so the vma can be manipulated under the mmap_sem.
+	 * Needed in order to call vm_ops->close.
+	 */
+ mm = vma->vm_mm;
+ atomic_inc(&mm->mm_users);
+ spin_unlock(&mapping->i_mmap_lock);
+
+ /* Block page faults and other code modifying the mm. */
+ down_write(&mm->mmap_sem);
+
+	/* Look up the vma again; it may have changed meanwhile. */
+	vma = find_vma(mm, start_addr);
+	if (!vma || vma->vm_file != file)
+		goto out;
+ if (vma->vm_ops == vm_ops)
+ goto out;
+
+ start_addr = vma->vm_start;
+ end_addr = vma->vm_end;
+ size = end_addr - start_addr;
+
+ /* Unmap the vma */
+ zap_page_range(vma, start_addr, size, NULL);
+
+ /* Close the vma */
+ if (vma->vm_ops && vma->vm_ops->close)
+ vma->vm_ops->close(vma);
+
+ /* Repurpose the vma */
+ vma->vm_private_data = NULL;
+ vma->vm_ops = vm_ops;
+ if (vm_ops->open)
+ vm_ops->open(vma);
+out:
+	up_write(&mm->mmap_sem);
+	/* Drop the mm reference taken before the locks were switched. */
+	mmput(mm);
+	spin_lock(&mapping->i_mmap_lock);
+}
+
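+/**
+ * remap_file_mappings - switch all mappings of a file to a new set of vm_ops
+ * @file: file whose vmas should be updated
+ * @vm_ops: vm_operations_struct to install on each vma
+ */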
+void remap_file_mappings(struct file *file, struct vm_operations_struct *vm_ops)
+{
+	/* After file->f_op has been changed, update the vmas. */
+ struct address_space *mapping = file->f_mapping;
+ struct vm_area_struct *vma;
+ struct prio_tree_iter iter;
+
+ spin_lock(&mapping->i_mmap_lock);
+
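+	/*
+	 * remap_vma() drops i_mmap_lock, so the walk must be restarted
+	 * after every vma that is handed to it.
+	 */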
+restart_tree:
+ vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX) {
+ /* Skip quickly over vmas that do not need to be touched */
+ if (vma->vm_file != file)
+ continue;
+ if (vma->vm_ops == vm_ops)
+ continue;
+ remap_vma(vma, vm_ops);
+ goto restart_tree;
+ }
+
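+	/* Nonlinear vmas sit on a separate list; repeat the same walk. */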
+restart_list:
+ list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list) {
+ /* Skip quickly over vmas that do not need to be touched */
+ if (vma->vm_file != file)
+ continue;
+ if (vma->vm_ops == vm_ops)
+ continue;
+ remap_vma(vma, vm_ops);
+ goto restart_list;
+ }
+
+ spin_unlock(&mapping->i_mmap_lock);
+}
+
/**
* vmtruncate - unmap mappings "freed" by truncate() syscall
* @inode: inode of the file used