@@ -285,10 +285,10 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset,
/*
* For a mapping to be possible, we need a range of uncompressed and
- * contiguous blocks. Return the offset for the first block if that
- * verifies, or zero otherwise.
+ * contiguous blocks. Return the offset of the first block and update
+ * *pages to the number of contiguous valid blocks, or zero otherwise.
*/
-static u32 cramfs_get_block_range(struct inode *inode, u32 pgoff, u32 pages)
+static u32 cramfs_get_block_range(struct inode *inode, u32 pgoff, u32 *pages)
{
struct super_block *sb = inode->i_sb;
struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
@@ -306,11 +306,16 @@ static u32 cramfs_get_block_range(struct inode *inode, u32 pgoff, u32 pages)
do {
u32 expect = blockaddr + i * (PAGE_SIZE >> 2);
expect |= CRAMFS_BLK_FLAG_DIRECT_PTR|CRAMFS_BLK_FLAG_UNCOMPRESSED;
- pr_debug("range: block %d/%d got %#x expects %#x\n",
- pgoff+i, pgoff+pages-1, blockptrs[i], expect);
- if (blockptrs[i] != expect)
- return 0;
- } while (++i < pages);
+ if (blockptrs[i] != expect) {
+ pr_debug("range: block %d/%d got %#x expects %#x\n",
+ pgoff+i, pgoff+*pages-1, blockptrs[i], expect);
+ if (i == 0)
+ return 0;
+ break;
+ }
+ } while (++i < *pages);
+
+ *pages = i;
/* stored "direct" block ptrs are shifted down by 2 bits */
return blockaddr << 2;
@@ -321,8 +326,8 @@ static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
- unsigned int pages, max_pages, offset;
- unsigned long length, address;
+ unsigned int pages, vma_pages, max_pages, offset;
+ unsigned long address;
char *fail_reason;
int ret;
@@ -332,17 +337,20 @@ static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
return -EINVAL;
- vma->vm_ops = &generic_file_vm_ops;
+ fail_reason = "vma is writable";
if (vma->vm_flags & VM_WRITE)
- return 0;
+ goto fail;
- length = vma->vm_end - vma->vm_start;
- pages = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ vma_pages = (vma->vm_end - vma->vm_start + PAGE_SIZE - 1) >> PAGE_SHIFT;
max_pages = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (vma->vm_pgoff >= max_pages || pages > max_pages - vma->vm_pgoff)
- return -EINVAL;
+ fail_reason = "beyond file limit";
+ if (vma->vm_pgoff >= max_pages)
+ goto fail;
+ pages = vma_pages;
+ if (pages > max_pages - vma->vm_pgoff)
+ pages = max_pages - vma->vm_pgoff;
- offset = cramfs_get_block_range(inode, vma->vm_pgoff, pages);
+ offset = cramfs_get_block_range(inode, vma->vm_pgoff, &pages);
fail_reason = "unsuitable block layout";
if (!offset)
goto fail;
@@ -351,37 +359,60 @@ static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
if (!PAGE_ALIGNED(address))
goto fail;
- /* Don't map a partial page if it contains some other data */
+ /* Don't map the last page if it contains some other data */
if (unlikely(vma->vm_pgoff + pages == max_pages)) {
unsigned int partial = offset_in_page(inode->i_size);
if (partial) {
char *data = sbi->linear_virt_addr + offset;
data += (pages - 1) * PAGE_SIZE + partial;
- fail_reason = "last partial page is shared";
while ((unsigned long)data & 7)
if (*data++ != 0)
- goto fail;
+ goto nonzero;
while (offset_in_page(data)) {
- if (*(u64 *)data != 0)
- goto fail;
+ if (*(u64 *)data != 0) {
+ nonzero:
+ pr_debug("mmap: %s: last page is shared\n",
+ file_dentry(file)->d_name.name);
+ pages--;
+ break;
+ }
data += 8;
}
}
}
-
- ret = remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT,
- length, vma->vm_page_prot);
- if (ret)
- return ret;
- pr_debug("mapped %s at 0x%08lx, length %lu to vma 0x%08lx, "
+
+ if (pages) {
+ /*
+ * Split the vma if we can't map it all so normal paging
+ * will take care of the rest through cramfs_readpage().
+ */
+ if (pages != vma_pages) {
+ if (1) {
+ fail_reason = "fix me";
+ goto fail;
+ }
+ ret = split_vma(vma->vm_mm, vma,
+ vma->vm_start + pages * PAGE_SIZE, 0);
+ if (ret)
+ return ret;
+ }
+
+ ret = remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT,
+ pages * PAGE_SIZE, vma->vm_page_prot);
+ if (ret)
+ return ret;
+ }
+
+ pr_debug("mapped %s at 0x%08lx, %u/%u pages to vma 0x%08lx, "
"page_prot 0x%llx\n", file_dentry(file)->d_name.name,
- address, length, vma->vm_start,
+ address, pages, vma_pages, vma->vm_start,
(unsigned long long)pgprot_val(vma->vm_page_prot));
return 0;
fail:
pr_debug("%s: direct mmap failed: %s\n",
file_dentry(file)->d_name.name, fail_reason);
+ vma->vm_ops = &generic_file_vm_ops;
return 0;
}
@@ -394,14 +425,15 @@ static unsigned long cramfs_physmem_get_unmapped_area(struct file *file,
struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
- unsigned int pages, max_pages, offset;
+ unsigned int pages, block_pages, max_pages, offset;
pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
max_pages = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (pgoff >= max_pages || pages > max_pages - pgoff)
return -EINVAL;
- offset = cramfs_get_block_range(inode, pgoff, pages);
- if (!offset)
+ block_pages = pages;
+ offset = cramfs_get_block_range(inode, pgoff, &block_pages);
+ if (!offset || block_pages != pages)
return -ENOSYS;
addr = sbi->linear_phys_addr + offset;
pr_debug("get_unmapped for %s ofs %#lx siz %lu at 0x%08lx\n",