diff --git a/fs/block_dev.c b/fs/block_dev.c
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -489,6 +489,8 @@ long bdev_direct_access(struct block_device *bdev, sector_t sector,
 	avail = ops->direct_access(bdev, sector, addr, pfn);
 	if (!avail)
 		return -ERANGE;
+	if (avail > 0 && avail & ~PAGE_MASK)
+		return -ENXIO;
 	return min(avail, size);
 }
 EXPORT_SYMBOL_GPL(bdev_direct_access);
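
For illustration only, a minimal userspace sketch (not kernel code) of the new
alignment test: PAGE_SIZE and PAGE_MASK are assumed to be the usual 4K-page
definitions from <asm/page.h>, and the helper name here is hypothetical.  Any
positive count that is not a whole multiple of PAGE_SIZE has bits set below the
page boundary, so avail & ~PAGE_MASK is non-zero and the call now fails with
-ENXIO; zero was already mapped to -ERANGE above, and negative error codes
still pass straight through.

/*
 * Userspace sketch only: PAGE_SIZE/PAGE_MASK assumed to match the common
 * 4K page definitions; check_direct_access_count() is a made-up name.
 */
#include <errno.h>
#include <stdio.h>

#define PAGE_SIZE	4096L
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Mirrors the new check: a positive count with any bits set below the
 * page boundary is not a PAGE_SIZE multiple and gets rejected. */
static long check_direct_access_count(long avail)
{
	if (avail > 0 && avail & ~PAGE_MASK)
		return -ENXIO;
	return avail;
}

int main(void)
{
	long samples[] = { 512, 4096, 4608, 8192, -34 /* e.g. -ERANGE */ };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("avail=%6ld -> %ld\n", samples[i],
		       check_direct_access_count(samples[i]));
	return 0;
}

Built with gcc, this rejects the 512- and 4608-byte samples (ENXIO is 6 on
Linux, so -6 is printed) and passes the page multiples and the negative error
code through unchanged.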
diff --git a/fs/dax.c b/fs/dax.c
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -52,7 +52,6 @@ int dax_clear_blocks(struct inode *inode, sector_t block, long size)
 		sz = min_t(long, count, SZ_128K);
 		clear_pmem(addr, sz);
 		size -= sz;
-		BUG_ON(sz & 511);
 		sector += sz / 512;
 		cond_resched();
 	} while (size);
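
To see why the BUG_ON is now redundant, here is a small userspace sketch of the
chunking arithmetic in dax_clear_blocks(), with illustrative sizes; "mapped"
stands in for the count coming back from bdev_direct_access().  Assuming the
usual 4K pages, that count is now always a PAGE_SIZE multiple (negative errors
are handled earlier in the loop), the request size on this path is a multiple
of the 512-byte sector size, and SZ_128K is too, so min() of these values can
never leave bits below 512 set.

/*
 * Userspace sketch only: mimics the chunking arithmetic with made-up
 * sizes; it is not the kernel loop itself.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE	4096L
#define SZ_128K		(128 * 1024L)

int main(void)
{
	long size = 2 * SZ_128K + 3 * PAGE_SIZE;	/* sector-aligned request */
	long sector = 0;

	while (size) {
		/* stand-in for the (now page aligned) mapped count */
		long mapped = size < 8 * SZ_128K ? size : 8 * SZ_128K;
		long sz = mapped < SZ_128K ? mapped : SZ_128K;

		/* The removed BUG_ON(sz & 511): min() of two 512-byte
		 * multiples is still a 512-byte multiple. */
		assert((sz & 511) == 0);

		size -= sz;
		sector += sz / 512;
	}
	printf("advanced %ld sectors\n", sector);
	return 0;
}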
If a ->direct_access() implementation ever returns a map count less than
PAGE_SIZE, catch the error in bdev_direct_access().  This simplifies error
checking in upper layers.

Reported-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 fs/block_dev.c | 2 ++
 fs/dax.c       | 1 -
 2 files changed, 2 insertions(+), 1 deletion(-)