dax_iomap_iter() fails if the destination range contains poison. Add
dax_recovery_write() to the failure code path.

Signed-off-by: Jane Chu <jane.chu@oracle.com>
---
 fs/dax.c | 23 +++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)

@@ -1199,6 +1199,8 @@ int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
}
EXPORT_SYMBOL_GPL(dax_truncate_page);

+typedef size_t (*iter_func_t)(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i);
static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
struct iov_iter *iter)
{
@@ -1210,6 +1212,7 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
ssize_t ret = 0;
size_t xfer;
int id;
+ iter_func_t write_func = dax_copy_from_iter;

if (iov_iter_rw(iter) == READ) {
end = min(end, i_size_read(iomi->inode));
@@ -1249,6 +1252,17 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,

map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
&kaddr, NULL);
+ if ((map_len == -EIO) && (iov_iter_rw(iter) == WRITE)) {
+ if (dax_prep_recovery(dax_dev, &kaddr) < 0) {
+ ret = map_len;
+ break;
+ }
+ map_len = dax_direct_access(dax_dev, pgoff,
+ PHYS_PFN(size), &kaddr, NULL);
+ if (map_len > 0)
+ write_func = dax_recovery_write;
+ }
+
if (map_len < 0) {
ret = map_len;
break;
@@ -1264,14 +1278,19 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
/*
* The userspace address for the memory copy has already been
* validated via access_ok() in either vfs_read() or
* vfs_write(), depending on which operation we are doing.
*/
if (iov_iter_rw(iter) == WRITE)
- xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
- map_len, iter);
+ xfer = write_func(dax_dev, pgoff, kaddr, map_len, iter);
else
xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
map_len, iter);
+ if (xfer == (size_t)-EIO) {
+ pr_warn("dax_iomap_iter: write_func returned -EIO\n");
+ ret = -EIO;
+ break;
+ }
+
pos += xfer;
length -= xfer;
done += xfer;
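
A note on the dispatch above: write_func must match iter_func_t, so
dax_recovery_write() necessarily has the same shape as
dax_copy_from_iter(). Below is a minimal sketch of what such a helper
could look like, assuming a driver-level clear-poison primitive. The
body is illustrative only; dax_clear_poison() is a hypothetical
callback, not something this patch introduces.

/* Illustrative sketch, not this patch's implementation. */
size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	/*
	 * Clear the poison backing [addr, addr + bytes) first;
	 * dax_clear_poison() is hypothetical. Per this patch's
	 * convention, failure is reported as (size_t)-EIO, which
	 * dax_iomap_iter() checks for after the copy.
	 */
	if (dax_clear_poison(dax_dev, pgoff, bytes) < 0)
		return (size_t)-EIO;

	/* then do the copy the normal write path would have done */
	return _copy_from_iter_flushcache(addr, bytes, i);
}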
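
For the user-visible effect, a small userspace test (the path
/mnt/pmem0/victim is hypothetical; assume a file on a filesystem
mounted with -o dax whose backing page contains poison). Without the
recovery path, the copy trips over the poisoned pfn and pwrite(2)
fails with EIO; with it, dax_recovery_write() gets a chance to clear
the poison and the write completes.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define PAGE_SZ 4096

int main(void)
{
	/* hypothetical file on a DAX-mounted filesystem */
	int fd = open("/mnt/pmem0/victim", O_RDWR);
	char *buf;
	ssize_t n;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* page-aligned buffer for a whole-page overwrite */
	if (posix_memalign((void **)&buf, PAGE_SZ, PAGE_SZ)) {
		close(fd);
		return 1;
	}
	memset(buf, 0xaa, PAGE_SZ);

	n = pwrite(fd, buf, PAGE_SZ, 0);
	if (n < 0 && errno == EIO)
		fprintf(stderr, "pwrite: EIO, poison not recovered\n");
	else if (n == PAGE_SZ)
		printf("pwrite completed, %zd bytes\n", n);

	free(buf);
	close(fd);
	return n == PAGE_SZ ? 0 : 1;
}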