diff --git a/drivers/dax/super.c b/drivers/dax/super.c
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -330,22 +330,24 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
EXPORT_SYMBOL_GPL(dax_direct_access);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
- size_t bytes, struct iov_iter *i)
+ size_t bytes, struct iov_iter *i, unsigned long flags)
{
if (!dax_alive(dax_dev))
return 0;
- return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
+ return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i,
+ flags);
}
EXPORT_SYMBOL_GPL(dax_copy_from_iter);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
- size_t bytes, struct iov_iter *i)
+ size_t bytes, struct iov_iter *i, unsigned long flags)
{
if (!dax_alive(dax_dev))
return 0;
- return dax_dev->ops->copy_to_iter(dax_dev, pgoff, addr, bytes, i);
+ return dax_dev->ops->copy_to_iter(dax_dev, pgoff, addr, bytes, i,
+ flags);
}
EXPORT_SYMBOL_GPL(dax_copy_to_iter);
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -181,7 +181,7 @@ static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
}
static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
+ void *addr, size_t bytes, struct iov_iter *i, unsigned long flags)
{
struct linear_c *lc = ti->private;
struct block_device *bdev = lc->dev->bdev;
@@ -191,11 +191,11 @@ static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
dev_sector = linear_map_sector(ti, sector);
if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
return 0;
- return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
+ return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i, flags);
}
static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
+ void *addr, size_t bytes, struct iov_iter *i, unsigned long flags)
{
struct linear_c *lc = ti->private;
struct block_device *bdev = lc->dev->bdev;
@@ -205,7 +205,7 @@ static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
dev_sector = linear_map_sector(ti, sector);
if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
return 0;
- return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
+ return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i, flags);
}
static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -964,8 +964,8 @@ static long log_writes_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
}
static size_t log_writes_dax_copy_from_iter(struct dm_target *ti,
- pgoff_t pgoff, void *addr, size_t bytes,
- struct iov_iter *i)
+ pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i,
+ unsigned long flags)
{
struct log_writes_c *lc = ti->private;
sector_t sector = pgoff * PAGE_SECTORS;
@@ -984,19 +984,19 @@ static size_t log_writes_dax_copy_from_iter(struct dm_target *ti,
return 0;
}
dax_copy:
- return dax_copy_from_iter(lc->dev->dax_dev, pgoff, addr, bytes, i);
+ return dax_copy_from_iter(lc->dev->dax_dev, pgoff, addr, bytes, i, flags);
}
static size_t log_writes_dax_copy_to_iter(struct dm_target *ti,
- pgoff_t pgoff, void *addr, size_t bytes,
- struct iov_iter *i)
+ pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i,
+ unsigned long flags)
{
struct log_writes_c *lc = ti->private;
sector_t sector = pgoff * PAGE_SECTORS;
if (bdev_dax_pgoff(lc->dev->bdev, sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
return 0;
- return dax_copy_to_iter(lc->dev->dax_dev, pgoff, addr, bytes, i);
+ return dax_copy_to_iter(lc->dev->dax_dev, pgoff, addr, bytes, i, flags);
}
static int log_writes_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -323,7 +323,7 @@ static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
}
static size_t stripe_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
+ void *addr, size_t bytes, struct iov_iter *i, unsigned long flags)
{
sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
struct stripe_c *sc = ti->private;
@@ -338,11 +338,11 @@ static size_t stripe_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
return 0;
- return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
+ return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i, flags);
}
static size_t stripe_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
+ void *addr, size_t bytes, struct iov_iter *i, unsigned long flags)
{
sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
struct stripe_c *sc = ti->private;
@@ -357,7 +357,7 @@ static size_t stripe_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
return 0;
- return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
+ return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i, flags);
}
static int stripe_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1045,7 +1045,7 @@ static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bd
}
static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
+ void *addr, size_t bytes, struct iov_iter *i, unsigned long flags)
{
struct mapped_device *md = dax_get_private(dax_dev);
sector_t sector = pgoff * PAGE_SECTORS;
@@ -1061,7 +1061,7 @@ static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
ret = copy_from_iter(addr, bytes, i);
goto out;
}
- ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
+ ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i, flags);
out:
dm_put_live_table(md, srcu_idx);
@@ -1069,7 +1069,7 @@ static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
}
static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
+ void *addr, size_t bytes, struct iov_iter *i, unsigned long flags)
{
struct mapped_device *md = dax_get_private(dax_dev);
sector_t sector = pgoff * PAGE_SECTORS;
@@ -1085,7 +1085,7 @@ static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
ret = copy_to_iter(addr, bytes, i);
goto out;
}
- ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i);
+ ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i, flags);
out:
dm_put_live_table(md, srcu_idx);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -311,13 +311,13 @@ static long pmem_dax_direct_access(struct dax_device *dax_dev,
* dax_iomap_actor()
*/
static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
+ void *addr, size_t bytes, struct iov_iter *i, unsigned long flags)
{
return _copy_from_iter_flushcache(addr, bytes, i);
}
static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
+ void *addr, size_t bytes, struct iov_iter *i, unsigned long flags)
{
return _copy_mc_to_iter(addr, bytes, i);
}
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -45,13 +45,15 @@ static const struct block_device_operations dcssblk_devops = {
};
static size_t dcssblk_dax_copy_from_iter(struct dax_device *dax_dev,
- pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
+ pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i,
+ unsigned long flags)
{
return copy_from_iter(addr, bytes, i);
}
static size_t dcssblk_dax_copy_to_iter(struct dax_device *dax_dev,
- pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
+ pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i,
+ unsigned long flags)
{
return copy_to_iter(addr, bytes, i);
}
diff --git a/fs/dax.c b/fs/dax.c
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1241,10 +1241,10 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
*/
if (iov_iter_rw(iter) == WRITE)
xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
- map_len, iter);
+ map_len, iter, dax_flag);
else
xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
- map_len, iter);
+ map_len, iter, dax_flag);
pos += xfer;
length -= xfer;
diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -754,15 +754,15 @@ static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
}
static size_t virtio_fs_copy_from_iter(struct dax_device *dax_dev,
- pgoff_t pgoff, void *addr,
- size_t bytes, struct iov_iter *i)
+ pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i,
+ unsigned long flags)
{
return copy_from_iter(addr, bytes, i);
}
static size_t virtio_fs_copy_to_iter(struct dax_device *dax_dev,
- pgoff_t pgoff, void *addr,
- size_t bytes, struct iov_iter *i)
+ pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i,
+ unsigned long flags)
{
return copy_to_iter(addr, bytes, i);
}
diff --git a/include/linux/dax.h b/include/linux/dax.h
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -33,10 +33,10 @@ struct dax_operations {
sector_t, sector_t);
/* copy_from_iter: required operation for fs-dax direct-i/o */
size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
- struct iov_iter *);
+ struct iov_iter *, unsigned long);
/* copy_to_iter: required operation for fs-dax direct-i/o */
size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t,
- struct iov_iter *);
+ struct iov_iter *, unsigned long);
/* zero_page_range: required operation. Zero page range */
int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
};
@@ -197,9 +197,9 @@ void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
void **kaddr, pfn_t *pfn, unsigned long);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
- size_t bytes, struct iov_iter *i);
+ size_t bytes, struct iov_iter *i, unsigned long flags);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
- size_t bytes, struct iov_iter *i);
+ size_t bytes, struct iov_iter *i, unsigned long flags);
int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
size_t nr_pages);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -148,7 +148,7 @@ typedef int (*dm_busy_fn) (struct dm_target *ti);
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
long nr_pages, void **kaddr, pfn_t *pfn, unsigned long flags);
typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i);
+ void *addr, size_t bytes, struct iov_iter *i, unsigned long flags);
typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
size_t nr_pages);
Prepare the dax_copy_to/from_iter() APIs with a DAXDEV_F_RECOVERY flag
such that, when the flag is set, the underlying driver implementation
of the APIs may deal with potential poison in a given address range and
read partial data, or write after clearing the poison.

Signed-off-by: Jane Chu <jane.chu@oracle.com>
---
 drivers/dax/super.c           | 10 ++++++----
 drivers/md/dm-linear.c        |  8 ++++----
 drivers/md/dm-log-writes.c    | 12 ++++++------
 drivers/md/dm-stripe.c        |  8 ++++----
 drivers/md/dm.c               |  8 ++++----
 drivers/nvdimm/pmem.c         |  4 ++--
 drivers/s390/block/dcssblk.c  |  6 ++++--
 fs/dax.c                      |  4 ++--
 fs/fuse/virtio_fs.c           |  8 ++++----
 include/linux/dax.h           |  8 ++++----
 include/linux/device-mapper.h |  2 +-
 11 files changed, 41 insertions(+), 37 deletions(-)
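For illustration, here is a minimal sketch (not part of the patch) of how a
driver-side ->copy_from_iter() implementation could consume the new flags
argument once DAXDEV_F_RECOVERY is defined. The pmem_recovery_write_iter()
helper and the example_ function name are hypothetical; only
_copy_from_iter_flushcache() is taken from the existing pmem code.

static size_t example_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i,
		unsigned long flags)
{
	/*
	 * Hypothetical recovery path: when the caller sets
	 * DAXDEV_F_RECOVERY, the driver may clear known poison in
	 * [addr, addr + bytes) before writing, possibly completing
	 * only part of the request.
	 */
	if (flags & DAXDEV_F_RECOVERY)
		return pmem_recovery_write_iter(addr, bytes, i);

	/* Default path, as pmem_copy_from_iter() does today. */
	return _copy_from_iter_flushcache(addr, bytes, i);
}

A caller such as dax_iomap_iter() in the fs/dax.c hunk above would
presumably pass DAXDEV_F_RECOVERY via its flags argument only when
retrying I/O against a range known to contain poison, and 0 otherwise.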