@@ -844,6 +844,36 @@ static bool dm_table_supports_dax(struct dm_table *t,
 	return true;
 }
 
+/* Check whether device is capable of dax poison recovery */
+static int device_poison_recovery_capable(struct dm_target *ti,
+					  struct dm_dev *dev, sector_t start, sector_t len, void *data)
+{
+	if (!dev->dax_dev)
+		return false;
+	return dax_recovery_capable(dev->dax_dev);
+}
+
+static bool dm_table_supports_poison_recovery(struct dm_table *t,
+					      iterate_devices_callout_fn func)
+{
+	struct dm_target *ti;
+	unsigned int i;
+
+	/* Check if any DAX target supports poison recovery */
+	for (i = 0; i < dm_table_get_num_targets(t); i++) {
+		ti = dm_table_get_target(t, i);
+
+		if (!ti->type->direct_access)
+			return false;
+
+		if (ti->type->iterate_devices &&
+		    ti->type->iterate_devices(ti, func, NULL))
+			return true;
+	}
+
+	return false;
+}
+
 static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
 				  sector_t start, sector_t len, void *data)
 {
@@ -2014,6 +2044,9 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
 		if (dm_table_supports_dax(t, device_not_dax_synchronous_capable))
 			set_dax_synchronous(t->md->dax_dev);
+		if (dm_table_supports_poison_recovery(t,
+				device_poison_recovery_capable))
+			set_dax_recovery(t->md->dax_dev);
 	}
 	else
 		blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
If one of the target dax devices participating in the MD raid supports
DAXDEV_RECOVERY, then the MD device as a whole is declared capable of
DAXDEV_RECOVERY. Only when the recovery process reaches the target
driver does it become deterministic whether a given dax address range
can be recovered or not.

Signed-off-by: Jane Chu <jane.chu@oracle.com>
---
 drivers/md/dm-table.c | 33 +++++++++++++++++++++++++++++++++
 1 file changed, 33 insertions(+)
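
Note (not part of the patch): for reviewers, a minimal sketch of what the
dax core side of this could look like, assuming DAXDEV_RECOVERY is a flag
bit on the dax_device (mirroring how DAXDEV_SYNC is handled) and that the
set_dax_recovery()/dax_recovery_capable() helpers used above simply set
and test that bit. The struct layout and flag placement below are
illustrative only, not the actual drivers/dax/super.c code from this
series.

	/* Illustrative sketch only -- not the implementation in this series. */
	#include <linux/bitops.h>
	#include <linux/types.h>

	/* Assumed: DAXDEV_RECOVERY is a new bit in the dax_device flags word. */
	enum dax_device_flags {
		DAXDEV_ALIVE,
		DAXDEV_SYNC,
		DAXDEV_RECOVERY,	/* device can attempt recovery from poison */
	};

	struct dax_device {
		unsigned long flags;
		/* ... other fields elided ... */
	};

	/* Mark the dax device as capable of poison recovery. */
	static inline void set_dax_recovery(struct dax_device *dax_dev)
	{
		set_bit(DAXDEV_RECOVERY, &dax_dev->flags);
	}

	/* Report whether the dax device advertises poison recovery. */
	static inline bool dax_recovery_capable(struct dax_device *dax_dev)
	{
		return test_bit(DAXDEV_RECOVERY, &dax_dev->flags);
	}

With helpers along these lines, dm_table_set_restrictions() only has to
propagate the capability upward from any one participating target;
whether a particular dax address range can actually be recovered is
still decided by the target driver once the recovery path reaches it.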