@@ -267,6 +267,16 @@ const char *dmz_metadata_label(struct dmz_metadata *zmd)
return (const char *)zmd->devname;
}
+bool dmz_check_dev(struct dmz_metadata *zmd)
+{
+ return dmz_check_bdev(&zmd->dev[0]);
+}
+
+bool dmz_dev_is_dying(struct dmz_metadata *zmd)
+{
+ return dmz_bdev_is_dying(&zmd->dev[0]);
+}
+
/*
* Lock/unlock mapping table.
* The map lock also protects all the zone lists.
@@ -1719,7 +1729,7 @@ struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, unsigned int chu
/* Allocate a random zone */
dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
if (!dzone) {
- if (dmz_bdev_is_dying(zmd->dev)) {
+ if (dmz_dev_is_dying(zmd)) {
dzone = ERR_PTR(-EIO);
goto out;
}
@@ -1820,7 +1830,7 @@ struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
/* Allocate a random zone */
bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
if (!bzone) {
- if (dmz_bdev_is_dying(zmd->dev)) {
+ if (dmz_dev_is_dying(zmd)) {
bzone = ERR_PTR(-EIO);
goto out;
}
--- a/drivers/md/dm-zoned-reclaim.c
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -455,7 +455,7 @@ static void dmz_reclaim_work(struct work_struct *work)
unsigned int p_unmap_rnd;
int ret;
- if (dmz_bdev_is_dying(zrc->dev))
+ if (dmz_dev_is_dying(zmd))
return;
if (!dmz_should_reclaim(zrc)) {
@@ -490,7 +490,7 @@ static void dmz_reclaim_work(struct work_struct *work)
if (ret) {
DMDEBUG("(%s): Reclaim error %d\n",
dmz_metadata_label(zmd), ret);
- if (!dmz_check_bdev(zrc->dev))
+ if (!dmz_check_dev(zmd))
return;
}
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -632,7 +632,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
sector_t chunk_sector;
int ret;
- if (dmz_bdev_is_dying(dmz->dev))
+ if (dmz_dev_is_dying(zmd))
return DM_MAPIO_KILL;
DMDEBUG("(%s): BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
--- a/drivers/md/dm-zoned.h
+++ b/drivers/md/dm-zoned.h
@@ -181,6 +181,9 @@ sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone);
sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone);
unsigned int dmz_nr_chunks(struct dmz_metadata *zmd);
+bool dmz_check_dev(struct dmz_metadata *zmd);
+bool dmz_dev_is_dying(struct dmz_metadata *zmd);
+
#define DMZ_ALLOC_RND 0x01
#define DMZ_ALLOC_RECLAIM 0x02
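
Taken together, these hunks replace direct dmz_bdev_is_dying(zrc->dev) / dmz_bdev_is_dying(dmz->dev) calls with wrappers that take only the struct dmz_metadata pointer, so the reclaim and target code no longer need to know which backing device the metadata refers to. The following is a minimal user-space sketch of that encapsulation pattern; the demo_* structs and functions are illustrative stand-ins, not the real kernel types or the dm-zoned API.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct dmz_dev: only the health state we care about here. */
struct demo_dev {
	bool dying;
};

/* Stand-in for struct dmz_metadata: owns its backing device(s). */
struct demo_metadata {
	struct demo_dev dev[1];
};

/* Low-level check, analogous to dmz_bdev_is_dying(). */
static bool demo_bdev_is_dying(struct demo_dev *dev)
{
	return dev->dying;
}

/*
 * Wrapper analogous to dmz_dev_is_dying(): callers pass the metadata
 * handle and never reach into the device array layout themselves.
 */
static bool demo_dev_is_dying(struct demo_metadata *zmd)
{
	return demo_bdev_is_dying(&zmd->dev[0]);
}

int main(void)
{
	struct demo_metadata zmd = { .dev = { { .dying = false } } };

	/* Caller-side pattern, mirroring dmz_map() and dmz_reclaim_work(). */
	if (demo_dev_is_dying(&zmd))
		printf("would abort the BIO (DM_MAPIO_KILL)\n");
	else
		printf("device healthy, proceed\n");

	return 0;
}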