===================================================================
@@ -270,6 +270,10 @@ retry:
up_write(&_hash_lock);
dm_put(md);
+ if (likely(keep_open_devices))
+ dm_destroy(md);
+ else
+ dm_destroy_nowait(md);
/*
* Some mapped devices may be using other mapped
@@ -640,17 +644,19 @@ static int dev_create(struct dm_ioctl *p
return r;
r = dm_hash_insert(param->name, *param->uuid ? param->uuid : NULL, md);
- if (r)
- goto out;
+ if (r) {
+ dm_put(md);
+ dm_destroy(md);
+ return r;
+ }
param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
__dev_status(md, param);
-out:
dm_put(md);
- return r;
+ return 0;
}
/*
@@ -744,6 +750,7 @@ static int dev_remove(struct dm_ioctl *p
param->flags |= DM_UEVENT_GENERATED_FLAG;
dm_put(md);
+ dm_destroy(md);
return 0;
}
===================================================================
@@ -2176,6 +2176,7 @@ void dm_set_mdptr(struct mapped_device *
void dm_get(struct mapped_device *md)
{
atomic_inc(&md->holders);
+ BUG_ON(test_bit(DMF_FREEING, &md->flags));
}
const char *dm_device_name(struct mapped_device *md)
@@ -2184,27 +2185,55 @@ const char *dm_device_name(struct mapped
}
EXPORT_SYMBOL_GPL(dm_device_name);
-void dm_put(struct mapped_device *md)
+static void __dm_destroy(struct mapped_device *md, bool wait)
{
struct dm_table *map;
- BUG_ON(test_bit(DMF_FREEING, &md->flags));
+ might_sleep();
- if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
- map = dm_get_live_table(md);
- idr_replace(&_minor_idr, MINOR_ALLOCED,
- MINOR(disk_devt(dm_disk(md))));
- set_bit(DMF_FREEING, &md->flags);
- spin_unlock(&_minor_lock);
- if (!dm_suspended_md(md)) {
- dm_table_presuspend_targets(map);
- dm_table_postsuspend_targets(map);
- }
- dm_sysfs_exit(md);
- dm_table_put(map);
- dm_table_destroy(__unbind(md));
- free_dev(md);
+ spin_lock(&_minor_lock);
+ map = dm_get_live_table(md);
+ idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
+ set_bit(DMF_FREEING, &md->flags);
+ spin_unlock(&_minor_lock);
+
+ if (!dm_suspended_md(md)) {
+ dm_table_presuspend_targets(map);
+ dm_table_postsuspend_targets(map);
}
+
+ /*
+ * Rare, but there may be I/O requests still going to complete,
+ * for example. Wait for all references to disappear.
+ * No one should increment the reference count of the mapped_device
+ * after the mapped_device state becomes DMF_FREEING.
+ */
+ if (wait) {
+ while (atomic_read(&md->holders))
+ msleep(1);
+ } else if (atomic_read(&md->holders))
+ DMWARN("%s: Deleting mapped_device still in use! (%d users)",
+ dm_device_name(md), atomic_read(&md->holders));
+
+ dm_sysfs_exit(md);
+ dm_table_put(map);
+ dm_table_destroy(__unbind(md));
+ free_dev(md);
+}
+
+void dm_destroy(struct mapped_device *md)
+{
+ __dm_destroy(md, true);
+}
+
+void dm_destroy_nowait(struct mapped_device *md)
+{
+ __dm_destroy(md, false);
+}
+
+void dm_put(struct mapped_device *md)
+{
+ atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);
===================================================================
@@ -122,6 +122,11 @@ void dm_linear_exit(void);
int dm_stripe_init(void);
void dm_stripe_exit(void);
+/*
+ * mapped_device operations
+ */
+void dm_destroy(struct mapped_device *md);
+void dm_destroy_nowait(struct mapped_device *md);
int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md);
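
The caller-side sketch below is illustrative only and not part of the patch; it restates the calling convention the hunks above establish (dm_get()/dm_put() now only manage the holder count, and the ioctl paths pair the final dm_put() with dm_destroy(), or dm_destroy_nowait() when removal must not block on remaining users). The helper name and its flag parameter are hypothetical.

/* Illustrative sketch, not part of the patch */
static void example_remove_device(struct mapped_device *md, bool wait_for_users)
{
	dm_put(md);			/* drop this caller's holder reference first */
	if (wait_for_users)
		dm_destroy(md);		/* block until all remaining holders are gone */
	else
		dm_destroy_nowait(md);	/* warn about remaining holders and free now */
}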