@@ -304,7 +304,7 @@ void core_update_device_list_access(
{
struct se_dev_entry *deve;
- spin_lock_irq(&nacl->lun_entry_lock);
+ mutex_lock(&nacl->lun_entry_mutex);
deve = rcu_dereference(nacl->lun_entry_hlist[mapped_lun]);
if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
@@ -313,7 +313,7 @@ void core_update_device_list_access(
deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
}
- spin_unlock_irq(&nacl->lun_entry_lock);
+ mutex_unlock(&nacl->lun_entry_mutex);
synchronize_rcu();
}
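
For readers following the conversion, a compressed sketch of the write-side pattern this hunk adopts. All names in it (struct owner, struct entry) are illustrative, not code from the target tree: writers serialize against each other with the mutex, modify the RCU-visible entry, then call synchronize_rcu() so that any reader still observing the old state finishes before the caller proceeds.

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

struct entry {
	u32 flags;
};

struct owner {
	struct mutex entry_mutex;	/* serializes writers */
	struct entry __rcu *entry;	/* dereferenced by lockless readers */
};

/* Writer: update the RCU-visible entry, then wait out old readers. */
static void update_entry_flags(struct owner *o, u32 new_flags)
{
	struct entry *e;

	mutex_lock(&o->entry_mutex);
	e = rcu_dereference_protected(o->entry,
				      lockdep_is_held(&o->entry_mutex));
	e->flags = new_flags;
	mutex_unlock(&o->entry_mutex);

	/* Sleeps until all pre-existing RCU read-side critical
	 * sections have completed. */
	synchronize_rcu();
}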
@@ -345,7 +345,7 @@ int core_enable_device_list_for_node(
* transition. This transition must be for the same struct se_lun
* + mapped_lun that was set up in demo mode.
*/
- spin_lock_irq(&nacl->lun_entry_lock);
+ mutex_lock(&nacl->lun_entry_mutex);
deve = nacl->lun_entry_hlist[mapped_lun];
if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
BUG_ON(deve->se_lun_acl != NULL);
@@ -359,7 +359,7 @@ int core_enable_device_list_for_node(
deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
}
rcu_assign_pointer(deve->se_lun_acl, lun_acl);
- spin_unlock_irq(&nacl->lun_entry_lock);
+ mutex_unlock(&nacl->lun_entry_mutex);
synchronize_rcu();
return 0;
@@ -384,7 +384,7 @@ int core_enable_device_list_for_node(
rcu_assign_pointer(deve->se_lun, lun);
rcu_assign_pointer(deve->se_lun_acl, lun_acl);
- spin_unlock_irq(&nacl->lun_entry_lock);
+ mutex_unlock(&nacl->lun_entry_mutex);
spin_lock_bh(&port->sep_alua_lock);
list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
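
A note on the rcu_assign_pointer() calls above: the store-release they imply is what lets a fully initialized entry be published to lockless readers. A minimal sketch, reusing the illustrative struct owner and struct entry from the first example:

/* Writer: initialize first, then publish. rcu_assign_pointer()
 * orders the initializing stores before the pointer store, so any
 * reader that observes the new pointer observes a complete entry. */
static void publish_entry(struct owner *o, struct entry *new_entry)
{
	mutex_lock(&o->entry_mutex);
	new_entry->flags = 0;			/* init before publish */
	rcu_assign_pointer(o->entry, new_entry); /* store-release */
	mutex_unlock(&o->entry_mutex);
}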
@@ -409,8 +409,8 @@ int core_disable_device_list_for_node(
struct se_port *port = lun->lun_sep;
struct se_dev_entry *deve;
- rcu_read_lock();
- deve = rcu_dereference(nacl->lun_entry_hlist[mapped_lun]);
+ mutex_lock(&nacl->lun_entry_mutex);
+ deve = nacl->lun_entry_hlist[mapped_lun];
/*
* If the MappedLUN entry is being disabled, the entry in
* port->sep_alua_list must be removed now before clearing the
@@ -430,7 +430,6 @@ int core_disable_device_list_for_node(
/*
* Disable struct se_dev_entry LUN ACL mapping
*/
- spin_lock_irq(&nacl->lun_entry_lock);
core_scsi3_ua_release_all(deve);
rcu_assign_pointer(deve->pr_reg, NULL);
rcu_assign_pointer(deve->se_lun, NULL);
@@ -438,8 +437,7 @@ int core_disable_device_list_for_node(
deve->lun_flags = 0;
deve->creation_time = 0;
deve->attach_count--;
- spin_unlock_irq(&nacl->lun_entry_lock);
- rcu_read_unlock();
+ mutex_unlock(&nacl->lun_entry_mutex);
/*
* Wait for RCU read critical sections to complete after
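
The ordering in this hunk follows the standard RCU retire sequence: unpublish under the writer mutex, then synchronize_rcu() before the entry's state is reused. The patched function clears fields in place rather than freeing, but the requirement is the same. A sketch, again with the illustrative types from the first example:

#include <linux/slab.h>

/* Writer: unpublish, wait for readers, then it is safe to free. */
static void retire_entry(struct owner *o)
{
	struct entry *e;

	mutex_lock(&o->entry_mutex);
	e = rcu_dereference_protected(o->entry,
				      lockdep_is_held(&o->entry_mutex));
	rcu_assign_pointer(o->entry, NULL);	/* readers stop seeing it */
	mutex_unlock(&o->entry_mutex);

	synchronize_rcu();	/* no reader can still hold 'e' */
	kfree(e);
}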
@@ -1030,10 +1030,10 @@ static void __core_scsi3_add_registration(
__core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type);
spin_unlock(&pr_tmpl->registration_lock);
- spin_lock(&nacl->lun_entry_lock);
+ mutex_lock(&nacl->lun_entry_mutex);
deve = nacl->lun_entry_hlist[pr_reg->pr_res_mapped_lun];
rcu_assign_pointer(deve->pr_reg, pr_reg);
- spin_unlock(&nacl->lun_entry_lock);
+ mutex_unlock(&nacl->lun_entry_mutex);
/*
* Wait for RCU read-side critical sections in core_scsi3_pr_seq_non_holder()
* that conditionally check deve->pr_reg to complete.
@@ -1065,10 +1065,10 @@ static void __core_scsi3_add_registration(
register_type);
spin_unlock(&pr_tmpl->registration_lock);
- spin_lock(&nacl->lun_entry_lock);
+ mutex_lock(&nacl_tmp->lun_entry_mutex);
deve = nacl_tmp->lun_entry_hlist[pr_reg_tmp->pr_res_mapped_lun];
rcu_assign_pointer(deve->pr_reg, pr_reg_tmp);
- spin_unlock(&nacl->lun_entry_lock);
+ mutex_unlock(&nacl->lun_entry_mutex);
/*
* Wait for RCU read-side critical sections in core_scsi3_pr_seq_non_holder()
* that conditionally check deve->pr_reg to complete.
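
The hunk above takes nacl_tmp->lun_entry_mutex: with per-ACL locks, a writer must hold the mutex of the ACL whose lun_entry_hlist it is modifying, not that of the ACL it happens to be registering on behalf of. The rule, sketched with hypothetical types (struct reg and the backpointer field are invented for illustration):

struct reg;			/* hypothetical registration object */

struct entry {
	struct reg __rcu *reg;	/* backpointer seen by readers */
};

struct owner {
	struct mutex entry_mutex;
	struct entry __rcu *entry;
};

/* Lock the owner being modified, never a different owner's mutex. */
static void set_entry_reg(struct owner *target, struct reg *r)
{
	struct entry *e;

	mutex_lock(&target->entry_mutex);
	e = rcu_dereference_protected(target->entry,
				      lockdep_is_held(&target->entry_mutex));
	rcu_assign_pointer(e->reg, r);
	mutex_unlock(&target->entry_mutex);
}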
@@ -1280,10 +1280,10 @@ static void __core_scsi3_free_registration(
cpu_relax();
}
- spin_lock(&nacl->lun_entry_lock);
+ mutex_lock(&nacl->lun_entry_mutex);
deve = nacl->lun_entry_hlist[pr_reg->pr_res_mapped_lun];
rcu_assign_pointer(deve->pr_reg, NULL);
- spin_unlock(&nacl->lun_entry_lock);
+ mutex_unlock(&nacl->lun_entry_mutex);
/*
* Wait for RCU read-side critical sections in core_scsi3_pr_seq_non_holder()
* that conditionally check deve->pr_reg to complete.
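
Each synchronize_rcu() in these registration hunks pairs with a lockless read side: core_scsi3_pr_seq_non_holder() checks deve->pr_reg under rcu_read_lock(). A reduced sketch of what such a reader looks like (illustrative types as in the previous sketch, not the actual function):

/* Reader: lockless check of a published backpointer. The writers
 * above cannot reuse or free what this observes until the matching
 * synchronize_rcu() returns. */
static bool entry_reg_active(struct owner *o)
{
	struct entry *e;
	bool active = false;

	rcu_read_lock();
	e = rcu_dereference(o->entry);
	if (e && rcu_dereference(e->reg))
		active = true;
	rcu_read_unlock();

	return active;
}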
@@ -275,9 +275,8 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
INIT_LIST_HEAD(&acl->acl_sess_list);
kref_init(&acl->acl_kref);
init_completion(&acl->acl_free_comp);
- spin_lock_init(&acl->device_list_lock);
spin_lock_init(&acl->nacl_sess_lock);
- spin_lock_init(&acl->lun_entry_lock);
+ mutex_init(&acl->lun_entry_mutex);
atomic_set(&acl->acl_pr_ref_count, 0);
acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
@@ -331,7 +330,7 @@ void core_tpg_clear_object_luns(struct se_portal_group *tpg)
int i;
struct se_lun *lun;
- spin_lock(&tpg->tpg_lun_lock);
+ mutex_lock(&tpg->tpg_lun_mutex);
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
lun = tpg->tpg_lun_list[i];
@@ -339,11 +338,11 @@ void core_tpg_clear_object_luns(struct se_portal_group *tpg)
(lun->lun_se_dev == NULL))
continue;
- spin_unlock(&tpg->tpg_lun_lock);
+ mutex_unlock(&tpg->tpg_lun_mutex);
core_dev_del_lun(tpg, lun);
- spin_lock(&tpg->tpg_lun_lock);
+ mutex_lock(&tpg->tpg_lun_mutex);
}
- spin_unlock(&tpg->tpg_lun_lock);
+ mutex_unlock(&tpg->tpg_lun_mutex);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);
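
core_tpg_clear_object_luns() drops tpg_lun_mutex around core_dev_del_lun() because LUN teardown can sleep and takes further locks; iterating by array index keeps the cursor valid across the unlock/lock window. The shape of the pattern, with hypothetical names throughout:

#define MAX_SLOTS	256

struct item;

struct table {
	struct mutex slot_mutex;
	struct item *slots[MAX_SLOTS];
};

void remove_item(struct table *t, struct item *it);	/* may sleep */

/* Lock-drop iteration over a fixed-size slot array: safe because the
 * cursor is an index, not a list pointer that removal could
 * invalidate while the mutex is dropped. */
static void clear_all_slots(struct table *t)
{
	int i;

	mutex_lock(&t->slot_mutex);
	for (i = 0; i < MAX_SLOTS; i++) {
		struct item *it = t->slots[i];

		if (!it)
			continue;
		mutex_unlock(&t->slot_mutex);	/* removal may sleep */
		remove_item(t, it);
		mutex_lock(&t->slot_mutex);
	}
	mutex_unlock(&t->slot_mutex);
}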
@@ -403,9 +402,8 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
INIT_LIST_HEAD(&acl->acl_sess_list);
kref_init(&acl->acl_kref);
init_completion(&acl->acl_free_comp);
- spin_lock_init(&acl->device_list_lock);
spin_lock_init(&acl->nacl_sess_lock);
- spin_lock_init(&acl->lun_entry_lock);
+ mutex_init(&acl->lun_entry_mutex);
atomic_set(&acl->acl_pr_ref_count, 0);
acl->queue_depth = queue_depth;
snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
@@ -599,8 +599,7 @@ struct se_node_acl {
struct se_dev_entry **device_list;
struct se_session *nacl_sess;
struct se_portal_group *se_tpg;
- spinlock_t device_list_lock;
- spinlock_t lun_entry_lock;
+ struct mutex lun_entry_mutex;
spinlock_t nacl_sess_lock;
struct config_group acl_group;
struct config_group acl_attrib_group;