@@ -3598,6 +3598,7 @@ struct qla_hw_data {
 		uint32_t detected_lr_sfp:1;
 		uint32_t using_lr_setting:1;
 		uint32_t rida_fmt2:1;
+		uint32_t purge_mbox:1;
 	} flags;
 
 	uint16_t max_exchg;
@@ -3843,6 +3844,9 @@ struct qla_hw_data {
 	int port_down_retry_count;
 	uint8_t mbx_count;
 	uint8_t aen_mbx_count;
+	atomic_t num_pend_mbx_stage1;
+	atomic_t num_pend_mbx_stage2;
+	atomic_t num_pend_mbx_stage3;
 
 	uint32_t login_retry_count;
 	/* SNS command interfaces. */
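
Taken together, the new fields let chip reset see and flush in-flight mailbox commands: purge_mbox tells qla2x00_mailbox_command() to bail out with QLA_ABORTED, while the three atomics count commands in each phase of that routine (stage 1: waiting for access to the mailbox interface, stage 2: command loaded and waiting to finish, stage 3: sleeping on mbx_intr_comp). A rough sketch of how the counters are meant to bracket those phases, simplified from the qla_mbx.c hunks below rather than copied from the driver:

	/* Sketch only; the real code paths and error handling are in the
	 * qla_mbx.c hunks that follow.
	 */
	atomic_inc(&ha->num_pend_mbx_stage1);	/* waiting for mailbox access */
	/* ... serialize on ha->mbx_cmd_comp ... */
	atomic_dec(&ha->num_pend_mbx_stage1);

	atomic_inc(&ha->num_pend_mbx_stage2);	/* command issued, waiting to finish */
	/* ... write mailbox registers, set host interrupt ... */

	atomic_inc(&ha->num_pend_mbx_stage3);	/* blocked in wait_for_completion_timeout() */
	wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
	atomic_dec(&ha->num_pend_mbx_stage3);

	/* ... collect mailbox status ... */
	atomic_dec(&ha->num_pend_mbx_stage2);

Chip reset reads the same counters to decide whom to wake and when the purge has drained, as the qla_init.c hunks below show.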
@@ -6390,6 +6390,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
 	ql_log(ql_log_info, vha, 0x00af,
 	    "Performing ISP error recovery - ha=%p.\n", ha);
 
+	ha->flags.purge_mbox = 1;
 	/* For ISP82XX, reset_chip is just disabling interrupts.
 	 * Driver waits for the completion of the commands.
 	 * the interrupts need to be enabled.
@@ -6411,6 +6412,23 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
 			    ha->base_qpair->chip_reset;
 	}
 
+	/* purge MBox commands */
+	if (atomic_read(&ha->num_pend_mbx_stage3)) {
+		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+		complete(&ha->mbx_intr_comp);
+	}
+
+	i = 0;
+	while (atomic_read(&ha->num_pend_mbx_stage3) ||
+	    atomic_read(&ha->num_pend_mbx_stage2) ||
+	    atomic_read(&ha->num_pend_mbx_stage1)) {
+		msleep(20);
+		i++;
+		if (i > 50)
+			break;
+	}
+	ha->flags.purge_mbox = 0;
+
 	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
 	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
 		atomic_set(&vha->loop_state, LOOP_DOWN);
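
The purge happens in two steps: purge_mbox is raised at the top of qla2x00_abort_isp_cleanup() (first qla_init.c hunk above), then this hunk wakes any command sleeping in stage 3 by completing mbx_intr_comp and polls until all three counters reach zero, giving up after 50 iterations of msleep(20), i.e. roughly one second. If the loop were ever factored out, a helper along these lines would capture it (sketch only; qla2x00_drain_mbx() is a hypothetical name, not part of the patch):

/* Hypothetical helper equivalent to the drain loop above; the caller is
 * expected to have set ha->flags.purge_mbox beforehand and to clear it
 * once the drain finishes.
 */
static void qla2x00_drain_mbx(struct qla_hw_data *ha)
{
	int i = 0;

	/* Wake a command blocked in wait_for_completion_timeout(). */
	if (atomic_read(&ha->num_pend_mbx_stage3)) {
		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	/* Poll up to 50 * 20 ms (about one second) for all stages to drain. */
	while (atomic_read(&ha->num_pend_mbx_stage3) ||
	    atomic_read(&ha->num_pend_mbx_stage2) ||
	    atomic_read(&ha->num_pend_mbx_stage1)) {
		msleep(20);
		if (++i > 50)
			break;
	}
}

The 50-iteration cap bounds the wait so chip reset cannot hang behind a stuck mailbox command.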
@@ -167,6 +167,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 		return QLA_FUNCTION_TIMEOUT;
 	}
 
+	atomic_inc(&ha->num_pend_mbx_stage1);
 	/*
 	 * Wait for active mailbox commands to finish by waiting at most tov
 	 * seconds. This is to serialize actual issuing of mailbox cmds during
@@ -177,8 +178,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 		ql_log(ql_log_warn, vha, 0xd035,
 		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
 		    mcp->mb[0]);
+		atomic_dec(&ha->num_pend_mbx_stage1);
 		return QLA_FUNCTION_TIMEOUT;
 	}
+	atomic_dec(&ha->num_pend_mbx_stage1);
+	if (ha->flags.purge_mbox) {
+		rval = QLA_ABORTED;
+		goto premature_exit;
+	}
 
 	ha->flags.mbox_busy = 1;
 	/* Save mailbox command for debug */
@@ -231,7 +238,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"jiffies=%lx.\n", jiffies);
/* Wait for mbx cmd completion until timeout */
-
+ atomic_inc(&ha->num_pend_mbx_stage2);
if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
@@ -241,6 +248,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 				spin_unlock_irqrestore(&ha->hardware_lock,
 				    flags);
 				ha->flags.mbox_busy = 0;
+				atomic_dec(&ha->num_pend_mbx_stage2);
 				ql_dbg(ql_dbg_mbx, vha, 0x1010,
 				    "Pending mailbox timeout, exiting.\n");
 				rval = QLA_FUNCTION_TIMEOUT;
@@ -254,6 +262,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 		wait_time = jiffies;
+		atomic_inc(&ha->num_pend_mbx_stage3);
 		if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
 		    mcp->tov * HZ)) {
 			ql_dbg(ql_dbg_mbx, vha, 0x117a,
@@ -261,7 +270,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 			spin_lock_irqsave(&ha->hardware_lock, flags);
 			clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+		} else if (ha->flags.purge_mbox) {
+			ha->flags.mbox_busy = 0;
+			atomic_dec(&ha->num_pend_mbx_stage2);
+			atomic_dec(&ha->num_pend_mbx_stage3);
+			rval = QLA_ABORTED;
+			goto premature_exit;
 		}
+		atomic_dec(&ha->num_pend_mbx_stage3);
+
 		if (time_after(jiffies, wait_time + 5 * HZ))
 			ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
 			    command, jiffies_to_msecs(jiffies - wait_time));
@@ -275,6 +293,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 				spin_unlock_irqrestore(&ha->hardware_lock,
 				    flags);
 				ha->flags.mbox_busy = 0;
+				atomic_dec(&ha->num_pend_mbx_stage2);
 				ql_dbg(ql_dbg_mbx, vha, 0x1012,
 				    "Pending mailbox timeout, exiting.\n");
 				rval = QLA_FUNCTION_TIMEOUT;
@@ -289,6 +308,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 
 		wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
 		while (!ha->flags.mbox_int) {
+			if (ha->flags.purge_mbox) {
+				ha->flags.mbox_busy = 0;
+				atomic_dec(&ha->num_pend_mbx_stage2);
+				rval = QLA_ABORTED;
+				goto premature_exit;
+			}
+
 			if (time_after(jiffies, wait_time))
 				break;
 
@@ -304,6 +330,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"Waited %d sec.\n",
(uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
}
+ atomic_dec(&ha->num_pend_mbx_stage2);
/* Check whether we timed out */
if (ha->flags.mbox_int) {
@@ -489,7 +516,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 		complete(&ha->mbx_cmd_comp);
 
 mbx_done:
-	if (rval) {
+	if (rval == QLA_ABORTED) {
+		ql_log(ql_log_info, vha, 0xd035,
+		    "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
+		    mcp->mb[0]);
+	} else if (rval) {
 		if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
 			pr_warn("%s [%s]-%04x:%ld: **** Failed", QL_MSGHDR,
 			    dev_name(&ha->pdev->dev), 0x1020+0x800,
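
For callers, the visible difference is that a command flushed by chip reset now completes with QLA_ABORTED instead of QLA_FUNCTION_TIMEOUT, and is logged as informational rather than through the mailbox-failure banner. A caller-side sketch of how the two outcomes might be told apart (illustrative only, not taken from the driver):

	rval = qla2x00_mailbox_command(vha, mcp);
	switch (rval) {
	case QLA_SUCCESS:
		break;
	case QLA_ABORTED:
		/* Purged because a chip reset is in progress. */
		break;
	case QLA_FUNCTION_TIMEOUT:
	default:
		/* Genuine mailbox/firmware failure. */
		break;
	}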
@@ -2815,6 +2815,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	ha->link_data_rate = PORT_SPEED_UNKNOWN;
 	ha->optrom_size = OPTROM_SIZE_2300;
 	ha->max_exchg = FW_MAX_EXCHANGES_CNT;
+	atomic_set(&ha->num_pend_mbx_stage1, 0);
+	atomic_set(&ha->num_pend_mbx_stage2, 0);
+	atomic_set(&ha->num_pend_mbx_stage3, 0);
 
 	/* Assign ISP specific operations. */
 	if (IS_QLA2100(ha)) {