@@ -744,6 +744,7 @@ struct kib_conn *kiblnd_create_conn(struct kib_peer_ni *peer_ni,
INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
INIT_LIST_HEAD(&conn->ibc_active_txs);
+ INIT_LIST_HEAD(&conn->ibc_zombie_txs);
spin_lock_init(&conn->ibc_lock);
conn->ibc_connvars = kzalloc_cpt(sizeof(*conn->ibc_connvars), GFP_NOFS, cpt);
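The new list is initialised alongside the existing per-connection tx queues, so kiblnd_destroy_conn() can drain it unconditionally even for a connection that never reached IBLND_CONN_ESTABLISHED. A hypothetical debug helper (not part of this patch; name assumed) illustrates the intended teardown invariant:

/* Hypothetical helper, for illustration only: by the time the last
 * reference on a conn is dropped, the normal send queues must already
 * be empty; only ibc_zombie_txs may still hold txs, and those are
 * completed by kiblnd_destroy_conn() with -ECONNABORTED.
 */
static void kiblnd_assert_conn_drained(struct kib_conn *conn)
{
	LASSERT(list_empty(&conn->ibc_tx_queue_rsrvd));
	LASSERT(list_empty(&conn->ibc_tx_queue_nocred));
	LASSERT(list_empty(&conn->ibc_active_txs));
	/* ibc_zombie_txs is deliberately not asserted empty here */
}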
@@ -951,6 +952,9 @@ void kiblnd_destroy_conn(struct kib_conn *conn)
if (conn->ibc_cq)
ib_destroy_cq(conn->ibc_cq);
+ kiblnd_txlist_done(&conn->ibc_zombie_txs, -ECONNABORTED,
+ LNET_MSG_STATUS_OK);
+
if (conn->ibc_rx_pages)
kiblnd_unmap_rx_descs(conn);
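Draining ibc_zombie_txs here, after the CQ has been destroyed, means a tx that was parked on a disconnected connection is only completed back to LNet once no further completions can arrive for it. A rough sketch of the assumed semantics of the existing kiblnd_txlist_done() helper (simplified, not the verbatim driver code):

/* Simplified sketch (assumption, not verbatim): complete every tx on
 * the list with the given completion status and LNet health status.
 */
static void txlist_done_sketch(struct list_head *txlist, int status,
			       enum lnet_msg_hstatus hstatus)
{
	struct kib_tx *tx, *tmp;

	list_for_each_entry_safe(tx, tmp, txlist, tx_list) {
		list_del(&tx->tx_list);
		tx->tx_waiting = 0;
		tx->tx_status = status;
		if (hstatus != LNET_MSG_STATUS_OK)
			tx->tx_hstatus = hstatus;	/* assumed health field */
		kiblnd_tx_done(tx);	/* finalises the LNet message(s) */
	}
}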
@@ -581,7 +581,9 @@ struct kib_conn {
struct list_head ibc_tx_queue_rsrvd; /* sends that need to */
/* reserve an ACK/DONE msg */
struct list_head ibc_active_txs; /* active tx awaiting completion */
- spinlock_t ibc_lock; /* serialise */
+ struct list_head ibc_zombie_txs; /* zombie tx awaiting done */
+ spinlock_t ibc_lock; /* serialise */
struct kib_rx *ibc_rxs; /* the rx descs */
struct kib_pages *ibc_rx_pages; /* premapped rx msg pages */
@@ -1005,6 +1007,7 @@ static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
#define KIBLND_CONN_PARAM(e) ((e)->param.conn.private_data)
#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)
+void kiblnd_abort_txs(struct kib_conn *conn, struct list_head *txs);
void kiblnd_map_rx_descs(struct kib_conn *conn);
void kiblnd_unmap_rx_descs(struct kib_conn *conn);
void kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node);
@@ -1211,6 +1211,21 @@ static int kiblnd_map_tx(struct lnet_ni *ni, struct kib_tx *tx,
LASSERT(!tx->tx_queued); /* not queued for sending already */
LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+ if (conn->ibc_state >= IBLND_CONN_DISCONNECTED) {
+ tx->tx_status = -ECONNABORTED;
+ tx->tx_waiting = 0;
+ if (tx->tx_conn) {
+ /* PUT_DONE first attached to conn as a PUT_REQ */
+ LASSERT(tx->tx_conn == conn);
+ LASSERT(tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE);
+ tx->tx_conn = NULL;
+ kiblnd_conn_decref(conn);
+ }
+ list_add(&tx->tx_list, &conn->ibc_zombie_txs);
+
+ return;
+ }
+
timeout_ns = lnet_get_lnd_timeout() * NSEC_PER_SEC;
tx->tx_queued = 1;
tx->tx_deadline = ktime_add_ns(ktime_get(), timeout_ns);
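This early return is the core of the fix. The function being modified is the tx queueing path (kiblnd_queue_tx_locked(); the @@ context line above shows the preceding function, as diff sometimes does), and it runs with ibc_lock held. Once the connection has reached IBLND_CONN_DISCONNECTED, a tx is parked on ibc_zombie_txs instead of being posted to a QP that is already dead, and its completion is deferred to kiblnd_destroy_conn(). A minimal sketch of the assumed caller pattern:

/* Sketch of the assumed caller pattern: the locked queueing function
 * is invoked under ibc_lock, so the DISCONNECTED check and the
 * list_add onto ibc_zombie_txs are atomic with connection teardown.
 */
static void queue_tx_sketch(struct kib_tx *tx, struct kib_conn *conn)
{
	spin_lock(&conn->ibc_lock);
	kiblnd_queue_tx_locked(tx, conn);	/* may park tx on ibc_zombie_txs */
	spin_unlock(&conn->ibc_lock);
}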
@@ -2056,7 +2071,7 @@ static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
}
-static void
+void
kiblnd_abort_txs(struct kib_conn *conn, struct list_head *txs)
{
LIST_HEAD(zombies);
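Dropping static here, together with the declaration added to the header above, makes kiblnd_abort_txs() callable from the rest of the LND. The actual new call site is not shown in these hunks; a purely hypothetical use, just to illustrate the interface, would be:

	/* Hypothetical caller (illustration only): abort every tx still
	 * queued on the connection before it is torn down.
	 */
	kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
	kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
	kiblnd_abort_txs(conn, &conn->ibc_active_txs);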
@@ -2123,8 +2138,6 @@ static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
LASSERT(!in_interrupt());
LASSERT(conn->ibc_state > IBLND_CONN_INIT);
- kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
-
/*
* abort_receives moves QP state to IB_QPS_ERR. This is only required
* for connections that didn't get as far as being connected, because
@@ -2132,6 +2145,8 @@ static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
*/
kiblnd_abort_receives(conn);
+ kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
+
/*
* Complete all tx descs not waiting for sends to complete.
* NB we should be safe from RDMA now that the QP has changed state
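Taken together, the last two hunks reorder the finalise path so the QP is pushed into the error state before the connection is marked IBLND_CONN_DISCONNECTED; only after that does the new check in the queueing path start diverting fresh txs to ibc_zombie_txs. Reconstructed from the hunks above (a sketch, not verbatim), the resulting sequence in kiblnd_finalise_conn() reads roughly:

	kiblnd_abort_receives(conn);	/* moves the QP to IB_QPS_ERR */
	kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
	/* from this point kiblnd_queue_tx_locked() parks any new tx on
	 * ibc_zombie_txs rather than posting to the dead QP
	 */
	/* ... then complete all tx descs not waiting for sends ... */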