From patchwork Fri Jun 4 17:14:06 2010
X-Patchwork-Submitter: Michael Heinz
X-Patchwork-Id: 104301
From: Mike Heinz
To: linux-rdma@vger.kernel.org, Hal Rosenstock, "Hefty, Sean", Roland Dreier
Date: Fri, 4 Jun 2010 12:14:06 -0500
Subject: [PATCH v2] allow passthrough of rmpp packets to user mad clients
Message-ID: <4C2744E8AD2982428C5BFE523DF8CDCB49A488DAD8@MNEXMB1.qlogic.org>

diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index ef1304f..efca783 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -207,12 +207,18 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	int ret2, qpn;
 	unsigned long flags;
 	u8 mgmt_class, vclass;
+	u8 rmpp_passthru = 0;
 
 	/* Validate parameters */
 	qpn = get_spl_qp_index(qp_type);
 	if (qpn == -1)
 		goto error1;
 
+	if (rmpp_version == IB_MGMT_RMPP_PASSTHRU) {
+		rmpp_passthru = 255;
+		rmpp_version = 0;
+	}
+
 	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
 		goto error1;
 
@@ -244,6 +250,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 			if (!is_vendor_oui(mad_reg_req->oui))
 				goto error1;
 		}
+
 		/* Make sure class supplied is consistent with RMPP */
 		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
 			if (rmpp_version)
@@ -302,6 +309,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
 	mad_agent_priv->reg_req = reg_req;
 	mad_agent_priv->agent.rmpp_version = rmpp_version;
+	mad_agent_priv->agent.rmpp_passthru = rmpp_passthru;
 	mad_agent_priv->agent.device = device;
 	mad_agent_priv->agent.recv_handler = recv_handler;
 	mad_agent_priv->agent.send_handler = send_handler;
@@ -1792,7 +1800,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 
 	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
 	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
-	if (mad_agent_priv->agent.rmpp_version) {
+	if (mad_agent_priv->agent.rmpp_version && !mad_agent_priv->agent.rmpp_passthru) {
 		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
 						      mad_recv_wc);
 		if (!mad_recv_wc) {
@@ -1801,29 +1809,47 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 		}
 	}
 
+	/*
+	 * At this point, the MAD is either not an RMPP MAD or we are
+	 * passing RMPP MADs through to the client.
+	 */
 	/* Complete corresponding request */
 	if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
 		spin_lock_irqsave(&mad_agent_priv->lock, flags);
 		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
-		if (!mad_send_wr) {
+		if (mad_send_wr) {
+			ib_mark_mad_done(mad_send_wr);
 			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
-			ib_free_recv_mad(mad_recv_wc);
-			deref_mad_agent(mad_agent_priv);
-			return;
-		}
-		ib_mark_mad_done(mad_send_wr);
-		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
-
-		/* Defined behavior is to complete response before request */
-		mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
-		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
-						   mad_recv_wc);
-		atomic_dec(&mad_agent_priv->refcount);
+
+			/* Defined behavior is to complete response before request */
+			mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
+			mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
+							   mad_recv_wc);
+			atomic_dec(&mad_agent_priv->refcount);
 
-		mad_send_wc.status = IB_WC_SUCCESS;
-		mad_send_wc.vendor_err = 0;
-		mad_send_wc.send_buf = &mad_send_wr->send_buf;
-		ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
+			mad_send_wc.status = IB_WC_SUCCESS;
+			mad_send_wc.vendor_err = 0;
+			mad_send_wc.send_buf = &mad_send_wr->send_buf;
+			ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
+		} else {
+			if (mad_agent_priv->agent.rmpp_passthru
+			    && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
+			    && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
+				/* user rmpp is in effect */
+				spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+
+				mad_recv_wc->wc->wr_id = 0;
+				mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
+								   mad_recv_wc);
+				atomic_dec(&mad_agent_priv->refcount);
+			} else {
+				/* not user rmpp, revert to normal behavior and drop the mad */
+				spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+				ib_free_recv_mad(mad_recv_wc);
+				deref_mad_agent(mad_agent_priv);
+				return;
+			}
+		}
 	} else {
 		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
 						   mad_recv_wc);
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 6babb72..baa11ae 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -501,7 +501,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
 	hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
 
-	if (!ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)) {
+	if (!agent->rmpp_version || !ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)) {
 		copy_offset = IB_MGMT_MAD_HDR;
 		rmpp_active = 0;
 	} else {
@@ -553,14 +553,22 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 		rmpp_mad->mad_hdr.tid = *tid;
 	}
 
-	spin_lock_irq(&file->send_lock);
-	ret = is_duplicate(file, packet);
-	if (!ret)
+	if (agent->rmpp_passthru
+	    && ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
+	    && (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
+		spin_lock_irq(&file->send_lock);
 		list_add_tail(&packet->list, &file->send_list);
-	spin_unlock_irq(&file->send_lock);
-	if (ret) {
-		ret = -EINVAL;
-		goto err_msg;
+		spin_unlock_irq(&file->send_lock);
+	} else {
+		spin_lock_irq(&file->send_lock);
+		ret = is_duplicate(file, packet);
+		if (!ret)
+			list_add_tail(&packet->list, &file->send_list);
+		spin_unlock_irq(&file->send_lock);
+		if (ret) {
+			ret = -EINVAL;
+			goto err_msg;
+		}
 	}
 
 	ret = ib_post_send_mad(packet->msg, NULL);
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index d3b9401..2651e93 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -79,6 +79,7 @@
 
 /* RMPP information */
 #define IB_MGMT_RMPP_VERSION		1
+#define IB_MGMT_RMPP_PASSTHRU		255
 
 #define IB_MGMT_RMPP_TYPE_DATA		1
 #define IB_MGMT_RMPP_TYPE_ACK		2
@@ -360,6 +361,7 @@ struct ib_mad_agent {
 	u32			hi_tid;
 	u8			port_num;
 	u8			rmpp_version;
+	u8			rmpp_passthru;
 };
 
 /**
@@ -436,7 +438,9 @@ struct ib_mad_reg_req {
  *   wishes to receive solicited responses.
  * @rmpp_version: If set, indicates that the client will send
  * and receive MADs that contain the RMPP header for the given version.
- * If set to 0, indicates that RMPP is not used by this client.
+ * If set to 0, indicates that RMPP is not used by this client. If set
+ * to IB_MGMT_RMPP_PASSTHRU (255), incoming RMPP MADs are passed through
+ * to the client. Otherwise, RMPP MADs are handled per the specified version.
  * @send_handler: The completion callback routine invoked after a send
  *   request has completed.
  * @recv_handler: The completion callback routine invoked for a received
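
For reviewers who want to try the new interface, here is a minimal sketch of
how an in-kernel client might register a MAD agent in passthrough mode. This
is not part of the patch: the handler bodies, the vendor management class,
and the placeholder OUI are illustrative assumptions; only
IB_MGMT_RMPP_PASSTHRU and its use as the rmpp_version argument come from the
change above.

#include <linux/bitops.h>
#include <rdma/ib_mad.h>

static void example_send_handler(struct ib_mad_agent *agent,
				 struct ib_mad_send_wc *send_wc)
{
	/* Release the send buffer once the send has completed. */
	ib_free_send_mad(send_wc->send_buf);
}

static void example_recv_handler(struct ib_mad_agent *agent,
				 struct ib_mad_recv_wc *recv_wc)
{
	/*
	 * With rmpp_passthru in effect, each RMPP segment is delivered
	 * as-is: the core no longer reassembles, so the client must parse
	 * the RMPP headers itself before releasing the MAD.
	 */
	ib_free_recv_mad(recv_wc);
}

static struct ib_mad_agent *example_register(struct ib_device *device,
					     u8 port_num)
{
	struct ib_mad_reg_req reg_req = {
		.mgmt_class	    = IB_MGMT_CLASS_VENDOR_RANGE2_START,
		.mgmt_class_version = 1,
		.oui		    = { 0x00, 0x12, 0x34 },	/* placeholder OUI */
	};

	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);

	/*
	 * Passing IB_MGMT_RMPP_PASSTHRU (255) instead of
	 * IB_MGMT_RMPP_VERSION asks the MAD core to hand RMPP MADs to
	 * this agent unmodified instead of reassembling them.
	 */
	return ib_register_mad_agent(device, port_num, IB_QPT_GSI,
				     &reg_req, IB_MGMT_RMPP_PASSTHRU,
				     example_send_handler,
				     example_recv_handler, NULL);
}

A userspace client would presumably request the same behavior by setting
rmpp_version to 255 in the ib_user_mad_reg_req it passes to the
IB_USER_MAD_REGISTER_AGENT ioctl, since ib_umad forwards that value to
ib_register_mad_agent() unchanged.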