@@ -29,8 +29,11 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
#include "qed_cxt.h"
#include "qed_hw.h"
+#include "qed_ll2.h"
#include "qed_rdma.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
@@ -474,12 +477,214 @@ void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
{
}
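+/* Post (or repost) an Rx buffer to the LL2 connection identified by @handle.
+ * If posting fails, LL2 never took ownership of the buffer, so free it here.
+ */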
+static int
+qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ll2_buff *buf, u8 handle)
+{
+ int rc;
+
+ rc = qed_ll2_post_rx_buffer(p_hwfn, handle, buf->data_phys_addr,
+ (u16)buf->buff_size, buf, 1);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n",
+ rc, handle);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size,
+ buf->data, buf->data_phys_addr);
+ kfree(buf);
+ }
+
+ return rc;
+}
+
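+/* Rx completion callback for the SYN LL2 connection. SYN packets that
+ * arrive with a calculated-but-bad L4 checksum are dropped; in every case
+ * the buffer is reposted to the Rx queue.
+ */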
+static void
+qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
+{
+ struct qed_iwarp_ll2_buff *buf = data->cookie;
+ struct qed_hwfn *p_hwfn = cxt;
+
+ if (GET_FIELD(data->parse_flags,
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) &&
+ GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) {
+		DP_NOTICE(p_hwfn, "SYN packet received with checksum error\n");
+ goto err;
+ }
+
+	/* Actual SYN packet processing is added later in this series */
+
+err:
+ qed_iwarp_ll2_post_rx(p_hwfn, buf,
+ p_hwfn->p_rdma_info->iwarp.ll2_syn_handle);
+}
+
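+/* Rx release callback, invoked by LL2 for buffers still posted when the
+ * connection is released; free the DMA memory and its descriptor.
+ */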
+static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle,
+ void *cookie, dma_addr_t rx_buf_addr,
+ bool b_last_packet)
+{
+ struct qed_iwarp_ll2_buff *buffer = cookie;
+ struct qed_hwfn *p_hwfn = cxt;
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
+ buffer->data, buffer->data_phys_addr);
+ kfree(buffer);
+}
+
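+/* Tx completion callback. Packets sent on this connection use recycled Rx
+ * buffers, so return the buffer to the Rx queue once transmission completes.
+ */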
+static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle,
+ void *cookie, dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet)
+{
+ struct qed_iwarp_ll2_buff *buffer = cookie;
+ struct qed_hwfn *p_hwfn = cxt;
+
+	/* This was originally an Rx buffer; repost it to the Rx queue */
+ qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle);
+}
+
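+/* Tx release callback, invoked for Tx buffers still pending when the
+ * connection is released; free them rather than reposting.
+ */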
+static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
+ void *cookie, dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet)
+{
+ struct qed_iwarp_ll2_buff *buffer = cookie;
+ struct qed_hwfn *p_hwfn = cxt;
+
+ if (!buffer)
+ return;
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
+ buffer->data, buffer->data_phys_addr);
+
+ kfree(buffer);
+}
+
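+/* Tear down the SYN LL2 connection, if one was acquired, and remove the MAC
+ * filter added by qed_iwarp_ll2_start().
+ */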
+static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+ int rc = 0;
+
+ if (iwarp_info->ll2_syn_handle != QED_IWARP_HANDLE_INVAL) {
+ rc = qed_ll2_terminate_connection(p_hwfn,
+ iwarp_info->ll2_syn_handle);
+ if (rc)
+			DP_INFO(p_hwfn, "Failed to terminate SYN connection\n");
+
+ qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_syn_handle);
+ iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
+ }
+
+ qed_llh_remove_mac_filter(p_hwfn,
+ p_ptt, p_hwfn->p_rdma_info->iwarp.mac_addr);
+ return rc;
+}
+
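+/* Allocate @num_rx_bufs DMA-coherent buffers of @buff_size bytes each and
+ * post them to the Rx queue of LL2 connection @ll2_handle.
+ */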
+static int
+qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn,
+ int num_rx_bufs, int buff_size, u8 ll2_handle)
+{
+ struct qed_iwarp_ll2_buff *buffer;
+ int rc = 0;
+ int i;
+
+ for (i = 0; i < num_rx_bufs; i++) {
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+ rc = -ENOMEM;
+ break;
+ }
+
+ buffer->data = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ buff_size,
+ &buffer->data_phys_addr,
+ GFP_KERNEL);
+ if (!buffer->data) {
+ kfree(buffer);
+ rc = -ENOMEM;
+ break;
+ }
+
+ buffer->buff_size = buff_size;
+ rc = qed_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle);
+ if (rc)
+			/* Buffers already posted are freed by qed_ll2 on release */
+ break;
+ }
+ return rc;
+}
+
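+/* Worst-case buffer size for a given MTU: payload plus an Ethernet header,
+ * up to two VLAN tags and a 2-byte pad (presumably to keep the IP header
+ * aligned), plus one extra cache line, rounded up to a cache-line multiple.
+ */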
+#define QED_IWARP_MAX_BUF_SIZE(mtu) \
+ ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE, \
+ ETH_CACHE_LINE_SIZE)
+
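+/* Add an Rx MAC filter for the iWARP MAC address and bring up a dedicated
+ * LL2 connection for intercepting TCP SYN packets, then pre-post its Rx
+ * buffers.
+ */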
+static int
+qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_iwarp_info *iwarp_info;
+ struct qed_ll2_acquire_data data;
+ struct qed_ll2_cbs cbs;
+ int rc = 0;
+
+ iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+ iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
+
+ iwarp_info->max_mtu = params->max_mtu;
+
+ ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr);
+
+ rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, params->mac_addr);
+ if (rc)
+ return rc;
+
+ /* Start SYN connection */
+ cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt;
+ cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt;
+ cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt;
+ cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt;
+ cbs.cookie = p_hwfn;
+
+ memset(&data, 0, sizeof(data));
+ data.input.conn_type = QED_LL2_TYPE_IWARP;
+ data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE;
+ data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
+ data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
+ data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */
+ data.input.tx_tc = PKT_LB_TC;
+ data.input.tx_dest = QED_LL2_TX_DEST_LB;
+ data.p_connection_handle = &iwarp_info->ll2_syn_handle;
+ data.cbs = &cbs;
+
+ rc = qed_ll2_acquire_connection(p_hwfn, &data);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n");
+ qed_llh_remove_mac_filter(p_hwfn, p_ptt, params->mac_addr);
+ return rc;
+ }
+
+ rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to establish LL2 connection\n");
+ goto err;
+ }
+
+ rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
+ QED_IWARP_LL2_SYN_RX_SIZE,
+ QED_IWARP_MAX_SYN_PKT_SIZE,
+ iwarp_info->ll2_syn_handle);
+ if (rc)
+ goto err;
+
+ return rc;
+err:
+ qed_iwarp_ll2_stop(p_hwfn, p_ptt);
+
+ return rc;
+}
+
int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
struct qed_rdma_start_in_params *params)
{
struct qed_iwarp_info *iwarp_info;
u32 rcv_wnd_size;
- int rc = 0;
iwarp_info = &p_hwfn->p_rdma_info->iwarp;
@@ -499,7 +704,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
qed_iwarp_async_event);
- return rc;
+ return qed_iwarp_ll2_start(p_hwfn, params, p_ptt);
}
int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -512,7 +717,7 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);
- return 0;
+ return qed_iwarp_ll2_stop(p_hwfn, p_ptt);
}
static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
@@ -42,6 +42,17 @@ enum qed_iwarp_qp_state {
enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state);
+#define QED_IWARP_LL2_SYN_TX_SIZE (128)
+#define QED_IWARP_LL2_SYN_RX_SIZE (256)
+#define QED_IWARP_MAX_SYN_PKT_SIZE (128)
+#define QED_IWARP_HANDLE_INVAL (0xff)
+
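+/* Descriptor for a DMA-coherent buffer posted to an iWARP LL2 connection */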
+struct qed_iwarp_ll2_buff {
+ void *data;
+ dma_addr_t data_phys_addr;
+ u32 buff_size;
+};
+
struct qed_iwarp_info {
spinlock_t iw_lock; /* for iwarp resources */
spinlock_t qp_lock; /* for teardown races */
@@ -50,6 +61,7 @@ struct qed_iwarp_info {
u8 mac_addr[ETH_ALEN];
u8 crc_needed;
u8 tcp_flags;
+ u8 ll2_syn_handle;
u8 peer2peer;
enum mpa_negotiation_mode mpa_rev;
enum mpa_rtr_type rtr_type;