@@ -15243,8 +15243,8 @@ int hfi1_init_dd(struct hfi1_devdata *dd)
(dd->revision >> CCE_REVISION_SW_SHIFT)
& CCE_REVISION_SW_MASK);
- /* alloc netdev data */
- ret = hfi1_netdev_alloc(dd);
+ /* alloc VNIC/AIP rx data */
+ ret = hfi1_alloc_rx(dd);
if (ret)
goto bail_cleanup;
@@ -15348,7 +15348,7 @@ int hfi1_init_dd(struct hfi1_devdata *dd)
hfi1_comp_vectors_clean_up(dd);
msix_clean_up_interrupts(dd);
bail_cleanup:
- hfi1_netdev_free(dd);
+ hfi1_free_rx(dd);
hfi1_pcie_ddcleanup(dd);
bail_free:
hfi1_free_devdata(dd);
@@ -69,7 +69,6 @@
#include <rdma/ib_hdrs.h>
#include <rdma/opa_addr.h>
#include <linux/rhashtable.h>
-#include <linux/netdevice.h>
#include <rdma/rdma_vt.h>
#include "chip_registers.h"
@@ -1060,6 +1059,7 @@ struct hfi1_vnic_data {
#define SERIAL_MAX 16 /* length of the serial number */
typedef int (*send_routine)(struct rvt_qp *, struct hfi1_pkt_state *, u64);
+struct hfi1_netdev_rx;
struct hfi1_devdata {
struct hfi1_ibdev verbs_dev; /* must be first */
/* pointers to related structs for this device */
@@ -1402,7 +1402,7 @@ struct hfi1_devdata {
/* Lock to protect IRQ SRC register access */
spinlock_t irq_src_lock;
int vnic_num_vports;
- struct net_device *dummy_netdev;
+ struct hfi1_netdev_rx *netdev_rx;
struct hfi1_affinity_node *affinity_entry;
/* Keeps track of IPoIB RSM rule users */
@@ -1775,7 +1775,7 @@ static void remove_one(struct pci_dev *pdev)
hfi1_unregister_ib_device(dd);
/* free netdev data */
- hfi1_netdev_free(dd);
+ hfi1_free_rx(dd);
/*
* Disable the IB link, disable interrupts on the device,
@@ -14,15 +14,14 @@
/**
* struct hfi1_netdev_rxq - Receive Queue for HFI
- * dummy netdev. Both IPoIB and VNIC netdevices will be working on
- * top of this device.
+ * Both IPoIB and VNIC netdevices will be working on top of this rx abstraction.
* @napi: napi object
- * @priv: ptr to netdev_priv
+ * @rx: ptr to netdev_rx
* @rcd: ptr to receive context data
*/
struct hfi1_netdev_rxq {
struct napi_struct napi;
- struct hfi1_netdev_priv *priv;
+ struct hfi1_netdev_rx *rx;
struct hfi1_ctxtdata *rcd;
};
@@ -36,7 +35,8 @@ struct hfi1_netdev_rxq {
#define NUM_NETDEV_MAP_ENTRIES HFI1_MAX_NETDEV_CTXTS
/**
- * struct hfi1_netdev_priv: data required to setup and run HFI netdev.
+ * struct hfi1_netdev_rx: data required to set up and run HFI netdev.
+ * @rx_napi: the dummy netdevice to support "polling" the receive contexts
* @dd: hfi1_devdata
* @rxq: pointer to dummy netdev receive queues.
* @num_rx_q: number of receive queues
@@ -48,7 +48,8 @@ struct hfi1_netdev_rxq {
* @netdevs: atomic counter of netdevs using dummy netdev.
* When 0 receive queues will be freed.
*/
-struct hfi1_netdev_priv {
+struct hfi1_netdev_rx {
+ struct net_device rx_napi;
struct hfi1_devdata *dd;
struct hfi1_netdev_rxq *rxq;
int num_rx_q;
@@ -61,41 +62,27 @@ struct hfi1_netdev_priv {
};
static inline
-struct hfi1_netdev_priv *hfi1_netdev_priv(struct net_device *dev)
-{
- return (struct hfi1_netdev_priv *)&dev[1];
-}
-
-static inline
int hfi1_netdev_ctxt_count(struct hfi1_devdata *dd)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
-
- return priv->num_rx_q;
+ return dd->netdev_rx->num_rx_q;
}
static inline
struct hfi1_ctxtdata *hfi1_netdev_get_ctxt(struct hfi1_devdata *dd, int ctxt)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
-
- return priv->rxq[ctxt].rcd;
+ return dd->netdev_rx->rxq[ctxt].rcd;
}
static inline
int hfi1_netdev_get_free_rmt_idx(struct hfi1_devdata *dd)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
-
- return priv->rmt_start;
+ return dd->netdev_rx->rmt_start;
}
static inline
void hfi1_netdev_set_free_rmt_idx(struct hfi1_devdata *dd, int rmt_idx)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
-
- priv->rmt_start = rmt_idx;
+ dd->netdev_rx->rmt_start = rmt_idx;
}
u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
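The accessors above now read straight from dd->netdev_rx instead of digging the private area out of a dummy net_device. A rough sketch of how a caller might use them follows; the example_dump_netdev_ctxts() helper is hypothetical and not part of the driver:

/* Illustrative only: walk the netdev receive contexts via the accessors. */
static void example_dump_netdev_ctxts(struct hfi1_devdata *dd)
{
	int i;

	for (i = 0; i < hfi1_netdev_ctxt_count(dd); i++) {
		struct hfi1_ctxtdata *rcd = hfi1_netdev_get_ctxt(dd, i);

		dd_dev_info(dd, "netdev rx queue %d uses context %d\n",
			    i, rcd->ctxt);
	}
}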
@@ -105,8 +92,8 @@ u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
void hfi1_netdev_disable_queues(struct hfi1_devdata *dd);
int hfi1_netdev_rx_init(struct hfi1_devdata *dd);
int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd);
-int hfi1_netdev_alloc(struct hfi1_devdata *dd);
-void hfi1_netdev_free(struct hfi1_devdata *dd);
+int hfi1_alloc_rx(struct hfi1_devdata *dd);
+void hfi1_free_rx(struct hfi1_devdata *dd);
int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data);
void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id);
void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id);
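hfi1_netdev_add_data(), hfi1_netdev_remove_data() and hfi1_netdev_get_data() form a small id-to-pointer registry (backed by the dev_tbl xarray, see below) that the IPoIB and VNIC netdevices use to publish themselves to the shared receive path. A minimal sketch of the expected calling pattern; example_register_client() and its id are illustrative, not a real caller:

/* Sketch of a hypothetical client: publish a net_device under an id,
 * look it up later (e.g. on the receive path), drop it on teardown.
 */
static int example_register_client(struct hfi1_devdata *dd, int id,
				   struct net_device *netdev)
{
	int ret = hfi1_netdev_add_data(dd, id, netdev);

	if (ret)
		return ret;

	if (hfi1_netdev_get_data(dd, id) != netdev)
		return -ENOENT;

	hfi1_netdev_remove_data(dd, id);
	return 0;
}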
@@ -17,11 +17,11 @@
#include <linux/etherdevice.h>
#include <rdma/ib_verbs.h>
-static int hfi1_netdev_setup_ctxt(struct hfi1_netdev_priv *priv,
+static int hfi1_netdev_setup_ctxt(struct hfi1_netdev_rx *rx,
struct hfi1_ctxtdata *uctxt)
{
unsigned int rcvctrl_ops;
- struct hfi1_devdata *dd = priv->dd;
+ struct hfi1_devdata *dd = rx->dd;
int ret;
uctxt->rhf_rcv_function_map = netdev_rhf_rcv_functions;
@@ -118,11 +118,11 @@ static void hfi1_netdev_deallocate_ctxt(struct hfi1_devdata *dd,
hfi1_free_ctxt(uctxt);
}
-static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_priv *priv,
+static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_rx *rx,
struct hfi1_ctxtdata **ctxt)
{
int rc;
- struct hfi1_devdata *dd = priv->dd;
+ struct hfi1_devdata *dd = rx->dd;
rc = hfi1_netdev_allocate_ctxt(dd, ctxt);
if (rc) {
@@ -130,7 +130,7 @@ static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_priv *priv,
return rc;
}
- rc = hfi1_netdev_setup_ctxt(priv, *ctxt);
+ rc = hfi1_netdev_setup_ctxt(rx, *ctxt);
if (rc) {
dd_dev_err(dd, "netdev ctxt setup failed %d\n", rc);
hfi1_netdev_deallocate_ctxt(dd, *ctxt);
@@ -183,31 +183,31 @@ u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
(u32)HFI1_MAX_NETDEV_CTXTS);
}
-static int hfi1_netdev_rxq_init(struct net_device *dev)
+static int hfi1_netdev_rxq_init(struct hfi1_netdev_rx *rx)
{
int i;
int rc;
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dev);
- struct hfi1_devdata *dd = priv->dd;
+ struct hfi1_devdata *dd = rx->dd;
+ struct net_device *dev = &rx->rx_napi;
- priv->num_rx_q = dd->num_netdev_contexts;
- priv->rxq = kcalloc_node(priv->num_rx_q, sizeof(struct hfi1_netdev_rxq),
- GFP_KERNEL, dd->node);
+ rx->num_rx_q = dd->num_netdev_contexts;
+ rx->rxq = kcalloc_node(rx->num_rx_q, sizeof(*rx->rxq),
+ GFP_KERNEL, dd->node);
- if (!priv->rxq) {
+ if (!rx->rxq) {
dd_dev_err(dd, "Unable to allocate netdev queue data\n");
return (-ENOMEM);
}
- for (i = 0; i < priv->num_rx_q; i++) {
- struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+ for (i = 0; i < rx->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
- rc = hfi1_netdev_allot_ctxt(priv, &rxq->rcd);
+ rc = hfi1_netdev_allot_ctxt(rx, &rxq->rcd);
if (rc)
goto bail_context_irq_failure;
hfi1_rcd_get(rxq->rcd);
- rxq->priv = priv;
+ rxq->rx = rx;
rxq->rcd->napi = &rxq->napi;
dd_dev_info(dd, "Setting rcv queue %d napi to context %d\n",
i, rxq->rcd->ctxt);
@@ -227,7 +227,7 @@ static int hfi1_netdev_rxq_init(struct net_device *dev)
bail_context_irq_failure:
dd_dev_err(dd, "Unable to allot receive context\n");
for (; i >= 0; i--) {
- struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+ struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
if (rxq->rcd) {
hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
@@ -235,20 +235,19 @@ static int hfi1_netdev_rxq_init(struct net_device *dev)
rxq->rcd = NULL;
}
}
- kfree(priv->rxq);
- priv->rxq = NULL;
+ kfree(rx->rxq);
+ rx->rxq = NULL;
return rc;
}
-static void hfi1_netdev_rxq_deinit(struct net_device *dev)
+static void hfi1_netdev_rxq_deinit(struct hfi1_netdev_rx *rx)
{
int i;
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dev);
- struct hfi1_devdata *dd = priv->dd;
+ struct hfi1_devdata *dd = rx->dd;
- for (i = 0; i < priv->num_rx_q; i++) {
- struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+ for (i = 0; i < rx->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
netif_napi_del(&rxq->napi);
hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
@@ -256,41 +255,41 @@ static void hfi1_netdev_rxq_deinit(struct net_device *dev)
rxq->rcd = NULL;
}
- kfree(priv->rxq);
- priv->rxq = NULL;
- priv->num_rx_q = 0;
+ kfree(rx->rxq);
+ rx->rxq = NULL;
+ rx->num_rx_q = 0;
}
-static void enable_queues(struct hfi1_netdev_priv *priv)
+static void enable_queues(struct hfi1_netdev_rx *rx)
{
int i;
- for (i = 0; i < priv->num_rx_q; i++) {
- struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+ for (i = 0; i < rx->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
- dd_dev_info(priv->dd, "enabling queue %d on context %d\n", i,
+ dd_dev_info(rx->dd, "enabling queue %d on context %d\n", i,
rxq->rcd->ctxt);
napi_enable(&rxq->napi);
- hfi1_rcvctrl(priv->dd,
+ hfi1_rcvctrl(rx->dd,
HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB,
rxq->rcd);
}
}
-static void disable_queues(struct hfi1_netdev_priv *priv)
+static void disable_queues(struct hfi1_netdev_rx *rx)
{
int i;
- msix_netdev_synchronize_irq(priv->dd);
+ msix_netdev_synchronize_irq(rx->dd);
- for (i = 0; i < priv->num_rx_q; i++) {
- struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+ for (i = 0; i < rx->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
- dd_dev_info(priv->dd, "disabling queue %d on context %d\n", i,
+ dd_dev_info(rx->dd, "disabling queue %d on context %d\n", i,
rxq->rcd->ctxt);
/* wait for napi if it was scheduled */
- hfi1_rcvctrl(priv->dd,
+ hfi1_rcvctrl(rx->dd,
HFI1_RCVCTRL_CTXT_DIS | HFI1_RCVCTRL_INTRAVAIL_DIS,
rxq->rcd);
napi_synchronize(&rxq->napi);
@@ -307,15 +306,14 @@ static void disable_queues(struct hfi1_netdev_priv *priv)
*/
int hfi1_netdev_rx_init(struct hfi1_devdata *dd)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+ struct hfi1_netdev_rx *rx = dd->netdev_rx;
int res;
- if (atomic_fetch_inc(&priv->netdevs))
+ if (atomic_fetch_inc(&rx->netdevs))
return 0;
mutex_lock(&hfi1_mutex);
- init_dummy_netdev(dd->dummy_netdev);
- res = hfi1_netdev_rxq_init(dd->dummy_netdev);
+ res = hfi1_netdev_rxq_init(rx);
mutex_unlock(&hfi1_mutex);
return res;
}
@@ -328,12 +326,12 @@ int hfi1_netdev_rx_init(struct hfi1_devdata *dd)
*/
int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+ struct hfi1_netdev_rx *rx = dd->netdev_rx;
/* destroy the RX queues only if it is the last netdev going away */
- if (atomic_fetch_add_unless(&priv->netdevs, -1, 0) == 1) {
+ if (atomic_fetch_add_unless(&rx->netdevs, -1, 0) == 1) {
mutex_lock(&hfi1_mutex);
- hfi1_netdev_rxq_deinit(dd->dummy_netdev);
+ hfi1_netdev_rxq_deinit(rx);
mutex_unlock(&hfi1_mutex);
}
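The netdevs counter makes these two calls reference counted: the first hfi1_netdev_rx_init() builds the receive queues, later callers return 0 immediately, and only the last hfi1_netdev_rx_destroy() tears the queues down. A hedged sketch of how a netdev client would pair them in its open/stop path (the function names are illustrative):

/* Sketch: per-client open/stop around the shared rx queues. */
static int example_client_open(struct hfi1_devdata *dd)
{
	/* Allocates the rx queues only for the first opener. */
	return hfi1_netdev_rx_init(dd);
}

static void example_client_stop(struct hfi1_devdata *dd)
{
	/* Frees the rx queues only when the last user goes away. */
	hfi1_netdev_rx_destroy(dd);
}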
@@ -341,43 +339,46 @@ int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd)
}
/**
- * hfi1_netdev_alloc - Allocates netdev and private data. It is required
- * because RMT index and MSI-X interrupt can be set only
- * during driver initialization.
- *
+ * hfi1_alloc_rx - Allocates the rx support structure
* @dd: hfi1 dev data
+ *
+ * Allocate the rx structure to support gathering the receive
+ * resources and the dummy netdev.
+ *
+ * Updates dd->netdev_rx on success.
+ *
+ * Return: 0 on success, -error on failure.
+ *
*/
-int hfi1_netdev_alloc(struct hfi1_devdata *dd)
+int hfi1_alloc_rx(struct hfi1_devdata *dd)
{
- struct hfi1_netdev_priv *priv;
- const int netdev_size = sizeof(*dd->dummy_netdev) +
- sizeof(struct hfi1_netdev_priv);
+ struct hfi1_netdev_rx *rx;
- dd_dev_info(dd, "allocating netdev size %d\n", netdev_size);
- dd->dummy_netdev = kcalloc_node(1, netdev_size, GFP_KERNEL, dd->node);
+ dd_dev_info(dd, "allocating rx size %zu\n", sizeof(*rx));
+ rx = kzalloc_node(sizeof(*rx), GFP_KERNEL, dd->node);
- if (!dd->dummy_netdev)
+ if (!rx)
return -ENOMEM;
+ rx->dd = dd;
+ init_dummy_netdev(&rx->rx_napi);
- priv = hfi1_netdev_priv(dd->dummy_netdev);
- priv->dd = dd;
- xa_init(&priv->dev_tbl);
- atomic_set(&priv->enabled, 0);
- atomic_set(&priv->netdevs, 0);
+ xa_init(&rx->dev_tbl);
+ atomic_set(&rx->enabled, 0);
+ atomic_set(&rx->netdevs, 0);
+ dd->netdev_rx = rx;
return 0;
}
-void hfi1_netdev_free(struct hfi1_devdata *dd)
+void hfi1_free_rx(struct hfi1_devdata *dd)
{
- if (dd->dummy_netdev) {
- struct hfi1_netdev_priv *priv =
- hfi1_netdev_priv(dd->dummy_netdev);
-
- dd_dev_info(dd, "hfi1 netdev freed\n");
- xa_destroy(&priv->dev_tbl);
- kfree(dd->dummy_netdev);
- dd->dummy_netdev = NULL;
+ if (dd->netdev_rx) {
+ struct hfi1_netdev_rx *rx = dd->netdev_rx;
+
+ dd_dev_info(dd, "hfi1 rx freed\n");
+ xa_destroy(&rx->dev_tbl);
+ kfree(dd->netdev_rx);
+ dd->netdev_rx = NULL;
}
}
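The allocation above inverts the old layout: instead of a dummy net_device with hfi1_netdev_priv tucked in behind it (the &dev[1] trick), the dummy net_device is now embedded in hfi1_netdev_rx as rx_napi and initialized with init_dummy_netdev() at allocation time. The driver reaches the rx structure through dd->netdev_rx; if a path only had the embedded net_device in hand, the usual container_of() pattern would recover it, as in this illustrative helper (not part of the patch):

/* Illustrative: recover the rx structure from the embedded dummy netdev. */
static struct hfi1_netdev_rx *example_rx_from_dev(struct net_device *dev)
{
	return container_of(dev, struct hfi1_netdev_rx, rx_napi);
}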
@@ -392,33 +393,33 @@ void hfi1_netdev_free(struct hfi1_devdata *dd)
*/
void hfi1_netdev_enable_queues(struct hfi1_devdata *dd)
{
- struct hfi1_netdev_priv *priv;
+ struct hfi1_netdev_rx *rx;
- if (!dd->dummy_netdev)
+ if (!dd->netdev_rx)
return;
- priv = hfi1_netdev_priv(dd->dummy_netdev);
- if (atomic_fetch_inc(&priv->enabled))
+ rx = dd->netdev_rx;
+ if (atomic_fetch_inc(&rx->enabled))
return;
mutex_lock(&hfi1_mutex);
- enable_queues(priv);
+ enable_queues(rx);
mutex_unlock(&hfi1_mutex);
}
void hfi1_netdev_disable_queues(struct hfi1_devdata *dd)
{
- struct hfi1_netdev_priv *priv;
+ struct hfi1_netdev_rx *rx;
- if (!dd->dummy_netdev)
+ if (!dd->netdev_rx)
return;
- priv = hfi1_netdev_priv(dd->dummy_netdev);
- if (atomic_dec_if_positive(&priv->enabled))
+ rx = dd->netdev_rx;
+ if (atomic_dec_if_positive(&rx->enabled))
return;
mutex_lock(&hfi1_mutex);
- disable_queues(priv);
+ disable_queues(rx);
mutex_unlock(&hfi1_mutex);
}
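Both helpers are guarded by the enabled counter, so repeated enable calls from several clients turn the hardware on only once, and the queues stay up until the matching number of disable calls brings the counter back to zero. A sketch of a link-state hook pairing them (example_link_state_change() is hypothetical):

/* Sketch: enable rx queues on link up, quiesce them on link down. */
static void example_link_state_change(struct hfi1_devdata *dd, bool link_up)
{
	if (link_up)
		hfi1_netdev_enable_queues(dd);
	else
		hfi1_netdev_disable_queues(dd);
}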
@@ -434,9 +435,9 @@ void hfi1_netdev_disable_queues(struct hfi1_devdata *dd)
*/
int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+ struct hfi1_netdev_rx *rx = dd->netdev_rx;
- return xa_insert(&priv->dev_tbl, id, data, GFP_NOWAIT);
+ return xa_insert(&rx->dev_tbl, id, data, GFP_NOWAIT);
}
/**
@@ -448,9 +449,9 @@ int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data)
*/
void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+ struct hfi1_netdev_rx *rx = dd->netdev_rx;
- return xa_erase(&priv->dev_tbl, id);
+ return xa_erase(&rx->dev_tbl, id);
}
/**
@@ -461,9 +462,9 @@ void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id)
*/
void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+ struct hfi1_netdev_rx *rx = dd->netdev_rx;
- return xa_load(&priv->dev_tbl, id);
+ return xa_load(&rx->dev_tbl, id);
}
/**
@@ -474,11 +475,11 @@ void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id)
*/
void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+ struct hfi1_netdev_rx *rx = dd->netdev_rx;
unsigned long index = *start_id;
void *ret;
- ret = xa_find(&priv->dev_tbl, &index, UINT_MAX, XA_PRESENT);
+ ret = xa_find(&rx->dev_tbl, &index, UINT_MAX, XA_PRESENT);
*start_id = (int)index;
return ret;
}
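hfi1_netdev_get_first_data() is a thin wrapper around xa_find(): it returns the first entry whose id is at or above *start_id and writes the found id back through start_id. A hedged sketch of iterating every registered client with it (example_for_each_client() is illustrative):

/* Sketch: visit every client currently registered in dev_tbl. */
static void example_for_each_client(struct hfi1_devdata *dd)
{
	int id = 0;
	void *data;

	while ((data = hfi1_netdev_get_first_data(dd, &id))) {
		dd_dev_info(dd, "netdev client registered at id %d\n", id);
		id++;	/* step past the entry just returned */
	}
}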