@@ -88,19 +88,18 @@ static int ena_change_mtu(struct net_device *dev, int new_mtu)
 	return ret;
 }
 
-int ena_xmit_common(struct net_device *dev,
+int ena_xmit_common(struct ena_adapter *adapter,
 		    struct ena_ring *ring,
 		    struct ena_tx_buffer *tx_info,
 		    struct ena_com_tx_ctx *ena_tx_ctx,
 		    u16 next_to_use,
 		    u32 bytes)
 {
-	struct ena_adapter *adapter = netdev_priv(dev);
 	int rc, nb_hw_desc;
 
 	if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
 						ena_tx_ctx))) {
-		netif_dbg(adapter, tx_queued, dev,
+		netif_dbg(adapter, tx_queued, adapter->netdev,
 			  "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
 			  ring->qid);
 		ena_ring_tx_doorbell(ring);
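
The change inside ena_xmit_common() is mechanical: the netdev_priv() lookup
drops out of the body, and every remaining use of dev becomes adapter->netdev.
The two handles are mutually recoverable, as the sketch below illustrates
(the alloc_etherdev_mq() call, the adapter->netdev back-pointer, and
ENA_MAX_NUM_IO_QUEUES follow my reading of the upstream driver; treat the
snippet as an assumption, not an excerpt from this patch):

	/* Illustrative sketch, not part of the patch: netdev_priv() returns
	 * the driver-private area that alloc_etherdev_mq() reserved behind
	 * struct net_device, so each handle can reach the other.
	 */
	struct net_device *netdev = alloc_etherdev_mq(sizeof(struct ena_adapter),
						      ENA_MAX_NUM_IO_QUEUES);
	struct ena_adapter *adapter = netdev_priv(netdev); /* netdev -> adapter */

	adapter->netdev = netdev;	/* set once at probe; adapter -> netdev */

Passing the adapter directly therefore loses no information and saves the
netdev_priv() pointer arithmetic on every transmit.
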
@@ -115,7 +114,7 @@ int ena_xmit_common(struct net_device *dev,
 	 * ena_com_prepare_tx() are fatal and therefore require a device reset.
 	 */
 	if (unlikely(rc)) {
-		netif_err(adapter, tx_queued, dev,
+		netif_err(adapter, tx_queued, adapter->netdev,
			  "Failed to prepare tx bufs\n");
 		ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
				   &ring->syncp);
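
With no netdev argument left in scope, both log sites now go through
adapter->netdev; the failure accounting itself is untouched. For reference,
ena_increase_stat() is the driver's small u64_stats wrapper, roughly as
follows (a sketch from my reading of the upstream helper, not quoted from
this patch):

	/* Sketch: bump a 64-bit stat under the ring's u64_stats_sync. */
	static void ena_increase_stat(u64 *statp, u64 cnt,
				      struct u64_stats_sync *syncp)
	{
		u64_stats_update_begin(syncp);
		(*statp) += cnt;
		u64_stats_update_end(syncp);
	}
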
@@ -2599,7 +2598,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* set flags and meta data */
 	ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching);
 
-	rc = ena_xmit_common(dev,
+	rc = ena_xmit_common(adapter,
			     tx_ring,
			     tx_info,
			     &ena_tx_ctx,
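
On the skb transmit path the new argument is free: ena_start_xmit() already
derives the adapter from its net_device on entry, so it forwards a value it
holds anyway instead of having the callee re-derive it. Condensed shape of
the caller (abridged sketch; only the first line is the point):

	static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct ena_adapter *adapter = netdev_priv(dev); /* once, on entry */

		/* ... queue selection, DMA mapping, ena_tx_csum(), then the
		 * ena_xmit_common(adapter, ...) call shown above ...
		 */
		return NETDEV_TX_OK;
	}
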
@@ -426,7 +426,7 @@ static inline void ena_ring_tx_doorbell(struct ena_ring *tx_ring)
 	ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp);
 }
 
-int ena_xmit_common(struct net_device *dev,
+int ena_xmit_common(struct ena_adapter *adapter,
		     struct ena_ring *ring,
		     struct ena_tx_buffer *tx_info,
		     struct ena_com_tx_ctx *ena_tx_ctx,
@@ -73,7 +73,7 @@ error_report_dma_error:
 }
 
 int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
-		       struct net_device *dev,
+		       struct ena_adapter *adapter,
		        struct xdp_frame *xdpf,
		        int flags)
 {
@@ -93,7 +93,7 @@ int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
 	ena_tx_ctx.req_id = req_id;
 
-	rc = ena_xmit_common(dev,
+	rc = ena_xmit_common(adapter,
			     xdp_ring,
			     tx_info,
			     &ena_tx_ctx,
@@ -141,7 +141,7 @@ int ena_xdp_xmit(struct net_device *dev, int n,
 	spin_lock(&xdp_ring->xdp_tx_lock);
 
 	for (i = 0; i < n; i++) {
-		if (ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0))
+		if (ena_xdp_xmit_frame(xdp_ring, adapter, frames[i], 0))
			break;
		nxmit++;
 	}
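
ena_xdp_xmit() is the driver's ndo_xdp_xmit handler, so the stack hands it a
net_device; upstream it converts that to the adapter once with netdev_priv()
before taking the lock, and this loop now reuses that single conversion for
all n frames. Sketch of the handler's contract (the derivation and the return
convention are assumed from the upstream function):

	int ena_xdp_xmit(struct net_device *dev, int n,
			 struct xdp_frame **frames, u32 flags)
	{
		struct ena_adapter *adapter = netdev_priv(dev); /* once per batch */
		int nxmit = 0;

		/* ... choose the XDP ring, take xdp_tx_lock, run the loop
		 * shown above ...
		 */
		return nxmit;	/* frames queued before the first failure */
	}
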
@@ -36,7 +36,7 @@ void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
					   int first, int count);
 int ena_xdp_io_poll(struct napi_struct *napi, int budget);
 int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
-		       struct net_device *dev,
+		       struct ena_adapter *adapter,
		        struct xdp_frame *xdpf,
		        int flags);
 int ena_xdp_xmit(struct net_device *dev, int n,
@@ -108,7 +108,7 @@ static inline int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
		/* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
		spin_lock(&xdp_ring->xdp_tx_lock);
 
-		if (ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf,
+		if (ena_xdp_xmit_frame(xdp_ring, rx_ring->adapter, xdpf,
				       XDP_XMIT_FLUSH))
			xdp_return_frame(xdpf);
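
The XDP_TX path runs in NAPI context with no net_device parameter anywhere in
scope, which is why this call site pulls the adapter out of the RX ring
instead. That relies on struct ena_ring carrying an adapter back-pointer; the
hypothetical helper below shows where such a field would be populated (the
function name and placement are mine, only the ->adapter field itself comes
from the hunk above):

	/* Hypothetical sketch: each ring caches its owning adapter so that
	 * datapath code can reach it without touching a net_device. */
	static void ena_set_ring_backrefs(struct ena_adapter *adapter)
	{
		int i;

		for (i = 0; i < adapter->num_io_queues; i++) {
			adapter->tx_ring[i].adapter = adapter;
			adapter->rx_ring[i].adapter = adapter;
		}
	}
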