@@ -765,7 +765,7 @@ e1000e_process_tx_desc(E1000ECore *core,
}
tx->skip_cp = false;
- net_tx_pkt_reset(tx->tx_pkt);
+ net_tx_pkt_reset(tx->tx_pkt, core->owner);
tx->sum_needed = 0;
tx->cptse = 0;
@@ -3447,7 +3447,7 @@ e1000e_core_pci_uninit(E1000ECore *core)
qemu_del_vm_change_state_handler(core->vmstate);
for (i = 0; i < E1000E_NUM_QUEUES; i++) {
- net_tx_pkt_reset(core->tx[i].tx_pkt);
+ net_tx_pkt_reset(core->tx[i].tx_pkt, core->owner);
net_tx_pkt_uninit(core->tx[i].tx_pkt);
}
@@ -3572,7 +3572,7 @@ static void e1000e_reset(E1000ECore *core, bool sw)
e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac);
for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
- net_tx_pkt_reset(core->tx[i].tx_pkt);
+ net_tx_pkt_reset(core->tx[i].tx_pkt, core->owner);
memset(&core->tx[i].props, 0, sizeof(core->tx[i].props));
core->tx[i].skip_cp = false;
}
@@ -523,6 +523,7 @@ igb_on_tx_done_update_stats(IGBCore *core, struct NetTxPkt *tx_pkt)
static void
igb_process_tx_desc(IGBCore *core,
+ PCIDevice *dev,
struct igb_tx *tx,
union e1000_adv_tx_desc *tx_desc,
int queue_index)
@@ -588,7 +589,7 @@ igb_process_tx_desc(IGBCore *core,
tx->first = true;
tx->skip_cp = false;
- net_tx_pkt_reset(tx->tx_pkt);
+ net_tx_pkt_reset(tx->tx_pkt, dev);
}
}
@@ -803,6 +804,8 @@ igb_start_xmit(IGBCore *core, const IGB_TxRing *txr)
d = core->owner;
}
+ net_tx_pkt_reset(txr->tx->tx_pkt, d);
+
while (!igb_ring_empty(core, txi)) {
base = igb_ring_head_descr(core, txi);
@@ -811,7 +814,7 @@ igb_start_xmit(IGBCore *core, const IGB_TxRing *txr)
trace_e1000e_tx_descr((void *)(intptr_t)desc.read.buffer_addr,
desc.read.cmd_type_len, desc.wb.status);
- igb_process_tx_desc(core, txr->tx, &desc, txi->idx);
+ igb_process_tx_desc(core, d, txr->tx, &desc, txi->idx);
igb_ring_advance(core, txi, 1);
eic |= igb_txdesc_writeback(core, base, &desc, txi);
}
@@ -3828,7 +3831,7 @@ igb_core_pci_realize(IGBCore *core,
core->vmstate = qemu_add_vm_change_state_handler(igb_vm_state_change, core);
for (i = 0; i < IGB_NUM_QUEUES; i++) {
- net_tx_pkt_init(&core->tx[i].tx_pkt, core->owner, E1000E_MAX_TX_FRAGS);
+ net_tx_pkt_init(&core->tx[i].tx_pkt, NULL, E1000E_MAX_TX_FRAGS);
}
net_rx_pkt_init(&core->rx_pkt);
@@ -3853,7 +3856,7 @@ igb_core_pci_uninit(IGBCore *core)
qemu_del_vm_change_state_handler(core->vmstate);
for (i = 0; i < IGB_NUM_QUEUES; i++) {
- net_tx_pkt_reset(core->tx[i].tx_pkt);
+ net_tx_pkt_reset(core->tx[i].tx_pkt, NULL);
net_tx_pkt_uninit(core->tx[i].tx_pkt);
}
@@ -4026,7 +4029,7 @@ static void igb_reset(IGBCore *core, bool sw)
for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
tx = &core->tx[i];
- net_tx_pkt_reset(tx->tx_pkt);
+ net_tx_pkt_reset(tx->tx_pkt, NULL);
memset(tx->ctx, 0, sizeof(tx->ctx));
tx->first = true;
tx->skip_cp = false;
@@ -443,7 +443,7 @@ void net_tx_pkt_dump(struct NetTxPkt *pkt)
#endif
}
-void net_tx_pkt_reset(struct NetTxPkt *pkt)
+void net_tx_pkt_reset(struct NetTxPkt *pkt, PCIDevice *pci_dev)
{
int i;
@@ -467,6 +467,7 @@ void net_tx_pkt_reset(struct NetTxPkt *pkt)
pkt->raw[i].iov_len, DMA_DIRECTION_TO_DEVICE, 0);
}
}
+ pkt->pci_dev = pci_dev;
pkt->raw_frags = 0;
pkt->hdr_len = 0;
@@ -148,9 +148,10 @@ void net_tx_pkt_dump(struct NetTxPkt *pkt);
* reset tx packet private context (needed to be called between packets)
*
* @pkt: packet
+ * @dev: PCI device processing the next packet
*
*/
-void net_tx_pkt_reset(struct NetTxPkt *pkt);
+void net_tx_pkt_reset(struct NetTxPkt *pkt, PCIDevice *dev);
/**
* Send packet to qemu. handles sw offloads if vhdr is not supported.
@@ -678,7 +678,7 @@ static void vmxnet3_process_tx_queue(VMXNET3State *s, int qidx)
vmxnet3_complete_packet(s, qidx, txd_idx);
s->tx_sop = true;
s->skip_current_tx_pkt = false;
- net_tx_pkt_reset(s->tx_pkt);
+ net_tx_pkt_reset(s->tx_pkt, PCI_DEVICE(s));
}
}
}
@@ -1159,7 +1159,7 @@ static void vmxnet3_deactivate_device(VMXNET3State *s)
{
if (s->device_active) {
VMW_CBPRN("Deactivating vmxnet3...");
- net_tx_pkt_reset(s->tx_pkt);
+ net_tx_pkt_reset(s->tx_pkt, PCI_DEVICE(s));
net_tx_pkt_uninit(s->tx_pkt);
net_rx_pkt_uninit(s->rx_pkt);
s->device_active = false;
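
For illustration only, not part of the patch: a self-contained C sketch of the ownership hand-off that the extra net_tx_pkt_reset(pkt, dev) argument enables. The Device and Pkt types and the fake_dma_map()/fake_dma_unmap() helpers below are invented stand-ins for PCIDevice and the DMA map/unmap calls; the point is that, as in the hunk where pkt->pci_dev is assigned only after the unmap loop, fragments are released using the device recorded for the current packet, and the device passed to reset becomes the one recorded for the next packet. That is what lets igb_start_xmit pass the device d it resolved (falling back to core->owner) instead of a device fixed once at init time.

/*
 * Standalone sketch (not QEMU code) of the reset-time ownership hand-off.
 * Names here are invented for illustration only.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct Device { const char *name; } Device;

typedef struct Pkt {
    Device *owner;   /* device that mapped the current fragment */
    void   *frag;    /* one "mapped" fragment, kept to a single pointer */
} Pkt;

static void *fake_dma_map(Device *d, size_t len)
{
    printf("map   %zu bytes via %s\n", len, d->name);
    return malloc(len);
}

static void fake_dma_unmap(Device *d, void *p)
{
    printf("unmap via %s\n", d->name);
    free(p);
}

/* Analogue of net_tx_pkt_reset(pkt, dev): unmap with the current owner,
 * then record the device that will own the next packet. */
static void pkt_reset(Pkt *pkt, Device *next_owner)
{
    if (pkt->frag) {
        fake_dma_unmap(pkt->owner, pkt->frag);
        pkt->frag = NULL;
    }
    pkt->owner = next_owner;
}

int main(void)
{
    Device pf = { "PF" }, vf = { "VF0" };
    Pkt pkt = { .owner = &pf, .frag = NULL };

    pkt.frag = fake_dma_map(pkt.owner, 64);   /* packet built via PF */
    pkt_reset(&pkt, &vf);                     /* unmap via PF, next owner VF0 */

    pkt.frag = fake_dma_map(pkt.owner, 128);  /* next packet built via VF0 */
    pkt_reset(&pkt, &pf);                     /* unmap via VF0 */
    return 0;
}

The ordering mirrors the net_tx_pkt.c hunk above: the unmap loop runs before pkt->pci_dev is overwritten, so buffers mapped for the previous packet are released before the owner is switched.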