--- a/hw/net/e1000e_core.c
+++ b/hw/net/e1000e_core.c
@@ -629,23 +629,30 @@ e1000e_rss_parse_packet(E1000ECore *core,
     info->queue = E1000_RSS_QUEUE(&core->mac[RETA], info->hash);
 }
 
-static void
+static bool
 e1000e_setup_tx_offloads(E1000ECore *core, struct e1000e_tx *tx)
 {
     if (tx->props.tse && tx->cptse) {
-        net_tx_pkt_build_vheader(tx->tx_pkt, true, true, tx->props.mss);
+        if (!net_tx_pkt_build_vheader(tx->tx_pkt, true, true, tx->props.mss)) {
+            return false;
+        }
+
         net_tx_pkt_update_ip_checksums(tx->tx_pkt);
         e1000x_inc_reg_if_not_full(core->mac, TSCTC);
-        return;
+        return true;
     }
 
     if (tx->sum_needed & E1000_TXD_POPTS_TXSM) {
-        net_tx_pkt_build_vheader(tx->tx_pkt, false, true, 0);
+        if (!net_tx_pkt_build_vheader(tx->tx_pkt, false, true, 0)) {
+            return false;
+        }
     }
 
     if (tx->sum_needed & E1000_TXD_POPTS_IXSM) {
         net_tx_pkt_update_ip_hdr_checksum(tx->tx_pkt);
     }
+
+    return true;
 }
 
 static bool
@@ -654,7 +661,9 @@ e1000e_tx_pkt_send(E1000ECore *core, struct e1000e_tx *tx, int queue_index)
     int target_queue = MIN(core->max_queue_num, queue_index);
     NetClientState *queue = qemu_get_subqueue(core->owner_nic, target_queue);
 
-    e1000e_setup_tx_offloads(core, tx);
+    if (!e1000e_setup_tx_offloads(core, tx)) {
+        return false;
+    }
 
     net_tx_pkt_dump(tx->tx_pkt);
 
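
With these two hunks, a failed virtio-header build now propagates up the
e1000e transmit path and the frame is dropped, instead of being sent with a
header derived from partly uninitialized stack data. A minimal standalone
sketch of that calling convention follows; setup_offloads() and pkt_send()
are hypothetical stand-ins that only mirror the shape of the QEMU functions,
not QEMU code:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for e1000e_setup_tx_offloads(): succeeds only when the
     * payload is long enough to contain a fixed TCP header (20 bytes). */
    static bool setup_offloads(size_t payload_len)
    {
        return payload_len >= 20;
    }

    /* Stand-in for e1000e_tx_pkt_send(): bail out before transmitting
     * if offload setup failed, exactly as the patch does. */
    static bool pkt_send(size_t payload_len)
    {
        if (!setup_offloads(payload_len)) {
            return false;           /* malformed descriptor: drop the frame */
        }
        printf("sent %zu payload bytes\n", payload_len);
        return true;
    }

    int main(void)
    {
        pkt_send(1400);             /* a normal TSO segment goes out */
        if (!pkt_send(8)) {         /* a truncated payload is now dropped */
            puts("dropped truncated frame");
        }
        return 0;
    }
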
--- a/hw/net/net_tx_pkt.c
+++ b/hw/net/net_tx_pkt.c
@@ -304,10 +304,11 @@ func_exit:
     return rc;
 }
 
-void net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable,
+bool net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable,
     bool csum_enable, uint32_t gso_size)
 {
     struct tcp_hdr l4hdr;
+    size_t bytes_read;
     assert(pkt);
 
     /* csum has to be enabled if tso is. */
@@ -328,8 +329,12 @@ void net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable,
 
     case VIRTIO_NET_HDR_GSO_TCPV4:
     case VIRTIO_NET_HDR_GSO_TCPV6:
-        iov_to_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG], pkt->payload_frags,
-                   0, &l4hdr, sizeof(l4hdr));
+        bytes_read = iov_to_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG],
+                                pkt->payload_frags, 0, &l4hdr, sizeof(l4hdr));
+        if (bytes_read < sizeof(l4hdr)) {
+            return false;
+        }
+
         pkt->virt_hdr.hdr_len = pkt->hdr_len + l4hdr.th_off * sizeof(uint32_t);
         pkt->virt_hdr.gso_size = gso_size;
         break;
@@ -354,6 +359,8 @@ void net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable,
             break;
         }
     }
+
+    return true;
 }
 
 void net_tx_pkt_setup_vlan_header_ex(struct NetTxPkt *pkt,
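
The core of the fix: iov_to_buf() returns the number of bytes actually
copied, which is less than sizeof(l4hdr) when the guest supplies a TSO
payload shorter than a TCP header. Before this change the short read was
ignored, so l4hdr.th_off (the TCP data offset, in 32-bit words) was taken
from whatever happened to be on the stack and fed into hdr_len. Below is a
self-contained sketch of the same bounded-gather pattern using POSIX
struct iovec; iov_gather is a stand-in name, not QEMU's util/iov.c
implementation:

    #include <stdio.h>
    #include <string.h>
    #include <sys/uio.h>

    /* Copy at most 'bytes' bytes starting at 'offset' out of an iovec and
     * return how many were actually copied; a short count means the iovec
     * ran out of data -- the condition the patch now checks for. */
    static size_t iov_gather(const struct iovec *iov, unsigned int iov_cnt,
                             size_t offset, void *buf, size_t bytes)
    {
        size_t done = 0;
        for (unsigned int i = 0; i < iov_cnt && done < bytes; i++) {
            if (offset >= iov[i].iov_len) {
                offset -= iov[i].iov_len;   /* skip fragments before 'offset' */
                continue;
            }
            size_t avail = iov[i].iov_len - offset;
            size_t n = (bytes - done < avail) ? bytes - done : avail;
            memcpy((char *)buf + done, (char *)iov[i].iov_base + offset, n);
            done += n;
            offset = 0;
        }
        return done;
    }

    int main(void)
    {
        char frag[10] = "short";                /* payload fragment < 20 bytes */
        unsigned char hdr[20];                  /* fixed TCP header size */
        struct iovec iov = { .iov_base = frag, .iov_len = sizeof(frag) };

        size_t got = iov_gather(&iov, 1, 0, hdr, sizeof(hdr));
        if (got < sizeof(hdr)) {
            puts("truncated TCP header: reject the packet");
        }
        return 0;
    }
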
--- a/hw/net/net_tx_pkt.h
+++ b/hw/net/net_tx_pkt.h
@@ -59,9 +59,10 @@ struct virtio_net_hdr *net_tx_pkt_get_vhdr(struct NetTxPkt *pkt);
  * @tso_enable:     TSO enabled
  * @csum_enable:    CSO enabled
  * @gso_size:       MSS size for TSO
+ * @ret:            true on success, false on failure
  *
  */
-void net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable,
+bool net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable,
     bool csum_enable, uint32_t gso_size);
 
 /**
--- a/hw/net/vmxnet3.c
+++ b/hw/net/vmxnet3.c
@@ -440,19 +440,19 @@ vmxnet3_setup_tx_offloads(VMXNET3State *s)
 {
     switch (s->offload_mode) {
     case VMXNET3_OM_NONE:
-        net_tx_pkt_build_vheader(s->tx_pkt, false, false, 0);
-        break;
+        return net_tx_pkt_build_vheader(s->tx_pkt, false, false, 0);
 
     case VMXNET3_OM_CSUM:
-        net_tx_pkt_build_vheader(s->tx_pkt, false, true, 0);
         VMW_PKPRN("L4 CSO requested\n");
-        break;
+        return net_tx_pkt_build_vheader(s->tx_pkt, false, true, 0);
 
     case VMXNET3_OM_TSO:
-        net_tx_pkt_build_vheader(s->tx_pkt, true, true,
-                                 s->cso_or_gso_size);
-        net_tx_pkt_update_ip_checksums(s->tx_pkt);
         VMW_PKPRN("GSO offload requested.");
+        if (!net_tx_pkt_build_vheader(s->tx_pkt, true, true,
+                                      s->cso_or_gso_size)) {
+            return false;
+        }
+        net_tx_pkt_update_ip_checksums(s->tx_pkt);
         break;
 
     default:
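
On the vmxnet3 side, returning the builder's result straight out of each
switch case replaces the old build-then-break flow, so a failed build can no
longer fall through to the success path, and the dead break statements go
away. An illustrative reduction of that control flow; setup_tx_offloads,
build_vheader, and the OM_* values are stand-in names rather than the real
QEMU symbols:

    #include <stdbool.h>

    enum om { OM_NONE, OM_CSUM, OM_TSO };

    /* Stub builder that mirrors the source invariant: checksum offload has
     * to be enabled whenever TSO is. */
    static bool build_vheader(bool tso, bool csum) { return csum || !tso; }
    static void update_ip_checksums(void) { }

    static bool setup_tx_offloads(enum om mode)
    {
        switch (mode) {
        case OM_NONE:
            return build_vheader(false, false);  /* result flows straight out */
        case OM_CSUM:
            return build_vheader(false, true);
        case OM_TSO:
            if (!build_vheader(true, true)) {
                return false;                    /* fail before touching csums */
            }
            update_ip_checksums();
            break;
        default:
            return false;
        }
        return true;
    }

    int main(void)
    {
        return setup_tx_offloads(OM_TSO) ? 0 : 1;
    }
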