@@ -315,7 +315,7 @@ static int prep_msg(struct vector_private *vp,
} else
iov[iov_index].iov_len = skb->len;
iov_index++;
- for (frag = 0; frag < nr_frags; frag++) {
+ skb_for_each_frag(skb, frag) {
skb_frag = &skb_shinfo(skb)->frags[frag];
iov[iov_index].iov_base = skb_frag_address_safe(skb_frag);
iov[iov_index].iov_len = skb_frag_size(skb_frag);
@@ -657,7 +657,7 @@ static struct sk_buff *prep_skb(
iov_index++;
nr_frags = skb_shinfo(result)->nr_frags;
- for (frag = 0; frag < nr_frags; frag++) {
+ skb_for_each_frag(result, frag) {
skb_frag = &skb_shinfo(result)->frags[frag];
iov[iov_index].iov_base = skb_frag_address_safe(skb_frag);
if (iov[iov_index].iov_base != NULL)
@@ -2556,7 +2556,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
tpd->iovec[slot].len = skb_headlen(skb);
++slot;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (slot == TPD_MAXIOV) { /* queue tpd; start new tpd */
@@ -177,7 +177,7 @@ static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg)
sg = msg->sgt.sgl;
sg_set_buf(sg, skb->data, skb_headlen(skb));
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
sg = sg_next(sg);
BUG_ON(!sg);
frag = &skb_shinfo(skb)->frags[i];
@@ -240,7 +240,7 @@ static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx,
return ret;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
ret = sdma_txadd_page(dd,
@@ -101,7 +101,7 @@ static noinline int build_vnic_ulp_payload(struct sdma_engine *sde,
if (unlikely(ret))
goto bail_txadd;
- for (i = 0; i < skb_shinfo(tx->skb)->nr_frags; i++) {
+ skb_for_each_frag(tx->skb, i) {
skb_frag_t *frag = &skb_shinfo(tx->skb)->frags[i];
/* combine physically continuous fragments later? */
@@ -537,7 +537,7 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
length -= size;
num_frags = skb_shinfo(skb)->nr_frags;
- for (i = 0; i < num_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (length == 0) {
@@ -289,7 +289,7 @@ int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
} else
off = 0;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
mapping[i + off] = ib_dma_map_page(ca,
skb_frag_page(frag),
@@ -329,7 +329,7 @@ void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
} else
off = 0;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
ib_dma_unmap_page(priv->ca, mapping[i + off],
@@ -2168,7 +2168,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr = skb_frag_dma_map(vp->gendev, frag,
@@ -808,7 +808,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
txd->frag.addrHi = 0;
first_txd->numDesc++;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
void *frag_addr;
@@ -1368,7 +1368,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
{
int i;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
dma_unmap_single(&np->pci_dev->dev,
np->tx_info[entry].mapping,
skb_frag_size(&skb_shinfo(skb)->frags[i]),
@@ -106,7 +106,7 @@ static void greth_print_tx_packet(struct sk_buff *skb)
print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, length, true);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
skb_frag_address(&skb_shinfo(skb)->frags[i]),
@@ -514,7 +514,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
curr_tx = NEXT_TX(greth->tx_next);
/* Frags */
- for (i = 0; i < nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
greth->tx_skbuff[curr_tx] = NULL;
bdp = greth->tx_bd_base + curr_tx;
@@ -710,7 +710,7 @@ static void greth_clean_tx_gbit(struct net_device *dev)
skb_headlen(skb),
DMA_TO_DEVICE);
- for (i = 0; i < nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
bdp = greth->tx_bd_base + tx_last;
@@ -2453,7 +2453,7 @@ static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
struct tx_ring_info *info;
@@ -2965,7 +2965,7 @@ static int ena_tx_map_skb(struct ena_ring *tx_ring,
last_frag = skb_shinfo(skb)->nr_frags;
- for (i = 0; i < last_frag; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
frag_len = skb_frag_size(frag);
@@ -600,7 +600,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
rdata = XGBE_GET_DESC_DATA(ring, cur_index);
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
netif_dbg(pdata, tx_queued, pdata->netdev,
"mapping frag %u\n", i);
@@ -1806,7 +1806,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
frag = &skb_shinfo(skb)->frags[i];
for (len = skb_frag_size(frag); len; ) {
packet->rdesc_count++;
@@ -244,7 +244,7 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
skb_headlen(skb),
DMA_TO_DEVICE);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
frag = &skb_shinfo(skb)->frags[i];
dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
DMA_TO_DEVICE);
@@ -1465,7 +1465,7 @@ static int alx_map_tx_skb(struct alx_tx_queue *txq, struct sk_buff *skb)
tpd->adrl.addr = cpu_to_le64(dma);
tpd->len = cpu_to_le16(maplen);
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+ skb_for_each_frag(skb, f) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
if (++txq->write_idx == txq->count)
@@ -2151,7 +2151,7 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter,
use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
}
- for (f = 0; f < nr_frags; f++) {
+ skb_for_each_frag(skb, f) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
use_tpd = atl1c_get_tpd(adapter, type);
@@ -1601,7 +1601,7 @@ static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
u16 fg_size = 0;
u16 proto_hdr_len = 0;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
fg_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
tpd_req += ((fg_size + MAX_TX_BUF_LEN - 1) >> MAX_TX_BUF_SHIFT);
}
@@ -1777,7 +1777,7 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT);
}
- for (f = 0; f < nr_frags; f++) {
+ skb_for_each_frag(skb, f) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
u16 i;
u16 seg_num;
@@ -2255,7 +2255,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
next_to_use = 0;
}
- for (f = 0; f < nr_frags; f++) {
+ skb_for_each_frag(skb, f) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
u16 i, nseg;
@@ -2358,7 +2358,7 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
}
nr_frags = skb_shinfo(skb)->nr_frags;
- for (f = 0; f < nr_frags; f++) {
+ skb_for_each_frag(skb, f) {
unsigned int f_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
count += (f_size + ATL1_MAX_TX_BUF_LEN - 1) /
ATL1_MAX_TX_BUF_LEN;
@@ -171,7 +171,7 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags);
flags = 0;
- for (i = 0; i < nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int len = skb_frag_size(frag);
@@ -6681,7 +6681,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_buf->nr_frags = last_frag;
tx_buf->is_gso = skb_is_gso(skb);
- for (i = 0; i < last_frag; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
prod = BNX2_NEXT_TX_BD(prod);
@@ -4071,7 +4071,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
/* Handle fragmented skb */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
@@ -433,7 +433,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_copy_from_linear_data(skb, pdata, len);
pdata += len;
- for (j = 0; j < last_frag; j++) {
+ skb_for_each_frag(skb, j) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
void *fptr;
@@ -537,7 +537,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
txbd1->tx_bd_cfa_action =
cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
- for (i = 0; i < last_frag; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
prod = NEXT_TX(prod);
@@ -606,7 +606,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
prod = NEXT_TX(prod);
/* unmap remaining mapped pages */
- for (i = 0; i < last_frag; i++) {
+ skb_for_each_frag(skb, i) {
prod = NEXT_TX(prod);
tx_buf = &txr->tx_buf_ring[prod];
dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
@@ -6579,7 +6579,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
sw_idx = NEXT_TX(sw_idx);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
ri = &tnapi->tx_buffers[sw_idx];
if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
tx_bug = 1;
@@ -2058,7 +2058,7 @@ static netdev_features_t macb_features_check(struct sk_buff *skb,
nr_frags = skb_shinfo(skb)->nr_frags;
/* No need to check last fragment */
nr_frags--;
- for (f = 0; f < nr_frags; f++) {
+ skb_for_each_frag(skb, f) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+ if (f == nr_frags)
+ break;
if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
@@ -2200,7 +2200,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
else
desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
nr_frags = skb_shinfo(skb)->nr_frags;
- for (f = 0; f < nr_frags; f++) {
+ skb_for_each_frag(skb, f) {
frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
}
@@ -1587,7 +1587,7 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
if (!skb_is_nonlinear(skb))
goto doorbell;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
qentry = nicvf_get_nxt_sqentry(sq, qentry);
@@ -1021,7 +1021,7 @@ static inline unsigned int write_sgl(const struct sk_buff *skb,
}
nfrags = skb_shinfo(skb)->nr_frags;
- for (i = 0; i < nfrags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
@@ -1595,7 +1595,7 @@ static void deferred_unmap_destructor(struct sk_buff *skb)
skb_transport_header(skb), PCI_DMA_TODEVICE);
si = skb_shinfo(skb);
- for (i = 0; i < si->nr_frags; i++)
+ skb_for_each_frag(skb, i)
pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]),
PCI_DMA_TODEVICE);
}
@@ -1012,7 +1012,7 @@ static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
copied += len;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
len = skb_frag_size(frag);
@@ -773,7 +773,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
pointer = ftgmac100_next_tx_pointer(priv, pointer);
/* Add the fragments */
- for (i = 0; i < nfrags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
len = skb_frag_size(frag);
@@ -2119,7 +2119,7 @@ static int dpaa_a050385_wa_skb(struct net_device *net_dev, struct sk_buff **s)
if (!IS_ALIGNED(skb_headlen(skb), DPAA_A050385_ALIGN))
goto workaround;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
/* all fragments need to have aligned start addresses */
@@ -1102,8 +1102,7 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
txbdp->lstatus = 0;
- for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
- j++) {
+ skb_for_each_frag(tx_queue->tx_skbuff[i], j) {
txbdp++;
dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
be16_to_cpu(txbdp->length),
@@ -2250,7 +2249,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
gfar_clear_txbd_status(bdp);
bdp = next_txbd(bdp, base, tx_ring_size);
- for (i = 0; i < frags; i++) {
+ skb_for_each_frag(skb, i) {
dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
be16_to_cpu(bdp->length),
DMA_TO_DEVICE);
@@ -577,7 +577,7 @@ static void hix5hd2_clean_sg_desc(struct hix5hd2_priv *priv,
len = le32_to_cpu(desc->linear_len);
dma_unmap_single(priv->dev, addr, len, DMA_TO_DEVICE);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
addr = le32_to_cpu(desc->frags[i].addr);
len = le32_to_cpu(desc->frags[i].size);
dma_unmap_page(priv->dev, addr, len, DMA_TO_DEVICE);
@@ -717,7 +717,7 @@ static int hix5hd2_fill_sg_desc(struct hix5hd2_priv *priv,
desc->linear_addr = cpu_to_le32(addr);
desc->linear_len = cpu_to_le32(skb_headlen(skb));
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int len = skb_frag_size(frag);
@@ -252,7 +252,7 @@ static int hns_nic_maybe_stop_tso(
buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
frag_num = skb_shinfo(skb)->nr_frags;
- for (i = 0; i < frag_num; i++) {
+ skb_for_each_frag(skb, i) {
frag = &skb_shinfo(skb)->frags[i];
size = skb_frag_size(frag);
buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
@@ -1257,7 +1257,7 @@ static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
return bd_num;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
size = skb_frag_size(frag);
if (!size)
@@ -1507,7 +1507,7 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
bd_num += ret;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
size = skb_frag_size(frag);
@@ -336,7 +336,7 @@ static void hinic_copy_lp_data(struct hinic_dev *nic_dev,
frag_len = (int)skb_headlen(skb);
memcpy(lb_buf + pkt_offset, skb->data, frag_len);
pkt_offset += frag_len;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
frag_data = skb_frag_address(&skb_shinfo(skb)->frags[i]);
frag_len = (int)skb_frag_size(&skb_shinfo(skb)->frags[i]);
memcpy((lb_buf + pkt_offset), frag_data, frag_len);
@@ -149,7 +149,7 @@ static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
hinic_set_sge(&sges[0], dma_addr, skb_headlen(skb));
- for (i = 0 ; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
frag = &skb_shinfo(skb)->frags[i];
dma_addr = skb_frag_dma_map(&pdev->dev, frag, 0,
@@ -189,7 +189,7 @@ static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
struct pci_dev *pdev = hwif->pdev;
int i;
- for (i = 0; i < skb_shinfo(skb)->nr_frags ; i++)
+ skb_for_each_frag(skb, i)
dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[i + 1]),
sges[i + 1].len, DMA_TO_DEVICE);
@@ -1132,7 +1132,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
descs[0].fields.address = dma_addr;
/* Map the frags */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
@@ -1675,7 +1675,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
cur = skb_headlen(skb);
/* Copy the frags */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
memcpy(dst + cur, skb_frag_address(frag),
@@ -3193,7 +3193,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
count++;
nr_frags = skb_shinfo(skb)->nr_frags;
- for (f = 0; f < nr_frags; f++)
+ skb_for_each_frag(skb, f)
count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
max_txd_pwr);
if (adapter->pcix_82544)
@@ -5870,7 +5870,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
nr_frags = skb_shinfo(skb)->nr_frags;
- for (f = 0; f < nr_frags; f++)
+ skb_for_each_frag(skb, f)
count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
adapter->tx_fifo_limit);
@@ -1055,7 +1055,7 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
* + 2 desc gap to keep tail from touching head
* otherwise try next time
*/
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+ skb_for_each_frag(skb, f) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
count += TXD_USE_COUNT(skb_frag_size(frag));
@@ -6324,7 +6324,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
* + 1 desc for context descriptor,
* otherwise try next time
*/
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ skb_for_each_frag(skb, f)
count += TXD_USE_COUNT(skb_frag_size(
&skb_shinfo(skb)->frags[f]));
@@ -2165,7 +2165,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+ skb_for_each_frag(skb, f) {
const skb_frag_t *frag;
count++;
@@ -1348,7 +1348,7 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
* + 1 desc for context descriptor,
* otherwise try next time
*/
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ skb_for_each_frag(skb, f)
count += TXD_USE_COUNT(skb_frag_size(
&skb_shinfo(skb)->frags[f]));
@@ -8602,7 +8602,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
* + 1 desc for context descriptor,
* otherwise try next time
*/
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ skb_for_each_frag(skb, f)
count += TXD_USE_COUNT(skb_frag_size(
&skb_shinfo(skb)->frags[f]));
@@ -4127,7 +4127,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
* otherwise try next time
*/
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+ skb_for_each_frag(skb, f) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
count += TXD_USE_COUNT(skb_frag_size(frag));
@@ -656,7 +656,7 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
{
int frag;
- for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
+ skb_for_each_frag(skb, frag) {
const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
if (skb_frag_size(fragp) <= 8 && skb_frag_off(fragp) & 7)
@@ -4129,7 +4129,7 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
int i;
dma_addr_t buf_dma_addr;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
void *addr = skb_frag_address(frag);
@@ -2786,7 +2786,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
struct skge_tx_desc *tf = td;
control |= BMU_STFWD;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
@@ -1199,7 +1199,7 @@ static void sky2_rx_submit(struct sky2_port *sky2,
sky2_rx_add(sky2, OP_PACKET, re->data_addr, sky2->rx_data_size);
- for (i = 0; i < skb_shinfo(re->skb)->nr_frags; i++)
+ skb_for_each_frag(re->skb, i)
sky2_rx_add(sky2, OP_BUFFER, re->frag_addr[i], PAGE_SIZE);
}
@@ -1217,7 +1217,7 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
dma_unmap_len_set(re, data_size, size);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
re->frag_addr[i] = skb_frag_dma_map(&pdev->dev, frag, 0,
@@ -1254,7 +1254,7 @@ static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
dma_unmap_single(&pdev->dev, re->data_addr,
dma_unmap_len(re, data_size), DMA_FROM_DEVICE);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ skb_for_each_frag(skb, i)
dma_unmap_page(&pdev->dev, re->frag_addr[i],
skb_frag_size(&skb_shinfo(skb)->frags[i]),
DMA_FROM_DEVICE);
@@ -1932,7 +1932,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
le->opcode = mss ? (OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
mapping = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
@@ -2498,7 +2498,7 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
length -= size;
num_frags = skb_shinfo(skb)->nr_frags;
- for (i = 0; i < num_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (length == 0) {
@@ -972,7 +972,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
txd_pdma = qdma_to_pdma(ring, txd);
nr_frags = skb_shinfo(skb)->nr_frags;
- for (i = 0; i < nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
unsigned int offset = 0;
int frag_size = skb_frag_size(frag);
@@ -1089,7 +1089,7 @@ static inline int mtk_cal_txd_req(struct sk_buff *skb)
nfrags = 1;
if (skb_is_gso(skb)) {
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
frag = &skb_shinfo(skb)->frags[i];
nfrags += DIV_ROUND_UP(skb_frag_size(frag),
MTK_TX_DMA_BUF_LEN);
@@ -320,7 +320,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
dseg++;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int fsz = skb_frag_size(frag);
@@ -1692,7 +1692,7 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
if (err)
goto unlock;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
@@ -1640,7 +1640,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
if (nr_frags <= 0)
goto finish;
- for (j = 0; j < nr_frags; j++) {
+ skb_for_each_frag(skb, j) {
const skb_frag_t *frag = &(skb_shinfo(skb)->frags[j]);
if (lan743x_tx_frame_add_fragment(tx, frag, frame_length)) {
@@ -4132,7 +4132,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
frg_cnt = skb_shinfo(skb)->nr_frags;
/* For fragmented SKB. */
- for (i = 0; i < frg_cnt; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
/* A '0' length fragment will be ignored */
if (!skb_frag_size(frag))
@@ -589,7 +589,7 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
dma_unmap_single(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
skb_headlen(skb), DMA_TO_DEVICE);
- for (j = 0; j < frg_cnt; j++) {
+ skb_for_each_frag(skb, j) {
dma_unmap_page(&fifo->pdev->dev,
txd_priv->dma_buffers[i++],
skb_frag_size(frag), DMA_TO_DEVICE);
@@ -922,7 +922,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
first_frg_len);
frag = &skb_shinfo(skb)->frags[0];
- for (i = 0; i < frg_cnt; i++) {
+ skb_for_each_frag(skb, i) {
/* ignore 0 length fragment */
if (!skb_frag_size(frag))
continue;
@@ -1052,7 +1052,7 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
dma_unmap_single(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
skb_headlen(skb), DMA_TO_DEVICE);
- for (j = 0; j < frg_cnt; j++) {
+ skb_for_each_frag(skb, j) {
dma_unmap_page(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
skb_frag_size(frag), DMA_TO_DEVICE);
frag += 1;
@@ -536,7 +536,7 @@ static netdev_tx_t nixge_start_xmit(struct sk_buff *skb,
tx_skb->size = skb_headlen(skb);
tx_skb->mapped_as_page = false;
- for (ii = 0; ii < num_frag; ii++) {
+ skb_for_each_frag(skb, ii) {
++priv->tx_bd_tail;
priv->tx_bd_tail %= TX_BD_NUM;
cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
@@ -1450,7 +1450,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
if (pci_dma_mapping_error(mac->dma_pdev, map[0]))
goto out_err_nolock;
- for (i = 0; i < nfrags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
map[i + 1] = skb_frag_dma_map(&mac->dma_pdev->dev, frag, 0,
@@ -1986,7 +1986,7 @@ netxen_map_tx_skb(struct pci_dev *pdev,
nf->dma = map;
nf->length = skb_headlen(skb);
- for (i = 0; i < nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
frag = &skb_shinfo(skb)->frags[i];
nf = &pbuf->frag_array[i+1];
@@ -2666,7 +2666,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
if (rc)
goto err;
- for (i = 0; i < nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
frag = &skb_shinfo(skb)->frags[i];
mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
@@ -595,7 +595,7 @@ static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
nf->dma = map;
nf->length = skb_headlen(skb);
- for (i = 0; i < nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
frag = &skb_shinfo(skb)->frags[i];
nf = &pbuf->frag_array[i+1];
map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
@@ -816,7 +816,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
cp->tx_skb[entry] = skb;
- for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
+ skb_for_each_frag(skb, frag) {
const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
u32 len;
dma_addr_t mapping;
@@ -1910,7 +1910,7 @@ static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
goto unmap_frags;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
@@ -203,7 +203,7 @@ static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
copy_buf);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
u8 *vaddr;
@@ -2796,7 +2796,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
}
entry = TX_DESC_NEXT(ring, entry);
- for (frag = 0; frag < nr_frags; frag++) {
+ skb_for_each_frag(skb, frag) {
const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
len = skb_frag_size(fragp);
@@ -3565,7 +3565,7 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
len -= MAX_TX_DESC_LEN;
} while (len > 0);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
tb = &rp->tx_buffs[idx];
BUG_ON(tb->skb != NULL);
np->ops->unmap_page(np->device, tb->mapping,
@@ -6688,7 +6688,7 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
len -= this_len;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
len = skb_frag_size(frag);
@@ -1054,7 +1054,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
first_len, DMA_TO_DEVICE);
entry = NEXT_TX(entry);
- for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
+ skb_for_each_frag(skb, frag) {
const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
u32 len;
dma_addr_t mapping;
@@ -2339,7 +2339,7 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
goto out_dma_error;
entry = NEXT_TX(entry);
- for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
+ skb_for_each_frag(skb, frag) {
const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
u32 len, mapping, this_txflags;
@@ -1080,7 +1080,7 @@ static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
return err;
nc = err;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
u8 *vaddr;
@@ -1121,7 +1121,7 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
/* make sure we have enough cookies and alignment in every frag */
docopy = skb_shinfo(skb)->nr_frags >= ncookies;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
docopy |= skb_frag_off(f) & 7;
@@ -576,7 +576,7 @@ static int xlgmac_map_tx_skb(struct xlgmac_channel *channel,
desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
netif_dbg(pdata, tx_queued, pdata->netdev,
"mapping frag %u\n", i);
@@ -177,7 +177,7 @@ static void xlgmac_prep_tx_pkt(struct xlgmac_pdata *pdata,
len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
frag = &skb_shinfo(skb)->frags[i];
for (len = skb_frag_size(frag); len; ) {
pkt_info->desc_count++;
@@ -1183,7 +1183,7 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
/* Handle the case where skb is fragmented in pages */
cur_desc = first_desc;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
u32 frag_size = skb_frag_size(frag);
@@ -1113,7 +1113,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
pdesc = desc;
/* Handle the case where skb is fragmented in pages */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
struct page *page = skb_frag_page(frag);
u32 page_offset = skb_frag_off(frag);
@@ -2576,7 +2576,7 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
/* Handle fragments */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev,
@@ -878,7 +878,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->phys = cpu_to_be32(skb_dma_addr);
ptr_to_txbd((void *)skb, cur_p);
- for (ii = 0; ii < num_frag; ii++) {
+ skb_for_each_frag(skb, ii) {
if (++lp->tx_bd_tail >= lp->tx_bd_num)
lp->tx_bd_tail = 0;
@@ -772,7 +772,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
desc_set_phys_addr(lp, phys, cur_p);
cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
- for (ii = 0; ii < num_frag; ii++) {
+ skb_for_each_frag(skb, ii) {
if (++lp->tx_bd_tail >= lp->tx_bd_num)
lp->tx_bd_tail = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
@@ -1321,7 +1321,7 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
sg_set_buf(&urb->sg[s++], skb->data, skb_headlen(skb));
total_len += skb_headlen(skb);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
total_len += skb_frag_size(f);
@@ -747,7 +747,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
buf_offset += buf_size;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
u32 buf_size;
@@ -990,7 +990,7 @@ static int txd_estimate(const struct sk_buff *skb)
int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
int i;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
@@ -1277,7 +1277,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
}
/* set up the remaining entries to point to the data */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr_t tb_phys;
int tb_idx;
@@ -544,7 +544,7 @@ static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
{
int i;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr_t tb_phys;
unsigned int fragsz = skb_frag_size(frag);
@@ -1086,7 +1086,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
}
/* Release all the original (foreign) frags. */
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ skb_for_each_frag(skb, f)
skb_frag_unref(skb, f);
uarg = skb_shinfo(skb)->destructor_arg;
/* increase inflight counter to offset decrement in callback */
@@ -744,7 +744,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
/* Requests for all the frags. */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
tx = xennet_make_txreqs(queue, tx, skb, skb_frag_page(frag),
skb_frag_off(frag),
@@ -3939,7 +3939,7 @@ static int qeth_get_elements_for_frags(struct sk_buff *skb)
{
int cnt, elements = 0;
- for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
+ skb_for_each_frag(skb, cnt) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
elements += qeth_get_elements_for_range(
@@ -4152,7 +4152,7 @@ static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
}
/* map page frags into buffer element(s) */
- for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
+ skb_for_each_frag(skb, cnt) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
data = skb_frag_address(frag);
@@ -317,7 +317,7 @@ u32 fcoe_fc_crc(struct fc_frame *fp)
crc = crc32(~0, skb->data, skb_headlen(skb));
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
frag = &skb_shinfo(skb)->frags[i];
off = skb_frag_off(frag);
len = skb_frag_size(frag);
@@ -269,7 +269,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
hw_buffer.s.pool = 0;
hw_buffer.s.size = skb_headlen(skb);
CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *fs = skb_shinfo(skb)->frags + i;
hw_buffer.s.addr =
@@ -949,7 +949,7 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
}
/* checksum stuff in frags */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
int end;
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
WARN_ON(start > offset + len);
@@ -432,7 +432,7 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
}
/* Copy paged appendix. Hmm... why does this look so complicated? */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
int end;
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -564,7 +564,7 @@ int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
}
/* Copy paged appendix. Hmm... why does this look so complicated? */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
int end;
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -1623,7 +1623,7 @@ struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
n = NULL;
goto out;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
skb_frag_ref(skb, i);
}
@@ -1698,7 +1698,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
goto nofrags;
if (skb_zcopy(skb))
refcount_inc(&skb_uarg(skb)->refcnt);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ skb_for_each_frag(skb, i)
skb_frag_ref(skb, i);
if (skb_has_frag_list(skb))
@@ -2126,7 +2126,7 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
/* Estimate size of pulled pages. */
eat = delta;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
if (size >= eat)
@@ -2191,7 +2191,7 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
pull_pages:
eat = delta;
k = 0;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
if (size <= eat) {
@@ -2259,7 +2259,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
to += copy;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
int end;
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
@@ -2447,7 +2447,7 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
/*
* then map the fragments
*/
- for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
+ skb_for_each_frag(skb, seg) {
const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
if (__splice_segment(skb_frag_page(f),
@@ -2562,7 +2562,7 @@ static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset,
offset -= skb_headlen(skb);
/* Find where we are in frag list */
- for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
+ skb_for_each_frag(skb, fragidx) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];
if (offset < skb_frag_size(frag))
@@ -2661,7 +2661,7 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
from += copy;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int end;
@@ -2740,7 +2740,7 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
pos = copy;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
int end;
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -2840,7 +2840,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
pos = copy;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
int end;
WARN_ON(start > offset + len);
@@ -3072,7 +3072,7 @@ skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
}
skb_zerocopy_clone(to, from, GFP_ATOMIC);
- for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
+ skb_for_each_frag(from, i) {
int size;
if (!len)
@@ -3292,7 +3292,7 @@ static inline void skb_split_inside_header(struct sk_buff *skb,
skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
pos - len);
/* And move data appendix as is. */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ skb_for_each_frag(skb, i)
skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
@@ -4419,7 +4419,7 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
offset += copy;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
int end;
WARN_ON(start > offset + len);
@@ -5329,7 +5329,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
/* if the skb is not cloned this does nothing
* since we set nr_frags to 0.
*/
- for (i = 0; i < from_shinfo->nr_frags; i++)
+ skb_for_each_frag(from, i)
__skb_frag_ref(&from_shinfo->frags[i]);
to->truesize += delta;
@@ -6053,7 +6053,7 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
kfree(data);
return -ENOMEM;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ skb_for_each_frag(skb, i)
skb_frag_ref(skb, i);
if (skb_has_frag_list(skb))
skb_clone_fraglist(skb);
@@ -487,7 +487,7 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
return NULL;
skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
skb_frag_list_init(head);
- for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+ skb_for_each_frag(head, i)
plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
clone->data_len = head->data_len - plen;
clone->len = clone->data_len;
@@ -1644,7 +1644,7 @@ static int __pskb_trim_head(struct sk_buff *skb, int len)
eat = len;
k = 0;
shinfo = skb_shinfo(skb);
- for (i = 0; i < shinfo->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
int size = skb_frag_size(&shinfo->frags[i]);
if (size <= eat) {
@@ -1079,7 +1079,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
/* skip iucv_array lying in the headroom */
iba[0].address = (u32)(addr_t)skb->data;
iba[0].length = (u32)skb_headlen(skb);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
iba[i + 1].address =
@@ -1181,7 +1181,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
iba[0].address = (u32)(addr_t)skb->data;
iba[0].length = (u32)skb_headlen(skb);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
iba[i + 1].address =
@@ -630,8 +630,7 @@ static int kcm_write_msgs(struct kcm_sock *kcm)
goto out;
}
- for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags;
- fragidx++) {
+ skb_for_each_frag(skb, fragidx) {
skb_frag_t *frag;
frag_offset = 0;
@@ -63,7 +63,7 @@ static int __skb_nsg(struct sk_buff *skb, int offset, int len,
offset += chunk;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
int end;
WARN_ON(start > offset + len);
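
Note: every hunk above relies on a skb_for_each_frag() iterator whose definition is not part of this section; it is presumably introduced by an earlier patch in the series. A minimal sketch of what such a helper would look like (the exact definition here is an assumption, only the name and usage come from the hunks above):

#define skb_for_each_frag(skb, i) \
	for ((i) = 0; (i) < skb_shinfo(skb)->nr_frags; (i)++)

Because a helper of this shape re-reads skb_shinfo(skb)->nr_frags on every iteration, it is only a drop-in replacement for loops whose bound is the skb's live frag count. Loops that iterate over a saved copy of the count, or that deliberately stop short of the last fragment (as in the macb_features_check() hunk above), need an explicit guard such as an early break when converted, or should keep their original form.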