net: chelsio: switch from 'pci_' to 'dma_' API

Message ID 0be58ad9de650bfe430a3a02b64f2294457e0669.1629612718.git.christophe.jaillet@wanadoo.fr (mailing list archive)
State Accepted
Commit 4489d8f528d4e465f858e1a01997801130c5f322
Delegated to: Netdev Maintainers
Series net: chelsio: switch from 'pci_' to 'dma_' API

Checks

Context Check Description
netdev/cover_letter success
netdev/fixes_present success
netdev/patch_count success
netdev/tree_selection success Guessed tree name to be net-next
netdev/subject_prefix warning Target tree name not specified in the subject
netdev/cc_maintainers success CCed 4 of 4 maintainers
netdev/source_inline success Was 0 now: 0
netdev/verify_signedoff success
netdev/module_param success Was 0 now: 0
netdev/build_32bit success Errors and warnings before: 6 this patch: 6
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/verify_fixes success
netdev/checkpatch fail ERROR: do not use assignment in if condition; WARNING: line length of 83 exceeds 80 columns
netdev/build_allmodconfig_warn success Errors and warnings before: 6 this patch: 6
netdev/header_inline success

Commit Message

Christophe JAILLET Aug. 22, 2021, 6:14 a.m. UTC
The wrappers in include/linux/pci-dma-compat.h should go away.

The patch has been generated with the coccinelle script below.
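
For reference, such a script is typically applied tree-wide with spatch;
something like the following, where 'pci2dma.cocci' is a hypothetical file
name holding the rules below:

    spatch --sp-file pci2dma.cocci --in-place --dir drivers/net/ethernet/chelsio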

It has been hand-modified to use 'dma_set_mask_and_coherent()' instead of
'pci_set_dma_mask()/pci_set_consistent_dma_mask()' when applicable.
This is less verbose.
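
As an illustration of that hand modification, here is a minimal sketch of a
probe() error path (not lifted verbatim from the patch):

    /* Before: two calls and two error paths to handle. */
    err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
    if (!err)
        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));

    /* After: one call sets both the streaming and coherent DMA masks. */
    err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));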

It has been compile-tested.


@@
@@
-    PCI_DMA_BIDIRECTIONAL
+    DMA_BIDIRECTIONAL

@@
@@
-    PCI_DMA_TODEVICE
+    DMA_TO_DEVICE

@@
@@
-    PCI_DMA_FROMDEVICE
+    DMA_FROM_DEVICE

@@
@@
-    PCI_DMA_NONE
+    DMA_NONE

@@
expression e1, e2, e3;
@@
-    pci_alloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3;
@@
-    pci_zalloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3, e4;
@@
-    pci_free_consistent(e1, e2, e3, e4)
+    dma_free_coherent(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_single(e1, e2, e3, e4)
+    dma_map_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_single(e1, e2, e3, e4)
+    dma_unmap_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4, e5;
@@
-    pci_map_page(e1, e2, e3, e4, e5)
+    dma_map_page(&e1->dev, e2, e3, e4, e5)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_page(e1, e2, e3, e4)
+    dma_unmap_page(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_sg(e1, e2, e3, e4)
+    dma_map_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_sg(e1, e2, e3, e4)
+    dma_unmap_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+    dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_device(e1, e2, e3, e4)
+    dma_sync_single_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+    dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+    dma_sync_sg_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2;
@@
-    pci_dma_mapping_error(e1, e2)
+    dma_mapping_error(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_dma_mask(e1, e2)
+    dma_set_mask(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_consistent_dma_mask(e1, e2)
+    dma_set_coherent_mask(&e1->dev, e2)
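
Each rule above is a one-for-one substitution because the wrappers in
include/linux/pci-dma-compat.h are thin static inlines around the dma_* API,
roughly as follows (paraphrased here only to show why the conversion is
mechanical):

    static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
    {
        return dma_set_mask(&dev->dev, mask);
    }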

Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
---
If needed, see post from Christoph Hellwig on the kernel-janitors ML:
   https://marc.info/?l=kernel-janitors&m=158745678307186&w=4
---
 .../net/ethernet/chelsio/cxgb3/cxgb3_main.c   |  10 +-
 drivers/net/ethernet/chelsio/cxgb3/sge.c      | 101 +++++++++---------
 .../net/ethernet/chelsio/cxgb4/cxgb4_main.c   |  12 +--
 drivers/net/ethernet/chelsio/cxgb4/sge.c      |   8 +-
 .../ethernet/chelsio/cxgb4vf/cxgb4vf_main.c   |  10 +-
 drivers/net/ethernet/chelsio/cxgb4vf/sge.c    |   8 +-
 6 files changed, 63 insertions(+), 86 deletions(-)

Comments

patchwork-bot+netdevbpf@kernel.org Aug. 23, 2021, 11 a.m. UTC | #1
Hello:

This patch was applied to netdev/net-next.git (refs/heads/master):

On Sun, 22 Aug 2021 08:14:03 +0200 you wrote:
> The wrappers in include/linux/pci-dma-compat.h should go away.
> 
> The patch has been generated with the coccinelle script below.
> 
> It has been hand modified to use 'dma_set_mask_and_coherent()' instead of
> 'pci_set_dma_mask()/pci_set_consistent_dma_mask()' when applicable.
> This is less verbose.
> 
> [...]

Here is the summary with links:
  - net: chelsio: switch from 'pci_' to 'dma_' API
    https://git.kernel.org/netdev/net-next/c/4489d8f528d4

You are awesome, thank you!
--
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/patchwork/pwbot.html

Patch

diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 72af9d2a00ae..538f737af4fa 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3235,15 +3235,9 @@  static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto out_disable_device;
 	}
 
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
 		pci_using_dac = 1;
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-		if (err) {
-			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
-			       "coherent allocations\n");
-			goto out_release_regions;
-		}
-	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
+	} else if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
 		dev_err(&pdev->dev, "no usable DMA configuration\n");
 		goto out_release_regions;
 	}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index cb5c79c43bc9..175e5b2aa12b 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -244,8 +244,8 @@  static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
 	frag_idx = d->fragidx;
 
 	if (frag_idx == 0 && skb_headlen(skb)) {
-		pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
-				 skb_headlen(skb), PCI_DMA_TODEVICE);
+		dma_unmap_single(&pdev->dev, be64_to_cpu(sgp->addr[0]),
+				 skb_headlen(skb), DMA_TO_DEVICE);
 		j = 1;
 	}
 
@@ -253,9 +253,9 @@  static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
 	nfrags = skb_shinfo(skb)->nr_frags;
 
 	while (frag_idx < nfrags && curflit < WR_FLITS) {
-		pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
+		dma_unmap_page(&pdev->dev, be64_to_cpu(sgp->addr[j]),
 			       skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]),
-			       PCI_DMA_TODEVICE);
+			       DMA_TO_DEVICE);
 		j ^= 1;
 		if (j == 0) {
 			sgp++;
@@ -355,15 +355,14 @@  static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
 	if (q->use_pages && d->pg_chunk.page) {
 		(*d->pg_chunk.p_cnt)--;
 		if (!*d->pg_chunk.p_cnt)
-			pci_unmap_page(pdev,
-				       d->pg_chunk.mapping,
-				       q->alloc_size, PCI_DMA_FROMDEVICE);
+			dma_unmap_page(&pdev->dev, d->pg_chunk.mapping,
+				       q->alloc_size, DMA_FROM_DEVICE);
 
 		put_page(d->pg_chunk.page);
 		d->pg_chunk.page = NULL;
 	} else {
-		pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
-				 q->buf_size, PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&pdev->dev, dma_unmap_addr(d, dma_addr),
+				 q->buf_size, DMA_FROM_DEVICE);
 		kfree_skb(d->skb);
 		d->skb = NULL;
 	}
@@ -414,8 +413,8 @@  static inline int add_one_rx_buf(void *va, unsigned int len,
 {
 	dma_addr_t mapping;
 
-	mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
-	if (unlikely(pci_dma_mapping_error(pdev, mapping)))
+	mapping = dma_map_single(&pdev->dev, va, len, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
 		return -ENOMEM;
 
 	dma_unmap_addr_set(sd, dma_addr, mapping);
@@ -453,9 +452,9 @@  static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
 		q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
 				    SGE_PG_RSVD;
 		q->pg_chunk.offset = 0;
-		mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
-				       0, q->alloc_size, PCI_DMA_FROMDEVICE);
-		if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) {
+		mapping = dma_map_page(&adapter->pdev->dev, q->pg_chunk.page,
+				       0, q->alloc_size, DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(&adapter->pdev->dev, mapping))) {
 			__free_pages(q->pg_chunk.page, order);
 			q->pg_chunk.page = NULL;
 			return -EIO;
@@ -522,9 +521,9 @@  nomem:				q->alloc_failed++;
 			dma_unmap_addr_set(sd, dma_addr, mapping);
 
 			add_one_rx_chunk(mapping, d, q->gen);
-			pci_dma_sync_single_for_device(adap->pdev, mapping,
-						q->buf_size - SGE_PG_RSVD,
-						PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_device(&adap->pdev->dev, mapping,
+						   q->buf_size - SGE_PG_RSVD,
+						   DMA_FROM_DEVICE);
 		} else {
 			void *buf_start;
 
@@ -793,13 +792,13 @@  static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
 		skb = alloc_skb(len, GFP_ATOMIC);
 		if (likely(skb != NULL)) {
 			__skb_put(skb, len);
-			pci_dma_sync_single_for_cpu(adap->pdev,
-					    dma_unmap_addr(sd, dma_addr), len,
-					    PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_cpu(&adap->pdev->dev,
+						dma_unmap_addr(sd, dma_addr),
+						len, DMA_FROM_DEVICE);
 			memcpy(skb->data, sd->skb->data, len);
-			pci_dma_sync_single_for_device(adap->pdev,
-					    dma_unmap_addr(sd, dma_addr), len,
-					    PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_device(&adap->pdev->dev,
+						   dma_unmap_addr(sd, dma_addr),
+						   len, DMA_FROM_DEVICE);
 		} else if (!drop_thres)
 			goto use_orig_buf;
 recycle:
@@ -813,8 +812,8 @@  static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
 		goto recycle;
 
 use_orig_buf:
-	pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
-			 fl->buf_size, PCI_DMA_FROMDEVICE);
+	dma_unmap_single(&adap->pdev->dev, dma_unmap_addr(sd, dma_addr),
+			 fl->buf_size, DMA_FROM_DEVICE);
 	skb = sd->skb;
 	skb_put(skb, len);
 	__refill_fl(adap, fl);
@@ -854,12 +853,11 @@  static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
 		newskb = alloc_skb(len, GFP_ATOMIC);
 		if (likely(newskb != NULL)) {
 			__skb_put(newskb, len);
-			pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
-					    PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_cpu(&adap->pdev->dev, dma_addr,
+						len, DMA_FROM_DEVICE);
 			memcpy(newskb->data, sd->pg_chunk.va, len);
-			pci_dma_sync_single_for_device(adap->pdev, dma_addr,
-						       len,
-						       PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_device(&adap->pdev->dev, dma_addr,
+						   len, DMA_FROM_DEVICE);
 		} else if (!drop_thres)
 			return NULL;
 recycle:
@@ -883,14 +881,12 @@  static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
 		goto recycle;
 	}
 
-	pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
-				    PCI_DMA_FROMDEVICE);
+	dma_sync_single_for_cpu(&adap->pdev->dev, dma_addr, len,
+				DMA_FROM_DEVICE);
 	(*sd->pg_chunk.p_cnt)--;
 	if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
-		pci_unmap_page(adap->pdev,
-			       sd->pg_chunk.mapping,
-			       fl->alloc_size,
-			       PCI_DMA_FROMDEVICE);
+		dma_unmap_page(&adap->pdev->dev, sd->pg_chunk.mapping,
+			       fl->alloc_size, DMA_FROM_DEVICE);
 	if (!skb) {
 		__skb_put(newskb, SGE_RX_PULL_LEN);
 		memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
@@ -968,9 +964,9 @@  static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
 	const struct skb_shared_info *si;
 
 	if (skb_headlen(skb)) {
-		*addr = pci_map_single(pdev, skb->data, skb_headlen(skb),
-				       PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(pdev, *addr))
+		*addr = dma_map_single(&pdev->dev, skb->data,
+				       skb_headlen(skb), DMA_TO_DEVICE);
+		if (dma_mapping_error(&pdev->dev, *addr))
 			goto out_err;
 		addr++;
 	}
@@ -981,7 +977,7 @@  static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
 	for (fp = si->frags; fp < end; fp++) {
 		*addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
 					 DMA_TO_DEVICE);
-		if (pci_dma_mapping_error(pdev, *addr))
+		if (dma_mapping_error(&pdev->dev, *addr))
 			goto unwind;
 		addr++;
 	}
@@ -992,7 +988,8 @@  static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
 		dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
 			       DMA_TO_DEVICE);
 
-	pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE);
+	dma_unmap_single(&pdev->dev, addr[-1], skb_headlen(skb),
+			 DMA_TO_DEVICE);
 out_err:
 	return -ENOMEM;
 }
@@ -1592,13 +1589,14 @@  static void deferred_unmap_destructor(struct sk_buff *skb)
 	p = dui->addr;
 
 	if (skb_tail_pointer(skb) - skb_transport_header(skb))
-		pci_unmap_single(dui->pdev, *p++, skb_tail_pointer(skb) -
-				 skb_transport_header(skb), PCI_DMA_TODEVICE);
+		dma_unmap_single(&dui->pdev->dev, *p++,
+				 skb_tail_pointer(skb) - skb_transport_header(skb),
+				 DMA_TO_DEVICE);
 
 	si = skb_shinfo(skb);
 	for (i = 0; i < si->nr_frags; i++)
-		pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]),
-			       PCI_DMA_TODEVICE);
+		dma_unmap_page(&dui->pdev->dev, *p++,
+			       skb_frag_size(&si->frags[i]), DMA_TO_DEVICE);
 }
 
 static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
@@ -2153,17 +2151,14 @@  static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 
 	fl->credits--;
 
-	pci_dma_sync_single_for_cpu(adap->pdev,
-				    dma_unmap_addr(sd, dma_addr),
-				    fl->buf_size - SGE_PG_RSVD,
-				    PCI_DMA_FROMDEVICE);
+	dma_sync_single_for_cpu(&adap->pdev->dev,
+				dma_unmap_addr(sd, dma_addr),
+				fl->buf_size - SGE_PG_RSVD, DMA_FROM_DEVICE);
 
 	(*sd->pg_chunk.p_cnt)--;
 	if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
-		pci_unmap_page(adap->pdev,
-			       sd->pg_chunk.mapping,
-			       fl->alloc_size,
-			       PCI_DMA_FROMDEVICE);
+		dma_unmap_page(&adap->pdev->dev, sd->pg_chunk.mapping,
+			       fl->alloc_size, DMA_FROM_DEVICE);
 
 	if (!skb) {
 		put_page(sd->pg_chunk.page);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index aa8573202c37..fb6ac730fb50 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4008,7 +4008,7 @@  static void adap_free_hma_mem(struct adapter *adapter)
 
 	if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) {
 		dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl,
-			     adapter->hma.sgt->nents, PCI_DMA_BIDIRECTIONAL);
+			     adapter->hma.sgt->nents, DMA_BIDIRECTIONAL);
 		adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG;
 	}
 
@@ -6687,16 +6687,10 @@  static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		return 0;
 	}
 
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
 		highdma = true;
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-		if (err) {
-			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
-				"coherent allocations\n");
-			goto out_free_adapter;
-		}
 	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
 			dev_err(&pdev->dev, "no usable DMA configuration\n");
 			goto out_free_adapter;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 6a099cb34b12..fa5b596ff23a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -443,7 +443,7 @@  static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
 		if (is_buf_mapped(d))
 			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
 				       get_buf_size(adap, d),
-				       PCI_DMA_FROMDEVICE);
+				       DMA_FROM_DEVICE);
 		put_page(d->page);
 		d->page = NULL;
 		if (++q->cidx == q->size)
@@ -469,7 +469,7 @@  static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
 
 	if (is_buf_mapped(d))
 		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
-			       get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
+			       get_buf_size(adap, d), DMA_FROM_DEVICE);
 	d->page = NULL;
 	if (++q->cidx == q->size)
 		q->cidx = 0;
@@ -566,7 +566,7 @@  static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
 
 		mapping = dma_map_page(adap->pdev_dev, pg, 0,
 				       PAGE_SIZE << s->fl_pg_order,
-				       PCI_DMA_FROMDEVICE);
+				       DMA_FROM_DEVICE);
 		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
 			__free_pages(pg, s->fl_pg_order);
 			q->mapping_err++;
@@ -596,7 +596,7 @@  static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
 		}
 
 		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
-				       PCI_DMA_FROMDEVICE);
+				       DMA_FROM_DEVICE);
 		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
 			put_page(pg);
 			q->mapping_err++;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 2842628ad2c5..e5882df551ec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2917,17 +2917,11 @@  static int cxgb4vf_pci_probe(struct pci_dev *pdev,
 	 * Set up our DMA mask: try for 64-bit address masking first and
 	 * fall back to 32-bit if we can't get 64 bits ...
 	 */
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 	if (err == 0) {
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-		if (err) {
-			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for"
-				" coherent allocations\n");
-			goto err_release_regions;
-		}
 		pci_using_dac = 1;
 	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err != 0) {
 			dev_err(&pdev->dev, "no usable DMA configuration\n");
 			goto err_release_regions;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 7bc80eeb2c21..0295b2406646 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -478,7 +478,7 @@  static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
 		if (is_buf_mapped(sdesc))
 			dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
 				       get_buf_size(adapter, sdesc),
-				       PCI_DMA_FROMDEVICE);
+				       DMA_FROM_DEVICE);
 		put_page(sdesc->page);
 		sdesc->page = NULL;
 		if (++fl->cidx == fl->size)
@@ -507,7 +507,7 @@  static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
 	if (is_buf_mapped(sdesc))
 		dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
 			       get_buf_size(adapter, sdesc),
-			       PCI_DMA_FROMDEVICE);
+			       DMA_FROM_DEVICE);
 	sdesc->page = NULL;
 	if (++fl->cidx == fl->size)
 		fl->cidx = 0;
@@ -644,7 +644,7 @@  static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 
 		dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
 					PAGE_SIZE << s->fl_pg_order,
-					PCI_DMA_FROMDEVICE);
+					DMA_FROM_DEVICE);
 		if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
 			/*
 			 * We've run out of DMA mapping space.  Free up the
@@ -682,7 +682,7 @@  static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 		poison_buf(page, PAGE_SIZE);
 
 		dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
-				       PCI_DMA_FROMDEVICE);
+				       DMA_FROM_DEVICE);
 		if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
 			put_page(page);
 			break;