diff mbox

[v4,2/3] drm/nouveau/fb/gf100: defer DMA mapping of scratch page to init() hook

Message ID 1474893160-12321-3-git-send-email-ard.biesheuvel@linaro.org (mailing list archive)
State New, archived
Headers show

Commit Message

Ard Biesheuvel Sept. 26, 2016, 12:32 p.m. UTC
The 100c10 scratch page is mapped using dma_map_page() before the TTM
layer has had a chance to set the DMA mask. This means we are still
running with the default DMA mask of 32 bits when this code executes, and this causes
problems for platforms with no memory below 4 GB (such as AMD Seattle).

So move the dma_map_page() to the .init hook, which executes after the
DMA mask has been set.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
 drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c | 26 ++++++++++++++------
 1 file changed, 18 insertions(+), 8 deletions(-)

Comments

Alexandre Courbot Oct. 3, 2016, 5:44 a.m. UTC | #1
On Mon, Sep 26, 2016 at 9:32 PM, Ard Biesheuvel
<ard.biesheuvel@linaro.org> wrote:
> The 100c10 scratch page is mapped using dma_map_page() before the TTM
> layer has had a chance to set the DMA mask. This means we are still
> running with the default of 32 when this code executes, and this causes
> problems for platforms with no memory below 4 GB (such as AMD Seattle)
>
> So move the dma_map_page() to the .init hook, which executes after the
> DMA mask has been set.
>
> Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
> ---
>  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c | 26 ++++++++++++++------
>  1 file changed, 18 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
> index 76433cc66fff..5c8132873e60 100644
> --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
> +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
> @@ -93,7 +93,18 @@ gf100_fb_init(struct nvkm_fb *base)
>         struct gf100_fb *fb = gf100_fb(base);
>         struct nvkm_device *device = fb->base.subdev.device;
>
> -       if (fb->r100c10_page)
> +       if (!fb->r100c10) {
> +               dma_addr_t addr = dma_map_page(device->dev, fb->r100c10_page, 0,
> +                                              PAGE_SIZE, DMA_BIDIRECTIONAL);
> +               if (!dma_mapping_error(device->dev, addr)) {
> +                       fb->r100c10 = addr;
> +               } else {
> +                       nvkm_warn(&fb->base.subdev,
> +                                "dma_map_page() failed on 100c10 page\n");
> +               }
> +       }
> +
> +       if (fb->r100c10)
>                 nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);

gf100_fb_oneinit() seems to be a better place for this, since it will
be executed exactly once, which is what you want for a memory
allocation. As you can see other memory allocations are also performed
there, which hints it should have been done there (and not in ctor) in
the first place. Maybe you can also move the alloc_page() there so
everything is done in the same place.

>  }
>
> @@ -103,12 +114,13 @@ gf100_fb_dtor(struct nvkm_fb *base)
>         struct gf100_fb *fb = gf100_fb(base);
>         struct nvkm_device *device = fb->base.subdev.device;
>
> -       if (fb->r100c10_page) {
> +       if (fb->r100c10) {
>                 dma_unmap_page(device->dev, fb->r100c10, PAGE_SIZE,
>                                DMA_BIDIRECTIONAL);
> -               __free_page(fb->r100c10_page);
>         }
>
> +       __free_page(fb->r100c10_page);
> +

If you move the allocation/mapping to gf100_fb_oneinit() then I
suppose you don't need this change.
diff mbox

Patch

diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index 76433cc66fff..5c8132873e60 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -93,7 +93,18 @@  gf100_fb_init(struct nvkm_fb *base)
 	struct gf100_fb *fb = gf100_fb(base);
 	struct nvkm_device *device = fb->base.subdev.device;
 
-	if (fb->r100c10_page)
+	if (!fb->r100c10) {
+		dma_addr_t addr = dma_map_page(device->dev, fb->r100c10_page, 0,
+					       PAGE_SIZE, DMA_BIDIRECTIONAL);
+		if (!dma_mapping_error(device->dev, addr)) {
+			fb->r100c10 = addr;
+		} else {
+			nvkm_warn(&fb->base.subdev,
+				 "dma_map_page() failed on 100c10 page\n");
+		}
+	}
+
+	if (fb->r100c10)
 		nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
 }
 
@@ -103,12 +114,13 @@  gf100_fb_dtor(struct nvkm_fb *base)
 	struct gf100_fb *fb = gf100_fb(base);
 	struct nvkm_device *device = fb->base.subdev.device;
 
-	if (fb->r100c10_page) {
+	if (fb->r100c10) {
 		dma_unmap_page(device->dev, fb->r100c10, PAGE_SIZE,
 			       DMA_BIDIRECTIONAL);
-		__free_page(fb->r100c10_page);
 	}
 
+	__free_page(fb->r100c10_page);
+
 	return fb;
 }
 
@@ -124,11 +136,9 @@  gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
 	*pfb = &fb->base;
 
 	fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (fb->r100c10_page) {
-		fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
-					   PAGE_SIZE, DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(device->dev, fb->r100c10))
-			return -EFAULT;
+	if (!fb->r100c10_page) {
+		nvkm_error(&fb->base.subdev, "failed 100c10 page alloc\n");
+		return -ENOMEM;
 	}
 
 	return 0;