Message ID | 20210201010151.28898-1-airlied@gmail.com (mailing list archive)
---|---
State | New, archived |
Series | drm/nouveau: fix dma syncing warning with debugging on.
Reviewed-by: Lyude Paul <lyude@redhat.com>

Let me know when this has been pushed somewhere

On Mon, 2021-02-01 at 11:01 +1000, Dave Airlie wrote:
> From: Dave Airlie <airlied@redhat.com>
> 
> Since I wrote the below patch, if you run a debug kernel you can get a
> dma debug warning like:
> 
> nouveau 0000:1f:00.0: DMA-API: device driver tries to sync DMA memory it has
> not allocated [device address=0x000000016e012000] [size=4096 bytes]
> 
> The old nouveau code wasn't consolidating the pages like the ttm code,
> but dma-debug expects the sync code to give it the same base/range
> pairs as the allocator.
> 
> Fix the nouveau sync code to consolidate pages before calling the
> sync code.
> 
> Fixes: bd549d35b4be0 ("nouveau: use ttm populate mapping functions. (v2)")
> Reported-by: Lyude Paul <lyude@redhat.com>
> Reviewed-by: Ben Skeggs <bskeggs@redhat.com>
> Signed-off-by: Dave Airlie <airlied@redhat.com>
> ---
>  drivers/gpu/drm/nouveau/nouveau_bo.c | 35 +++++++++++++++++++++++-----
>  1 file changed, 29 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
> index 33dc886d1d6d..2375711877cf 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_bo.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
> @@ -547,7 +547,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
>  {
>  	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
>  	struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
> -	int i;
> +	int i, j;
>  
>  	if (!ttm_dma)
>  		return;
> @@ -556,10 +556,21 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
>  	if (nvbo->force_coherent)
>  		return;
>  
> -	for (i = 0; i < ttm_dma->num_pages; i++)
> +	for (i = 0; i < ttm_dma->num_pages; ++i) {
> +		struct page *p = ttm_dma->pages[i];
> +		size_t num_pages = 1;
> +
> +		for (j = i + 1; j < ttm_dma->num_pages; ++j) {
> +			if (++p != ttm_dma->pages[j])
> +				break;
> +
> +			++num_pages;
> +		}
>  		dma_sync_single_for_device(drm->dev->dev,
>  					   ttm_dma->dma_address[i],
> -					   PAGE_SIZE, DMA_TO_DEVICE);
> +					   num_pages * PAGE_SIZE, DMA_TO_DEVICE);
> +		i += num_pages - 1;
> +	}
>  }
>  
>  void
> @@ -567,7 +578,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
>  {
>  	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
>  	struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
> -	int i;
> +	int i, j;
>  
>  	if (!ttm_dma)
>  		return;
> @@ -576,9 +587,21 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
>  	if (nvbo->force_coherent)
>  		return;
>  
> -	for (i = 0; i < ttm_dma->num_pages; i++)
> +	for (i = 0; i < ttm_dma->num_pages; ++i) {
> +		struct page *p = ttm_dma->pages[i];
> +		size_t num_pages = 1;
> +
> +		for (j = i + 1; j < ttm_dma->num_pages; ++j) {
> +			if (++p != ttm_dma->pages[j])
> +				break;
> +
> +			++num_pages;
> +		}
> +
>  		dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
> -					PAGE_SIZE, DMA_FROM_DEVICE);
> +					num_pages * PAGE_SIZE, DMA_FROM_DEVICE);
> +		i += num_pages - 1;
> +	}
>  }
>  
>  void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
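For readers unfamiliar with CONFIG_DMA_API_DEBUG: dma-debug records one entry per mapping call and checks later sync calls against those recorded base/range pairs. The fragment below is a hypothetical sketch of the mismatch the commit message describes, not code from nouveau; `sync_example`, `dev`, `chunk`, and the four-page size are all made up for illustration.

```c
#include <linux/dma-mapping.h>
#include <linux/mm.h>

/*
 * Hypothetical illustration: "chunk" stands in for a four-page
 * allocation from the pool, mapped with a single call so dma-debug
 * records exactly one base/size pair for it.
 */
static void sync_example(struct device *dev, struct page *chunk)
{
	dma_addr_t base;
	int i;

	base = dma_map_page(dev, chunk, 0, 4 * PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, base))
		return;

	/*
	 * Pre-patch pattern: four page-sized syncs.  These base/range
	 * pairs no longer match what the allocator gave dma-debug,
	 * which is what produces "device driver tries to sync DMA
	 * memory it has not allocated" on a debug kernel.
	 */
	for (i = 0; i < 4; i++)
		dma_sync_single_for_device(dev, base + i * PAGE_SIZE,
					   PAGE_SIZE, DMA_TO_DEVICE);

	/* Post-patch pattern: one sync covering the whole mapping. */
	dma_sync_single_for_device(dev, base, 4 * PAGE_SIZE, DMA_TO_DEVICE);

	dma_unmap_page(dev, base, 4 * PAGE_SIZE, DMA_TO_DEVICE);
}
```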
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 33dc886d1d6d..2375711877cf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -547,7 +547,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
-	int i;
+	int i, j;
 
 	if (!ttm_dma)
 		return;
@@ -556,10 +556,21 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
 	if (nvbo->force_coherent)
 		return;
 
-	for (i = 0; i < ttm_dma->num_pages; i++)
+	for (i = 0; i < ttm_dma->num_pages; ++i) {
+		struct page *p = ttm_dma->pages[i];
+		size_t num_pages = 1;
+
+		for (j = i + 1; j < ttm_dma->num_pages; ++j) {
+			if (++p != ttm_dma->pages[j])
+				break;
+
+			++num_pages;
+		}
 		dma_sync_single_for_device(drm->dev->dev,
 					   ttm_dma->dma_address[i],
-					   PAGE_SIZE, DMA_TO_DEVICE);
+					   num_pages * PAGE_SIZE, DMA_TO_DEVICE);
+		i += num_pages - 1;
+	}
 }
 
 void
@@ -567,7 +578,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
-	int i;
+	int i, j;
 
 	if (!ttm_dma)
 		return;
@@ -576,9 +587,21 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 	if (nvbo->force_coherent)
 		return;
 
-	for (i = 0; i < ttm_dma->num_pages; i++)
+	for (i = 0; i < ttm_dma->num_pages; ++i) {
+		struct page *p = ttm_dma->pages[i];
+		size_t num_pages = 1;
+
+		for (j = i + 1; j < ttm_dma->num_pages; ++j) {
+			if (++p != ttm_dma->pages[j])
+				break;
+
+			++num_pages;
+		}
+
 		dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
-					PAGE_SIZE, DMA_FROM_DEVICE);
+					num_pages * PAGE_SIZE, DMA_FROM_DEVICE);
+		i += num_pages - 1;
+	}
 }
 
 void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
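The coalescing walk itself can be exercised outside the kernel. Below is a standalone userspace sketch of the same run-merging idea, with page-aligned integers standing in for struct page pointers; `sync_all()`, `sync_range()`, and the example addresses are made-up names for illustration. Note the while-loop form advances purely by the run length, which avoids the extra for-loop increment the posted code has to compensate for.

```c
/* Userspace sketch (not kernel code) of the page-coalescing idea:
 * walk an array of page addresses, merge runs that are physically
 * contiguous, and issue one "sync" per run. */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

/* Stand-in for dma_sync_single_for_device(): one call per run. */
static void sync_range(unsigned long base, size_t len)
{
	printf("sync base=0x%lx len=%zu\n", base, len);
}

static void sync_all(const unsigned long *pages, size_t num_pages)
{
	size_t i = 0;

	while (i < num_pages) {
		size_t run = 1;

		/* Extend the run while the next page follows immediately. */
		while (i + run < num_pages &&
		       pages[i + run] == pages[i] + run * PAGE_SIZE)
			run++;

		sync_range(pages[i], run * PAGE_SIZE);
		i += run;
	}
}

int main(void)
{
	/* Three contiguous pages, a gap, then two more contiguous pages:
	 * expect two sync calls, of 12288 and 8192 bytes. */
	const unsigned long pages[] = {
		0x10000, 0x11000, 0x12000,
		0x20000, 0x21000,
	};

	sync_all(pages, sizeof(pages) / sizeof(pages[0]));
	return 0;
}
```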