Message ID | 20221107173435.68116-1-matthew.auld@intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | drm/i915: use i915_sg_dma_sizes() for all backends | expand |
Hi Matthew,
Thank you for the patch! Yet something to improve:
[auto build test ERROR on drm-tip/drm-tip]
url: https://github.com/intel-lab-lkp/linux/commits/Matthew-Auld/drm-i915-use-i915_sg_dma_sizes-for-all-backends/20221108-014733
base: git://anongit.freedesktop.org/drm/drm-tip drm-tip
patch link: https://lore.kernel.org/r/20221107173435.68116-1-matthew.auld%40intel.com
patch subject: [Intel-gfx] [PATCH] drm/i915: use i915_sg_dma_sizes() for all backends
config: x86_64-allyesconfig
compiler: gcc-11 (Debian 11.3.0-8) 11.3.0
reproduce (this is a W=1 build):
# https://github.com/intel-lab-lkp/linux/commit/353d26ad7a68798f492f2cd8716a740b2e5efea4
git remote add linux-review https://github.com/intel-lab-lkp/linux
git fetch --no-tags linux-review Matthew-Auld/drm-i915-use-i915_sg_dma_sizes-for-all-backends/20221108-014733
git checkout 353d26ad7a68798f492f2cd8716a740b2e5efea4
# save the config file
mkdir build_dir && cp config build_dir/.config
make W=1 O=build_dir ARCH=x86_64 SHELL=/bin/bash
If you fix the issue, kindly add following tag where applicable
| Reported-by: kernel test robot <lkp@intel.com>
All errors (new ones prefixed by >>):
drivers/gpu/drm/i915/gvt/dmabuf.c: In function 'vgpu_gem_get_pages':
>> drivers/gpu/drm/i915/gvt/dmabuf.c:91:9: error: too many arguments to function '__i915_gem_object_set_pages'
91 | __i915_gem_object_set_pages(obj, st, PAGE_SIZE);
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~
In file included from drivers/gpu/drm/i915/i915_vma.h:34,
from drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h:13,
from drivers/gpu/drm/i915/gt/uc/intel_guc.h:19,
from drivers/gpu/drm/i915/gt/uc/intel_uc.h:9,
from drivers/gpu/drm/i915/gt/intel_gt_types.h:19,
from drivers/gpu/drm/i915/gt/intel_engine.h:18,
from drivers/gpu/drm/i915/i915_drv.h:46,
from drivers/gpu/drm/i915/gvt/dmabuf.c:39:
drivers/gpu/drm/i915/gem/i915_gem_object.h:405:6: note: declared here
405 | void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~
vim +/__i915_gem_object_set_pages +91 drivers/gpu/drm/i915/gvt/dmabuf.c
e546e281d33d1f Tina Zhang 2017-11-23 44
e546e281d33d1f Tina Zhang 2017-11-23 45 static int vgpu_gem_get_pages(
e546e281d33d1f Tina Zhang 2017-11-23 46 struct drm_i915_gem_object *obj)
e546e281d33d1f Tina Zhang 2017-11-23 47 {
e546e281d33d1f Tina Zhang 2017-11-23 48 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
9f674c811740b5 Tina Zhang 2019-12-13 49 struct intel_vgpu *vgpu;
e546e281d33d1f Tina Zhang 2017-11-23 50 struct sg_table *st;
e546e281d33d1f Tina Zhang 2017-11-23 51 struct scatterlist *sg;
9f674c811740b5 Tina Zhang 2019-12-13 52 int i, j, ret;
e546e281d33d1f Tina Zhang 2017-11-23 53 gen8_pte_t __iomem *gtt_entries;
e546e281d33d1f Tina Zhang 2017-11-23 54 struct intel_vgpu_fb_info *fb_info;
4a6eccbcb9ea88 Xiong Zhang 2019-04-10 55 u32 page_num;
e546e281d33d1f Tina Zhang 2017-11-23 56
e546e281d33d1f Tina Zhang 2017-11-23 57 fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
db19c724cb185a Pankaj Bharadiya 2020-02-20 58 if (drm_WARN_ON(&dev_priv->drm, !fb_info))
e546e281d33d1f Tina Zhang 2017-11-23 59 return -ENODEV;
e546e281d33d1f Tina Zhang 2017-11-23 60
9f674c811740b5 Tina Zhang 2019-12-13 61 vgpu = fb_info->obj->vgpu;
db19c724cb185a Pankaj Bharadiya 2020-02-20 62 if (drm_WARN_ON(&dev_priv->drm, !vgpu))
9f674c811740b5 Tina Zhang 2019-12-13 63 return -ENODEV;
9f674c811740b5 Tina Zhang 2019-12-13 64
e546e281d33d1f Tina Zhang 2017-11-23 65 st = kmalloc(sizeof(*st), GFP_KERNEL);
e546e281d33d1f Tina Zhang 2017-11-23 66 if (unlikely(!st))
e546e281d33d1f Tina Zhang 2017-11-23 67 return -ENOMEM;
e546e281d33d1f Tina Zhang 2017-11-23 68
4a6eccbcb9ea88 Xiong Zhang 2019-04-10 69 page_num = obj->base.size >> PAGE_SHIFT;
4a6eccbcb9ea88 Xiong Zhang 2019-04-10 70 ret = sg_alloc_table(st, page_num, GFP_KERNEL);
e546e281d33d1f Tina Zhang 2017-11-23 71 if (ret) {
e546e281d33d1f Tina Zhang 2017-11-23 72 kfree(st);
e546e281d33d1f Tina Zhang 2017-11-23 73 return ret;
e546e281d33d1f Tina Zhang 2017-11-23 74 }
204129a211fc48 Michał Winiarski 2022-01-05 75 gtt_entries = (gen8_pte_t __iomem *)to_gt(dev_priv)->ggtt->gsm +
e546e281d33d1f Tina Zhang 2017-11-23 76 (fb_info->start >> PAGE_SHIFT);
4a6eccbcb9ea88 Xiong Zhang 2019-04-10 77 for_each_sg(st->sgl, sg, page_num, i) {
9f674c811740b5 Tina Zhang 2019-12-13 78 dma_addr_t dma_addr =
9f674c811740b5 Tina Zhang 2019-12-13 79 GEN8_DECODE_PTE(readq(&gtt_entries[i]));
91879bbaf8890f Christoph Hellwig 2022-04-11 80 if (intel_gvt_dma_pin_guest_page(vgpu, dma_addr)) {
9f674c811740b5 Tina Zhang 2019-12-13 81 ret = -EINVAL;
9f674c811740b5 Tina Zhang 2019-12-13 82 goto out;
9f674c811740b5 Tina Zhang 2019-12-13 83 }
9f674c811740b5 Tina Zhang 2019-12-13 84
e546e281d33d1f Tina Zhang 2017-11-23 85 sg->offset = 0;
e546e281d33d1f Tina Zhang 2017-11-23 86 sg->length = PAGE_SIZE;
e546e281d33d1f Tina Zhang 2017-11-23 87 sg_dma_len(sg) = PAGE_SIZE;
9f674c811740b5 Tina Zhang 2019-12-13 88 sg_dma_address(sg) = dma_addr;
e546e281d33d1f Tina Zhang 2017-11-23 89 }
e546e281d33d1f Tina Zhang 2017-11-23 90
e546e281d33d1f Tina Zhang 2017-11-23 @91 __i915_gem_object_set_pages(obj, st, PAGE_SIZE);
9f674c811740b5 Tina Zhang 2019-12-13 92 out:
9f674c811740b5 Tina Zhang 2019-12-13 93 if (ret) {
9f674c811740b5 Tina Zhang 2019-12-13 94 dma_addr_t dma_addr;
9f674c811740b5 Tina Zhang 2019-12-13 95
9f674c811740b5 Tina Zhang 2019-12-13 96 for_each_sg(st->sgl, sg, i, j) {
9f674c811740b5 Tina Zhang 2019-12-13 97 dma_addr = sg_dma_address(sg);
9f674c811740b5 Tina Zhang 2019-12-13 98 if (dma_addr)
8398eee85fd009 Christoph Hellwig 2022-04-11 99 intel_gvt_dma_unmap_guest_page(vgpu, dma_addr);
9f674c811740b5 Tina Zhang 2019-12-13 100 }
9f674c811740b5 Tina Zhang 2019-12-13 101 sg_free_table(st);
9f674c811740b5 Tina Zhang 2019-12-13 102 kfree(st);
9f674c811740b5 Tina Zhang 2019-12-13 103 }
9f674c811740b5 Tina Zhang 2019-12-13 104
9f674c811740b5 Tina Zhang 2019-12-13 105 return ret;
e546e281d33d1f Tina Zhang 2017-11-23 106
On 07/11/2022 17:34, Matthew Auld wrote: > We rely on page_sizes.sg in setup_scratch_page() reporting the correct > value if the underlying sgl is not contiguous, however in > get_pages_internal() we are only looking at the layout of the created > pages when calculating the sg_page_sizes, and not the final sgl, which > could in theory be completely different. In such a situation we might > incorrectly think we have a 64K scratch page, when it is actually only > 4K or similar split over multiple non-contiguous entries, which could > lead to broken behaviour when touching the scratch space within the > padding of a 64K GTT page-table. For most of the other backends we > already just call i915_sg_dma_sizes() on the final mapping, so rather > just move that into __i915_gem_object_set_pages() to avoid such issues > coming back to bite us later. > > Suggested-by: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com> > Signed-off-by: Matthew Auld <matthew.auld@intel.com> > Cc: Stuart Summers <stuart.summers@intel.com> > Cc: Andrzej Hajda <andrzej.hajda@intel.com> > --- > drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 4 +--- > drivers/gpu/drm/i915/gem/i915_gem_internal.c | 5 +---- > drivers/gpu/drm/i915/gem/i915_gem_object.h | 3 +-- > drivers/gpu/drm/i915/gem/i915_gem_pages.c | 7 +++---- > drivers/gpu/drm/i915/gem/i915_gem_phys.c | 9 +++------ > drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 2 +- > drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 2 +- > drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 3 +-- > drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 4 +--- > drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c | 2 +- > drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 10 +++------- > drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 5 +---- > drivers/gpu/drm/i915/selftests/mock_region.c | 2 +- > 13 files changed, 19 insertions(+), 39 deletions(-) > > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c > index ec6f7ae47783..1df74f7aa3dc 100644 
> --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c > +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c > @@ -238,7 +238,6 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) > { > struct drm_i915_private *i915 = to_i915(obj->base.dev); > struct sg_table *sgt; > - unsigned int sg_page_sizes; > > assert_object_held(obj); > > @@ -262,8 +261,7 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) > (!HAS_LLC(i915) && !IS_DG1(i915))) > wbinvd_on_all_cpus(); > > - sg_page_sizes = i915_sg_dma_sizes(sgt->sgl); > - __i915_gem_object_set_pages(obj, sgt, sg_page_sizes); > + __i915_gem_object_set_pages(obj, sgt); > > return 0; > } > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c > index 629acb403a2c..f66bcefc09ec 100644 > --- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c > +++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c > @@ -35,7 +35,6 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) > struct drm_i915_private *i915 = to_i915(obj->base.dev); > struct sg_table *st; > struct scatterlist *sg; > - unsigned int sg_page_sizes; > unsigned int npages; > int max_order = MAX_ORDER; > unsigned int max_segment; > @@ -64,7 +63,6 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) > > sg = st->sgl; > st->nents = 0; > - sg_page_sizes = 0; > > do { > int order = min(fls(npages) - 1, max_order); > @@ -83,7 +81,6 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) > } while (1); > > sg_set_page(sg, page, PAGE_SIZE << order, 0); > - sg_page_sizes |= PAGE_SIZE << order; > st->nents++; > > npages -= 1 << order; > @@ -105,7 +102,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) > goto err; > } > > - __i915_gem_object_set_pages(obj, st, sg_page_sizes); > + __i915_gem_object_set_pages(obj, st); > > return 0; > > diff --git 
a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h > index 6b9ecff42bb5..3db53769864c 100644 > --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h > +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h > @@ -403,8 +403,7 @@ i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, > unsigned long n); > > void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, > - struct sg_table *pages, > - unsigned int sg_page_sizes); > + struct sg_table *pages); > > int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj); > int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj); > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c > index 16f845663ff2..05a27723ebb8 100644 > --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c > +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c > @@ -16,8 +16,7 @@ > #include "i915_gem_mman.h" > > void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, > - struct sg_table *pages, > - unsigned int sg_page_sizes) > + struct sg_table *pages) > { > struct drm_i915_private *i915 = to_i915(obj->base.dev); > unsigned long supported = RUNTIME_INFO(i915)->page_sizes; > @@ -45,8 +44,8 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, > > obj->mm.pages = pages; > > - GEM_BUG_ON(!sg_page_sizes); > - obj->mm.page_sizes.phys = sg_page_sizes; > + obj->mm.page_sizes.phys = i915_sg_dma_sizes(pages->sgl); > + GEM_BUG_ON(!obj->mm.page_sizes.phys); > > /* > * Calculate the supported page-sizes which fit into the given > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c > index 0d0e46dae559..68453572275b 100644 > --- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c > +++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c > @@ -79,7 +79,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) > > /* We're no longer struct page backed */ > obj->mem_flags &= 
~I915_BO_FLAG_STRUCT_PAGE; > - __i915_gem_object_set_pages(obj, st, sg->length); > + __i915_gem_object_set_pages(obj, st); > > return 0; > > @@ -209,11 +209,8 @@ static int i915_gem_object_shmem_to_phys(struct drm_i915_gem_object *obj) > return 0; > > err_xfer: > - if (!IS_ERR_OR_NULL(pages)) { > - unsigned int sg_page_sizes = i915_sg_dma_sizes(pages->sgl); > - > - __i915_gem_object_set_pages(obj, pages, sg_page_sizes); > - } > + if (!IS_ERR_OR_NULL(pages)) > + __i915_gem_object_set_pages(obj, pages); > return err; > } > > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c > index 2f7804492cd5..9c759df700ca 100644 > --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c > +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c > @@ -247,7 +247,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj) > if (i915_gem_object_can_bypass_llc(obj)) > obj->cache_dirty = true; > > - __i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl)); > + __i915_gem_object_set_pages(obj, st); > > return 0; > > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c > index 0c70711818ed..bc9521078807 100644 > --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c > +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c > @@ -628,7 +628,7 @@ static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj) > sg_dma_len(pages->sgl), > POISON_INUSE); > > - __i915_gem_object_set_pages(obj, pages, obj->stolen->size); > + __i915_gem_object_set_pages(obj, pages); > > return 0; > } > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c > index 25129af70f70..fb9ba712e63c 100644 > --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c > +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c > @@ -815,8 +815,7 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj, > > GEM_BUG_ON(obj->mm.rsgt); > obj->mm.rsgt = rsgt; > - __i915_gem_object_set_pages(obj, &rsgt->table, > - 
i915_sg_dma_sizes(rsgt->table.sgl)); > + __i915_gem_object_set_pages(obj, &rsgt->table); > } > > GEM_BUG_ON(bo->ttm && ((obj->base.size >> PAGE_SHIFT) < bo->ttm->num_pages)); > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c > index ca7a388ba2bf..9348b1804d53 100644 > --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c > +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c > @@ -131,7 +131,6 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) > const unsigned long num_pages = obj->base.size >> PAGE_SHIFT; > unsigned int max_segment = i915_sg_segment_size(obj->base.dev->dev); > struct sg_table *st; > - unsigned int sg_page_sizes; > struct page **pvec; > int ret; > > @@ -170,8 +169,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) > if (i915_gem_object_can_bypass_llc(obj)) > obj->cache_dirty = true; > > - sg_page_sizes = i915_sg_dma_sizes(st->sgl); > - __i915_gem_object_set_pages(obj, st, sg_page_sizes); > + __i915_gem_object_set_pages(obj, st); > > return 0; > > diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c > index f963b8e1e37b..cbd9b624a788 100644 > --- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c > +++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c > @@ -68,7 +68,7 @@ static int huge_get_pages(struct drm_i915_gem_object *obj) > if (i915_gem_gtt_prepare_pages(obj, pages)) > goto err; > > - __i915_gem_object_set_pages(obj, pages, PAGE_SIZE); > + __i915_gem_object_set_pages(obj, pages); > > return 0; > > diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c > index 0cb99e75b0bc..beaf27e09e8a 100644 > --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c > +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c > @@ -136,7 +136,7 @@ static int get_huge_pages(struct drm_i915_gem_object *obj) > goto err; > > 
GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask); > - __i915_gem_object_set_pages(obj, st, sg_page_sizes); > + __i915_gem_object_set_pages(obj, st); > > return 0; > > @@ -210,7 +210,6 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj) > const u64 max_len = rounddown_pow_of_two(UINT_MAX); > struct sg_table *st; > struct scatterlist *sg; > - unsigned int sg_page_sizes; > u64 rem; > > st = kmalloc(sizeof(*st), GFP); > @@ -226,7 +225,6 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj) > rem = obj->base.size; > sg = st->sgl; > st->nents = 0; > - sg_page_sizes = 0; > do { > unsigned int page_size = get_largest_page_size(i915, rem); > unsigned int len = min(page_size * div_u64(rem, page_size), > @@ -239,8 +237,6 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj) > sg_dma_len(sg) = len; > sg_dma_address(sg) = page_size; > > - sg_page_sizes |= len; > - > st->nents++; > > rem -= len; > @@ -254,7 +250,7 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj) > > i915_sg_trim(st); > > - __i915_gem_object_set_pages(obj, st, sg_page_sizes); > + __i915_gem_object_set_pages(obj, st); > > return 0; > } > @@ -286,7 +282,7 @@ static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj) > sg_dma_len(sg) = obj->base.size; > sg_dma_address(sg) = page_size; > > - __i915_gem_object_set_pages(obj, st, sg->length); > + __i915_gem_object_set_pages(obj, st); > > return 0; > #undef GFP > diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c > index 27c733b00976..eae7d947d7de 100644 > --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c > +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c > @@ -61,7 +61,6 @@ static int fake_get_pages(struct drm_i915_gem_object *obj) > #define PFN_BIAS 0x1000 > struct sg_table *pages; > struct scatterlist *sg; > - unsigned int sg_page_sizes; > typeof(obj->base.size) rem; > > pages = kmalloc(sizeof(*pages), GFP); > @@ -74,7 +73,6 @@ static int 
fake_get_pages(struct drm_i915_gem_object *obj) > return -ENOMEM; > } > > - sg_page_sizes = 0; > rem = obj->base.size; > for (sg = pages->sgl; sg; sg = sg_next(sg)) { > unsigned long len = min_t(typeof(rem), rem, BIT(31)); > @@ -83,13 +81,12 @@ static int fake_get_pages(struct drm_i915_gem_object *obj) > sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0); > sg_dma_address(sg) = page_to_phys(sg_page(sg)); > sg_dma_len(sg) = len; > - sg_page_sizes |= len; > > rem -= len; > } > GEM_BUG_ON(rem); > > - __i915_gem_object_set_pages(obj, pages, sg_page_sizes); > + __i915_gem_object_set_pages(obj, pages); > > return 0; > #undef GFP > diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c > index bac21fe84ca5..6324eb32d4dd 100644 > --- a/drivers/gpu/drm/i915/selftests/mock_region.c > +++ b/drivers/gpu/drm/i915/selftests/mock_region.c > @@ -41,7 +41,7 @@ static int mock_region_get_pages(struct drm_i915_gem_object *obj) > } > > pages = &obj->mm.rsgt->table; > - __i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl)); > + __i915_gem_object_set_pages(obj, pages); > > return 0; > Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Regards, Tvrtko
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index ec6f7ae47783..1df74f7aa3dc 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -238,7 +238,6 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct sg_table *sgt; - unsigned int sg_page_sizes; assert_object_held(obj); @@ -262,8 +261,7 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) (!HAS_LLC(i915) && !IS_DG1(i915))) wbinvd_on_all_cpus(); - sg_page_sizes = i915_sg_dma_sizes(sgt->sgl); - __i915_gem_object_set_pages(obj, sgt, sg_page_sizes); + __i915_gem_object_set_pages(obj, sgt); return 0; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c index 629acb403a2c..f66bcefc09ec 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c @@ -35,7 +35,6 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) struct drm_i915_private *i915 = to_i915(obj->base.dev); struct sg_table *st; struct scatterlist *sg; - unsigned int sg_page_sizes; unsigned int npages; int max_order = MAX_ORDER; unsigned int max_segment; @@ -64,7 +63,6 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) sg = st->sgl; st->nents = 0; - sg_page_sizes = 0; do { int order = min(fls(npages) - 1, max_order); @@ -83,7 +81,6 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) } while (1); sg_set_page(sg, page, PAGE_SIZE << order, 0); - sg_page_sizes |= PAGE_SIZE << order; st->nents++; npages -= 1 << order; @@ -105,7 +102,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) goto err; } - __i915_gem_object_set_pages(obj, st, sg_page_sizes); + __i915_gem_object_set_pages(obj, st); return 0; diff --git 
a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 6b9ecff42bb5..3db53769864c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -403,8 +403,7 @@ i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, unsigned long n); void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, - struct sg_table *pages, - unsigned int sg_page_sizes); + struct sg_table *pages); int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj); int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 16f845663ff2..05a27723ebb8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -16,8 +16,7 @@ #include "i915_gem_mman.h" void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, - struct sg_table *pages, - unsigned int sg_page_sizes) + struct sg_table *pages) { struct drm_i915_private *i915 = to_i915(obj->base.dev); unsigned long supported = RUNTIME_INFO(i915)->page_sizes; @@ -45,8 +44,8 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, obj->mm.pages = pages; - GEM_BUG_ON(!sg_page_sizes); - obj->mm.page_sizes.phys = sg_page_sizes; + obj->mm.page_sizes.phys = i915_sg_dma_sizes(pages->sgl); + GEM_BUG_ON(!obj->mm.page_sizes.phys); /* * Calculate the supported page-sizes which fit into the given diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c index 0d0e46dae559..68453572275b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c @@ -79,7 +79,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) /* We're no longer struct page backed */ obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE; - __i915_gem_object_set_pages(obj, st, sg->length); + __i915_gem_object_set_pages(obj, 
st); return 0; @@ -209,11 +209,8 @@ static int i915_gem_object_shmem_to_phys(struct drm_i915_gem_object *obj) return 0; err_xfer: - if (!IS_ERR_OR_NULL(pages)) { - unsigned int sg_page_sizes = i915_sg_dma_sizes(pages->sgl); - - __i915_gem_object_set_pages(obj, pages, sg_page_sizes); - } + if (!IS_ERR_OR_NULL(pages)) + __i915_gem_object_set_pages(obj, pages); return err; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index 2f7804492cd5..9c759df700ca 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -247,7 +247,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj) if (i915_gem_object_can_bypass_llc(obj)) obj->cache_dirty = true; - __i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl)); + __i915_gem_object_set_pages(obj, st); return 0; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index 0c70711818ed..bc9521078807 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -628,7 +628,7 @@ static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj) sg_dma_len(pages->sgl), POISON_INUSE); - __i915_gem_object_set_pages(obj, pages, obj->stolen->size); + __i915_gem_object_set_pages(obj, pages); return 0; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index 25129af70f70..fb9ba712e63c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -815,8 +815,7 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj, GEM_BUG_ON(obj->mm.rsgt); obj->mm.rsgt = rsgt; - __i915_gem_object_set_pages(obj, &rsgt->table, - i915_sg_dma_sizes(rsgt->table.sgl)); + __i915_gem_object_set_pages(obj, &rsgt->table); } GEM_BUG_ON(bo->ttm && ((obj->base.size >> PAGE_SHIFT) < bo->ttm->num_pages)); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c 
b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index ca7a388ba2bf..9348b1804d53 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -131,7 +131,6 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) const unsigned long num_pages = obj->base.size >> PAGE_SHIFT; unsigned int max_segment = i915_sg_segment_size(obj->base.dev->dev); struct sg_table *st; - unsigned int sg_page_sizes; struct page **pvec; int ret; @@ -170,8 +169,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) if (i915_gem_object_can_bypass_llc(obj)) obj->cache_dirty = true; - sg_page_sizes = i915_sg_dma_sizes(st->sgl); - __i915_gem_object_set_pages(obj, st, sg_page_sizes); + __i915_gem_object_set_pages(obj, st); return 0; diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c index f963b8e1e37b..cbd9b624a788 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c @@ -68,7 +68,7 @@ static int huge_get_pages(struct drm_i915_gem_object *obj) if (i915_gem_gtt_prepare_pages(obj, pages)) goto err; - __i915_gem_object_set_pages(obj, pages, PAGE_SIZE); + __i915_gem_object_set_pages(obj, pages); return 0; diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 0cb99e75b0bc..beaf27e09e8a 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -136,7 +136,7 @@ static int get_huge_pages(struct drm_i915_gem_object *obj) goto err; GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask); - __i915_gem_object_set_pages(obj, st, sg_page_sizes); + __i915_gem_object_set_pages(obj, st); return 0; @@ -210,7 +210,6 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj) const u64 max_len = rounddown_pow_of_two(UINT_MAX); struct sg_table *st; struct scatterlist *sg; - 
unsigned int sg_page_sizes; u64 rem; st = kmalloc(sizeof(*st), GFP); @@ -226,7 +225,6 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj) rem = obj->base.size; sg = st->sgl; st->nents = 0; - sg_page_sizes = 0; do { unsigned int page_size = get_largest_page_size(i915, rem); unsigned int len = min(page_size * div_u64(rem, page_size), @@ -239,8 +237,6 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj) sg_dma_len(sg) = len; sg_dma_address(sg) = page_size; - sg_page_sizes |= len; - st->nents++; rem -= len; @@ -254,7 +250,7 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj) i915_sg_trim(st); - __i915_gem_object_set_pages(obj, st, sg_page_sizes); + __i915_gem_object_set_pages(obj, st); return 0; } @@ -286,7 +282,7 @@ static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj) sg_dma_len(sg) = obj->base.size; sg_dma_address(sg) = page_size; - __i915_gem_object_set_pages(obj, st, sg->length); + __i915_gem_object_set_pages(obj, st); return 0; #undef GFP diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 27c733b00976..eae7d947d7de 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -61,7 +61,6 @@ static int fake_get_pages(struct drm_i915_gem_object *obj) #define PFN_BIAS 0x1000 struct sg_table *pages; struct scatterlist *sg; - unsigned int sg_page_sizes; typeof(obj->base.size) rem; pages = kmalloc(sizeof(*pages), GFP); @@ -74,7 +73,6 @@ static int fake_get_pages(struct drm_i915_gem_object *obj) return -ENOMEM; } - sg_page_sizes = 0; rem = obj->base.size; for (sg = pages->sgl; sg; sg = sg_next(sg)) { unsigned long len = min_t(typeof(rem), rem, BIT(31)); @@ -83,13 +81,12 @@ static int fake_get_pages(struct drm_i915_gem_object *obj) sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0); sg_dma_address(sg) = page_to_phys(sg_page(sg)); sg_dma_len(sg) = len; - sg_page_sizes |= len; rem -= len; } 
GEM_BUG_ON(rem); - __i915_gem_object_set_pages(obj, pages, sg_page_sizes); + __i915_gem_object_set_pages(obj, pages); return 0; #undef GFP diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c index bac21fe84ca5..6324eb32d4dd 100644 --- a/drivers/gpu/drm/i915/selftests/mock_region.c +++ b/drivers/gpu/drm/i915/selftests/mock_region.c @@ -41,7 +41,7 @@ static int mock_region_get_pages(struct drm_i915_gem_object *obj) } pages = &obj->mm.rsgt->table; - __i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl)); + __i915_gem_object_set_pages(obj, pages); return 0;
We rely on page_sizes.sg in setup_scratch_page() reporting the correct value if the underlying sgl is not contiguous, however in get_pages_internal() we are only looking at the layout of the created pages when calculating the sg_page_sizes, and not the final sgl, which could in theory be completely different. In such a situation we might incorrectly think we have a 64K scratch page, when it is actually only 4K or similar split over multiple non-contiguous entries, which could lead to broken behaviour when touching the scratch space within the padding of a 64K GTT page-table. For most of the other backends we already just call i915_sg_dma_sizes() on the final mapping, so rather just move that into __i915_gem_object_set_pages() to avoid such issues coming back to bite us later. Suggested-by: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com> Signed-off-by: Matthew Auld <matthew.auld@intel.com> Cc: Stuart Summers <stuart.summers@intel.com> Cc: Andrzej Hajda <andrzej.hajda@intel.com> --- drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 4 +--- drivers/gpu/drm/i915/gem/i915_gem_internal.c | 5 +---- drivers/gpu/drm/i915/gem/i915_gem_object.h | 3 +-- drivers/gpu/drm/i915/gem/i915_gem_pages.c | 7 +++---- drivers/gpu/drm/i915/gem/i915_gem_phys.c | 9 +++------ drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 3 +-- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 4 +--- drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c | 2 +- drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 10 +++------- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 5 +---- drivers/gpu/drm/i915/selftests/mock_region.c | 2 +- 13 files changed, 19 insertions(+), 39 deletions(-)