Message ID | 1458129407-4940-3-git-send-email-jglisse@redhat.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Am 16.03.2016 um 12:56 schrieb jglisse@redhat.com: > From: Jérome Glisse <jglisse@redhat.com> > > We need to unpin on suspend and pin on resume. This shuffle code around > to do just that. > > Signed-off-by: Jérôme Glisse <jglisse@redhat.com> > Cc: Alex Deucher <alexander.deucher@amd.com> > Cc: Christian König <christian.koenig@amd.com> NAK, that won't work correctly. When the firmware is loaded at one location once you can't move it without power cycling the ASIC. That only works when you completely shut down the system, but not if you just suspend to memory on an APU or do a test cycle without actually power down. We already tried this approach and it took me month to figure out what's going wrong when the firmware end up at a different location after the driver started again. Regards, Christian. > --- > drivers/gpu/drm/radeon/radeon_uvd.c | 61 ++++++++++++++++--------------------- > drivers/gpu/drm/radeon/radeon_vce.c | 37 +++++++++------------- > 2 files changed, 41 insertions(+), 57 deletions(-) > > diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c > index 6fe9e4e..333b885 100644 > --- a/drivers/gpu/drm/radeon/radeon_uvd.c > +++ b/drivers/gpu/drm/radeon/radeon_uvd.c > @@ -148,30 +148,6 @@ int radeon_uvd_init(struct radeon_device *rdev) > return r; > } > > - r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false); > - if (r) { > - radeon_bo_unref(&rdev->uvd.vcpu_bo); > - dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r); > - return r; > - } > - > - r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, > - &rdev->uvd.gpu_addr); > - if (r) { > - radeon_bo_unreserve(rdev->uvd.vcpu_bo); > - radeon_bo_unref(&rdev->uvd.vcpu_bo); > - dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r); > - return r; > - } > - > - r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr); > - if (r) { > - dev_err(rdev->dev, "(%d) UVD map failed\n", r); > - return r; > - } > - > - radeon_bo_unreserve(rdev->uvd.vcpu_bo); > - > for (i = 0; i < 
RADEON_MAX_UVD_HANDLES; ++i) { > atomic_set(&rdev->uvd.handles[i], 0); > rdev->uvd.filp[i] = NULL; > @@ -183,18 +159,9 @@ int radeon_uvd_init(struct radeon_device *rdev) > > void radeon_uvd_fini(struct radeon_device *rdev) > { > - int r; > - > if (rdev->uvd.vcpu_bo == NULL) > return; > > - r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false); > - if (!r) { > - radeon_bo_kunmap(rdev->uvd.vcpu_bo); > - radeon_bo_unpin(rdev->uvd.vcpu_bo); > - radeon_bo_unreserve(rdev->uvd.vcpu_bo); > - } > - > radeon_bo_unref(&rdev->uvd.vcpu_bo); > > radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]); > @@ -231,6 +198,13 @@ int radeon_uvd_suspend(struct radeon_device *rdev) > } > } > > + r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false); > + if (!r) { > + radeon_bo_kunmap(rdev->uvd.vcpu_bo); > + radeon_bo_unpin(rdev->uvd.vcpu_bo); > + radeon_bo_unreserve(rdev->uvd.vcpu_bo); > + } > + > return 0; > } > > @@ -238,9 +212,26 @@ int radeon_uvd_resume(struct radeon_device *rdev) > { > unsigned size; > void *ptr; > + int r; > > - if (rdev->uvd.vcpu_bo == NULL) > - return -EINVAL; > + r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false); > + if (r) { > + dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r); > + return r; > + } > + r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, > + &rdev->uvd.gpu_addr); > + if (r) { > + radeon_bo_unreserve(rdev->uvd.vcpu_bo); > + dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r); > + return r; > + } > + r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr); > + radeon_bo_unreserve(rdev->uvd.vcpu_bo); > + if (r) { > + dev_err(rdev->dev, "(%d) UVD map failed\n", r); > + return r; > + } > > memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size); > > diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c > index c1c619f..4f7ae3c 100644 > --- a/drivers/gpu/drm/radeon/radeon_vce.c > +++ b/drivers/gpu/drm/radeon/radeon_vce.c > @@ -147,22 +147,6 @@ int radeon_vce_init(struct radeon_device *rdev) > 
return r; > } > > - r = radeon_bo_reserve(rdev->vce.vcpu_bo, false); > - if (r) { > - radeon_bo_unref(&rdev->vce.vcpu_bo); > - dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r); > - return r; > - } > - > - r = radeon_bo_pin(rdev->vce.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, > - &rdev->vce.gpu_addr); > - radeon_bo_unreserve(rdev->vce.vcpu_bo); > - if (r) { > - radeon_bo_unref(&rdev->vce.vcpu_bo); > - dev_err(rdev->dev, "(%d) VCE bo pin failed\n", r); > - return r; > - } > - > for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { > atomic_set(&rdev->vce.handles[i], 0); > rdev->vce.filp[i] = NULL; > @@ -196,7 +180,7 @@ void radeon_vce_fini(struct radeon_device *rdev) > */ > int radeon_vce_suspend(struct radeon_device *rdev) > { > - int i; > + int i, r; > > if (rdev->vce.vcpu_bo == NULL) > return 0; > @@ -209,6 +193,13 @@ int radeon_vce_suspend(struct radeon_device *rdev) > return 0; > > /* TODO: suspending running encoding sessions isn't supported */ > + > + r = radeon_bo_reserve(rdev->vce.vcpu_bo, false); > + if (!r) { > + radeon_bo_unpin(rdev->vce.vcpu_bo); > + radeon_bo_unreserve(rdev->vce.vcpu_bo); > + } > + > return -EINVAL; > } > > @@ -223,15 +214,18 @@ int radeon_vce_resume(struct radeon_device *rdev) > void *cpu_addr; > int r; > > - if (rdev->vce.vcpu_bo == NULL) > - return -EINVAL; > - > r = radeon_bo_reserve(rdev->vce.vcpu_bo, false); > if (r) { > dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r); > return r; > } > - > + r = radeon_bo_pin(rdev->vce.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, > + &rdev->vce.gpu_addr); > + if (r) { > + radeon_bo_unreserve(rdev->vce.vcpu_bo); > + dev_err(rdev->dev, "(%d) VCE bo pin failed\n", r); > + return r; > + } > r = radeon_bo_kmap(rdev->vce.vcpu_bo, &cpu_addr); > if (r) { > radeon_bo_unreserve(rdev->vce.vcpu_bo); > @@ -246,7 +240,6 @@ int radeon_vce_resume(struct radeon_device *rdev) > memcpy(cpu_addr, rdev->vce_fw->data, rdev->vce_fw->size); > > radeon_bo_kunmap(rdev->vce.vcpu_bo); > - > radeon_bo_unreserve(rdev->vce.vcpu_bo); > > 
return r;
On Wed, Mar 16, 2016 at 1:10 PM, Christian König <christian.koenig@amd.com> wrote: > Am 16.03.2016 um 12:56 schrieb jglisse@redhat.com: >> >> From: Jérome Glisse <jglisse@redhat.com> >> >> We need to unpin on suspend and pin on resume. This shuffle code around >> to do just that. >> >> Signed-off-by: Jérôme Glisse <jglisse@redhat.com> >> Cc: Alex Deucher <alexander.deucher@amd.com> >> Cc: Christian König <christian.koenig@amd.com> > > > NAK, that won't work correctly. > > When the firmware is loaded at one location once you can't move it without > power cycling the ASIC. > > That only works when you completely shut down the system, but not if you > just suspend to memory on an APU or do a test cycle without actually power > down. > > We already tried this approach and it took me month to figure out what's > going wrong when the firmware end up at a different location after the > driver started again. > > Regards, > Christian. Well, that's exactly what happens on hibernation. The firmware key check is failing and VCE breaks. I have been trying to make it work, but even putting it at the same location is not enough. This is only an issue with hibernation and module load/unload; suspend is fine as the hw is power-cycled. So this patch will not break anything that already works. It only makes the code follow what other parts of the code do. Cheers, Jérôme
Am 16.03.2016 um 13:35 schrieb Jerome Glisse: > On Wed, Mar 16, 2016 at 1:10 PM, Christian König > <christian.koenig@amd.com> wrote: >> Am 16.03.2016 um 12:56 schrieb jglisse@redhat.com: >>> From: Jérome Glisse <jglisse@redhat.com> >>> >>> We need to unpin on suspend and pin on resume. This shuffle code around >>> to do just that. >>> >>> Signed-off-by: Jérôme Glisse <jglisse@redhat.com> >>> Cc: Alex Deucher <alexander.deucher@amd.com> >>> Cc: Christian König <christian.koenig@amd.com> >> >> NAK, that won't work correctly. >> >> When the firmware is loaded at one location once you can't move it without >> power cycling the ASIC. >> >> That only works when you completely shut down the system, but not if you >> just suspend to memory on an APU or do a test cycle without actually power >> down. >> >> We already tried this approach and it took me month to figure out what's >> going wrong when the firmware end up at a different location after the >> driver started again. >> >> Regards, >> Christian. > Well that exactly what happens on hibernation. The firmware keycheck > is failing and vce breaks. I have been trying to make it works but > even putting it at same location is not enough. This is only an issue > with hibernation, and module load/unload, suspend is fine as hw is > powercycle. So this patch will not break anything that does already > work. It only make code follow what other part of the code does. Take a look at the history of the UVD code. We already had it this way and changed it because that fixed suspend/resume for a lot of people. In addition to that, keeping the firmware at the same location is the documented behavior for how the hardware should be programmed. For amdgpu, Leo even had it working so that UVD sessions survive a suspend/resume cycle, but we haven't dared to enable that for everything again because of all the problems testing it. Regards, Christian.
> > Cheers, > Jérôme > _______________________________________________ > dri-devel mailing list > dri-devel@lists.freedesktop.org > https://lists.freedesktop.org/mailman/listinfo/dri-devel
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 6fe9e4e..333b885 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -148,30 +148,6 @@ int radeon_uvd_init(struct radeon_device *rdev) return r; } - r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false); - if (r) { - radeon_bo_unref(&rdev->uvd.vcpu_bo); - dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r); - return r; - } - - r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, - &rdev->uvd.gpu_addr); - if (r) { - radeon_bo_unreserve(rdev->uvd.vcpu_bo); - radeon_bo_unref(&rdev->uvd.vcpu_bo); - dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r); - return r; - } - - r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr); - if (r) { - dev_err(rdev->dev, "(%d) UVD map failed\n", r); - return r; - } - - radeon_bo_unreserve(rdev->uvd.vcpu_bo); - for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { atomic_set(&rdev->uvd.handles[i], 0); rdev->uvd.filp[i] = NULL; @@ -183,18 +159,9 @@ int radeon_uvd_init(struct radeon_device *rdev) void radeon_uvd_fini(struct radeon_device *rdev) { - int r; - if (rdev->uvd.vcpu_bo == NULL) return; - r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false); - if (!r) { - radeon_bo_kunmap(rdev->uvd.vcpu_bo); - radeon_bo_unpin(rdev->uvd.vcpu_bo); - radeon_bo_unreserve(rdev->uvd.vcpu_bo); - } - radeon_bo_unref(&rdev->uvd.vcpu_bo); radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]); @@ -231,6 +198,13 @@ int radeon_uvd_suspend(struct radeon_device *rdev) } } + r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false); + if (!r) { + radeon_bo_kunmap(rdev->uvd.vcpu_bo); + radeon_bo_unpin(rdev->uvd.vcpu_bo); + radeon_bo_unreserve(rdev->uvd.vcpu_bo); + } + return 0; } @@ -238,9 +212,26 @@ int radeon_uvd_resume(struct radeon_device *rdev) { unsigned size; void *ptr; + int r; - if (rdev->uvd.vcpu_bo == NULL) - return -EINVAL; + r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false); + if (r) { + dev_err(rdev->dev, "(%d) failed to 
reserve UVD bo\n", r); + return r; + } + r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, + &rdev->uvd.gpu_addr); + if (r) { + radeon_bo_unreserve(rdev->uvd.vcpu_bo); + dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r); + return r; + } + r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr); + radeon_bo_unreserve(rdev->uvd.vcpu_bo); + if (r) { + dev_err(rdev->dev, "(%d) UVD map failed\n", r); + return r; + } memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size); diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index c1c619f..4f7ae3c 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c @@ -147,22 +147,6 @@ int radeon_vce_init(struct radeon_device *rdev) return r; } - r = radeon_bo_reserve(rdev->vce.vcpu_bo, false); - if (r) { - radeon_bo_unref(&rdev->vce.vcpu_bo); - dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r); - return r; - } - - r = radeon_bo_pin(rdev->vce.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, - &rdev->vce.gpu_addr); - radeon_bo_unreserve(rdev->vce.vcpu_bo); - if (r) { - radeon_bo_unref(&rdev->vce.vcpu_bo); - dev_err(rdev->dev, "(%d) VCE bo pin failed\n", r); - return r; - } - for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { atomic_set(&rdev->vce.handles[i], 0); rdev->vce.filp[i] = NULL; @@ -196,7 +180,7 @@ void radeon_vce_fini(struct radeon_device *rdev) */ int radeon_vce_suspend(struct radeon_device *rdev) { - int i; + int i, r; if (rdev->vce.vcpu_bo == NULL) return 0; @@ -209,6 +193,13 @@ int radeon_vce_suspend(struct radeon_device *rdev) return 0; /* TODO: suspending running encoding sessions isn't supported */ + + r = radeon_bo_reserve(rdev->vce.vcpu_bo, false); + if (!r) { + radeon_bo_unpin(rdev->vce.vcpu_bo); + radeon_bo_unreserve(rdev->vce.vcpu_bo); + } + return -EINVAL; } @@ -223,15 +214,18 @@ int radeon_vce_resume(struct radeon_device *rdev) void *cpu_addr; int r; - if (rdev->vce.vcpu_bo == NULL) - return -EINVAL; - r = 
radeon_bo_reserve(rdev->vce.vcpu_bo, false); if (r) { dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r); return r; } - + r = radeon_bo_pin(rdev->vce.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, + &rdev->vce.gpu_addr); + if (r) { + radeon_bo_unreserve(rdev->vce.vcpu_bo); + dev_err(rdev->dev, "(%d) VCE bo pin failed\n", r); + return r; + } r = radeon_bo_kmap(rdev->vce.vcpu_bo, &cpu_addr); if (r) { radeon_bo_unreserve(rdev->vce.vcpu_bo); @@ -246,7 +240,6 @@ int radeon_vce_resume(struct radeon_device *rdev) memcpy(cpu_addr, rdev->vce_fw->data, rdev->vce_fw->size); radeon_bo_kunmap(rdev->vce.vcpu_bo); - radeon_bo_unreserve(rdev->vce.vcpu_bo); return r;