Message ID | 1341247519-10563-1-git-send-email-j.glisse@gmail.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
On 02.07.2012 18:45, j.glisse@gmail.com wrote: > From: Jerome Glisse <jglisse@redhat.com> > > GPU reset need to be exclusive, one happening at a time. For this > add a rw semaphore so that any path that trigger GPU activities > have to take the semaphore as a reader thus allowing concurrency. > > The GPU reset path take the semaphore as a writer ensuring that > no concurrent reset take place. > > v2: init rw semaphore > > Signed-off-by: Jerome Glisse <jglisse@redhat.com> Maybe we don't want to call it "exclusive_lock", cause a lock is always somehow exclusive. Anyway it's: Reviewed-by: Christian König <christian.koenig@amd.com> > --- > drivers/gpu/drm/radeon/radeon.h | 1 + > drivers/gpu/drm/radeon/radeon_cs.c | 5 +++++ > drivers/gpu/drm/radeon/radeon_device.c | 3 +++ > drivers/gpu/drm/radeon/radeon_gem.c | 8 ++++++++ > 4 files changed, 17 insertions(+) > > diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h > index 77b4519b..29d6986 100644 > --- a/drivers/gpu/drm/radeon/radeon.h > +++ b/drivers/gpu/drm/radeon/radeon.h > @@ -1446,6 +1446,7 @@ struct radeon_device { > struct device *dev; > struct drm_device *ddev; > struct pci_dev *pdev; > + struct rw_semaphore exclusive_lock; > /* ASIC */ > union radeon_asic_config config; > enum radeon_family family; > diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c > index f1b7527..7ee6491 100644 > --- a/drivers/gpu/drm/radeon/radeon_cs.c > +++ b/drivers/gpu/drm/radeon/radeon_cs.c > @@ -499,7 +499,9 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) > struct radeon_cs_parser parser; > int r; > > + down_read(&rdev->exclusive_lock); > if (!rdev->accel_working) { > + up_read(&rdev->exclusive_lock); > return -EBUSY; > } > /* initialize parser */ > @@ -512,6 +514,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) > if (r) { > DRM_ERROR("Failed to initialize parser !\n"); > radeon_cs_parser_fini(&parser, r); > + 
up_read(&rdev->exclusive_lock); > r = radeon_cs_handle_lockup(rdev, r); > return r; > } > @@ -520,6 +523,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) > if (r != -ERESTARTSYS) > DRM_ERROR("Failed to parse relocation %d!\n", r); > radeon_cs_parser_fini(&parser, r); > + up_read(&rdev->exclusive_lock); > r = radeon_cs_handle_lockup(rdev, r); > return r; > } > @@ -533,6 +537,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) > } > out: > radeon_cs_parser_fini(&parser, r); > + up_read(&rdev->exclusive_lock); > r = radeon_cs_handle_lockup(rdev, r); > return r; > } > diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c > index f654ba8..254fdb4 100644 > --- a/drivers/gpu/drm/radeon/radeon_device.c > +++ b/drivers/gpu/drm/radeon/radeon_device.c > @@ -734,6 +734,7 @@ int radeon_device_init(struct radeon_device *rdev, > mutex_init(&rdev->gem.mutex); > mutex_init(&rdev->pm.mutex); > init_rwsem(&rdev->pm.mclk_lock); > + init_rwsem(&rdev->exclusive_lock); > init_waitqueue_head(&rdev->irq.vblank_queue); > init_waitqueue_head(&rdev->irq.idle_queue); > r = radeon_gem_init(rdev); > @@ -988,6 +989,7 @@ int radeon_gpu_reset(struct radeon_device *rdev) > int r; > int resched; > > + down_write(&rdev->exclusive_lock); > radeon_save_bios_scratch_regs(rdev); > /* block TTM */ > resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); > @@ -1007,6 +1009,7 @@ int radeon_gpu_reset(struct radeon_device *rdev) > dev_info(rdev->dev, "GPU reset failed\n"); > } > > + up_write(&rdev->exclusive_lock); > return r; > } > > diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c > index d9b0809..b0be9c4 100644 > --- a/drivers/gpu/drm/radeon/radeon_gem.c > +++ b/drivers/gpu/drm/radeon/radeon_gem.c > @@ -215,12 +215,14 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, > uint32_t handle; > int r; > > + down_read(&rdev->exclusive_lock); > /* create 
a gem object to contain this object in */ > args->size = roundup(args->size, PAGE_SIZE); > r = radeon_gem_object_create(rdev, args->size, args->alignment, > args->initial_domain, false, > false, &gobj); > if (r) { > + up_read(&rdev->exclusive_lock); > r = radeon_gem_handle_lockup(rdev, r); > return r; > } > @@ -228,10 +230,12 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, > /* drop reference from allocate - handle holds it now */ > drm_gem_object_unreference_unlocked(gobj); > if (r) { > + up_read(&rdev->exclusive_lock); > r = radeon_gem_handle_lockup(rdev, r); > return r; > } > args->handle = handle; > + up_read(&rdev->exclusive_lock); > return 0; > } > > @@ -240,6 +244,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, > { > /* transition the BO to a domain - > * just validate the BO into a certain domain */ > + struct radeon_device *rdev = dev->dev_private; > struct drm_radeon_gem_set_domain *args = data; > struct drm_gem_object *gobj; > struct radeon_bo *robj; > @@ -247,10 +252,12 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, > > /* for now if someone requests domain CPU - > * just make sure the buffer is finished with */ > + down_read(&rdev->exclusive_lock); > > /* just do a BO wait for now */ > gobj = drm_gem_object_lookup(dev, filp, args->handle); > if (gobj == NULL) { > + up_read(&rdev->exclusive_lock); > return -ENOENT; > } > robj = gem_to_radeon_bo(gobj); > @@ -258,6 +265,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, > r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); > > drm_gem_object_unreference_unlocked(gobj); > + up_read(&rdev->exclusive_lock); > r = radeon_gem_handle_lockup(robj->rdev, r); > return r; > }
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 77b4519b..29d6986 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1446,6 +1446,7 @@ struct radeon_device { struct device *dev; struct drm_device *ddev; struct pci_dev *pdev; + struct rw_semaphore exclusive_lock; /* ASIC */ union radeon_asic_config config; enum radeon_family family; diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index f1b7527..7ee6491 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -499,7 +499,9 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) struct radeon_cs_parser parser; int r; + down_read(&rdev->exclusive_lock); if (!rdev->accel_working) { + up_read(&rdev->exclusive_lock); return -EBUSY; } /* initialize parser */ @@ -512,6 +514,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) if (r) { DRM_ERROR("Failed to initialize parser !\n"); radeon_cs_parser_fini(&parser, r); + up_read(&rdev->exclusive_lock); r = radeon_cs_handle_lockup(rdev, r); return r; } @@ -520,6 +523,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) if (r != -ERESTARTSYS) DRM_ERROR("Failed to parse relocation %d!\n", r); radeon_cs_parser_fini(&parser, r); + up_read(&rdev->exclusive_lock); r = radeon_cs_handle_lockup(rdev, r); return r; } @@ -533,6 +537,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) } out: radeon_cs_parser_fini(&parser, r); + up_read(&rdev->exclusive_lock); r = radeon_cs_handle_lockup(rdev, r); return r; } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index f654ba8..254fdb4 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -734,6 +734,7 @@ int radeon_device_init(struct radeon_device *rdev, mutex_init(&rdev->gem.mutex); 
mutex_init(&rdev->pm.mutex); init_rwsem(&rdev->pm.mclk_lock); + init_rwsem(&rdev->exclusive_lock); init_waitqueue_head(&rdev->irq.vblank_queue); init_waitqueue_head(&rdev->irq.idle_queue); r = radeon_gem_init(rdev); @@ -988,6 +989,7 @@ int radeon_gpu_reset(struct radeon_device *rdev) int r; int resched; + down_write(&rdev->exclusive_lock); radeon_save_bios_scratch_regs(rdev); /* block TTM */ resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); @@ -1007,6 +1009,7 @@ int radeon_gpu_reset(struct radeon_device *rdev) dev_info(rdev->dev, "GPU reset failed\n"); } + up_write(&rdev->exclusive_lock); return r; } diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index d9b0809..b0be9c4 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -215,12 +215,14 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, uint32_t handle; int r; + down_read(&rdev->exclusive_lock); /* create a gem object to contain this object in */ args->size = roundup(args->size, PAGE_SIZE); r = radeon_gem_object_create(rdev, args->size, args->alignment, args->initial_domain, false, false, &gobj); if (r) { + up_read(&rdev->exclusive_lock); r = radeon_gem_handle_lockup(rdev, r); return r; } @@ -228,10 +230,12 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, /* drop reference from allocate - handle holds it now */ drm_gem_object_unreference_unlocked(gobj); if (r) { + up_read(&rdev->exclusive_lock); r = radeon_gem_handle_lockup(rdev, r); return r; } args->handle = handle; + up_read(&rdev->exclusive_lock); return 0; } @@ -240,6 +244,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, { /* transition the BO to a domain - * just validate the BO into a certain domain */ + struct radeon_device *rdev = dev->dev_private; struct drm_radeon_gem_set_domain *args = data; struct drm_gem_object *gobj; struct radeon_bo *robj; @@ -247,10 +252,12 @@ int radeon_gem_set_domain_ioctl(struct 
drm_device *dev, void *data, /* for now if someone requests domain CPU - * just make sure the buffer is finished with */ + down_read(&rdev->exclusive_lock); /* just do a BO wait for now */ gobj = drm_gem_object_lookup(dev, filp, args->handle); if (gobj == NULL) { + up_read(&rdev->exclusive_lock); return -ENOENT; } robj = gem_to_radeon_bo(gobj); @@ -258,6 +265,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); drm_gem_object_unreference_unlocked(gobj); + up_read(&rdev->exclusive_lock); r = radeon_gem_handle_lockup(robj->rdev, r); return r; }