@@ -1024,7 +1024,7 @@ static void cleanup_status_page(struct intel_engine_cs *engine)
/* Prevent writes into HWSP after returning the page to the system */
intel_engine_set_hwsp_writemask(engine, ~0u);
- vma = fetch_and_zero(&engine->status_page.vma);
+ vma = xchg(&engine->status_page.vma, NULL);
if (!vma)
return;
@@ -229,7 +229,7 @@ static void heartbeat(struct work_struct *wrk)
mutex_unlock(&ce->timeline->mutex);
out:
if (!engine->i915->params.enable_hangcheck || !next_heartbeat(engine))
- i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
+ i915_request_put(xchg(&engine->heartbeat.systole, NULL));
intel_engine_pm_put(engine);
}
@@ -244,7 +244,7 @@ void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine)
void intel_engine_park_heartbeat(struct intel_engine_cs *engine)
{
if (cancel_delayed_work(&engine->heartbeat.work))
- i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
+ i915_request_put(xchg(&engine->heartbeat.systole, NULL));
}
void intel_gt_unpark_heartbeats(struct intel_gt *gt)
@@ -3197,7 +3197,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
RB_CLEAR_NODE(rb);
spin_lock(&ve->base.sched_engine->lock);
- rq = fetch_and_zero(&ve->request);
+ rq = xchg(&ve->request, NULL);
if (rq) {
if (i915_request_mark_eio(rq)) {
rq->engine = engine;
@@ -3602,7 +3602,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
spin_lock_irq(&ve->base.sched_engine->lock);
- old = fetch_and_zero(&ve->request);
+ old = xchg(&ve->request, NULL);
if (old) {
GEM_BUG_ON(!__i915_request_is_complete(old));
__i915_request_submit(old);
@@ -684,7 +684,7 @@ static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
struct i915_ppgtt *ppgtt;
- ppgtt = fetch_and_zero(&ggtt->alias);
+ ppgtt = xchg(&ggtt->alias, NULL);
if (!ppgtt)
return;
@@ -1238,7 +1238,7 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
was_bound);
if (obj) { /* only used during resume => exclusive access */
- write_domain_objs |= fetch_and_zero(&obj->write_domain);
+ write_domain_objs |= xchg(&obj->write_domain, 0);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
}
}
@@ -70,7 +70,7 @@ gsc_ext_om_alloc(struct intel_gsc *gsc, struct intel_gsc_intf *intf, size_t size
static void gsc_ext_om_destroy(struct intel_gsc_intf *intf)
{
- struct drm_i915_gem_object *obj = fetch_and_zero(&intf->gem_obj);
+ struct drm_i915_gem_object *obj = xchg(&intf->gem_obj, NULL);
if (!obj)
return;
@@ -757,7 +757,7 @@ int intel_gt_init(struct intel_gt *gt)
intel_uc_fini(&gt->uc);
err_engines:
intel_engines_release(gt);
- i915_vm_put(fetch_and_zero(&gt->vm));
+ i915_vm_put(xchg(&gt->vm, NULL));
err_pm:
intel_gt_pm_fini(gt);
intel_gt_fini_scratch(gt);
@@ -806,7 +806,7 @@ void intel_gt_driver_release(struct intel_gt *gt)
{
struct i915_address_space *vm;
- vm = fetch_and_zero(&gt->vm);
+ vm = xchg(&gt->vm, NULL);
if (vm) /* FIXME being called twice on error paths :( */
i915_vm_put(vm);
@@ -123,7 +123,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
static int __gt_park(struct intel_wakeref *wf)
{
struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
- intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
+ intel_wakeref_t wakeref = xchg(&gt->awake, 0);
struct drm_i915_private *i915 = gt->i915;
GT_TRACE(gt, "\n");
@@ -1144,7 +1144,7 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
static struct intel_timeline *
pinned_timeline(struct intel_context *ce, struct intel_engine_cs *engine)
{
- struct intel_timeline *tl = fetch_and_zero(&ce->timeline);
+ struct intel_timeline *tl = xchg(&ce->timeline, NULL);
return intel_timeline_create_from_engine(engine, page_unmask_bits(tl));
}
@@ -1261,8 +1261,8 @@ void lrc_fini(struct intel_context *ce)
if (!ce->state)
return;
- intel_ring_put(fetch_and_zero(&ce->ring));
- i915_vma_put(fetch_and_zero(&ce->state));
+ intel_ring_put(xchg(&ce->ring, NULL));
+ i915_vma_put(xchg(&ce->state, NULL));
}
void lrc_destroy(struct kref *kref)
@@ -1116,7 +1116,7 @@ void intel_migrate_fini(struct intel_migrate *m)
{
struct intel_context *ce;
- ce = fetch_and_zero(&m->context);
+ ce = xchg(&m->context, NULL);
if (!ce)
return;
@@ -702,7 +702,7 @@ void intel_rc6_fini(struct intel_rc6 *rc6)
intel_rc6_disable(rc6);
- pctx = fetch_and_zero(&rc6->pctx);
+ pctx = xchg(&rc6->pctx, NULL);
if (pctx)
i915_gem_object_put(pctx);
@@ -1831,7 +1831,7 @@ static void rps_work(struct work_struct *work)
u32 pm_iir = 0;
spin_lock_irq(gt->irq_lock);
- pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events;
+ pm_iir = xchg(&rps->pm_iir, 0) & rps->pm_events;
client_boost = atomic_read(&rps->num_waiters);
spin_unlock_irq(gt->irq_lock);
@@ -171,7 +171,7 @@ static int live_context_size(void *arg)
* active state is sufficient, we are only checking that we
* don't use more than we planned.
*/
- saved = fetch_and_zero(&engine->default_state);
+ saved = xchg(&engine->default_state, NULL);
/* Overlaps with the execlists redzone */
engine->context_size += I915_GTT_PAGE_SIZE;
@@ -269,7 +269,7 @@ static int live_ctx_switch_wa(void *arg)
if (IS_GRAPHICS_VER(gt->i915, 4, 5))
continue; /* MI_STORE_DWORD is privileged! */
- saved_wa = fetch_and_zero(&engine->wa_ctx.vma);
+ saved_wa = xchg(&engine->wa_ctx.vma, NULL);
intel_engine_pm_get(engine);
err = __live_ctx_switch_wa(engine);
@@ -892,7 +892,7 @@ static int create_watcher(struct hwsp_watcher *w,
static int check_watcher(struct hwsp_watcher *w, const char *name,
bool (*op)(u32 hwsp, u32 seqno))
{
- struct i915_request *rq = fetch_and_zero(&w->rq);
+ struct i915_request *rq = xchg(&w->rq, NULL);
u32 offset, end;
int err;
@@ -166,7 +166,7 @@ static void __uc_capture_load_err_log(struct intel_uc *uc)
static void __uc_free_load_err_log(struct intel_uc *uc)
{
- struct drm_i915_gem_object *log = fetch_and_zero(&uc->load_err_log);
+ struct drm_i915_gem_object *log = xchg(&uc->load_err_log, NULL);
if (log)
i915_gem_object_put(log);
@@ -1055,7 +1055,7 @@ void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
if (!intel_uc_fw_is_available(uc_fw))
return;
- i915_gem_object_put(fetch_and_zero(&uc_fw->obj));
+ i915_gem_object_put(xchg(&uc_fw->obj, NULL));
intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_SELECTED);
}
The fetch_and_zero() macro will be dropped.

Signed-off-by: Andrzej Hajda <andrzej.hajda@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_engine_cs.c            | 2 +-
 drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c     | 4 ++--
 drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 4 ++--
 drivers/gpu/drm/i915/gt/intel_ggtt.c                 | 4 ++--
 drivers/gpu/drm/i915/gt/intel_gsc.c                  | 2 +-
 drivers/gpu/drm/i915/gt/intel_gt.c                   | 4 ++--
 drivers/gpu/drm/i915/gt/intel_gt_pm.c                | 2 +-
 drivers/gpu/drm/i915/gt/intel_lrc.c                  | 6 +++---
 drivers/gpu/drm/i915/gt/intel_migrate.c              | 2 +-
 drivers/gpu/drm/i915/gt/intel_rc6.c                  | 2 +-
 drivers/gpu/drm/i915/gt/intel_rps.c                  | 2 +-
 drivers/gpu/drm/i915/gt/selftest_context.c           | 2 +-
 drivers/gpu/drm/i915/gt/selftest_ring_submission.c   | 2 +-
 drivers/gpu/drm/i915/gt/selftest_timeline.c          | 2 +-
 drivers/gpu/drm/i915/gt/uc/intel_uc.c                | 2 +-
 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c             | 2 +-
 16 files changed, 22 insertions(+), 22 deletions(-)
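
For reference (not part of the patch itself), below is a minimal userspace sketch
contrasting the two helpers. The fetch_and_zero() definition mirrors the macro in
drivers/gpu/drm/i915/i915_utils.h, and __atomic_exchange_n() is used here as a
stand-in for the kernel's arch-specific xchg(); struct request and the slot
variable are made up purely for illustration.

/* fetch_and_zero() is a plain load followed by a store, with no atomicity
 * guarantee; xchg() performs the read-and-clear as a single atomic operation. */
#include <stdio.h>
#include <stddef.h>

/* Mirrors the i915 helper: read *ptr, write 0/NULL back, return the old value. */
#define fetch_and_zero(ptr) ({			\
	typeof(*(ptr)) __T = *(ptr);		\
	*(ptr) = (typeof(*(ptr)))0;		\
	__T;					\
})

/* Userspace stand-in for the kernel's xchg(): atomic exchange of *ptr with val. */
#define xchg(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST)

struct request { int id; };

int main(void)
{
	struct request r = { .id = 1 };
	struct request *slot = &r;

	/* Old style: non-atomic fetch-and-clear of the pointer slot. */
	struct request *a = fetch_and_zero(&slot);

	slot = &r;

	/* New style: atomic exchange with NULL; same single-threaded result,
	 * but the read-and-clear cannot be torn by a concurrent writer. */
	struct request *b = xchg(&slot, NULL);

	printf("a=%d b=%d slot=%p\n", a->id, b->id, (void *)slot);
	return 0;
}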