@@ -1013,7 +1013,7 @@ static int samsung_dsim_wait_for_hdr_fifo(struct samsung_dsim *dsi)
if (reg & DSIM_SFR_HEADER_EMPTY)
return 0;
- if (!cond_resched())
+ if (!cond_resched_stall())
usleep_range(950, 1050);
} while (--timeout);
@@ -311,7 +311,6 @@ void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects)
list_for_each_entry_safe(block, on, objects, link) {
drm_buddy_free_block(mm, block);
- cond_resched();
}
INIT_LIST_HEAD(objects);
}
@@ -506,7 +506,6 @@ static void drm_gem_check_release_batch(struct folio_batch *fbatch)
{
check_move_unevictable_folios(fbatch);
__folio_batch_release(fbatch);
- cond_resched();
}
/**
@@ -1812,7 +1812,7 @@ static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb)
err = eb_copy_relocations(eb);
have_copy = err == 0;
} else {
- cond_resched();
+ cond_resched_stall();
err = 0;
}
@@ -414,7 +414,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
/* But keep the pointer alive for RCU-protected lookups */
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
- cond_resched();
}
}
@@ -26,7 +26,6 @@ static void check_release_folio_batch(struct folio_batch *fbatch)
{
check_move_unevictable_folios(fbatch);
__folio_batch_release(fbatch);
- cond_resched();
}
void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
@@ -108,7 +107,6 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
gfp_t gfp = noreclaim;
do {
- cond_resched();
folio = shmem_read_folio_gfp(mapping, i, gfp);
if (!IS_ERR(folio))
break;
@@ -1447,8 +1447,6 @@ static int igt_ppgtt_smoke_huge(void *arg)
if (err)
break;
-
- cond_resched();
}
return err;
@@ -1538,8 +1536,6 @@ static int igt_ppgtt_sanity_check(void *arg)
goto out;
}
}
-
- cond_resched();
}
out:
@@ -1738,8 +1734,6 @@ static int igt_ppgtt_mixed(void *arg)
break;
addr += obj->base.size;
-
- cond_resched();
}
i915_gem_context_unlock_engines(ctx);
@@ -221,7 +221,6 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
u32 *cpu;
GEM_BUG_ON(view.partial.size > nreal);
- cond_resched();
vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
if (IS_ERR(vma)) {
@@ -1026,8 +1025,6 @@ static void igt_close_objects(struct drm_i915_private *i915,
i915_gem_object_put(obj);
}
- cond_resched();
-
i915_gem_drain_freed_objects(i915);
}
@@ -1041,8 +1038,6 @@ static void igt_make_evictable(struct list_head *objects)
i915_gem_object_unpin_pages(obj);
i915_gem_object_unlock(obj);
}
-
- cond_resched();
}
static int igt_fill_mappable(struct intel_memory_region *mr,
@@ -315,7 +315,7 @@ void __intel_breadcrumbs_park(struct intel_breadcrumbs *b)
local_irq_disable();
signal_irq_work(&b->irq_work);
local_irq_enable();
- cond_resched();
+ cond_resched_stall();
}
}
@@ -664,7 +664,7 @@ int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
while ((timeout = intel_gt_retire_requests_timeout(gt, timeout,
&remaining_timeout)) > 0) {
- cond_resched();
+ cond_resched_stall();
if (signal_pending(current))
return -EINTR;
}
@@ -906,8 +906,6 @@ intel_context_migrate_copy(struct intel_context *ce,
err = -EINVAL;
break;
}
-
- cond_resched();
} while (1);
out_ce:
@@ -1067,8 +1065,6 @@ intel_context_migrate_clear(struct intel_context *ce,
i915_request_add(rq);
if (err || !it.sg || !sg_dma_len(it.sg))
break;
-
- cond_resched();
} while (1);
out_ce:
@@ -60,8 +60,6 @@ static int wait_for_submit(struct intel_engine_cs *engine,
if (done)
return -ETIME;
-
- cond_resched();
} while (1);
}
@@ -72,7 +70,6 @@ static int wait_for_reset(struct intel_engine_cs *engine,
timeout += jiffies;
do {
- cond_resched();
intel_engine_flush_submission(engine);
if (READ_ONCE(engine->execlists.pending[0]))
@@ -1373,7 +1370,6 @@ static int live_timeslice_queue(void *arg)
/* Wait until we ack the release_queue and start timeslicing */
do {
- cond_resched();
intel_engine_flush_submission(engine);
} while (READ_ONCE(engine->execlists.pending[0]));
@@ -939,8 +939,6 @@ static void active_engine(struct kthread_work *work)
pr_err("[%s] Request put failed: %d!\n", engine->name, err);
break;
}
-
- cond_resched();
}
for (count = 0; count < ARRAY_SIZE(rq); count++) {
@@ -70,8 +70,6 @@ static int wait_for_submit(struct intel_engine_cs *engine,
if (done)
return -ETIME;
-
- cond_resched();
} while (1);
}
@@ -210,8 +210,6 @@ static int intel_context_copy_ccs(struct intel_context *ce,
i915_request_add(rq);
if (err || !it.sg || !sg_dma_len(it.sg))
break;
-
- cond_resched();
} while (1);
out_ce:
@@ -352,7 +352,6 @@ static int bench_sync(void *arg)
__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
mock_timeline_fini(&tl);
- cond_resched();
mock_timeline_init(&tl, 0);
@@ -382,7 +381,6 @@ static int bench_sync(void *arg)
__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
mock_timeline_fini(&tl);
- cond_resched();
mock_timeline_init(&tl, 0);
@@ -405,7 +403,6 @@ static int bench_sync(void *arg)
pr_info("%s: %lu repeated insert/lookups, %lluns/op\n",
__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
mock_timeline_fini(&tl);
- cond_resched();
/* Benchmark searching for a known context id and changing the seqno */
for (last_order = 1, order = 1; order < 32;
@@ -434,7 +431,6 @@ static int bench_sync(void *arg)
__func__, count, order,
(long long)div64_ul(ktime_to_ns(kt), count));
mock_timeline_fini(&tl);
- cond_resched();
}
return 0;
@@ -865,7 +865,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
/* Wait until the previous preallocation is completed */
while (!llist_empty(&ref->preallocated_barriers))
- cond_resched();
+ cond_resched_stall();
/*
* Preallocate a node for each physical engine supporting the target
@@ -267,8 +267,6 @@ i915_gem_evict_something(struct i915_address_space *vm,
if (ret)
return ret;
- cond_resched();
-
flags |= PIN_NONBLOCK;
goto search_again;
@@ -320,8 +320,6 @@ static int compress_page(struct i915_vma_compress *c,
if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
return -EIO;
-
- cond_resched();
} while (zstream->avail_in);
/* Fallback to uncompressed if we increase size? */
@@ -408,7 +406,6 @@ static int compress_page(struct i915_vma_compress *c,
if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
memcpy(ptr, src, PAGE_SIZE);
list_add_tail(&virt_to_page(ptr)->lru, &dst->page_list);
- cond_resched();
return 0;
}
@@ -2325,13 +2322,6 @@ void intel_klog_error_capture(struct intel_gt *gt,
l_count, line++, ptr2);
ptr[pos] = chr;
ptr2 = ptr + pos;
-
- /*
- * If spewing large amounts of data via a serial console,
- * this can be a very slow process. So be friendly and try
- * not to cause 'softlockup on CPU' problems.
- */
- cond_resched();
}
if (ptr2 < (ptr + count))
@@ -2352,8 +2342,12 @@ void intel_klog_error_capture(struct intel_gt *gt,
got--;
}
- /* As above. */
- cond_resched();
+ /*
+ * If spewing large amounts of data via a serial console,
+ * this can be a very slow process. So be friendly and try
+ * not to cause 'softlockup on CPU' problems.
+ */
+ cond_resched_stall();
}
if (got)
@@ -487,7 +487,6 @@ intel_uncore_forcewake_reset(struct intel_uncore *uncore)
}
spin_unlock_irqrestore(&uncore->lock, irqflags);
- cond_resched();
}
drm_WARN_ON(&uncore->i915->drm, active_domains);
@@ -201,7 +201,6 @@ static int igt_ppgtt_alloc(void *arg)
}
ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size);
- cond_resched();
ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
@@ -224,7 +223,6 @@ static int igt_ppgtt_alloc(void *arg)
ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash,
last, size - last);
- cond_resched();
i915_vm_free_pt_stash(&ppgtt->vm, &stash);
}
@@ -438,8 +438,6 @@ static void __igt_breadcrumbs_smoketest(struct kthread_work *work)
num_fences += count;
num_waits++;
-
- cond_resched();
}
atomic_long_add(num_fences, &t->num_fences);
@@ -179,7 +179,6 @@ static int __run_selftests(const char *name,
if (!st->enabled)
continue;
- cond_resched();
if (signal_pending(current))
return -EINTR;
@@ -381,7 +380,6 @@ int __i915_subtests(const char *caller,
int err;
for (; count--; st++) {
- cond_resched();
if (signal_pending(current))
return -EINTR;
@@ -414,7 +412,6 @@ bool __igt_timeout(unsigned long timeout, const char *fmt, ...)
va_list va;
if (!signal_pending(current)) {
- cond_resched();
if (time_before(jiffies, timeout))
return false;
}
@@ -197,8 +197,6 @@ static int igt_vma_create(void *arg)
list_del_init(&ctx->link);
mock_context_close(ctx);
}
-
- cond_resched();
}
end:
@@ -347,8 +345,6 @@ static int igt_vma_pin1(void *arg)
goto out;
}
}
-
- cond_resched();
}
err = 0;
@@ -697,7 +693,6 @@ static int igt_vma_rotate_remap(void *arg)
pr_err("Unbinding returned %i\n", err);
goto out_object;
}
- cond_resched();
}
}
}
@@ -858,8 +853,6 @@ static int igt_vma_partial(void *arg)
pr_err("Unbinding returned %i\n", err);
goto out_object;
}
-
- cond_resched();
}
}
@@ -1085,8 +1078,6 @@ static int igt_vma_remapped_gtt(void *arg)
}
}
i915_vma_unpin_iomap(vma);
-
- cond_resched();
}
}
@@ -22,8 +22,6 @@ int igt_flush_test(struct drm_i915_private *i915)
if (intel_gt_is_wedged(gt))
ret = -EIO;
- cond_resched();
-
if (intel_gt_wait_for_idle(gt, HZ * 3) == -ETIME) {
pr_err("%pS timed out, cancelling all further testing.\n",
__builtin_return_address(0));
@@ -46,8 +46,6 @@ static void close_objects(struct intel_memory_region *mem,
i915_gem_object_put(obj);
}
- cond_resched();
-
i915_gem_drain_freed_objects(i915);
}
@@ -1290,8 +1288,6 @@ static int _perf_memcpy(struct intel_memory_region *src_mr,
div64_u64(mul_u32_u32(4 * size,
1000 * 1000 * 1000),
t[1] + 2 * t[2] + t[3]) >> 20);
-
- cond_resched();
}
i915_gem_object_unpin_map(dst);
@@ -29,7 +29,6 @@ static bool __timeout(unsigned long timeout, const char *fmt, ...)
va_list va;
if (!signal_pending(current)) {
- cond_resched();
if (time_before(jiffies, timeout))
return false;
}
@@ -485,8 +484,6 @@ static void drm_test_buddy_alloc_smoke(struct kunit *test)
if (err || timeout)
break;
-
- cond_resched();
}
kfree(order);
@@ -681,8 +678,6 @@ static void drm_test_buddy_alloc_range(struct kunit *test)
rem -= size;
if (!rem)
break;
-
- cond_resched();
}
drm_buddy_free_list(&mm, &blocks);
@@ -474,8 +474,6 @@ static void drm_test_mm_reserve(struct kunit *test)
KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size - 1));
KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size));
KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size + 1));
-
- cond_resched();
}
}
@@ -645,8 +643,6 @@ static int __drm_test_mm_insert(struct kunit *test, unsigned int count, u64 size
drm_mm_for_each_node_safe(node, next, &mm)
drm_mm_remove_node(node);
DRM_MM_BUG_ON(!drm_mm_clean(&mm));
-
- cond_resched();
}
ret = 0;
@@ -671,8 +667,6 @@ static void drm_test_mm_insert(struct kunit *test)
KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size - 1, false));
KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size, false));
KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size + 1, false));
-
- cond_resched();
}
}
@@ -693,8 +687,6 @@ static void drm_test_mm_replace(struct kunit *test)
KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size - 1, true));
KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size, true));
KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size + 1, true));
-
- cond_resched();
}
}
@@ -882,8 +874,6 @@ static int __drm_test_mm_insert_range(struct kunit *test, unsigned int count, u6
drm_mm_for_each_node_safe(node, next, &mm)
drm_mm_remove_node(node);
DRM_MM_BUG_ON(!drm_mm_clean(&mm));
-
- cond_resched();
}
ret = 0;
@@ -942,8 +932,6 @@ static void drm_test_mm_insert_range(struct kunit *test)
max / 2, max));
KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size,
max / 4 + 1, 3 * max / 4 - 1));
-
- cond_resched();
}
}
@@ -1086,8 +1074,6 @@ static void drm_test_mm_align(struct kunit *test)
drm_mm_for_each_node_safe(node, next, &mm)
drm_mm_remove_node(node);
DRM_MM_BUG_ON(!drm_mm_clean(&mm));
-
- cond_resched();
}
out:
@@ -1122,8 +1108,6 @@ static void drm_test_mm_align_pot(struct kunit *test, int max)
KUNIT_FAIL(test, "insert failed with alignment=%llx [%d]", align, bit);
goto out;
}
-
- cond_resched();
}
out:
@@ -1465,8 +1449,6 @@ static void drm_test_mm_evict(struct kunit *test)
goto out;
}
}
-
- cond_resched();
}
out:
@@ -1547,8 +1529,6 @@ static void drm_test_mm_evict_range(struct kunit *test)
goto out;
}
}
-
- cond_resched();
}
out:
@@ -1658,7 +1638,6 @@ static void drm_test_mm_topdown(struct kunit *test)
drm_mm_for_each_node_safe(node, next, &mm)
drm_mm_remove_node(node);
DRM_MM_BUG_ON(!drm_mm_clean(&mm));
- cond_resched();
}
out:
@@ -1750,7 +1729,6 @@ static void drm_test_mm_bottomup(struct kunit *test)
drm_mm_for_each_node_safe(node, next, &mm)
drm_mm_remove_node(node);
DRM_MM_BUG_ON(!drm_mm_clean(&mm));
- cond_resched();
}
out:
@@ -1968,8 +1946,6 @@ static void drm_test_mm_color(struct kunit *test)
drm_mm_remove_node(node);
kfree(node);
}
-
- cond_resched();
}
out:
@@ -2038,7 +2014,6 @@ static int evict_color(struct kunit *test, struct drm_mm *mm, u64 range_start,
}
}
- cond_resched();
return 0;
}
@@ -2110,8 +2085,6 @@ static void drm_test_mm_color_evict(struct kunit *test)
goto out;
}
}
-
- cond_resched();
}
out:
@@ -2196,8 +2169,6 @@ static void drm_test_mm_color_evict_range(struct kunit *test)
goto out;
}
}
-
- cond_resched();
}
out:
There are broadly three sets of uses of cond_resched(): 1. Calls to cond_resched() out of the goodness of our heart, otherwise known as avoiding lockup splats. 2. Open coded variants of cond_resched_lock() which call cond_resched(). 3. Retry or error handling loops, where cond_resched() is used as a quick alternative to spinning in a tight-loop. When running under a full preemption model, the cond_resched() reduces to a NOP (not even a barrier) so removing it obviously cannot matter. But considering only voluntary preemption models (for say code that has been mostly tested under those), for set-1 and set-2 the scheduler can now preempt kernel tasks running beyond their time quanta anywhere they are preemptible() [1]. Which removes any need for these explicitly placed scheduling points. The cond_resched() calls in set-3 are a little more difficult. To start with, given its NOP character under full preemption, it never actually saved us from a tight loop. With voluntary preemption, it's not a NOP, but it might as well be -- for most workloads the scheduler does not have an interminable supply of runnable tasks on the runqueue. So, cond_resched() is useful to not get softlockup splats, but not terribly good for error handling. Ideally, these should be replaced with some kind of timed or event wait. For now we use cond_resched_stall(), which tries to schedule if possible, and executes a cpu_relax() if not. Most of the uses here are in set-1 (some right after we give up a lock or enable bottom-halves, causing an explicit preemption check.) There are a few cases from set-3. Replace them with cond_resched_stall(). 
[1] https://lore.kernel.org/lkml/20231107215742.363031-1-ankur.a.arora@oracle.com/ Cc: Inki Dae <inki.dae@samsung.com> Cc: Jagan Teki <jagan@amarulasolutions.com> Cc: Marek Szyprowski <m.szyprowski@samsung.com> Cc: Andrzej Hajda <andrzej.hajda@intel.com> Cc: Neil Armstrong <neil.armstrong@linaro.org> Cc: Robert Foss <rfoss@kernel.org> Cc: David Airlie <airlied@gmail.com> Cc: Daniel Vetter <daniel@ffwll.ch> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Cc: Maxime Ripard <mripard@kernel.org> Cc: Thomas Zimmermann <tzimmermann@suse.de> Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com> --- drivers/gpu/drm/bridge/samsung-dsim.c | 2 +- drivers/gpu/drm/drm_buddy.c | 1 - drivers/gpu/drm/drm_gem.c | 1 - .../gpu/drm/i915/gem/i915_gem_execbuffer.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_object.c | 1 - drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 2 -- .../gpu/drm/i915/gem/selftests/huge_pages.c | 6 ---- .../drm/i915/gem/selftests/i915_gem_mman.c | 5 ---- drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 2 +- drivers/gpu/drm/i915/gt/intel_gt.c | 2 +- drivers/gpu/drm/i915/gt/intel_migrate.c | 4 --- drivers/gpu/drm/i915/gt/selftest_execlists.c | 4 --- drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 2 -- drivers/gpu/drm/i915/gt/selftest_lrc.c | 2 -- drivers/gpu/drm/i915/gt/selftest_migrate.c | 2 -- drivers/gpu/drm/i915/gt/selftest_timeline.c | 4 --- drivers/gpu/drm/i915/i915_active.c | 2 +- drivers/gpu/drm/i915/i915_gem_evict.c | 2 -- drivers/gpu/drm/i915/i915_gpu_error.c | 18 ++++-------- drivers/gpu/drm/i915/intel_uncore.c | 1 - drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 2 -- drivers/gpu/drm/i915/selftests/i915_request.c | 2 -- .../gpu/drm/i915/selftests/i915_selftest.c | 3 -- drivers/gpu/drm/i915/selftests/i915_vma.c | 9 ------ .../gpu/drm/i915/selftests/igt_flush_test.c | 2 -- .../drm/i915/selftests/intel_memory_region.c | 4 --- drivers/gpu/drm/tests/drm_buddy_test.c | 5 ---- drivers/gpu/drm/tests/drm_mm_test.c | 29 ------------------- 28 files changed, 11 
insertions(+), 110 deletions(-)