@@ -264,6 +264,117 @@ static void test_co_mutex_lockable(void)
g_assert(QEMU_MAKE_LOCKABLE(null_pointer) == NULL);
}
+static bool c1_done; /* set when rwlock_c1 runs to completion */
+static bool c2_done; /* set when rwlock_c2 runs to completion */
+static bool c3_done; /* set when rwlock_c3 runs to completion */
+static bool c4_done; /* set when rwlock_c4 runs to completion */
+
+static void coroutine_fn rwlock_c1(void *opaque) /* holds rdlock while c2-c4 queue */
+{
+ CoRwlock *l = opaque;
+
+ qemu_co_rwlock_rdlock(l); /* take the lock as a reader */
+ qemu_coroutine_yield(); /* back to the test so c2-c4 can queue on l */
+
+ qemu_co_rwlock_unlock(l); /* per the diagram above, c2 should be dequeued */
+ qemu_coroutine_yield(); /* test asserts c2..c4 completed before re-entering */
+
+ c1_done = true;
+}
+
+static void coroutine_fn rwlock_c2(void *opaque) /* writer that downgrades */
+{
+ CoRwlock *l = opaque;
+
+ qemu_co_rwlock_wrlock(l); /* queues behind c1's read lock */
+
+ qemu_co_rwlock_downgrade(l); /* write lock -> read lock without releasing */
+ qemu_co_rwlock_unlock(l); /* should hand over to c4 (see diagram above) */
+ c2_done = true;
+}
+
+static void coroutine_fn rwlock_c3(void *opaque) /* reader hopeful */
+{
+ CoRwlock *l = opaque;
+
+ qemu_co_rwlock_rdlock(l); /* queues behind c2's pending write lock */
+
+ qemu_co_rwlock_unlock(l);
+ c3_done = true; /* the currently-failing assertion checks this flag */
+}
+
+static void coroutine_fn rwlock_c4(void *opaque) /* writer hopeful */
+{
+ CoRwlock *l = opaque;
+
+ qemu_co_rwlock_wrlock(l); /* queues behind c3's pending read lock */
+
+ qemu_co_rwlock_unlock(l);
+ c4_done = true;
+}
+
+/*
+ * Check that downgrading a reader-writer lock does not cause a hang.
+ *
+ * Four coroutines are used to produce a situation where there are
+ * both reader and writer hopefuls waiting to acquire an rwlock that
+ * is held by a reader.
+ *
+ * The correct sequence of operations we aim to provoke can be
+ * represented as:
+ *
+ * | c1 | c2 | c3 | c4 |
+ * |--------+------------+------------+------------|
+ * | rdlock | | | |
+ * | yield | | | |
+ * | | wrlock | | |
+ * | | <queued> | | |
+ * | | | rdlock | |
+ * | | | <queued> | |
+ * | | | | wrlock |
+ * | | | | <queued> |
+ * | unlock | | | |
+ * | yield | | | |
+ * | | <dequeued> | | |
+ * | | downgrade | | |
+ * | | ... | | |
+ * | | unlock | | |
+ * | | | | <dequeued> |
+ * | | | | unlock |
+ * | | | <dequeued> | |
+ * | | | unlock | |
+ *
+ * Significantly, when c2 unlocks the downgraded lock, it should be c4
+ * that is run rather than c3.
+ */
+static void test_co_rwlock_downgrade(void) /* see the sequence diagram above */
+{
+ CoRwlock l;
+ Coroutine *c1, *c2, *c3, *c4;
+
+ qemu_co_rwlock_init(&l);
+
+ c1 = qemu_coroutine_create(rwlock_c1, &l);
+ c2 = qemu_coroutine_create(rwlock_c2, &l);
+ c3 = qemu_coroutine_create(rwlock_c3, &l);
+ c4 = qemu_coroutine_create(rwlock_c4, &l);
+
+ qemu_coroutine_enter(c1); /* c1 takes the rdlock and yields */
+ qemu_coroutine_enter(c2); /* c2 queues for the wrlock */
+ qemu_coroutine_enter(c3); /* c3 queues for the rdlock */
+ qemu_coroutine_enter(c4); /* c4 queues for the wrlock */
+
+ qemu_coroutine_enter(c1); /* c1 unlocks: c2-c4 should all run to completion */
+
+ g_assert(c2_done);
+ g_assert(c3_done); /* fails before the fix: c3 is never rescheduled */
+ g_assert(c4_done);
+
+ qemu_coroutine_enter(c1); /* let c1 run past its final yield and finish */
+
+ g_assert(c1_done);
+}
+
/*
* Check that creation, enter, and return work
*/
@@ -501,6 +612,7 @@ int main(int argc, char **argv)
g_test_add_func("/basic/order", test_order);
g_test_add_func("/locking/co-mutex", test_co_mutex);
g_test_add_func("/locking/co-mutex/lockable", test_co_mutex_lockable);
+ g_test_add_func("/locking/co-rwlock/downgrade", test_co_rwlock_downgrade);
if (g_test_perf()) {
g_test_add_func("/perf/lifecycle", perf_lifecycle);
g_test_add_func("/perf/nesting", perf_nesting);
Test that downgrading an rwlock does not result in a failure to schedule coroutines queued on the rwlock. The diagram associated with test_co_rwlock_downgrade() describes the intended behaviour, but what is observed currently corresponds to: | c1 | c2 | c3 | c4 | |--------+------------+------------+----------| | rdlock | | | | | yield | | | | | | wrlock | | | | | <queued> | | | | | | rdlock | | | | | <queued> | | | | | | wrlock | | | | | <queued> | | unlock | | | | | yield | | | | | | <dequeued> | | | | | downgrade | | | | | ... | | | | | unlock | | | | | | <dequeued> | | | | | <queued> | | In the test, this results in a failure... ERROR:../tests/test-coroutine.c:369:test_co_rwlock_downgrade: assertion failed: (c3_done) Bail out! ERROR:../tests/test-coroutine.c:369:test_co_rwlock_downgrade: assertion failed: (c3_done) ...as a result of the c3 coroutine failing to run to completion. Signed-off-by: David Edmondson <david.edmondson@oracle.com> --- tests/test-coroutine.c | 112 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 112 insertions(+)