@@ -602,6 +602,10 @@ static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *
void
remote_event_signal(struct remote_event *event)
{
+ /*
+ * Ensure that all writes to shared data structures have completed
+ * before signalling the peer.
+ */
wmb();
event->fired = 1;
@@ -528,6 +528,7 @@ remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
return 0;
}
event->armed = 0;
+	/* Ensure that the peer sees that we are no longer waiting (armed == 0). */
wmb();
}
@@ -650,6 +651,7 @@ request_poll(struct vchiq_state *state, struct vchiq_service *service,
skip_service:
state->poll_needed = 1;
+ /* Ensure the slot handler thread sees the poll_needed flag. */
wmb();
/* ... and ensure the slot handler runs. */
@@ -1156,6 +1158,7 @@ queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
remote_event_wait(&state->sync_release_event, &local->sync_release);
+ /* Ensure that reads don't overtake the remote_event_wait. */
rmb();
header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
@@ -1959,6 +1962,7 @@ slot_handler_func(void *v)
DEBUG_TRACE(SLOT_HANDLER_LINE);
remote_event_wait(&state->trigger_event, &local->trigger);
+ /* Ensure that reads don't overtake the remote_event_wait. */
rmb();
DEBUG_TRACE(SLOT_HANDLER_LINE);
@@ -2021,6 +2025,7 @@ sync_func(void *v)
remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);
+ /* Ensure that reads don't overtake the remote_event_wait. */
rmb();
msgid = header->msgid;
@@ -3081,6 +3086,10 @@ enum vchiq_status vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned
if (vchiq_prepare_bulk_data(instance, bulk, offset, uoffset, size, dir))
goto unlock_error_exit;
+ /*
+ * Ensure that the bulk data record is visible to the peer
+ * before proceeding.
+ */
wmb();
vchiq_log_info(vchiq_core_log_level, "%d: bt (%d->%d) %cx %x@%pad %pK",
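
The barrier pairing documented above follows the usual shared-memory producer/consumer pattern: the writer finishes its stores to the shared record, issues wmb(), and only then sets the flag the peer is polling; the reader observes the flag (via remote_event_wait()), issues rmb(), and only then reads the record. The following is a minimal user-space sketch of that pairing, with C11 fences standing in for the kernel's wmb()/rmb(); the shared_data/data_ready names, the busy-wait loop and the thread functions are illustrative assumptions, not vchiq code.

/*
 * Minimal sketch of the wmb()/rmb() pairing the patch documents.
 * C11 fences stand in for the kernel barriers; all names here are
 * illustrative, not taken from the vchiq driver.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int shared_data;        /* payload written before signalling        */
static atomic_int data_ready;  /* plays the role of event->fired           */

static void *producer(void *arg)
{
	(void)arg;
	shared_data = 42;                            /* fill the shared record   */
	atomic_thread_fence(memory_order_release);   /* ~ wmb(): data before flag */
	atomic_store_explicit(&data_ready, 1, memory_order_relaxed);
	return NULL;
}

static void *consumer(void *arg)
{
	(void)arg;
	while (!atomic_load_explicit(&data_ready, memory_order_relaxed))
		;                                        /* ~ remote_event_wait()    */
	atomic_thread_fence(memory_order_acquire);   /* ~ rmb(): flag before data */
	printf("consumer saw %d\n", shared_data);
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&c, NULL, consumer, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}

Without the release fence the flag store could become visible before the data store; without the acquire fence the reader could use a stale value of the data even after seeing the flag, which is exactly the reordering the wmb()/rmb() pairs in the patch are there to prevent.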