@@ -134,7 +134,10 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
{
struct dma_buf *dmabuf;
struct reservation_object *resv;
+ struct reservation_object_shared *shared;
+ struct fence *fence_excl;
unsigned long events;
+ unsigned shared_count;
dmabuf = file->private_data;
if (!dmabuf || !dmabuf->resv)
@@ -148,14 +151,18 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
if (!events)
return 0;
- ww_mutex_lock(&resv->lock, NULL);
+ rcu_read_lock();
- if (resv->fence_excl && (!(events & POLLOUT) ||
- resv->fence_shared_count == 0)) {
+ shared = rcu_dereference(resv->shared);
+ fence_excl = rcu_dereference(resv->fence_excl);
+ shared_count = shared ? ACCESS_ONCE(shared->count) : 0;
+
+ if (fence_excl && (!(events & POLLOUT) ||
+ (!shared || shared_count == 0))) {
struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
unsigned long pevents = POLLIN;
- if (resv->fence_shared_count == 0)
+ if (!shared || shared_count == 0)
pevents |= POLLOUT;
spin_lock_irq(&dmabuf->poll.lock);
@@ -167,19 +174,26 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
spin_unlock_irq(&dmabuf->poll.lock);
if (events & pevents) {
- if (!fence_add_callback(resv->fence_excl,
- &dcb->cb, dma_buf_poll_cb))
+ if (!kref_get_unless_zero(&fence_excl->refcount)) {
+ /* force a recheck */
+ events &= ~pevents;
+ dma_buf_poll_cb(NULL, &dcb->cb);
+ } else if (!fence_add_callback(fence_excl, &dcb->cb,
+ dma_buf_poll_cb)) {
events &= ~pevents;
- else
+ fence_put(fence_excl);
+ } else {
/*
* No callback queued, wake up any additional
* waiters.
*/
+ fence_put(fence_excl);
dma_buf_poll_cb(NULL, &dcb->cb);
+ }
}
}
- if ((events & POLLOUT) && resv->fence_shared_count > 0) {
+ if ((events & POLLOUT) && shared && shared_count > 0) {
struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
int i;
@@ -194,20 +208,34 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
if (!(events & POLLOUT))
goto out;
- for (i = 0; i < resv->fence_shared_count; ++i)
- if (!fence_add_callback(resv->fence_shared[i],
- &dcb->cb, dma_buf_poll_cb)) {
+ for (i = 0; i < shared_count; ++i) {
+ struct fence *fence = ACCESS_ONCE(shared->fence[i]);
+ if (!kref_get_unless_zero(&fence->refcount)) {
+ /*
+ * The fence refcount dropped to zero, which means the
+ * fence is already being destroyed from under us and we
+ * cannot take a reference.
+ * Call dma_buf_poll_cb and force a recheck!
+ */
events &= ~POLLOUT;
+ dma_buf_poll_cb(NULL, &dcb->cb);
break;
}
+ if (!fence_add_callback(fence, &dcb->cb,
+ dma_buf_poll_cb)) {
+ fence_put(fence);
+ events &= ~POLLOUT;
+ break;
+ }
+ fence_put(fence);
+ }
/* No callback queued, wake up any additional waiters. */
- if (i == resv->fence_shared_count)
+ if (i == shared_count)
dma_buf_poll_cb(NULL, &dcb->cb);
}
out:
- ww_mutex_unlock(&resv->lock);
+ rcu_read_unlock();
return events;
}
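
For reference, the pattern the poll path now relies on: a fence
pointer found under rcu_read_lock() may already be on its way to
being released, so it only becomes usable after kref_get_unless_zero()
succeeds, and the fence memory itself must stay valid for a full
grace period (hence the kfree_rcu change below). A minimal sketch of
that read side; the helper name is made up and not part of the patch:

static struct fence *fence_get_rcu_sketch(struct fence __rcu **fencep)
{
	struct fence *fence;

	rcu_read_lock();
	fence = rcu_dereference(*fencep);
	if (fence && !kref_get_unless_zero(&fence->refcount))
		fence = NULL; /* being destroyed, caller must recheck */
	rcu_read_unlock();

	return fence;
}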
@@ -170,7 +170,7 @@ void release_fence(struct kref *kref)
if (fence->ops->release)
fence->ops->release(fence);
else
- kfree(fence);
+ kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(release_fence);
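
This also constrains drivers: a fence with a custom ->release hook
must defer its kfree through RCU in the same way, or the
rcu_read_lock() protected accesses in dma_buf_poll can touch freed
memory. An illustrative sketch, with struct and function names made
up:

struct my_fence {
	struct fence base;
	/* driver private state would live here */
};

static void my_fence_release(struct fence *fence)
{
	struct my_fence *f = container_of(fence, struct my_fence, base);

	/* kfree()s the whole my_fence after a grace period */
	kfree_rcu(f, base.rcu);
}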
@@ -40,6 +40,7 @@ struct fence_cb;
* struct fence - software synchronization primitive
* @refcount: refcount for this fence
* @ops: fence_ops associated with this fence
+ * @rcu: used for releasing fence with kfree_rcu
* @cb_list: list of all callbacks to call
* @lock: spin_lock_irqsave used for locking
* @context: execution context this fence belongs to, returned by
@@ -73,6 +74,7 @@ struct fence_cb;
struct fence {
struct kref refcount;
const struct fence_ops *ops;
+ struct rcu_head rcu;
struct list_head cb_list;
spinlock_t *lock;
unsigned context, seqno;
@@ -42,6 +42,7 @@
#include <linux/ww_mutex.h>
#include <linux/fence.h>
#include <linux/slab.h>
+#include <linux/rcupdate.h>
extern struct ww_class reservation_ww_class;
@@ -49,8 +50,12 @@ struct reservation_object {
struct ww_mutex lock;
struct fence *fence_excl;
- struct fence **fence_shared;
- u32 fence_shared_count, fence_shared_max;
+ u32 shared_max_fence;
+ struct reservation_object_shared {
+ struct rcu_head rcu;
+ u32 count;
+ struct fence *fence[];
+ } *shared;
};
static inline void
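
The count and the flexible fence[] array share one allocation, which
is only ever replaced as a whole via rcu_assign_pointer(), so a reader
inside rcu_read_lock() always has a valid array for whatever count it
read. A sketch of the intended read side, mirroring what dma_buf_poll
does above (the helper itself is hypothetical):

static inline unsigned
reservation_object_shared_count_sketch(struct reservation_object *obj)
{
	struct reservation_object_shared *shared;
	unsigned count = 0;

	rcu_read_lock();
	shared = rcu_dereference(obj->shared);
	if (shared)
		count = ACCESS_ONCE(shared->count);
	rcu_read_unlock();

	return count;
}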
@@ -58,8 +63,8 @@ reservation_object_init(struct reservation_object *obj)
{
ww_mutex_init(&obj->lock, &reservation_ww_class);
- obj->fence_shared_count = obj->fence_shared_max = 0;
- obj->fence_shared = NULL;
+ obj->shared = NULL;
+ obj->shared_max_fence = 4;
obj->fence_excl = NULL;
}
@@ -70,11 +75,97 @@ reservation_object_fini(struct reservation_object *obj)
if (obj->fence_excl)
fence_put(obj->fence_excl);
- for (i = 0; i < obj->fence_shared_count; ++i)
- fence_put(obj->fence_shared[i]);
- kfree(obj->fence_shared);
+ if (obj->shared) {
+ for (i = 0; i < obj->shared->count; ++i)
+ fence_put(obj->shared->fence[i]);
+
+ /*
+ * This object should be dead and all references to it must
+ * have been released, so there is no need to free with rcu.
+ */
+ kfree(obj->shared);
+ }
ww_mutex_destroy(&obj->lock);
}
+/*
+ * Reserve space to add a shared fence to a reservation_object;
+ * must be called with obj->lock held.
+ */
+static inline int
+reservation_object_reserve_shared_fence(struct reservation_object *obj)
+{
+ struct reservation_object_shared *shared, *old;
+ u32 max = obj->shared_max_fence;
+
+ if (obj->shared) {
+ if (obj->shared->count < max)
+ return 0;
+ max *= 2;
+ }
+
+ shared = kmalloc(offsetof(typeof(*shared), fence[max]), GFP_KERNEL);
+ if (!shared)
+ return -ENOMEM;
+ old = obj->shared;
+
+ if (old) {
+ shared->count = old->count;
+ memcpy(shared->fence, old->fence, old->count * sizeof(*old->fence));
+ } else {
+ shared->count = 0;
+ }
+ rcu_assign_pointer(obj->shared, shared);
+ obj->shared_max_fence = max;
+ if (old)
+ kfree_rcu(old, rcu);
+ return 0;
+}
+
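
The intended calling pattern is to reserve the slot up front, so the
add itself cannot fail mid-submission. A sketch of that pattern; the
surrounding submission function is made up:

static int submit_job_sketch(struct reservation_object *obj,
			     struct fence *fence)
{
	int ret;

	ww_mutex_lock(&obj->lock, NULL);
	ret = reservation_object_reserve_shared_fence(obj);
	if (!ret)
		reservation_object_add_shared_fence(obj, fence);
	ww_mutex_unlock(&obj->lock);

	return ret;
}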
+/*
+ * Add a fence to a shared slot; obj->lock must be held and
+ * reservation_object_reserve_shared_fence must have been called
+ * first.
+ */
+static inline void
+reservation_object_add_shared_fence(struct reservation_object *obj,
+ struct fence *fence)
+{
+ unsigned i;
+
+ BUG_ON(obj->shared->count == obj->shared_max_fence);
+ fence_get(fence);
+
+ for (i = 0; i < obj->shared->count; ++i) {
+ if (obj->shared->fence[i]->context == fence->context) {
+ struct fence *old = obj->shared->fence[i];
+
+ rcu_assign_pointer(obj->shared->fence[i], fence);
+ fence_put(old);
+ return;
+ }
+ }
+
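+ /*
+ * Write the fence before making it visible through count, so
+ * that an RCU reader never sees an unwritten slot.
+ */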
+ obj->shared->fence[obj->shared->count] = fence;
+ smp_wmb();
+ obj->shared->count++;
+}
+
+/*
+ * May be called after adding an exclusive fence to wipe all
+ * shared fences.
+ */
+static inline void
+reservation_object_clear_shared(struct reservation_object *obj)
+{
+ struct reservation_object_shared *old = obj->shared;
+ unsigned i;
+
+ if (!old)
+ return;
+
+ rcu_assign_pointer(obj->shared, NULL);
+ for (i = 0; i < old->count; ++i)
+ fence_put(old->fence[i]);
+ kfree_rcu(old, rcu);
+}
+
#endif /* _LINUX_RESERVATION_H */
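
Sketch of how reservation_object_clear_shared() is meant to be used
(the driver function is made up): once an exclusive fence covers the
buffer, the shared fences it supersedes can be dropped in one go.

static void set_exclusive_sketch(struct reservation_object *obj,
				 struct fence *fence)
{
	struct fence *old;

	ww_mutex_lock(&obj->lock, NULL);
	old = obj->fence_excl;
	fence_get(fence);
	rcu_assign_pointer(obj->fence_excl, fence);
	reservation_object_clear_shared(obj);
	ww_mutex_unlock(&obj->lock);

	if (old)
		fence_put(old);
}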