@@ -28,6 +28,10 @@ struct io_zc_rx_pool {
u32 cache_count;
u32 cache[POOL_CACHE_SIZE];
 
+ /* delayed destruction */
+ unsigned long delay_end;
+ struct delayed_work destroy_work;
+
/* freelist */
spinlock_t freelist_lock;
u32 free_count;
@@ -222,20 +226,56 @@ int io_zc_rx_create_pool(struct io_ring_ctx *ctx,
return ret;
}
 
-static void io_zc_rx_destroy_pool(struct io_zc_rx_pool *pool)
+static void io_zc_rx_destroy_ifq(struct io_zc_rx_ifq *ifq)
+{
+ if (ifq->dev)
+ dev_put(ifq->dev);
+ io_free_rbuf_ring(ifq);
+ kfree(ifq);
+}
+
+static void io_zc_rx_destroy_pool_work(struct work_struct *work)
{
+ struct io_zc_rx_pool *pool = container_of(
+ to_delayed_work(work), struct io_zc_rx_pool, destroy_work);
struct device *dev = netdev2dev(pool->ifq->dev);
struct io_zc_rx_buf *buf;
+ int i, refc, count = 0;
 
- for (int i = 0; i < pool->nr_pages; i++) {
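+ /* wait for outstanding buffer references to drop before tearing the pool down */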
+ for (i = 0; i < pool->nr_pages; i++) {
buf = &pool->bufs[i];
+ refc = atomic_read(&buf->refcount) & IO_ZC_RX_KREF_MASK;
+ if (refc) {
+ if (time_before(jiffies, pool->delay_end)) {
+ schedule_delayed_work(&pool->destroy_work, HZ);
+ return;
+ }
+ count++;
+ }
+ }
+
+ if (count)
+ pr_debug("freeing pool with %d/%d outstanding pages\n",
+ count, pool->nr_pages);
+
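+ /* all refs dropped, or grace period expired: unmap buffers and free the pool */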
+ for (i = 0; i < pool->nr_pages; i++) {
+ buf = &pool->bufs[i];
io_zc_rx_unmap_buf(dev, buf);
}
+
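+ /* the ifq is freed here, not in io_zc_rx_ifq_free(), since this work still uses it */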
+ io_zc_rx_destroy_ifq(pool->ifq);
kvfree(pool->bufs);
kvfree(pool);
}
 
+static void io_zc_rx_destroy_pool(struct io_zc_rx_pool *pool)
+{
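+ /* run destruction from a workqueue, allowing up to 10s for buffers still in flight */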
+ pool->delay_end = jiffies + HZ * 10;
+ INIT_DELAYED_WORK(&pool->destroy_work, io_zc_rx_destroy_pool_work);
+ schedule_delayed_work(&pool->destroy_work, 0);
+}
+
static struct io_zc_rx_ifq *io_zc_rx_ifq_alloc(struct io_ring_ctx *ctx)
{
struct io_zc_rx_ifq *ifq;
@@ -256,10 +296,8 @@ static void io_zc_rx_ifq_free(struct io_zc_rx_ifq *ifq)
io_close_zc_rxq(ifq);
if (ifq->pool)
io_zc_rx_destroy_pool(ifq->pool);
- if (ifq->dev)
- dev_put(ifq->dev);
- io_free_rbuf_ring(ifq);
- kfree(ifq);
+ else
+ io_zc_rx_destroy_ifq(ifq);
}
 
int io_register_zc_rx_ifq(struct io_ring_ctx *ctx,