@@ -161,6 +161,8 @@ struct tls_offload_context_tx {
struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
void (*sk_destruct)(struct sock *sk);
+ struct work_struct destruct_work;
+ struct tls_context *ctx;
u8 driver_state[] __aligned(8);
/* The TLS layer reserves room for driver specific state
* Currently the belief is that there is not enough
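The two added fields give every TX offload context its own deferred-destruction state: destruct_work is a per-context work item, and ctx is a back-pointer to the owning tls_context. The back-pointer is needed because the offload context is a separate allocation reached through the tls_context rather than embedded in it, so container_of() on the work item can only recover the offload context itself, not its owner. A minimal sketch of this per-object pattern, using hypothetical foo_* names in place of the real TLS structures:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo_ctx;                                 /* hypothetical owning object */

struct foo_offload {
        struct work_struct destruct_work;       /* one work item per object */
        struct foo_ctx *owner;                  /* back-pointer; container_of()
                                                 * cannot reach a separately
                                                 * allocated owner
                                                 */
};

static void foo_destruct_task(struct work_struct *work)
{
        struct foo_offload *off =
                container_of(work, struct foo_offload, destruct_work);

        /* Both the offload state and its owner are reachable here without
         * walking any global list, so unrelated objects can be torn down
         * concurrently on a workqueue.
         */
        kfree(off->owner);
        kfree(off);
}

In the patch itself the handler additionally calls the driver's tls_dev_del() op, drops the device reference with dev_put() and clears ctx->netdev before freeing the context, as the tls_device_tx_del_task() hunk below shows.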
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -46,10 +46,8 @@
*/
static DECLARE_RWSEM(device_offload_lock);
-static void tls_device_gc_task(struct work_struct *work);
+static struct workqueue_struct *destruct_wq __read_mostly;
-static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
-static LIST_HEAD(tls_device_gc_list);
static LIST_HEAD(tls_device_list);
static LIST_HEAD(tls_device_down_list);
static DEFINE_SPINLOCK(tls_device_lock);
@@ -68,29 +66,17 @@ static void tls_device_free_ctx(struct tls_context *ctx)
tls_ctx_free(NULL, ctx);
}
-static void tls_device_gc_task(struct work_struct *work)
+static void tls_device_tx_del_task(struct work_struct *work)
{
- struct tls_context *ctx, *tmp;
- unsigned long flags;
- LIST_HEAD(gc_list);
-
- spin_lock_irqsave(&tls_device_lock, flags);
- list_splice_init(&tls_device_gc_list, &gc_list);
- spin_unlock_irqrestore(&tls_device_lock, flags);
-
- list_for_each_entry_safe(ctx, tmp, &gc_list, list) {
- struct net_device *netdev = ctx->netdev;
+ struct tls_offload_context_tx *offload_ctx =
+ container_of(work, struct tls_offload_context_tx, destruct_work);
+ struct tls_context *ctx = offload_ctx->ctx;
+ struct net_device *netdev = ctx->netdev;
- if (netdev && ctx->tx_conf == TLS_HW) {
- netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
- TLS_OFFLOAD_CTX_DIR_TX);
- dev_put(netdev);
- ctx->netdev = NULL;
- }
-
- list_del(&ctx->list);
- tls_device_free_ctx(ctx);
- }
+ netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_TX);
+ dev_put(netdev);
+ ctx->netdev = NULL;
+ tls_device_free_ctx(ctx);
}
static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
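Compared with tls_device_gc_task(), the new handler neither walks a list nor re-checks ctx->netdev and ctx->tx_conf: as the next hunk shows, the work is only queued from tls_device_queue_ctx_destruction() when ctx->netdev is set and ctx->tx_conf == TLS_HW, and the context has already been unlinked from the device lists at that point, so the handler can go straight to the driver's tls_dev_del() op.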
@@ -104,16 +90,15 @@ static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
return;
}
+ list_del(&ctx->list); /* Remove from tls_device_list / tls_device_down_list */
async_cleanup = ctx->netdev && ctx->tx_conf == TLS_HW;
if (async_cleanup) {
- list_move_tail(&ctx->list, &tls_device_gc_list);
+ struct tls_offload_context_tx *offload_ctx = tls_offload_ctx_tx(ctx);
- /* schedule_work inside the spinlock
+ /* queue_work inside the spinlock
* to make sure tls_device_down waits for that work.
*/
- schedule_work(&tls_device_gc_work);
- } else {
- list_del(&ctx->list);
+ queue_work(destruct_wq, &offload_ctx->destruct_work);
}
spin_unlock_irqrestore(&tls_device_lock, flags);
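Two details are worth noting here. First, list_del() now runs unconditionally under tls_device_lock, so the context leaves tls_device_list / tls_device_down_list whether its cleanup is synchronous or deferred to the workqueue. Second, queue_work() is still issued while the spinlock is held; as the comment says, this is what lets tls_device_down(), which scans these lists under the same lock before flushing destruct_wq, wait for any destruct work queued before its scan. A compact sketch of that queue-under-lock / flush pairing, with stand-in bar_* names:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

static DEFINE_SPINLOCK(bar_lock);
static struct workqueue_struct *bar_wq;         /* allocated during init */

struct bar_obj {
        struct list_head list;
        struct work_struct destruct_work;
};

static void bar_release(struct bar_obj *obj)
{
        unsigned long flags;

        spin_lock_irqsave(&bar_lock, flags);
        list_del(&obj->list);
        /* Queue while still holding the lock: any path that scans the list
         * under bar_lock and then flushes bar_wq is guaranteed to wait for
         * this work item.
         */
        queue_work(bar_wq, &obj->destruct_work);
        spin_unlock_irqrestore(&bar_lock, flags);
}

static void bar_netdev_going_down(void)
{
        spin_lock_irq(&bar_lock);
        /* handle the objects still on the list here */
        spin_unlock_irq(&bar_lock);

        flush_workqueue(bar_wq);        /* waits for every already-queued work */
}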
@@ -1160,6 +1145,9 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
start_marker_record->len = 0;
start_marker_record->num_frags = 0;
+ INIT_WORK(&offload_ctx->destruct_work, tls_device_tx_del_task);
+ offload_ctx->ctx = ctx;
+
INIT_LIST_HEAD(&offload_ctx->records_list);
list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
spin_lock_init(&offload_ctx->lock);
@@ -1399,7 +1387,7 @@ static int tls_device_down(struct net_device *netdev)
up_write(&device_offload_lock);
- flush_work(&tls_device_gc_work);
+ flush_workqueue(destruct_wq);
return NOTIFY_DONE;
}
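Because destruction is now spread over per-context work items on destruct_wq, the old flush_work() on the single gc work is no longer sufficient; flush_workqueue() waits for every destruct work queued so far before the netdev notifier path returns.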
@@ -1440,12 +1428,23 @@ static struct notifier_block tls_dev_notifier = {
int __init tls_device_init(void)
{
- return register_netdevice_notifier(&tls_dev_notifier);
+ int err;
+
+ destruct_wq = alloc_workqueue("ktls_device_destruct", 0, 0);
+ if (!destruct_wq)
+ return -ENOMEM;
+
+ err = register_netdevice_notifier(&tls_dev_notifier);
+ if (err)
+ destroy_workqueue(destruct_wq);
+
+ return err;
}
void __exit tls_device_cleanup(void)
{
unregister_netdevice_notifier(&tls_dev_notifier);
- flush_work(&tls_device_gc_work);
+ flush_workqueue(destruct_wq);
+ destroy_workqueue(destruct_wq);
clean_acked_data_flush();
}
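tls_device_init() now has an allocation to unwind if the notifier registration fails, and tls_device_cleanup() must tear the workqueue down again after flushing it. A self-contained sketch of the same init/exit ordering, with hypothetical example_* names, assuming only the standard notifier and workqueue APIs:

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        return NOTIFY_DONE;             /* a real handler would act on events */
}

static struct notifier_block example_nb = {
        .notifier_call = example_netdev_event,
};

static int __init example_init(void)
{
        int err;

        example_wq = alloc_workqueue("example_destruct", 0, 0);
        if (!example_wq)
                return -ENOMEM;

        err = register_netdevice_notifier(&example_nb);
        if (err)
                destroy_workqueue(example_wq);  /* unwind on failure */

        return err;
}

static void __exit example_exit(void)
{
        unregister_netdevice_notifier(&example_nb);
        flush_workqueue(example_wq);    /* let already-queued work finish */
        destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");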