| Message ID | 20230324115450.11268-1-sgarzare@redhat.com (mailing list archive) |
|---|---|
| State | Accepted |
| Commit | b465518dc27da1ed74b8cbada4659708aac35adb |
| Delegated to | Netdev Maintainers |
| Series | [net] vsock/loopback: use only sk_buff_head.lock to protect the packet queue |
On Fri, Mar 24, 2023 at 12:54:50PM +0100, Stefano Garzarella wrote:
> pkt_list_lock was used before commit 71dc9ec9ac7d ("virtio/vsock:
> replace virtio_vsock_pkt with sk_buff") to protect the packet queue.
> After that commit we switched to sk_buff and we are using
> sk_buff_head.lock in almost every place to protect the packet queue
> except in vsock_loopback_work() when we call skb_queue_splice_init().
>
> As reported by syzbot, this caused unlocked concurrent access to the
> packet queue between vsock_loopback_work() and
> vsock_loopback_cancel_pkt() since it is not holding pkt_list_lock.
>
> With the introduction of sk_buff_head, pkt_list_lock is redundant and
> can cause confusion, so let's remove it and use sk_buff_head.lock
> everywhere to protect the packet queue access.
>
> Fixes: 71dc9ec9ac7d ("virtio/vsock: replace virtio_vsock_pkt with sk_buff")
> Cc: bobby.eshleman@bytedance.com
> Reported-and-tested-by: syzbot+befff0a9536049e7902e@syzkaller.appspotmail.com
> Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
> ---
>  net/vmw_vsock/vsock_loopback.c | 10 ++--------
>  1 file changed, 2 insertions(+), 8 deletions(-)
>
> [...]

Makes sense to me. Thanks for getting to this so fast.

Best,
Bobby

Reviewed-by: Bobby Eshleman <bobby.eshleman@bytedance.com>
On 24.03.2023 14:54, Stefano Garzarella wrote:
> pkt_list_lock was used before commit 71dc9ec9ac7d ("virtio/vsock:
> replace virtio_vsock_pkt with sk_buff") to protect the packet queue.
> After that commit we switched to sk_buff and we are using
> sk_buff_head.lock in almost every place to protect the packet queue
> except in vsock_loopback_work() when we call skb_queue_splice_init().
>
> As reported by syzbot, this caused unlocked concurrent access to the
> packet queue between vsock_loopback_work() and
> vsock_loopback_cancel_pkt() since it is not holding pkt_list_lock.
>
> With the introduction of sk_buff_head, pkt_list_lock is redundant and
> can cause confusion, so let's remove it and use sk_buff_head.lock
> everywhere to protect the packet queue access.
>
> Fixes: 71dc9ec9ac7d ("virtio/vsock: replace virtio_vsock_pkt with sk_buff")
> Cc: bobby.eshleman@bytedance.com
> Reported-and-tested-by: syzbot+befff0a9536049e7902e@syzkaller.appspotmail.com
> Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
> ---
>  net/vmw_vsock/vsock_loopback.c | 10 ++--------
>  1 file changed, 2 insertions(+), 8 deletions(-)

Reviewed-by: Arseniy Krasnov <AVKrasnov@sberdevices.ru>

> [...]
Hello:

This patch was applied to netdev/net.git (main)
by David S. Miller <davem@davemloft.net>:

On Fri, 24 Mar 2023 12:54:50 +0100 you wrote:
> pkt_list_lock was used before commit 71dc9ec9ac7d ("virtio/vsock:
> replace virtio_vsock_pkt with sk_buff") to protect the packet queue.
> After that commit we switched to sk_buff and we are using
> sk_buff_head.lock in almost every place to protect the packet queue
> except in vsock_loopback_work() when we call skb_queue_splice_init().
>
> As reported by syzbot, this caused unlocked concurrent access to the
> packet queue between vsock_loopback_work() and
> vsock_loopback_cancel_pkt() since it is not holding pkt_list_lock.
>
> [...]

Here is the summary with links:
  - [net] vsock/loopback: use only sk_buff_head.lock to protect the packet queue
    https://git.kernel.org/netdev/net/c/b465518dc27d

You are awesome, thank you!
```diff
diff --git a/net/vmw_vsock/vsock_loopback.c b/net/vmw_vsock/vsock_loopback.c
index 671e03240fc5..89905c092645 100644
--- a/net/vmw_vsock/vsock_loopback.c
+++ b/net/vmw_vsock/vsock_loopback.c
@@ -15,7 +15,6 @@
 struct vsock_loopback {
         struct workqueue_struct *workqueue;
 
-        spinlock_t pkt_list_lock; /* protects pkt_list */
         struct sk_buff_head pkt_queue;
         struct work_struct pkt_work;
 };
@@ -32,9 +31,7 @@ static int vsock_loopback_send_pkt(struct sk_buff *skb)
         struct vsock_loopback *vsock = &the_vsock_loopback;
         int len = skb->len;
 
-        spin_lock_bh(&vsock->pkt_list_lock);
         skb_queue_tail(&vsock->pkt_queue, skb);
-        spin_unlock_bh(&vsock->pkt_list_lock);
 
         queue_work(vsock->workqueue, &vsock->pkt_work);
 
@@ -113,9 +110,9 @@ static void vsock_loopback_work(struct work_struct *work)
 
         skb_queue_head_init(&pkts);
 
-        spin_lock_bh(&vsock->pkt_list_lock);
+        spin_lock_bh(&vsock->pkt_queue.lock);
         skb_queue_splice_init(&vsock->pkt_queue, &pkts);
-        spin_unlock_bh(&vsock->pkt_list_lock);
+        spin_unlock_bh(&vsock->pkt_queue.lock);
 
         while ((skb = __skb_dequeue(&pkts))) {
                 virtio_transport_deliver_tap_pkt(skb);
@@ -132,7 +129,6 @@ static int __init vsock_loopback_init(void)
         if (!vsock->workqueue)
                 return -ENOMEM;
 
-        spin_lock_init(&vsock->pkt_list_lock);
         skb_queue_head_init(&vsock->pkt_queue);
         INIT_WORK(&vsock->pkt_work, vsock_loopback_work);
 
@@ -156,9 +152,7 @@ static void __exit vsock_loopback_exit(void)
 
         flush_work(&vsock->pkt_work);
 
-        spin_lock_bh(&vsock->pkt_list_lock);
         virtio_vsock_skb_queue_purge(&vsock->pkt_queue);
-        spin_unlock_bh(&vsock->pkt_list_lock);
 
         destroy_workqueue(vsock->workqueue);
 }
```
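The locking convention the patch settles on is worth spelling out: skb_queue_tail() takes sk_buff_head.lock internally, skb_queue_splice_init() takes no lock at all, and the __-prefixed helpers such as __skb_dequeue() are the explicitly unlocked variants. Below is a minimal kernel-style sketch of that convention, separate from the patch itself; the demo_* names are made up for illustration.

```c
/*
 * Illustrative sketch of the sk_buff_head locking convention used by
 * the patch above; demo_* names are hypothetical, not from the driver.
 */
#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Assume demo_queue was set up once with skb_queue_head_init(). */
static struct sk_buff_head demo_queue;

static void demo_enqueue(struct sk_buff *skb)
{
        /* Locked helper: takes demo_queue.lock internally. */
        skb_queue_tail(&demo_queue, skb);
}

static void demo_drain(void)
{
        struct sk_buff_head pkts;
        struct sk_buff *skb;

        skb_queue_head_init(&pkts);

        /*
         * skb_queue_splice_init() does no locking itself, so the caller
         * must hold the same sk_buff_head.lock the enqueue side uses.
         */
        spin_lock_bh(&demo_queue.lock);
        skb_queue_splice_init(&demo_queue, &pkts);
        spin_unlock_bh(&demo_queue.lock);

        /* pkts is private to this function: unlocked dequeue is fine. */
        while ((skb = __skb_dequeue(&pkts)))
                kfree_skb(skb);
}
```

This is exactly the shape vsock_loopback_work() has after the fix: splice under the queue's own lock, then drain the private list without it.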
pkt_list_lock was used before commit 71dc9ec9ac7d ("virtio/vsock:
replace virtio_vsock_pkt with sk_buff") to protect the packet queue.
After that commit we switched to sk_buff and we are using
sk_buff_head.lock in almost every place to protect the packet queue
except in vsock_loopback_work() when we call skb_queue_splice_init().

As reported by syzbot, this caused unlocked concurrent access to the
packet queue between vsock_loopback_work() and
vsock_loopback_cancel_pkt() since it is not holding pkt_list_lock.

With the introduction of sk_buff_head, pkt_list_lock is redundant and
can cause confusion, so let's remove it and use sk_buff_head.lock
everywhere to protect the packet queue access.

Fixes: 71dc9ec9ac7d ("virtio/vsock: replace virtio_vsock_pkt with sk_buff")
Cc: bobby.eshleman@bytedance.com
Reported-and-tested-by: syzbot+befff0a9536049e7902e@syzkaller.appspotmail.com
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
---
 net/vmw_vsock/vsock_loopback.c | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)
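The race described in the commit message is the classic "one shared structure, two locks" mistake: before the patch, vsock_loopback_work() spliced the queue under pkt_list_lock while vsock_loopback_cancel_pkt() walked it under sk_buff_head.lock, so the two paths never actually excluded each other. A userspace reduction of that failure mode, using plain pthreads and hypothetical names rather than the kernel code:

```c
/*
 * Reduction of the pre-patch bug class: two threads mutate the same
 * data under *different* mutexes, so there is no mutual exclusion and
 * formally a data race. All names here are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;
static long shared; /* stands in for the shared packet queue */

static void *use_lock_a(void *arg)
{
        for (int i = 0; i < 1000000; i++) {
                pthread_mutex_lock(&lock_a);
                shared++; /* races with lock_b holders */
                pthread_mutex_unlock(&lock_a);
        }
        return NULL;
}

static void *use_lock_b(void *arg)
{
        for (int i = 0; i < 1000000; i++) {
                pthread_mutex_lock(&lock_b);
                shared++; /* races with lock_a holders */
                pthread_mutex_unlock(&lock_b);
        }
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, use_lock_a, NULL);
        pthread_create(&b, NULL, use_lock_b, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);

        /* Lost updates: this usually prints less than 2000000. */
        printf("shared = %ld (expected 2000000)\n", shared);
        return 0;
}
```

Making every path take the single lock embedded in the structure itself, as the patch does with sk_buff_head.lock, removes the possibility of this mismatch by construction.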