@@ -12,6 +12,10 @@
struct virtio_vsock_skb_cb {
	bool reply;
	bool tap_delivered;
+	/* Index of the current fragment in 'frags' of the skb. */
+	u32 curr_frag;
+	/* Offset inside the current fragment, counted from its start. */
+	u32 frag_off;
};

#define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb))
@@ -246,4 +250,7 @@ void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit);
void virtio_transport_deliver_tap_pkt(struct sk_buff *skb);
int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *list);
int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t read_actor);
+int virtio_transport_nl_skb_to_iov(struct sk_buff *skb,
+				   struct iov_iter *iov_iter, size_t len,
+				   bool peek);
#endif /* _LINUX_VIRTIO_VSOCK_H */
@@ -337,6 +337,60 @@ static int virtio_transport_send_credit_update(struct vsock_sock *vsk)
	return virtio_transport_send_pkt_info(vsk, &info);
}

+int virtio_transport_nl_skb_to_iov(struct sk_buff *skb,
+				   struct iov_iter *iov_iter,
+				   size_t len,
+				   bool peek)
+{
+	unsigned int skb_len;
+	size_t rest_len = len;
+	u32 curr_frag;
+	u32 curr_offs;
+	int err = 0;
+
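+	/* Resume from the read position saved by the last non-peek call. */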
+	skb_len = skb->len;
+	curr_frag = VIRTIO_VSOCK_SKB_CB(skb)->curr_frag;
+	curr_offs = VIRTIO_VSOCK_SKB_CB(skb)->frag_off;
+
+	while (rest_len && skb_len) {
+		skb_frag_t *curr_vec;
+		size_t to_copy;
+		void *data;
+
+		curr_vec = &skb_shinfo(skb)->frags[curr_frag];
+		/* Copy no more than what is left in the current fragment. */
+		to_copy = min(rest_len,
+			      (size_t)(skb_frag_size(curr_vec) - curr_offs));
+		data = kmap_local_page(skb_frag_page(curr_vec));
+
+		if (copy_to_iter(data + skb_frag_off(curr_vec) + curr_offs,
+				 to_copy, iov_iter) != to_copy)
+			err = -EFAULT;
+
+		kunmap_local(data);
+
+		if (err)
+			break;
+
+		rest_len -= to_copy;
+		skb_len -= to_copy;
+		curr_offs += to_copy;
+
+		/* The fragment is fully consumed, move to the next one. */
+		if (curr_offs == skb_frag_size(curr_vec)) {
+			curr_frag++;
+			curr_offs = 0;
+		}
+	}
+
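+	/* Save the new read position so that the next read resumes here.
+	 * When just peeking, the skb must be left untouched.
+	 */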
+	if (!peek) {
+		skb->len = skb_len;
+		VIRTIO_VSOCK_SKB_CB(skb)->curr_frag = curr_frag;
+		VIRTIO_VSOCK_SKB_CB(skb)->frag_off = curr_offs;
+	}
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_nl_skb_to_iov);
+
static ssize_t
virtio_transport_stream_do_peek(struct vsock_sock *vsk,
				struct msghdr *msg,
@@ -365,7 +419,14 @@ virtio_transport_stream_do_peek(struct vsock_sock *vsk,
		 */
		spin_unlock_bh(&vvs->rx_lock);

-		err = memcpy_to_msg(msg, skb->data + off, bytes);
+		if (skb_is_nonlinear(skb)) {
+			err = virtio_transport_nl_skb_to_iov(skb,
+							     &msg->msg_iter,
+							     bytes, true);
+		} else {
+			err = memcpy_to_msg(msg, skb->data + off, bytes);
+		}
		if (err)
			goto out;
@@ -417,14 +478,22 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
		 */
		spin_unlock_bh(&vvs->rx_lock);

-		err = memcpy_to_msg(msg, skb->data, bytes);
+		if (skb_is_nonlinear(skb)) {
+			err = virtio_transport_nl_skb_to_iov(skb, &msg->msg_iter,
+							     bytes, false);
+		} else {
+			err = memcpy_to_msg(msg, skb->data, bytes);
+		}
+
		if (err)
			goto out;

		spin_lock_bh(&vvs->rx_lock);

		total += bytes;
-		skb_pull(skb, bytes);
+
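+		/* A non-linear skb was already trimmed by
+		 * virtio_transport_nl_skb_to_iov() above, so only linear
+		 * skbs need an explicit pull here.
+		 */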
+		if (!skb_is_nonlinear(skb))
+			skb_pull(skb, bytes);

		if (skb->len == 0) {
			u32 pkt_len = le32_to_cpu(virtio_vsock_hdr(skb)->len);
@@ -498,7 +567,14 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
			 */
			spin_unlock_bh(&vvs->rx_lock);

-			err = memcpy_to_msg(msg, skb->data, bytes_to_copy);
+			if (skb_is_nonlinear(skb)) {
+				err = virtio_transport_nl_skb_to_iov(skb,
+								     &msg->msg_iter,
+								     bytes_to_copy,
+								     false);
+			} else {
+				err = memcpy_to_msg(msg, skb->data, bytes_to_copy);
+			}
			if (err) {
				/* Copy of message failed. Rest of
				 * fragments will be freed without copy.
This is a preparation patch for non-linear skbuff handling. It does two
things:

1) Handles freeing of non-linear skbuffs.

2) Adds copying from non-linear skbuffs to the user's buffer.

Signed-off-by: Arseniy Krasnov <AVKrasnov@sberdevices.ru>
---
 include/linux/virtio_vsock.h            |  7 +++
 net/vmw_vsock/virtio_transport_common.c | 84 +++++++++++++++++++++++--
 2 files changed, 87 insertions(+), 4 deletions(-)
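
A minimal usage sketch (illustration only, not part of the patch): it
assumes a freshly queued non-linear skb whose 4 KiB payload sits in two
2 KiB page fragments, so 'curr_frag' and 'frag_off' start at zero. Two
non-peek calls then walk the fragments and leave the control block
pointing at the first unread byte:

	/* First read takes 3 KiB: all of fragment 0, 1 KiB of fragment 1. */
	err = virtio_transport_nl_skb_to_iov(skb, &msg->msg_iter, 3072, false);
	/* Now: curr_frag == 1, frag_off == 1024, skb->len == 1024. */

	/* Second read takes the remaining 1 KiB and exhausts the skb. */
	err = virtio_transport_nl_skb_to_iov(skb, &msg->msg_iter, 1024, false);
	/* Now: skb->len == 0, so the dequeue path unlinks and frees the skb. */

With 'peek' set to true, the same calls copy the same bytes but leave
'curr_frag', 'frag_off' and 'skb->len' untouched, which is what the
MSG_PEEK path relies on.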