
[v4,2/4] libbpf: ringbuf: allow to consume up to a certain amount of items

Message ID 20240406092005.92399-3-andrea.righi@canonical.com (mailing list archive)
State Accepted
Commit 13e8125a22763557d719db996f70c71f77c9509c
Series libbpf: API to partially consume items from ringbuffer

Commit Message

Andrea Righi April 6, 2024, 9:15 a.m. UTC
In some cases, instead of always consuming all items from ring buffers
in a greedy way, we may want to consume up to a certain number of items,
for example when we need to copy items from the BPF ring buffer into a
limited user buffer.

This change allows setting an upper limit on the number of items consumed
from one or more ring buffers.

Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
---
 tools/lib/bpf/ringbuf.c | 23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)
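
For illustration only, here is a minimal sketch of how a caller might bound
consumption once the public helpers added later in this series
(ring_buffer__consume_n() / ring__consume_n()) are available; the callback
and the buffer handling are assumptions, not part of this patch:

#include <bpf/libbpf.h>

/* hypothetical callback: copy one sample into a bounded user buffer */
static int handle_event(void *ctx, void *data, size_t size)
{
	/* ... copy 'data' into the destination tracked by 'ctx' ... */
	return 0;
}

/* Consume at most 'max_items' samples, e.g. only as many as still fit
 * in the destination buffer, instead of draining the ring greedily.
 */
static int drain_some(struct ring_buffer *rb, size_t max_items)
{
	/* returns the number of items consumed, or a negative error */
	return ring_buffer__consume_n(rb, max_items);
}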

Comments

Andrii Nakryiko April 6, 2024, 5:41 p.m. UTC | #1
On Sat, Apr 6, 2024 at 2:20 AM Andrea Righi <andrea.righi@canonical.com> wrote:
>
> In some cases, instead of always consuming all items from ring buffers
> in a greedy way, we may want to consume up to a certain number of items,
> for example when we need to copy items from the BPF ring buffer into a
> limited user buffer.
>
> This change allows setting an upper limit on the number of items consumed
> from one or more ring buffers.
>
> Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
> ---
>  tools/lib/bpf/ringbuf.c | 23 +++++++++++++++--------
>  1 file changed, 15 insertions(+), 8 deletions(-)
>
> diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c
> index aacb64278a01..2c4031168413 100644
> --- a/tools/lib/bpf/ringbuf.c
> +++ b/tools/lib/bpf/ringbuf.c
> @@ -231,7 +231,7 @@ static inline int roundup_len(__u32 len)
>         return (len + 7) / 8 * 8;
>  }
>
> -static int64_t ringbuf_process_ring(struct ring *r)
> +static int64_t ringbuf_process_ring(struct ring *r, size_t n)
>  {
>         int *len_ptr, len, err;
>         /* 64-bit to avoid overflow in case of extreme application behavior */
> @@ -268,6 +268,9 @@ static int64_t ringbuf_process_ring(struct ring *r)
>                         }
>
>                         smp_store_release(r->consumer_pos, cons_pos);
> +
> +                       if (cnt >= n)
> +                               goto done;
>                 }
>         } while (got_new_data);
>  done:
> @@ -287,13 +290,15 @@ int ring_buffer__consume(struct ring_buffer *rb)
>         for (i = 0; i < rb->ring_cnt; i++) {
>                 struct ring *ring = rb->rings[i];
>
> -               err = ringbuf_process_ring(ring);
> +               err = ringbuf_process_ring(ring, INT_MAX);
>                 if (err < 0)
>                         return libbpf_err(err);
>                 res += err;
> +               if (res > INT_MAX) {
> +                       res = INT_MAX;
> +                       break;
> +               }
>         }
> -       if (res > INT_MAX)
> -               return INT_MAX;

The idea here was to avoid returning an overflowed int, not to stop
at INT_MAX samples. So I kept this part intact (res is int64_t, so no
overflow can happen inside the loop).
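
To make the distinction concrete, this is a sketch of the shape being kept:
every ring is still processed in full, and only the value returned to the
caller is clamped; the early break in the patch as posted would instead stop
consuming once INT_MAX items had been seen:

	for (i = 0; i < rb->ring_cnt; i++) {
		struct ring *ring = rb->rings[i];

		err = ringbuf_process_ring(ring, INT_MAX);
		if (err < 0)
			return libbpf_err(err);
		res += err;	/* res is int64_t, cannot overflow here */
	}
	if (res > INT_MAX)
		return INT_MAX;	/* clamp only what the int API can report */
	return res;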


>         return res;
>  }
>
> @@ -314,13 +319,15 @@ int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
>                 __u32 ring_id = rb->events[i].data.fd;
>                 struct ring *ring = rb->rings[ring_id];
>
> -               err = ringbuf_process_ring(ring);
> +               err = ringbuf_process_ring(ring, INT_MAX);
>                 if (err < 0)
>                         return libbpf_err(err);
>                 res += err;
> +               if (res > INT_MAX) {
> +                       res = INT_MAX;
> +                       break;
> +               }
>         }
> -       if (res > INT_MAX)
> -               return INT_MAX;
>         return res;
>  }
>
> @@ -375,7 +382,7 @@ int ring__consume(struct ring *r)
>  {
>         int64_t res;
>
> -       res = ringbuf_process_ring(r);
> +       res = ringbuf_process_ring(r, INT_MAX);
>         if (res < 0)
>                 return libbpf_err(res);
>
> --
> 2.43.0
>

Patch

diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c
index aacb64278a01..2c4031168413 100644
--- a/tools/lib/bpf/ringbuf.c
+++ b/tools/lib/bpf/ringbuf.c
@@ -231,7 +231,7 @@  static inline int roundup_len(__u32 len)
 	return (len + 7) / 8 * 8;
 }
 
-static int64_t ringbuf_process_ring(struct ring *r)
+static int64_t ringbuf_process_ring(struct ring *r, size_t n)
 {
 	int *len_ptr, len, err;
 	/* 64-bit to avoid overflow in case of extreme application behavior */
@@ -268,6 +268,9 @@  static int64_t ringbuf_process_ring(struct ring *r)
 			}
 
 			smp_store_release(r->consumer_pos, cons_pos);
+
+			if (cnt >= n)
+				goto done;
 		}
 	} while (got_new_data);
 done:
@@ -287,13 +290,15 @@  int ring_buffer__consume(struct ring_buffer *rb)
 	for (i = 0; i < rb->ring_cnt; i++) {
 		struct ring *ring = rb->rings[i];
 
-		err = ringbuf_process_ring(ring);
+		err = ringbuf_process_ring(ring, INT_MAX);
 		if (err < 0)
 			return libbpf_err(err);
 		res += err;
+		if (res > INT_MAX) {
+			res = INT_MAX;
+			break;
+		}
 	}
-	if (res > INT_MAX)
-		return INT_MAX;
 	return res;
 }
 
@@ -314,13 +319,15 @@  int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
 		__u32 ring_id = rb->events[i].data.fd;
 		struct ring *ring = rb->rings[ring_id];
 
-		err = ringbuf_process_ring(ring);
+		err = ringbuf_process_ring(ring, INT_MAX);
 		if (err < 0)
 			return libbpf_err(err);
 		res += err;
+		if (res > INT_MAX) {
+			res = INT_MAX;
+			break;
+		}
 	}
-	if (res > INT_MAX)
-		return INT_MAX;
 	return res;
 }
 
@@ -375,7 +382,7 @@  int ring__consume(struct ring *r)
 {
 	int64_t res;
 
-	res = ringbuf_process_ring(r);
+	res = ringbuf_process_ring(r, INT_MAX);
 	if (res < 0)
 		return libbpf_err(res);