
[PATCHv3,1/7] uprobe: Add support for session consumer

Message ID 20240909074554.2339984-2-jolsa@kernel.org (mailing list archive)
State Superseded
Series uprobe, bpf: Add session support

Commit Message

Jiri Olsa Sept. 9, 2024, 7:45 a.m. UTC
Adding support for a uprobe consumer to be defined as a session, with
new behaviour for the consumer's 'handler' and 'ret_handler' callbacks.

The session means that the 'handler' and 'ret_handler' callbacks are
connected in a way that makes it possible to:

  - control execution of 'ret_handler' from 'handler' callback
  - share data between 'handler' and 'ret_handler' callbacks

The session is enabled by setting the new 'session' bool field to true
in the uprobe_consumer object.

We use the return_consumer object to keep track of consumers with a
'ret_handler'. This object also carries the data shared between the
'handler' and 'ret_handler' callbacks.

The control of the 'ret_handler' callback execution is done via the
return value of the 'handler' callback. This patch adds a new 'handler'
return value (2) which means to ignore the ret_handler callback.

Actions on 'handler' callback return values are now:

  0 - execute ret_handler (if it's defined)
  1 - remove uprobe
  2 - do nothing (ignore ret_handler)

The session concept fits our common use case, where we do filtering in
the entry uprobe and, based on the result, decide to run the return
uprobe (or not).

It's also convenient to share the data between session callbacks.
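
For illustration only (this sketch is not part of the patch;
'interesting()' is a made-up filter and the timestamp is just one
possible use of the shared cookie), a minimal session consumer using
the new callbacks could look like:

static int my_entry(struct uprobe_consumer *self, struct pt_regs *regs,
		    __u64 *data)
{
	/* filter on entry, skip the return probe for uninteresting hits */
	if (!interesting(regs))
		return UPROBE_HANDLER_IGNORE;	/* 2 - do not run ret_handler */

	*data = ktime_get_ns();			/* cookie shared with my_ret() */
	return 0;				/* run ret_handler */
}

static int my_ret(struct uprobe_consumer *self, unsigned long func,
		  struct pt_regs *regs, __u64 *data)
{
	pr_debug("func %lx took %llu ns\n", func, ktime_get_ns() - *data);
	return 0;
}

static struct uprobe_consumer my_consumer = {
	.handler	= my_entry,
	.ret_handler	= my_ret,
	.session	= true,
};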

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
 include/linux/uprobes.h                       |  17 ++-
 kernel/events/uprobes.c                       | 132 ++++++++++++++----
 kernel/trace/bpf_trace.c                      |   6 +-
 kernel/trace/trace_uprobe.c                   |  12 +-
 .../selftests/bpf/bpf_testmod/bpf_testmod.c   |   2 +-
 5 files changed, 133 insertions(+), 36 deletions(-)

Comments

Andrii Nakryiko Sept. 9, 2024, 11:44 p.m. UTC | #1
On Mon, Sep 9, 2024 at 12:46 AM Jiri Olsa <jolsa@kernel.org> wrote:
>
> Adding support for uprobe consumer to be defined as session and have
> new behaviour for consumer's 'handler' and 'ret_handler' callbacks.
>
> The session means that 'handler' and 'ret_handler' callbacks are
> connected in a way that allows to:
>
>   - control execution of 'ret_handler' from 'handler' callback
>   - share data between 'handler' and 'ret_handler' callbacks
>
> The session is enabled by setting new 'session' bool field to true
> in uprobe_consumer object.
>
> We use return_consumer object to keep track of consumers with
> 'ret_handler'. This object also carries the shared data between
> 'handler' and and 'ret_handler' callbacks.

and and

>
> The control of 'ret_handler' callback execution is done via return
> value of the 'handler' callback. This patch adds new 'ret_handler'
> return value (2) which means to ignore ret_handler callback.
>
> Actions on 'handler' callback return values are now:
>
>   0 - execute ret_handler (if it's defined)
>   1 - remove uprobe
>   2 - do nothing (ignore ret_handler)
>
> The session concept fits to our common use case where we do filtering
> on entry uprobe and based on the result we decide to run the return
> uprobe (or not).
>
> It's also convenient to share the data between session callbacks.
>
> Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> ---

Just minor things:

Acked-by: Andrii Nakryiko <andrii@kernel.org>

>  include/linux/uprobes.h                       |  17 ++-
>  kernel/events/uprobes.c                       | 132 ++++++++++++++----
>  kernel/trace/bpf_trace.c                      |   6 +-
>  kernel/trace/trace_uprobe.c                   |  12 +-
>  .../selftests/bpf/bpf_testmod/bpf_testmod.c   |   2 +-
>  5 files changed, 133 insertions(+), 36 deletions(-)
>

[...]

>  enum rp_check {
> diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
> index 4b7e590dc428..9e971f86afdf 100644
> --- a/kernel/events/uprobes.c
> +++ b/kernel/events/uprobes.c
> @@ -67,6 +67,8 @@ struct uprobe {
>         loff_t                  ref_ctr_offset;
>         unsigned long           flags;

we should shorten flags to unsigned int, we use one bit out of it

>
> +       unsigned int            consumers_cnt;
> +

and then this won't increase the size of the struct unnecessarily

>         /*
>          * The generic code assumes that it has two members of unknown type
>          * owned by the arch-specific code:
> @@ -826,8 +828,12 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
>

[...]

>         current->utask->auprobe = NULL;
>
> -       if (need_prep && !remove)
> -               prepare_uretprobe(uprobe, regs); /* put bp at return */
> +       if (ri && !remove)
> +               prepare_uretprobe(uprobe, regs, ri); /* put bp at return */
> +       else
> +               kfree(ri);

maybe `else if (ri) kfree(ri)` to avoid unnecessary calls to kfree
when we only have uprobes?

>
>         if (remove && has_consumers) {
>                 down_read(&uprobe->register_rwsem);
> @@ -2160,15 +2230,25 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
>  static void
>  handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
>  {
> +       struct return_consumer *ric = NULL;
>         struct uprobe *uprobe = ri->uprobe;
>         struct uprobe_consumer *uc;
> -       int srcu_idx;
> +       int srcu_idx, iter = 0;

iter -> next_ric_idx  or just ric_idx?

>
>         srcu_idx = srcu_read_lock(&uprobes_srcu);
>         list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
>                                  srcu_read_lock_held(&uprobes_srcu)) {
> +               /*
> +                * If we don't find return consumer, it means uprobe consumer
> +                * was added after we hit uprobe and return consumer did not
> +                * get registered in which case we call the ret_handler only
> +                * if it's not session consumer.
> +                */
> +               ric = return_consumer_find(ri, &iter, uc->id);
> +               if (!ric && uc->session)
> +                       continue;
>                 if (uc->ret_handler)
> -                       uc->ret_handler(uc, ri->func, regs);
> +                       uc->ret_handler(uc, ri->func, regs, ric ? &ric->cookie : NULL);
>         }
>         srcu_read_unlock(&uprobes_srcu, srcu_idx);
>  }

[...]
Jiri Olsa Sept. 10, 2024, 7:17 a.m. UTC | #2
On Mon, Sep 09, 2024 at 04:44:09PM -0700, Andrii Nakryiko wrote:
> On Mon, Sep 9, 2024 at 12:46 AM Jiri Olsa <jolsa@kernel.org> wrote:
> >
> > Adding support for uprobe consumer to be defined as session and have
> > new behaviour for consumer's 'handler' and 'ret_handler' callbacks.
> >
> > The session means that 'handler' and 'ret_handler' callbacks are
> > connected in a way that allows to:
> >
> >   - control execution of 'ret_handler' from 'handler' callback
> >   - share data between 'handler' and 'ret_handler' callbacks
> >
> > The session is enabled by setting new 'session' bool field to true
> > in uprobe_consumer object.
> >
> > We use return_consumer object to keep track of consumers with
> > 'ret_handler'. This object also carries the shared data between
> > 'handler' and and 'ret_handler' callbacks.
> 
> and and

ok

> 
> >
> > The control of 'ret_handler' callback execution is done via return
> > value of the 'handler' callback. This patch adds new 'ret_handler'
> > return value (2) which means to ignore ret_handler callback.
> >
> > Actions on 'handler' callback return values are now:
> >
> >   0 - execute ret_handler (if it's defined)
> >   1 - remove uprobe
> >   2 - do nothing (ignore ret_handler)
> >
> > The session concept fits to our common use case where we do filtering
> > on entry uprobe and based on the result we decide to run the return
> > uprobe (or not).
> >
> > It's also convenient to share the data between session callbacks.
> >
> > Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> > ---
> 
> Just minor things:
> 
> Acked-by: Andrii Nakryiko <andrii@kernel.org>
> 
> >  include/linux/uprobes.h                       |  17 ++-
> >  kernel/events/uprobes.c                       | 132 ++++++++++++++----
> >  kernel/trace/bpf_trace.c                      |   6 +-
> >  kernel/trace/trace_uprobe.c                   |  12 +-
> >  .../selftests/bpf/bpf_testmod/bpf_testmod.c   |   2 +-
> >  5 files changed, 133 insertions(+), 36 deletions(-)
> >
> 
> [...]
> 
> >  enum rp_check {
> > diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
> > index 4b7e590dc428..9e971f86afdf 100644
> > --- a/kernel/events/uprobes.c
> > +++ b/kernel/events/uprobes.c
> > @@ -67,6 +67,8 @@ struct uprobe {
> >         loff_t                  ref_ctr_offset;
> >         unsigned long           flags;
> 
> we should shorten flags to unsigned int, we use one bit out of it
> 
> >
> > +       unsigned int            consumers_cnt;
> > +
> 
> and then this won't increase the size of the struct unnecessarily

right, makes sense

> 
> >         /*
> >          * The generic code assumes that it has two members of unknown type
> >          * owned by the arch-specific code:
> > @@ -826,8 +828,12 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
> >
> 
> [...]
> 
> >         current->utask->auprobe = NULL;
> >
> > -       if (need_prep && !remove)
> > -               prepare_uretprobe(uprobe, regs); /* put bp at return */
> > +       if (ri && !remove)
> > +               prepare_uretprobe(uprobe, regs, ri); /* put bp at return */
> > +       else
> > +               kfree(ri);
> 
> maybe `else if (ri) kfree(ri)` to avoid unnecessary calls to kfree
> when we only have uprobes?

there's a null check in kfree, but it's true that we can skip the whole
call and the else condition line is there already, ok

> 
> >
> >         if (remove && has_consumers) {
> >                 down_read(&uprobe->register_rwsem);
> > @@ -2160,15 +2230,25 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
> >  static void
> >  handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
> >  {
> > +       struct return_consumer *ric = NULL;
> >         struct uprobe *uprobe = ri->uprobe;
> >         struct uprobe_consumer *uc;
> > -       int srcu_idx;
> > +       int srcu_idx, iter = 0;
> 
> iter -> next_ric_idx  or just ric_idx?

sure, ric_idx seems ok to me

thanks,
jirka

> 
> >
> >         srcu_idx = srcu_read_lock(&uprobes_srcu);
> >         list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
> >                                  srcu_read_lock_held(&uprobes_srcu)) {
> > +               /*
> > +                * If we don't find return consumer, it means uprobe consumer
> > +                * was added after we hit uprobe and return consumer did not
> > +                * get registered in which case we call the ret_handler only
> > +                * if it's not session consumer.
> > +                */
> > +               ric = return_consumer_find(ri, &iter, uc->id);
> > +               if (!ric && uc->session)
> > +                       continue;
> >                 if (uc->ret_handler)
> > -                       uc->ret_handler(uc, ri->func, regs);
> > +                       uc->ret_handler(uc, ri->func, regs, ric ? &ric->cookie : NULL);
> >         }
> >         srcu_read_unlock(&uprobes_srcu, srcu_idx);
> >  }
> 
> [...]
Masami Hiramatsu (Google) Sept. 10, 2024, 2:10 p.m. UTC | #3
On Mon,  9 Sep 2024 09:45:48 +0200
Jiri Olsa <jolsa@kernel.org> wrote:

> Adding support for uprobe consumer to be defined as session and have
> new behaviour for consumer's 'handler' and 'ret_handler' callbacks.
> 
> The session means that 'handler' and 'ret_handler' callbacks are
> connected in a way that allows to:
> 
>   - control execution of 'ret_handler' from 'handler' callback
>   - share data between 'handler' and 'ret_handler' callbacks
> 
> The session is enabled by setting new 'session' bool field to true
> in uprobe_consumer object.
> 
> We use return_consumer object to keep track of consumers with
> 'ret_handler'. This object also carries the shared data between
> 'handler' and and 'ret_handler' callbacks.
> 
> The control of 'ret_handler' callback execution is done via return
> value of the 'handler' callback. This patch adds new 'ret_handler'
> return value (2) which means to ignore ret_handler callback.
> 
> Actions on 'handler' callback return values are now:
> 
>   0 - execute ret_handler (if it's defined)
>   1 - remove uprobe
>   2 - do nothing (ignore ret_handler)
> 
> The session concept fits to our common use case where we do filtering
> on entry uprobe and based on the result we decide to run the return
> uprobe (or not).

OK, so this allows the uprobe handler to record any input parameter and
access it from the ret_handler, like fprobe's private entry data, right?

The code looks good to me.

Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>

trace_uprobe should also support $argN in retuprobe.

https://lore.kernel.org/all/170952365552.229804.224112990211602895.stgit@devnote2/

Thank you,

> 
> It's also convenient to share the data between session callbacks.
> 
> Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> ---
>  include/linux/uprobes.h                       |  17 ++-
>  kernel/events/uprobes.c                       | 132 ++++++++++++++----
>  kernel/trace/bpf_trace.c                      |   6 +-
>  kernel/trace/trace_uprobe.c                   |  12 +-
>  .../selftests/bpf/bpf_testmod/bpf_testmod.c   |   2 +-
>  5 files changed, 133 insertions(+), 36 deletions(-)
> 
> diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
> index 2b294bf1881f..557901e04624 100644
> --- a/include/linux/uprobes.h
> +++ b/include/linux/uprobes.h
> @@ -24,7 +24,7 @@ struct notifier_block;
>  struct page;
>  
>  #define UPROBE_HANDLER_REMOVE		1
> -#define UPROBE_HANDLER_MASK		1
> +#define UPROBE_HANDLER_IGNORE		2
>  
>  #define MAX_URETPROBE_DEPTH		64
>  
> @@ -37,13 +37,16 @@ struct uprobe_consumer {
>  	 * for the current process. If filter() is omitted or returns true,
>  	 * UPROBE_HANDLER_REMOVE is effectively ignored.
>  	 */
> -	int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs);
> +	int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs, __u64 *data);
>  	int (*ret_handler)(struct uprobe_consumer *self,
>  				unsigned long func,
> -				struct pt_regs *regs);
> +				struct pt_regs *regs, __u64 *data);
>  	bool (*filter)(struct uprobe_consumer *self, struct mm_struct *mm);
>  
>  	struct list_head cons_node;
> +	bool session;
> +
> +	__u64 id;	/* set when uprobe_consumer is registered */
>  };
>  
>  #ifdef CONFIG_UPROBES
> @@ -83,14 +86,22 @@ struct uprobe_task {
>  	unsigned int			depth;
>  };
>  
> +struct return_consumer {
> +	__u64	cookie;
> +	__u64	id;
> +};
> +
>  struct return_instance {
>  	struct uprobe		*uprobe;
>  	unsigned long		func;
>  	unsigned long		stack;		/* stack pointer */
>  	unsigned long		orig_ret_vaddr; /* original return address */
>  	bool			chained;	/* true, if instance is nested */
> +	int			consumers_cnt;
>  
>  	struct return_instance	*next;		/* keep as stack */
> +
> +	struct return_consumer	consumers[] __counted_by(consumers_cnt);
>  };
>  
>  enum rp_check {
> diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
> index 4b7e590dc428..9e971f86afdf 100644
> --- a/kernel/events/uprobes.c
> +++ b/kernel/events/uprobes.c
> @@ -67,6 +67,8 @@ struct uprobe {
>  	loff_t			ref_ctr_offset;
>  	unsigned long		flags;
>  
> +	unsigned int		consumers_cnt;
> +
>  	/*
>  	 * The generic code assumes that it has two members of unknown type
>  	 * owned by the arch-specific code:
> @@ -826,8 +828,12 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
>  
>  static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
>  {
> +	static atomic64_t id;
> +
>  	down_write(&uprobe->consumer_rwsem);
>  	list_add_rcu(&uc->cons_node, &uprobe->consumers);
> +	uc->id = (__u64) atomic64_inc_return(&id);
> +	uprobe->consumers_cnt++;
>  	up_write(&uprobe->consumer_rwsem);
>  }
>  
> @@ -839,6 +845,7 @@ static void consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
>  {
>  	down_write(&uprobe->consumer_rwsem);
>  	list_del_rcu(&uc->cons_node);
> +	uprobe->consumers_cnt--;
>  	up_write(&uprobe->consumer_rwsem);
>  }
>  
> @@ -1786,6 +1793,30 @@ static struct uprobe_task *get_utask(void)
>  	return current->utask;
>  }
>  
> +static size_t ri_size(int consumers_cnt)
> +{
> +	struct return_instance *ri;
> +
> +	return sizeof(*ri) + sizeof(ri->consumers[0]) * consumers_cnt;
> +}
> +
> +static struct return_instance *alloc_return_instance(int consumers_cnt)
> +{
> +	struct return_instance *ri;
> +
> +	ri = kzalloc(ri_size(consumers_cnt), GFP_KERNEL);
> +	if (ri)
> +		ri->consumers_cnt = consumers_cnt;
> +	return ri;
> +}
> +
> +static struct return_instance *dup_return_instance(struct return_instance *old)
> +{
> +	size_t size = ri_size(old->consumers_cnt);
> +
> +	return kmemdup(old, size, GFP_KERNEL);
> +}
> +
>  static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
>  {
>  	struct uprobe_task *n_utask;
> @@ -1798,11 +1829,10 @@ static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
>  
>  	p = &n_utask->return_instances;
>  	for (o = o_utask->return_instances; o; o = o->next) {
> -		n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
> +		n = dup_return_instance(o);
>  		if (!n)
>  			return -ENOMEM;
>  
> -		*n = *o;
>  		/*
>  		 * uprobe's refcnt has to be positive at this point, kept by
>  		 * utask->return_instances items; return_instances can't be
> @@ -1895,39 +1925,35 @@ static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
>  	utask->return_instances = ri;
>  }
>  
> -static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
> +static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs,
> +			      struct return_instance *ri)
>  {
> -	struct return_instance *ri;
>  	struct uprobe_task *utask;
>  	unsigned long orig_ret_vaddr, trampoline_vaddr;
>  	bool chained;
>  
>  	if (!get_xol_area())
> -		return;
> +		goto free;
>  
>  	utask = get_utask();
>  	if (!utask)
> -		return;
> +		goto free;
>  
>  	if (utask->depth >= MAX_URETPROBE_DEPTH) {
>  		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
>  				" nestedness limit pid/tgid=%d/%d\n",
>  				current->pid, current->tgid);
> -		return;
> +		goto free;
>  	}
>  
>  	/* we need to bump refcount to store uprobe in utask */
>  	if (!try_get_uprobe(uprobe))
> -		return;
> -
> -	ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
> -	if (!ri)
> -		goto fail;
> +		goto free;
>  
>  	trampoline_vaddr = uprobe_get_trampoline_vaddr();
>  	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
>  	if (orig_ret_vaddr == -1)
> -		goto fail;
> +		goto put;
>  
>  	/* drop the entries invalidated by longjmp() */
>  	chained = (orig_ret_vaddr == trampoline_vaddr);
> @@ -1945,7 +1971,7 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
>  			 * attack from user-space.
>  			 */
>  			uprobe_warn(current, "handle tail call");
> -			goto fail;
> +			goto put;
>  		}
>  		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
>  	}
> @@ -1960,9 +1986,10 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
>  	utask->return_instances = ri;
>  
>  	return;
> -fail:
> -	kfree(ri);
> +put:
>  	put_uprobe(uprobe);
> +free:
> +	kfree(ri);
>  }
>  
>  /* Prepare to single-step probed instruction out of line. */
> @@ -2114,35 +2141,78 @@ static struct uprobe *find_active_uprobe_rcu(unsigned long bp_vaddr, int *is_swb
>  	return uprobe;
>  }
>  
> +static struct return_consumer *
> +return_consumer_next(struct return_instance *ri, struct return_consumer *ric)
> +{
> +	return ric ? ric + 1 : &ri->consumers[0];
> +}
> +
> +static struct return_consumer *
> +return_consumer_find(struct return_instance *ri, int *iter, int id)
> +{
> +	struct return_consumer *ric;
> +	int idx = *iter;
> +
> +	for (ric = &ri->consumers[idx]; idx < ri->consumers_cnt; idx++, ric++) {
> +		if (ric->id == id) {
> +			*iter = idx + 1;
> +			return ric;
> +		}
> +	}
> +	return NULL;
> +}
> +
>  static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
>  {
>  	struct uprobe_consumer *uc;
>  	int remove = UPROBE_HANDLER_REMOVE;
> -	bool need_prep = false; /* prepare return uprobe, when needed */
> +	struct return_consumer *ric = NULL;
> +	struct return_instance *ri = NULL;
>  	bool has_consumers = false;
>  
>  	current->utask->auprobe = &uprobe->arch;
>  
>  	list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
>  				 srcu_read_lock_held(&uprobes_srcu)) {
> +		__u64 cookie = 0;
>  		int rc = 0;
>  
>  		if (uc->handler) {
> -			rc = uc->handler(uc, regs);
> -			WARN(rc & ~UPROBE_HANDLER_MASK,
> +			rc = uc->handler(uc, regs, &cookie);
> +			WARN(rc < 0 || rc > 2,
>  				"bad rc=0x%x from %ps()\n", rc, uc->handler);
>  		}
>  
> -		if (uc->ret_handler)
> -			need_prep = true;
> -
> +		/*
> +		 * The handler can return following values:
> +		 * 0 - execute ret_handler (if it's defined)
> +		 * 1 - remove uprobe
> +		 * 2 - do nothing (ignore ret_handler)
> +		 */
>  		remove &= rc;
>  		has_consumers = true;
> +
> +		if (rc == 0 && uc->ret_handler) {
> +			/*
> +			 * Preallocate return_instance object optimistically with
> +			 * all possible consumers, so we allocate just once.
> +			 */
> +			if (!ri) {
> +				ri = alloc_return_instance(uprobe->consumers_cnt);
> +				if (!ri)
> +					return;
> +			}
> +			ric = return_consumer_next(ri, ric);
> +			ric->cookie = cookie;
> +			ric->id = uc->id;
> +		}
>  	}
>  	current->utask->auprobe = NULL;
>  
> -	if (need_prep && !remove)
> -		prepare_uretprobe(uprobe, regs); /* put bp at return */
> +	if (ri && !remove)
> +		prepare_uretprobe(uprobe, regs, ri); /* put bp at return */
> +	else
> +		kfree(ri);
>  
>  	if (remove && has_consumers) {
>  		down_read(&uprobe->register_rwsem);
> @@ -2160,15 +2230,25 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
>  static void
>  handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
>  {
> +	struct return_consumer *ric = NULL;
>  	struct uprobe *uprobe = ri->uprobe;
>  	struct uprobe_consumer *uc;
> -	int srcu_idx;
> +	int srcu_idx, iter = 0;
>  
>  	srcu_idx = srcu_read_lock(&uprobes_srcu);
>  	list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
>  				 srcu_read_lock_held(&uprobes_srcu)) {
> +		/*
> +		 * If we don't find return consumer, it means uprobe consumer
> +		 * was added after we hit uprobe and return consumer did not
> +		 * get registered in which case we call the ret_handler only
> +		 * if it's not session consumer.
> +		 */
> +		ric = return_consumer_find(ri, &iter, uc->id);
> +		if (!ric && uc->session)
> +			continue;
>  		if (uc->ret_handler)
> -			uc->ret_handler(uc, ri->func, regs);
> +			uc->ret_handler(uc, ri->func, regs, ric ? &ric->cookie : NULL);
>  	}
>  	srcu_read_unlock(&uprobes_srcu, srcu_idx);
>  }
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index ac0a01cc8634..de241503c8fb 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -3332,7 +3332,8 @@ uprobe_multi_link_filter(struct uprobe_consumer *con, struct mm_struct *mm)
>  }
>  
>  static int
> -uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs)
> +uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs,
> +			  __u64 *data)
>  {
>  	struct bpf_uprobe *uprobe;
>  
> @@ -3341,7 +3342,8 @@ uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs)
>  }
>  
>  static int
> -uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs)
> +uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs,
> +			      __u64 *data)
>  {
>  	struct bpf_uprobe *uprobe;
>  
> diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
> index f7443e996b1b..11103dde897b 100644
> --- a/kernel/trace/trace_uprobe.c
> +++ b/kernel/trace/trace_uprobe.c
> @@ -88,9 +88,11 @@ static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
>  static int register_uprobe_event(struct trace_uprobe *tu);
>  static int unregister_uprobe_event(struct trace_uprobe *tu);
>  
> -static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
> +static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs,
> +			     __u64 *data);
>  static int uretprobe_dispatcher(struct uprobe_consumer *con,
> -				unsigned long func, struct pt_regs *regs);
> +				unsigned long func, struct pt_regs *regs,
> +				__u64 *data);
>  
>  #ifdef CONFIG_STACK_GROWSUP
>  static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
> @@ -1500,7 +1502,8 @@ trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
>  	}
>  }
>  
> -static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
> +static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs,
> +			     __u64 *data)
>  {
>  	struct trace_uprobe *tu;
>  	struct uprobe_dispatch_data udd;
> @@ -1530,7 +1533,8 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
>  }
>  
>  static int uretprobe_dispatcher(struct uprobe_consumer *con,
> -				unsigned long func, struct pt_regs *regs)
> +				unsigned long func, struct pt_regs *regs,
> +				__u64 *data)
>  {
>  	struct trace_uprobe *tu;
>  	struct uprobe_dispatch_data udd;
> diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
> index 1fc16657cf42..e91ff5b285f0 100644
> --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
> +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
> @@ -421,7 +421,7 @@ static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
>  
>  static int
>  uprobe_ret_handler(struct uprobe_consumer *self, unsigned long func,
> -		   struct pt_regs *regs)
> +		   struct pt_regs *regs, __u64 *data)
>  
>  {
>  	regs->ax  = 0x12345678deadbeef;
> -- 
> 2.46.0
>
Jiri Olsa Sept. 11, 2024, 11:48 a.m. UTC | #4
On Tue, Sep 10, 2024 at 11:10:23PM +0900, Masami Hiramatsu wrote:
> On Mon,  9 Sep 2024 09:45:48 +0200
> Jiri Olsa <jolsa@kernel.org> wrote:
> 
> > Adding support for uprobe consumer to be defined as session and have
> > new behaviour for consumer's 'handler' and 'ret_handler' callbacks.
> > 
> > The session means that 'handler' and 'ret_handler' callbacks are
> > connected in a way that allows to:
> > 
> >   - control execution of 'ret_handler' from 'handler' callback
> >   - share data between 'handler' and 'ret_handler' callbacks
> > 
> > The session is enabled by setting new 'session' bool field to true
> > in uprobe_consumer object.
> > 
> > We use return_consumer object to keep track of consumers with
> > 'ret_handler'. This object also carries the shared data between
> > 'handler' and and 'ret_handler' callbacks.
> > 
> > The control of 'ret_handler' callback execution is done via return
> > value of the 'handler' callback. This patch adds new 'ret_handler'
> > return value (2) which means to ignore ret_handler callback.
> > 
> > Actions on 'handler' callback return values are now:
> > 
> >   0 - execute ret_handler (if it's defined)
> >   1 - remove uprobe
> >   2 - do nothing (ignore ret_handler)
> > 
> > The session concept fits to our common use case where we do filtering
> > on entry uprobe and based on the result we decide to run the return
> > uprobe (or not).
> 
> OK, so this allows uprobes handler to record any input parameter and
> access it from ret_handler as fprobe's private entry data, right?

at the moment the shared data is 8 bytes.. I explored having generic
(configurable) sized data, but it complicates the code a bit and we have
no use case for it at the moment, so I decided to keep it simple for now

I mentioned that in the cover email, I think it can be done as follows
if it's needed in the future:

  - I kept the session cookie instead of introducing generic session
    data, because it makes the change much more complicated, I think
    it can be added as a followup if it's needed

> 
> The code looks good to me.
> 
> Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> 
> trace_uprobe should also support $argN in retuprobe.

could you please share more details on the use case?

thanks,
jirka

> 
> https://lore.kernel.org/all/170952365552.229804.224112990211602895.stgit@devnote2/
> 
> Thank you,
> 
> > 
> > It's also convenient to share the data between session callbacks.
> > 
> > Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> > ---
> >  include/linux/uprobes.h                       |  17 ++-
> >  kernel/events/uprobes.c                       | 132 ++++++++++++++----
> >  kernel/trace/bpf_trace.c                      |   6 +-
> >  kernel/trace/trace_uprobe.c                   |  12 +-
> >  .../selftests/bpf/bpf_testmod/bpf_testmod.c   |   2 +-
> >  5 files changed, 133 insertions(+), 36 deletions(-)
> > 
> > diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
> > index 2b294bf1881f..557901e04624 100644
> > --- a/include/linux/uprobes.h
> > +++ b/include/linux/uprobes.h
> > @@ -24,7 +24,7 @@ struct notifier_block;
> >  struct page;
> >  
> >  #define UPROBE_HANDLER_REMOVE		1
> > -#define UPROBE_HANDLER_MASK		1
> > +#define UPROBE_HANDLER_IGNORE		2
> >  
> >  #define MAX_URETPROBE_DEPTH		64
> >  
> > @@ -37,13 +37,16 @@ struct uprobe_consumer {
> >  	 * for the current process. If filter() is omitted or returns true,
> >  	 * UPROBE_HANDLER_REMOVE is effectively ignored.
> >  	 */
> > -	int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs);
> > +	int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs, __u64 *data);
> >  	int (*ret_handler)(struct uprobe_consumer *self,
> >  				unsigned long func,
> > -				struct pt_regs *regs);
> > +				struct pt_regs *regs, __u64 *data);
> >  	bool (*filter)(struct uprobe_consumer *self, struct mm_struct *mm);
> >  
> >  	struct list_head cons_node;
> > +	bool session;
> > +
> > +	__u64 id;	/* set when uprobe_consumer is registered */
> >  };
> >  
> >  #ifdef CONFIG_UPROBES
> > @@ -83,14 +86,22 @@ struct uprobe_task {
> >  	unsigned int			depth;
> >  };
> >  
> > +struct return_consumer {
> > +	__u64	cookie;
> > +	__u64	id;
> > +};
> > +
> >  struct return_instance {
> >  	struct uprobe		*uprobe;
> >  	unsigned long		func;
> >  	unsigned long		stack;		/* stack pointer */
> >  	unsigned long		orig_ret_vaddr; /* original return address */
> >  	bool			chained;	/* true, if instance is nested */
> > +	int			consumers_cnt;
> >  
> >  	struct return_instance	*next;		/* keep as stack */
> > +
> > +	struct return_consumer	consumers[] __counted_by(consumers_cnt);
> >  };
> >  
> >  enum rp_check {
> > diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
> > index 4b7e590dc428..9e971f86afdf 100644
> > --- a/kernel/events/uprobes.c
> > +++ b/kernel/events/uprobes.c
> > @@ -67,6 +67,8 @@ struct uprobe {
> >  	loff_t			ref_ctr_offset;
> >  	unsigned long		flags;
> >  
> > +	unsigned int		consumers_cnt;
> > +
> >  	/*
> >  	 * The generic code assumes that it has two members of unknown type
> >  	 * owned by the arch-specific code:
> > @@ -826,8 +828,12 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
> >  
> >  static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
> >  {
> > +	static atomic64_t id;
> > +
> >  	down_write(&uprobe->consumer_rwsem);
> >  	list_add_rcu(&uc->cons_node, &uprobe->consumers);
> > +	uc->id = (__u64) atomic64_inc_return(&id);
> > +	uprobe->consumers_cnt++;
> >  	up_write(&uprobe->consumer_rwsem);
> >  }
> >  
> > @@ -839,6 +845,7 @@ static void consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
> >  {
> >  	down_write(&uprobe->consumer_rwsem);
> >  	list_del_rcu(&uc->cons_node);
> > +	uprobe->consumers_cnt--;
> >  	up_write(&uprobe->consumer_rwsem);
> >  }
> >  
> > @@ -1786,6 +1793,30 @@ static struct uprobe_task *get_utask(void)
> >  	return current->utask;
> >  }
> >  
> > +static size_t ri_size(int consumers_cnt)
> > +{
> > +	struct return_instance *ri;
> > +
> > +	return sizeof(*ri) + sizeof(ri->consumers[0]) * consumers_cnt;
> > +}
> > +
> > +static struct return_instance *alloc_return_instance(int consumers_cnt)
> > +{
> > +	struct return_instance *ri;
> > +
> > +	ri = kzalloc(ri_size(consumers_cnt), GFP_KERNEL);
> > +	if (ri)
> > +		ri->consumers_cnt = consumers_cnt;
> > +	return ri;
> > +}
> > +
> > +static struct return_instance *dup_return_instance(struct return_instance *old)
> > +{
> > +	size_t size = ri_size(old->consumers_cnt);
> > +
> > +	return kmemdup(old, size, GFP_KERNEL);
> > +}
> > +
> >  static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
> >  {
> >  	struct uprobe_task *n_utask;
> > @@ -1798,11 +1829,10 @@ static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
> >  
> >  	p = &n_utask->return_instances;
> >  	for (o = o_utask->return_instances; o; o = o->next) {
> > -		n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
> > +		n = dup_return_instance(o);
> >  		if (!n)
> >  			return -ENOMEM;
> >  
> > -		*n = *o;
> >  		/*
> >  		 * uprobe's refcnt has to be positive at this point, kept by
> >  		 * utask->return_instances items; return_instances can't be
> > @@ -1895,39 +1925,35 @@ static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
> >  	utask->return_instances = ri;
> >  }
> >  
> > -static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
> > +static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs,
> > +			      struct return_instance *ri)
> >  {
> > -	struct return_instance *ri;
> >  	struct uprobe_task *utask;
> >  	unsigned long orig_ret_vaddr, trampoline_vaddr;
> >  	bool chained;
> >  
> >  	if (!get_xol_area())
> > -		return;
> > +		goto free;
> >  
> >  	utask = get_utask();
> >  	if (!utask)
> > -		return;
> > +		goto free;
> >  
> >  	if (utask->depth >= MAX_URETPROBE_DEPTH) {
> >  		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
> >  				" nestedness limit pid/tgid=%d/%d\n",
> >  				current->pid, current->tgid);
> > -		return;
> > +		goto free;
> >  	}
> >  
> >  	/* we need to bump refcount to store uprobe in utask */
> >  	if (!try_get_uprobe(uprobe))
> > -		return;
> > -
> > -	ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
> > -	if (!ri)
> > -		goto fail;
> > +		goto free;
> >  
> >  	trampoline_vaddr = uprobe_get_trampoline_vaddr();
> >  	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
> >  	if (orig_ret_vaddr == -1)
> > -		goto fail;
> > +		goto put;
> >  
> >  	/* drop the entries invalidated by longjmp() */
> >  	chained = (orig_ret_vaddr == trampoline_vaddr);
> > @@ -1945,7 +1971,7 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
> >  			 * attack from user-space.
> >  			 */
> >  			uprobe_warn(current, "handle tail call");
> > -			goto fail;
> > +			goto put;
> >  		}
> >  		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
> >  	}
> > @@ -1960,9 +1986,10 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
> >  	utask->return_instances = ri;
> >  
> >  	return;
> > -fail:
> > -	kfree(ri);
> > +put:
> >  	put_uprobe(uprobe);
> > +free:
> > +	kfree(ri);
> >  }
> >  
> >  /* Prepare to single-step probed instruction out of line. */
> > @@ -2114,35 +2141,78 @@ static struct uprobe *find_active_uprobe_rcu(unsigned long bp_vaddr, int *is_swb
> >  	return uprobe;
> >  }
> >  
> > +static struct return_consumer *
> > +return_consumer_next(struct return_instance *ri, struct return_consumer *ric)
> > +{
> > +	return ric ? ric + 1 : &ri->consumers[0];
> > +}
> > +
> > +static struct return_consumer *
> > +return_consumer_find(struct return_instance *ri, int *iter, int id)
> > +{
> > +	struct return_consumer *ric;
> > +	int idx = *iter;
> > +
> > +	for (ric = &ri->consumers[idx]; idx < ri->consumers_cnt; idx++, ric++) {
> > +		if (ric->id == id) {
> > +			*iter = idx + 1;
> > +			return ric;
> > +		}
> > +	}
> > +	return NULL;
> > +}
> > +
> >  static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
> >  {
> >  	struct uprobe_consumer *uc;
> >  	int remove = UPROBE_HANDLER_REMOVE;
> > -	bool need_prep = false; /* prepare return uprobe, when needed */
> > +	struct return_consumer *ric = NULL;
> > +	struct return_instance *ri = NULL;
> >  	bool has_consumers = false;
> >  
> >  	current->utask->auprobe = &uprobe->arch;
> >  
> >  	list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
> >  				 srcu_read_lock_held(&uprobes_srcu)) {
> > +		__u64 cookie = 0;
> >  		int rc = 0;
> >  
> >  		if (uc->handler) {
> > -			rc = uc->handler(uc, regs);
> > -			WARN(rc & ~UPROBE_HANDLER_MASK,
> > +			rc = uc->handler(uc, regs, &cookie);
> > +			WARN(rc < 0 || rc > 2,
> >  				"bad rc=0x%x from %ps()\n", rc, uc->handler);
> >  		}
> >  
> > -		if (uc->ret_handler)
> > -			need_prep = true;
> > -
> > +		/*
> > +		 * The handler can return following values:
> > +		 * 0 - execute ret_handler (if it's defined)
> > +		 * 1 - remove uprobe
> > +		 * 2 - do nothing (ignore ret_handler)
> > +		 */
> >  		remove &= rc;
> >  		has_consumers = true;
> > +
> > +		if (rc == 0 && uc->ret_handler) {
> > +			/*
> > +			 * Preallocate return_instance object optimistically with
> > +			 * all possible consumers, so we allocate just once.
> > +			 */
> > +			if (!ri) {
> > +				ri = alloc_return_instance(uprobe->consumers_cnt);
> > +				if (!ri)
> > +					return;
> > +			}
> > +			ric = return_consumer_next(ri, ric);
> > +			ric->cookie = cookie;
> > +			ric->id = uc->id;
> > +		}
> >  	}
> >  	current->utask->auprobe = NULL;
> >  
> > -	if (need_prep && !remove)
> > -		prepare_uretprobe(uprobe, regs); /* put bp at return */
> > +	if (ri && !remove)
> > +		prepare_uretprobe(uprobe, regs, ri); /* put bp at return */
> > +	else
> > +		kfree(ri);
> >  
> >  	if (remove && has_consumers) {
> >  		down_read(&uprobe->register_rwsem);
> > @@ -2160,15 +2230,25 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
> >  static void
> >  handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
> >  {
> > +	struct return_consumer *ric = NULL;
> >  	struct uprobe *uprobe = ri->uprobe;
> >  	struct uprobe_consumer *uc;
> > -	int srcu_idx;
> > +	int srcu_idx, iter = 0;
> >  
> >  	srcu_idx = srcu_read_lock(&uprobes_srcu);
> >  	list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
> >  				 srcu_read_lock_held(&uprobes_srcu)) {
> > +		/*
> > +		 * If we don't find return consumer, it means uprobe consumer
> > +		 * was added after we hit uprobe and return consumer did not
> > +		 * get registered in which case we call the ret_handler only
> > +		 * if it's not session consumer.
> > +		 */
> > +		ric = return_consumer_find(ri, &iter, uc->id);
> > +		if (!ric && uc->session)
> > +			continue;
> >  		if (uc->ret_handler)
> > -			uc->ret_handler(uc, ri->func, regs);
> > +			uc->ret_handler(uc, ri->func, regs, ric ? &ric->cookie : NULL);
> >  	}
> >  	srcu_read_unlock(&uprobes_srcu, srcu_idx);
> >  }
> > diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> > index ac0a01cc8634..de241503c8fb 100644
> > --- a/kernel/trace/bpf_trace.c
> > +++ b/kernel/trace/bpf_trace.c
> > @@ -3332,7 +3332,8 @@ uprobe_multi_link_filter(struct uprobe_consumer *con, struct mm_struct *mm)
> >  }
> >  
> >  static int
> > -uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs)
> > +uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs,
> > +			  __u64 *data)
> >  {
> >  	struct bpf_uprobe *uprobe;
> >  
> > @@ -3341,7 +3342,8 @@ uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs)
> >  }
> >  
> >  static int
> > -uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs)
> > +uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs,
> > +			      __u64 *data)
> >  {
> >  	struct bpf_uprobe *uprobe;
> >  
> > diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
> > index f7443e996b1b..11103dde897b 100644
> > --- a/kernel/trace/trace_uprobe.c
> > +++ b/kernel/trace/trace_uprobe.c
> > @@ -88,9 +88,11 @@ static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
> >  static int register_uprobe_event(struct trace_uprobe *tu);
> >  static int unregister_uprobe_event(struct trace_uprobe *tu);
> >  
> > -static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
> > +static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs,
> > +			     __u64 *data);
> >  static int uretprobe_dispatcher(struct uprobe_consumer *con,
> > -				unsigned long func, struct pt_regs *regs);
> > +				unsigned long func, struct pt_regs *regs,
> > +				__u64 *data);
> >  
> >  #ifdef CONFIG_STACK_GROWSUP
> >  static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
> > @@ -1500,7 +1502,8 @@ trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
> >  	}
> >  }
> >  
> > -static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
> > +static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs,
> > +			     __u64 *data)
> >  {
> >  	struct trace_uprobe *tu;
> >  	struct uprobe_dispatch_data udd;
> > @@ -1530,7 +1533,8 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
> >  }
> >  
> >  static int uretprobe_dispatcher(struct uprobe_consumer *con,
> > -				unsigned long func, struct pt_regs *regs)
> > +				unsigned long func, struct pt_regs *regs,
> > +				__u64 *data)
> >  {
> >  	struct trace_uprobe *tu;
> >  	struct uprobe_dispatch_data udd;
> > diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
> > index 1fc16657cf42..e91ff5b285f0 100644
> > --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
> > +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
> > @@ -421,7 +421,7 @@ static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
> >  
> >  static int
> >  uprobe_ret_handler(struct uprobe_consumer *self, unsigned long func,
> > -		   struct pt_regs *regs)
> > +		   struct pt_regs *regs, __u64 *data)
> >  
> >  {
> >  	regs->ax  = 0x12345678deadbeef;
> > -- 
> > 2.46.0
> > 
> 
> 
> -- 
> Masami Hiramatsu (Google) <mhiramat@kernel.org>
Oleg Nesterov Sept. 12, 2024, 4:20 p.m. UTC | #5
On 09/09, Jiri Olsa wrote:
>
>  static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
>  {
>  	struct uprobe_consumer *uc;
>  	int remove = UPROBE_HANDLER_REMOVE;
> -	bool need_prep = false; /* prepare return uprobe, when needed */
> +	struct return_consumer *ric = NULL;
> +	struct return_instance *ri = NULL;
>  	bool has_consumers = false;
>
>  	current->utask->auprobe = &uprobe->arch;
>
>  	list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
>  				 srcu_read_lock_held(&uprobes_srcu)) {
> +		__u64 cookie = 0;
>  		int rc = 0;
>
>  		if (uc->handler) {
> -			rc = uc->handler(uc, regs);
> -			WARN(rc & ~UPROBE_HANDLER_MASK,
> +			rc = uc->handler(uc, regs, &cookie);
> +			WARN(rc < 0 || rc > 2,
>  				"bad rc=0x%x from %ps()\n", rc, uc->handler);
>  		}
>
> -		if (uc->ret_handler)
> -			need_prep = true;
> -
> +		/*
> +		 * The handler can return following values:
> +		 * 0 - execute ret_handler (if it's defined)
> +		 * 1 - remove uprobe
> +		 * 2 - do nothing (ignore ret_handler)
> +		 */
>  		remove &= rc;
>  		has_consumers = true;
> +
> +		if (rc == 0 && uc->ret_handler) {

should we enter this block if uc->handler == NULL?

> +			/*
> +			 * Preallocate return_instance object optimistically with
> +			 * all possible consumers, so we allocate just once.
> +			 */
> +			if (!ri) {
> +				ri = alloc_return_instance(uprobe->consumers_cnt);

This doesn't look right...

Suppose we have a single consumer C1, so uprobe->consumers_cnt == 1 and
alloc_return_instance() allocates return_instance with for a single consumer,
so that only ri->consumers[0] is valid.

Right after that uprobe_register()->consumer_add() adds another consumer
C2 with ->ret_handler != NULL.

On the next iteration return_consumer_next() will return the invalid addr
== &ri->consumers[1].

perhaps this needs krealloc() ?

> +				if (!ri)
> +					return;

Not sure we should simply return if kzalloc fails... at least it would be better
to clear current->utask->auprobe.

> +	if (ri && !remove)
> +		prepare_uretprobe(uprobe, regs, ri); /* put bp at return */
> +	else
> +		kfree(ri);

Well, if ri != NULL then remove is not possible, afaics... ri != NULL means
that at least one ->handler() returned rc = 0, thus "remove" must be zero.

So it seems you can just do

	if (ri)
		prepare_uretprobe(...);


Didn't read other parts of your patch yet ;)

Oleg.
Oleg Nesterov Sept. 12, 2024, 4:35 p.m. UTC | #6
On 09/09, Jiri Olsa wrote:
>
>  handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
>  {
> +	struct return_consumer *ric = NULL;
>  	struct uprobe *uprobe = ri->uprobe;
>  	struct uprobe_consumer *uc;
> -	int srcu_idx;
> +	int srcu_idx, iter = 0;
>
>  	srcu_idx = srcu_read_lock(&uprobes_srcu);
>  	list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
>  				 srcu_read_lock_held(&uprobes_srcu)) {
> +		/*
> +		 * If we don't find return consumer, it means uprobe consumer
> +		 * was added after we hit uprobe and return consumer did not
> +		 * get registered in which case we call the ret_handler only
> +		 * if it's not session consumer.
> +		 */
> +		ric = return_consumer_find(ri, &iter, uc->id);
> +		if (!ric && uc->session)
> +			continue;
>  		if (uc->ret_handler)
> -			uc->ret_handler(uc, ri->func, regs);
> +			uc->ret_handler(uc, ri->func, regs, ric ? &ric->cookie : NULL);

So why do we need the new uc->session member and the uc->session above ?

If return_consumer_find() returns NULL, uc->ret_handler(..., NULL) can handle
this case itself?

Oleg.
Jiri Olsa Sept. 13, 2024, 8:22 a.m. UTC | #7
On Thu, Sep 12, 2024 at 06:20:29PM +0200, Oleg Nesterov wrote:
> On 09/09, Jiri Olsa wrote:
> >
> >  static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
> >  {
> >  	struct uprobe_consumer *uc;
> >  	int remove = UPROBE_HANDLER_REMOVE;
> > -	bool need_prep = false; /* prepare return uprobe, when needed */
> > +	struct return_consumer *ric = NULL;
> > +	struct return_instance *ri = NULL;
> >  	bool has_consumers = false;
> >
> >  	current->utask->auprobe = &uprobe->arch;
> >
> >  	list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
> >  				 srcu_read_lock_held(&uprobes_srcu)) {
> > +		__u64 cookie = 0;
> >  		int rc = 0;
> >
> >  		if (uc->handler) {
> > -			rc = uc->handler(uc, regs);
> > -			WARN(rc & ~UPROBE_HANDLER_MASK,
> > +			rc = uc->handler(uc, regs, &cookie);
> > +			WARN(rc < 0 || rc > 2,
> >  				"bad rc=0x%x from %ps()\n", rc, uc->handler);
> >  		}
> >
> > -		if (uc->ret_handler)
> > -			need_prep = true;
> > -
> > +		/*
> > +		 * The handler can return following values:
> > +		 * 0 - execute ret_handler (if it's defined)
> > +		 * 1 - remove uprobe
> > +		 * 2 - do nothing (ignore ret_handler)
> > +		 */
> >  		remove &= rc;
> >  		has_consumers = true;
> > +
> > +		if (rc == 0 && uc->ret_handler) {
> 
> should we enter this block if uc->handler == NULL?

yes, consumer can have just ret_handler defined

> 
> > +			/*
> > +			 * Preallocate return_instance object optimistically with
> > +			 * all possible consumers, so we allocate just once.
> > +			 */
> > +			if (!ri) {
> > +				ri = alloc_return_instance(uprobe->consumers_cnt);
> 
> This doesn't look right...
> 
> Suppose we have a single consumer C1, so uprobe->consumers_cnt == 1 and
> alloc_return_instance() allocates return_instance with for a single consumer,
> so that only ri->consumers[0] is valid.
> 
> Right after that uprobe_register()->consumer_add() adds another consumer
> C2 with ->ret_handler != NULL.
> 
> On the next iteration return_consumer_next() will return the invalid addr
> == &ri->consumers[1].
> 
> perhaps this needs krealloc() ?

damn.. there used to be a lock ;-) ok, for some reason I thought we were safe
in that list iteration and we are not.. I just made a selftest that triggers that

I'm not sure the realloc will help, I feel like we need to allocate a return
consumer for each called handler separately to be safe

> 
> > +				if (!ri)
> > +					return;
> 
> Not sure we should simply return if kzalloc fails... at least it would be better
> to clear current->utask->auprobe.
> 
> > +	if (ri && !remove)
> > +		prepare_uretprobe(uprobe, regs, ri); /* put bp at return */
> > +	else
> > +		kfree(ri);
> 
> Well, if ri != NULL then remove is not possible, afaics... ri != NULL means
> that at least one ->handler() returned rc = 0, thus "remove" must be zero.
> 
> So it seems you can just do
> 
> 	if (ri)
> 		prepare_uretprobe(...);

true, I think that should be enough

thanks,
jirka

> 
> 
> Didn't read other parts of your patch yet ;)
> 
> Oleg.
>
Jiri Olsa Sept. 13, 2024, 8:36 a.m. UTC | #8
On Thu, Sep 12, 2024 at 06:35:39PM +0200, Oleg Nesterov wrote:
> On 09/09, Jiri Olsa wrote:
> >
> >  handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
> >  {
> > +	struct return_consumer *ric = NULL;
> >  	struct uprobe *uprobe = ri->uprobe;
> >  	struct uprobe_consumer *uc;
> > -	int srcu_idx;
> > +	int srcu_idx, iter = 0;
> >
> >  	srcu_idx = srcu_read_lock(&uprobes_srcu);
> >  	list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
> >  				 srcu_read_lock_held(&uprobes_srcu)) {
> > +		/*
> > +		 * If we don't find return consumer, it means uprobe consumer
> > +		 * was added after we hit uprobe and return consumer did not
> > +		 * get registered in which case we call the ret_handler only
> > +		 * if it's not session consumer.
> > +		 */
> > +		ric = return_consumer_find(ri, &iter, uc->id);
> > +		if (!ric && uc->session)
> > +			continue;
> >  		if (uc->ret_handler)
> > -			uc->ret_handler(uc, ri->func, regs);
> > +			uc->ret_handler(uc, ri->func, regs, ric ? &ric->cookie : NULL);
> 
> So why do we need the new uc->session member and the uc->session above ?
> 
> If return_consumer_find() returns NULL, uc->ret_handler(..., NULL) can handle
> this case itself?

I tried to explain that in the comment above.. we do not want to
execute session ret_handler at all in this case, because its entry
counterpart did not run

jirka

> 
> Oleg.
>
Oleg Nesterov Sept. 13, 2024, 9:32 a.m. UTC | #9
On 09/13, Jiri Olsa wrote:
>
> On Thu, Sep 12, 2024 at 06:35:39PM +0200, Oleg Nesterov wrote:
> > >  	list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
> > >  				 srcu_read_lock_held(&uprobes_srcu)) {
> > > +		/*
> > > +		 * If we don't find return consumer, it means uprobe consumer
> > > +		 * was added after we hit uprobe and return consumer did not
> > > +		 * get registered in which case we call the ret_handler only
> > > +		 * if it's not session consumer.
> > > +		 */
> > > +		ric = return_consumer_find(ri, &iter, uc->id);
> > > +		if (!ric && uc->session)
> > > +			continue;
> > >  		if (uc->ret_handler)
> > > -			uc->ret_handler(uc, ri->func, regs);
> > > +			uc->ret_handler(uc, ri->func, regs, ric ? &ric->cookie : NULL);
> >
> > So why do we need the new uc->session member and the uc->session above ?
> >
> > If return_consumer_find() returns NULL, uc->ret_handler(..., NULL) can handle
> > this case itself?
>
> I tried to explain that in the comment above.. we do not want to
> execute session ret_handler at all in this case, because its entry
> counterpart did not run

I understand, but the session ret_handler(..., __u64 *data) can simply do

	// my ->handler() didn't run or it didn't return 0
	if (!data)
		return;

at the start?

Oleg.
Oleg Nesterov Sept. 13, 2024, 10:07 a.m. UTC | #10
On 09/13, Jiri Olsa wrote:
>
> On Thu, Sep 12, 2024 at 06:20:29PM +0200, Oleg Nesterov wrote:
> > > +
> > > +		if (rc == 0 && uc->ret_handler) {
> >
> > should we enter this block if uc->handler == NULL?
>
> yes, consumer can have just ret_handler defined

Sorry, I meant we do not need to push { cookie, id } into return_instance
if uc->handler == NULL.

And in fact I'd prefer (but won't insist) the new

	UPROBE_HANDLER_I_WANT_MY_COOKIE_PLEASE

return code to make this logic more explicit.

Oleg.
Jiri Olsa Sept. 13, 2024, 10:17 a.m. UTC | #11
On Fri, Sep 13, 2024 at 11:32:01AM +0200, Oleg Nesterov wrote:
> On 09/13, Jiri Olsa wrote:
> >
> > On Thu, Sep 12, 2024 at 06:35:39PM +0200, Oleg Nesterov wrote:
> > > >  	list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
> > > >  				 srcu_read_lock_held(&uprobes_srcu)) {
> > > > +		/*
> > > > +		 * If we don't find return consumer, it means uprobe consumer
> > > > +		 * was added after we hit uprobe and return consumer did not
> > > > +		 * get registered in which case we call the ret_handler only
> > > > +		 * if it's not session consumer.
> > > > +		 */
> > > > +		ric = return_consumer_find(ri, &iter, uc->id);
> > > > +		if (!ric && uc->session)
> > > > +			continue;
> > > >  		if (uc->ret_handler)
> > > > -			uc->ret_handler(uc, ri->func, regs);
> > > > +			uc->ret_handler(uc, ri->func, regs, ric ? &ric->cookie : NULL);
> > >
> > > So why do we need the new uc->session member and the uc->session above ?
> > >
> > > If return_consumer_find() returns NULL, uc->ret_handler(..., NULL) can handle
> > > this case itself?
> >
> > I tried to explain that in the comment above.. we do not want to
> > execute session ret_handler at all in this case, because its entry
> > counterpart did not run
> 
> I understand, but the session ret_handler(..., __u64 *data) can simply do
> 
> 	// my ->handler() didn't run or it didn't return 0
> 	if (!data)
> 		return;
> 
> at the start?

I see, that's actually the only usage of the 'session' flag, so we could
get rid of it and do the above check in the uprobe_multi layer.. good idea
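
fwiw, a rough sketch of what that check could look like on the
uprobe_multi side (the 'session' field on bpf_uprobe is hypothetical
here, and uprobe_prog_run() is assumed to be the existing helper in
bpf_trace.c):

static int
uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func,
			      struct pt_regs *regs, __u64 *data)
{
	struct bpf_uprobe *uprobe;

	uprobe = container_of(con, struct bpf_uprobe, consumer);

	/* session consumer whose entry handler did not run or asked to skip */
	if (uprobe->session && !data)
		return 0;

	return uprobe_prog_run(uprobe, func, regs);
}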

thanks,
jirka
Oleg Nesterov Sept. 13, 2024, 10:57 a.m. UTC | #12
On 09/13, Jiri Olsa wrote:
>
> I'm not sure the realloc will help, I feel like we need to allocate return
> consumer for each called handler separately to be safe

How about something like the (pseudo) code below? Note that this way
we do not need uprobe->consumers_cnt. Note also that krealloc() should
be unlikely and it checks ksize() before it does another allocation.

Oleg.

static size_t ri_size(int consumers_cnt)
{
	return sizeof(struct return_instance) +
		      sizeof(struct return_consumer) * consumers_cnt;
}

#define DEF_CNT	4	// arbitrary value

static struct return_instance *alloc_return_instance(void)
{
	struct return_instance *ri;

	ri = kzalloc(ri_size(DEF_CNT), GFP_KERNEL);
	if (!ri)
		return ZERO_SIZE_PTR;

	ri->consumers_cnt = DEF_CNT;
	return ri;
}

static struct return_instance *push_id_cookie(struct return_instance *ri, int idx,
						__u64 id, __u64 cookie)
{
	if (unlikely(ri == ZERO_SIZE_PTR))
		return ri;

	if (unlikely(idx >= ri->consumers_cnt)) {
		struct return_instance *ri_new;

		ri->consumers_cnt += DEF_CNT;
		ri_new = krealloc(ri, ri_size(ri->consumers_cnt), GFP_KERNEL);
		if (!ri_new) {
			kfree(ri);
			return ZERO_SIZE_PTR;
		}
		ri = ri_new;
	}

	ri->consumers[idx].id = id;
	ri->consumers[idx].cookie = cookie;
	return ri;
}

static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
{
	...
	struct return_instance *ri = NULL;
	int push_idx = 0;

	list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
		__u64 cookie = 0;
		int rc = 0;

		if (uc->handler)
			rc = uc->handler(uc, regs, &cookie);

		remove &= rc;
		has_consumers = true;

		if (!uc->ret_handler || rc == UPROBE_HANDLER_REMOVE || rc == 2)
			continue;

		if (!ri)
			ri = alloc_return_instance();

		// or, better: if (rc == UPROBE_HANDLER_I_WANT_MY_COOKIE)
		if (uc->handler)
			ri = push_id_cookie(ri, push_idx++, uc->id, cookie);
	}

	if (!ZERO_OR_NULL_PTR(ri)) {
		ri->consumers_cnt = push_idx;
		prepare_uretprobe(uprobe, regs, ri);
	}

	...
}
Jiri Olsa Sept. 13, 2024, 11:34 a.m. UTC | #13
On Fri, Sep 13, 2024 at 12:57:51PM +0200, Oleg Nesterov wrote:
> On 09/13, Jiri Olsa wrote:
> >
> > I'm not sure the realloc will help, I feel like we need to allocate return
> > consumer for each called handler separately to be safe
> 
> How about something like the (pseudo) code below? Note that this way
> we do not need uprobe->consumers_cnt. Note also that krealloc() should
> be unlikely and it checks ksize() before it does another allocation.
> 
> Oleg.
> 
> static size_t ri_size(int consumers_cnt)
> {
> 	return sizeof(struct return_instance) +
> 		      sizeof(struct return_consumer) * consumers_cnt;
> }
> 
> #define DEF_CNT	4	// arbitrary value
> 
> static struct return_instance *alloc_return_instance(void)
> {
> 	struct return_instance *ri;
> 
> 	ri = kzalloc(ri_size(DEF_CNT), GFP_KERNEL);
> 	if (!ri)
> 		return ZERO_SIZE_PTR;
> 
> 	ri->consumers_cnt = DEF_CNT;
> 	return ri;
> }
> 
> static struct return_instance *push_id_cookie(struct return_instance *ri, int idx,
> 						__u64 id, __u64 cookie)
> {
> 	if (unlikely(ri == ZERO_SIZE_PTR))
> 		return ri;
> 
> 	if (unlikely(idx >= ri->consumers_cnt)) {
> 		struct return_instance *ri_new;
> 
> 		ri_new = krealloc(ri, ri_size(ri->consumers_cnt + DEF_CNT), GFP_KERNEL);
> 		if (!ri_new) {
> 			kfree(ri);	/* krealloc() does not free the old buffer on failure */
> 			return ZERO_SIZE_PTR;
> 		}
> 		ri = ri_new;
> 		ri->consumers_cnt += DEF_CNT;
> 	}
> 
> 	ri->consumers[idx].id = id;
> 	ri->consumers[idx].cookie = cookie;
> 	return ri;
> }
> 
> static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
> {
> 	...
> 	struct return_instance *ri = NULL;
> 	int push_idx = 0;
> 
> 	list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
> 		__u64 cookie = 0;
> 		int rc = 0;
> 
> 		if (uc->handler)
> 			rc = uc->handler(uc, regs, &cookie);
> 
> 		remove &= rc;
> 		has_consumers = true;
> 
> 		if (!uc->ret_handler || rc == UPROBE_HANDLER_REMOVE || rc == 2)
> 			continue;
> 
> 		if (!ri)
> 			ri = alloc_return_instance();
> 
> 		// or, better: if (rc == UPROBE_HANDLER_I_WANT_MY_COOKIE)
> 		if (uc->handler)
> 			ri = push_id_cookie(ri, push_idx++, uc->id, cookie);
> 	}
> 
> 	if (!ZERO_OR_NULL_PTR(ri)) {

should we rather bail out right after we fail to allocate ri above?

> 		ri->consumers_cnt = push_idx;
> 		prepare_uretprobe(uprobe, regs, ri);
> 	}
> 
> 	...
> }
> 

nice, I like that, will try to plug it in with the rest

thanks,
jirka
Oleg Nesterov Sept. 13, 2024, 11:41 a.m. UTC | #14
On 09/13, Jiri Olsa wrote:
>
> On Fri, Sep 13, 2024 at 12:57:51PM +0200, Oleg Nesterov wrote:
>
> > static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
> > {
> > 	...
> > 	struct return_instance *ri = NULL;
> > 	int push_idx = 0;
> >
> > 	list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
> > 		__u64 cookie = 0;
> > 		int rc = 0;
> >
> > 		if (uc->handler)
> > 			rc = uc->handler(uc, regs, &cookie);
> >
> > 		remove &= rc;
> > 		has_consumers = true;
> >
> > 		if (!uc->ret_handler || rc == UPROBE_HANDLER_REMOVE || rc == 2)
> > 			continue;
> >
> > 		if (!ri)
> > 			ri = alloc_return_instance();
> >
> > 		// or, better: if (rc == UPROBE_HANDLER_I_WANT_MY_COOKIE)
> > 		if (uc->handler)
> > 			ri = push_id_cookie(ri, push_idx++, uc->id, cookie);
> > 	}
> >
> > 	if (!ZERO_OR_NULL_PTR(ri)) {
>
> should we rather bail out right after we fail to allocate ri above?

I think handler_chain() should call all the ->handler's even if
kzalloc/krealloc fails.

This is close to what the current code does: all the ->handler's are
called even if the later prepare_uretprobe()->kmalloc() fails.

Oleg.
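
In the pseudo-code above, that is exactly what the ZERO_SIZE_PTR sentinel
provides: an allocation failure only disables cookie recording and the
uretprobe, while the loop still calls every consumer's handler. Roughly
(a restatement of the pseudo-code, not of the posted patch):

	if (!ri)
		ri = alloc_return_instance();	/* ZERO_SIZE_PTR when kzalloc() fails */

	ri = push_id_cookie(ri, push_idx++, uc->id, cookie);	/* no-op on ZERO_SIZE_PTR */
	...
	if (!ZERO_OR_NULL_PTR(ri))		/* only the uretprobe is skipped */
		prepare_uretprobe(uprobe, regs, ri);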
Oleg Nesterov Sept. 13, 2024, 11:52 a.m. UTC | #15
On 09/09, Jiri Olsa wrote:
>
> @@ -37,13 +37,16 @@ struct uprobe_consumer {
>  	 * for the current process. If filter() is omitted or returns true,
>  	 * UPROBE_HANDLER_REMOVE is effectively ignored.
>  	 */
> -	int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs);
> +	int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs, __u64 *data);
>  	int (*ret_handler)(struct uprobe_consumer *self,
>  				unsigned long func,
> -				struct pt_regs *regs);
> +				struct pt_regs *regs, __u64 *data);

And... I won't insist, but I'd suggest doing this in a separate patch
which should also update the current users in bpf_trace.c, trace_uprobe.c
and bpf_testmod.c.

Then it would be easier to review the next "functional" change. But this
is minor, feel free to ignore.


Finally, imo this documentation in handler_chain()

		/*
		 * The handler can return following values:
		 * 0 - execute ret_handler (if it's defined)
		 * 1 - remove uprobe
		 * 2 - do nothing (ignore ret_handler)
		 */

should be moved to uprobes.h and explain UPROBE_HANDLER_REMOVE/IGNORE there.

And note that "remove uprobe" is misleading; it should say something
like "remove the breakpoint from current->mm".

Oleg.
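
A sketch of how that documentation could look in include/linux/uprobes.h
(suggested wording only, not part of the posted patch; the constant values
match the patch below):

/*
 * Values the uprobe_consumer 'handler' callback can return:
 *
 *   0                     - run ret_handler (if it is defined)
 *   UPROBE_HANDLER_REMOVE - remove the breakpoint from current->mm
 *   UPROBE_HANDLER_IGNORE - do nothing, in particular do not run ret_handler
 */
#define UPROBE_HANDLER_REMOVE		1
#define UPROBE_HANDLER_IGNORE		2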

Patch

diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 2b294bf1881f..557901e04624 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -24,7 +24,7 @@  struct notifier_block;
 struct page;
 
 #define UPROBE_HANDLER_REMOVE		1
-#define UPROBE_HANDLER_MASK		1
+#define UPROBE_HANDLER_IGNORE		2
 
 #define MAX_URETPROBE_DEPTH		64
 
@@ -37,13 +37,16 @@  struct uprobe_consumer {
 	 * for the current process. If filter() is omitted or returns true,
 	 * UPROBE_HANDLER_REMOVE is effectively ignored.
 	 */
-	int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs);
+	int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs, __u64 *data);
 	int (*ret_handler)(struct uprobe_consumer *self,
 				unsigned long func,
-				struct pt_regs *regs);
+				struct pt_regs *regs, __u64 *data);
 	bool (*filter)(struct uprobe_consumer *self, struct mm_struct *mm);
 
 	struct list_head cons_node;
+	bool session;
+
+	__u64 id;	/* set when uprobe_consumer is registered */
 };
 
 #ifdef CONFIG_UPROBES
@@ -83,14 +86,22 @@  struct uprobe_task {
 	unsigned int			depth;
 };
 
+struct return_consumer {
+	__u64	cookie;
+	__u64	id;
+};
+
 struct return_instance {
 	struct uprobe		*uprobe;
 	unsigned long		func;
 	unsigned long		stack;		/* stack pointer */
 	unsigned long		orig_ret_vaddr; /* original return address */
 	bool			chained;	/* true, if instance is nested */
+	int			consumers_cnt;
 
 	struct return_instance	*next;		/* keep as stack */
+
+	struct return_consumer	consumers[] __counted_by(consumers_cnt);
 };
 
 enum rp_check {
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 4b7e590dc428..9e971f86afdf 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -67,6 +67,8 @@  struct uprobe {
 	loff_t			ref_ctr_offset;
 	unsigned long		flags;
 
+	unsigned int		consumers_cnt;
+
 	/*
 	 * The generic code assumes that it has two members of unknown type
 	 * owned by the arch-specific code:
@@ -826,8 +828,12 @@  static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
 
 static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
 {
+	static atomic64_t id;
+
 	down_write(&uprobe->consumer_rwsem);
 	list_add_rcu(&uc->cons_node, &uprobe->consumers);
+	uc->id = (__u64) atomic64_inc_return(&id);
+	uprobe->consumers_cnt++;
 	up_write(&uprobe->consumer_rwsem);
 }
 
@@ -839,6 +845,7 @@  static void consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
 {
 	down_write(&uprobe->consumer_rwsem);
 	list_del_rcu(&uc->cons_node);
+	uprobe->consumers_cnt--;
 	up_write(&uprobe->consumer_rwsem);
 }
 
@@ -1786,6 +1793,30 @@  static struct uprobe_task *get_utask(void)
 	return current->utask;
 }
 
+static size_t ri_size(int consumers_cnt)
+{
+	struct return_instance *ri;
+
+	return sizeof(*ri) + sizeof(ri->consumers[0]) * consumers_cnt;
+}
+
+static struct return_instance *alloc_return_instance(int consumers_cnt)
+{
+	struct return_instance *ri;
+
+	ri = kzalloc(ri_size(consumers_cnt), GFP_KERNEL);
+	if (ri)
+		ri->consumers_cnt = consumers_cnt;
+	return ri;
+}
+
+static struct return_instance *dup_return_instance(struct return_instance *old)
+{
+	size_t size = ri_size(old->consumers_cnt);
+
+	return kmemdup(old, size, GFP_KERNEL);
+}
+
 static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
 {
 	struct uprobe_task *n_utask;
@@ -1798,11 +1829,10 @@  static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
 
 	p = &n_utask->return_instances;
 	for (o = o_utask->return_instances; o; o = o->next) {
-		n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
+		n = dup_return_instance(o);
 		if (!n)
 			return -ENOMEM;
 
-		*n = *o;
 		/*
 		 * uprobe's refcnt has to be positive at this point, kept by
 		 * utask->return_instances items; return_instances can't be
@@ -1895,39 +1925,35 @@  static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
 	utask->return_instances = ri;
 }
 
-static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
+static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs,
+			      struct return_instance *ri)
 {
-	struct return_instance *ri;
 	struct uprobe_task *utask;
 	unsigned long orig_ret_vaddr, trampoline_vaddr;
 	bool chained;
 
 	if (!get_xol_area())
-		return;
+		goto free;
 
 	utask = get_utask();
 	if (!utask)
-		return;
+		goto free;
 
 	if (utask->depth >= MAX_URETPROBE_DEPTH) {
 		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
 				" nestedness limit pid/tgid=%d/%d\n",
 				current->pid, current->tgid);
-		return;
+		goto free;
 	}
 
 	/* we need to bump refcount to store uprobe in utask */
 	if (!try_get_uprobe(uprobe))
-		return;
-
-	ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
-	if (!ri)
-		goto fail;
+		goto free;
 
 	trampoline_vaddr = uprobe_get_trampoline_vaddr();
 	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
 	if (orig_ret_vaddr == -1)
-		goto fail;
+		goto put;
 
 	/* drop the entries invalidated by longjmp() */
 	chained = (orig_ret_vaddr == trampoline_vaddr);
@@ -1945,7 +1971,7 @@  static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
 			 * attack from user-space.
 			 */
 			uprobe_warn(current, "handle tail call");
-			goto fail;
+			goto put;
 		}
 		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
 	}
@@ -1960,9 +1986,10 @@  static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
 	utask->return_instances = ri;
 
 	return;
-fail:
-	kfree(ri);
+put:
 	put_uprobe(uprobe);
+free:
+	kfree(ri);
 }
 
 /* Prepare to single-step probed instruction out of line. */
@@ -2114,35 +2141,78 @@  static struct uprobe *find_active_uprobe_rcu(unsigned long bp_vaddr, int *is_swb
 	return uprobe;
 }
 
+static struct return_consumer *
+return_consumer_next(struct return_instance *ri, struct return_consumer *ric)
+{
+	return ric ? ric + 1 : &ri->consumers[0];
+}
+
+static struct return_consumer *
+return_consumer_find(struct return_instance *ri, int *iter, int id)
+{
+	struct return_consumer *ric;
+	int idx = *iter;
+
+	for (ric = &ri->consumers[idx]; idx < ri->consumers_cnt; idx++, ric++) {
+		if (ric->id == id) {
+			*iter = idx + 1;
+			return ric;
+		}
+	}
+	return NULL;
+}
+
 static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
 {
 	struct uprobe_consumer *uc;
 	int remove = UPROBE_HANDLER_REMOVE;
-	bool need_prep = false; /* prepare return uprobe, when needed */
+	struct return_consumer *ric = NULL;
+	struct return_instance *ri = NULL;
 	bool has_consumers = false;
 
 	current->utask->auprobe = &uprobe->arch;
 
 	list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
 				 srcu_read_lock_held(&uprobes_srcu)) {
+		__u64 cookie = 0;
 		int rc = 0;
 
 		if (uc->handler) {
-			rc = uc->handler(uc, regs);
-			WARN(rc & ~UPROBE_HANDLER_MASK,
+			rc = uc->handler(uc, regs, &cookie);
+			WARN(rc < 0 || rc > 2,
 				"bad rc=0x%x from %ps()\n", rc, uc->handler);
 		}
 
-		if (uc->ret_handler)
-			need_prep = true;
-
+		/*
+		 * The handler can return following values:
+		 * 0 - execute ret_handler (if it's defined)
+		 * 1 - remove uprobe
+		 * 2 - do nothing (ignore ret_handler)
+		 */
 		remove &= rc;
 		has_consumers = true;
+
+		if (rc == 0 && uc->ret_handler) {
+			/*
+			 * Preallocate return_instance object optimistically with
+			 * all possible consumers, so we allocate just once.
+			 */
+			if (!ri) {
+				ri = alloc_return_instance(uprobe->consumers_cnt);
+				if (!ri)
+					return;
+			}
+			ric = return_consumer_next(ri, ric);
+			ric->cookie = cookie;
+			ric->id = uc->id;
+		}
 	}
 	current->utask->auprobe = NULL;
 
-	if (need_prep && !remove)
-		prepare_uretprobe(uprobe, regs); /* put bp at return */
+	if (ri && !remove)
+		prepare_uretprobe(uprobe, regs, ri); /* put bp at return */
+	else
+		kfree(ri);
 
 	if (remove && has_consumers) {
 		down_read(&uprobe->register_rwsem);
@@ -2160,15 +2230,25 @@  static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
 static void
 handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
 {
+	struct return_consumer *ric = NULL;
 	struct uprobe *uprobe = ri->uprobe;
 	struct uprobe_consumer *uc;
-	int srcu_idx;
+	int srcu_idx, iter = 0;
 
 	srcu_idx = srcu_read_lock(&uprobes_srcu);
 	list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
 				 srcu_read_lock_held(&uprobes_srcu)) {
+		/*
+		 * If we don't find return consumer, it means uprobe consumer
+		 * was added after we hit uprobe and return consumer did not
+		 * get registered in which case we call the ret_handler only
+		 * if it's not session consumer.
+		 */
+		ric = return_consumer_find(ri, &iter, uc->id);
+		if (!ric && uc->session)
+			continue;
 		if (uc->ret_handler)
-			uc->ret_handler(uc, ri->func, regs);
+			uc->ret_handler(uc, ri->func, regs, ric ? &ric->cookie : NULL);
 	}
 	srcu_read_unlock(&uprobes_srcu, srcu_idx);
 }
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index ac0a01cc8634..de241503c8fb 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -3332,7 +3332,8 @@  uprobe_multi_link_filter(struct uprobe_consumer *con, struct mm_struct *mm)
 }
 
 static int
-uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs)
+uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs,
+			  __u64 *data)
 {
 	struct bpf_uprobe *uprobe;
 
@@ -3341,7 +3342,8 @@  uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs)
 }
 
 static int
-uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs)
+uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs,
+			      __u64 *data)
 {
 	struct bpf_uprobe *uprobe;
 
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index f7443e996b1b..11103dde897b 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -88,9 +88,11 @@  static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
 static int register_uprobe_event(struct trace_uprobe *tu);
 static int unregister_uprobe_event(struct trace_uprobe *tu);
 
-static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
+static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs,
+			     __u64 *data);
 static int uretprobe_dispatcher(struct uprobe_consumer *con,
-				unsigned long func, struct pt_regs *regs);
+				unsigned long func, struct pt_regs *regs,
+				__u64 *data);
 
 #ifdef CONFIG_STACK_GROWSUP
 static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
@@ -1500,7 +1502,8 @@  trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
 	}
 }
 
-static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
+static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs,
+			     __u64 *data)
 {
 	struct trace_uprobe *tu;
 	struct uprobe_dispatch_data udd;
@@ -1530,7 +1533,8 @@  static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
 }
 
 static int uretprobe_dispatcher(struct uprobe_consumer *con,
-				unsigned long func, struct pt_regs *regs)
+				unsigned long func, struct pt_regs *regs,
+				__u64 *data)
 {
 	struct trace_uprobe *tu;
 	struct uprobe_dispatch_data udd;
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index 1fc16657cf42..e91ff5b285f0 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -421,7 +421,7 @@  static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
 
 static int
 uprobe_ret_handler(struct uprobe_consumer *self, unsigned long func,
-		   struct pt_regs *regs)
+		   struct pt_regs *regs, __u64 *data)
 
 {
 	regs->ax  = 0x12345678deadbeef;