
[V6,7/8] io_uring/uring_cmd: support provide group kernel buffer

Message ID 20240912104933.1875409-8-ming.lei@redhat.com (mailing list archive)
State New
Series io_uring: support sqe group and provide group kbuf

Commit Message

Ming Lei Sept. 12, 2024, 10:49 a.m. UTC
Allow a uring command to act as group leader and provide a kernel buffer,
which enables generic zero copy over device buffers.

The following patch uses this mechanism to support zero copy for ublk.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 include/linux/io_uring/cmd.h  |  7 +++++++
 include/uapi/linux/io_uring.h |  7 ++++++-
 io_uring/uring_cmd.c          | 28 ++++++++++++++++++++++++++++
 3 files changed, 41 insertions(+), 1 deletion(-)

Comments

Pavel Begunkov Oct. 4, 2024, 3:44 p.m. UTC | #1
On 9/12/24 11:49, Ming Lei wrote:
> Allow uring command to be group leader for providing kernel buffer,
> and this way can support generic device zero copy over device buffer.
> 
> The following patch will use the way to support zero copy for ublk.
> 
> Signed-off-by: Ming Lei <ming.lei@redhat.com>
> ---
>   include/linux/io_uring/cmd.h  |  7 +++++++
>   include/uapi/linux/io_uring.h |  7 ++++++-
>   io_uring/uring_cmd.c          | 28 ++++++++++++++++++++++++++++
>   3 files changed, 41 insertions(+), 1 deletion(-)
> 
> diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
> index 447fbfd32215..fde3a2ec7d9a 100644
> --- a/include/linux/io_uring/cmd.h
> +++ b/include/linux/io_uring/cmd.h
> @@ -48,6 +48,8 @@ void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
>   void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
>   		unsigned int issue_flags);
>   
> +int io_uring_cmd_provide_kbuf(struct io_uring_cmd *ioucmd,
> +		const struct io_uring_kernel_buf *grp_kbuf);
>   #else
>   static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
>   			      struct iov_iter *iter, void *ioucmd)
> @@ -67,6 +69,11 @@ static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
>   		unsigned int issue_flags)
>   {
>   }
> +static inline int io_uring_cmd_provide_kbuf(struct io_uring_cmd *ioucmd,
> +		const struct io_uring_kernel_buf *grp_kbuf)
> +{
> +	return -EOPNOTSUPP;
> +}
>   #endif
>   
>   /*
> diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
> index 2af32745ebd3..11985eeac10e 100644
> --- a/include/uapi/linux/io_uring.h
> +++ b/include/uapi/linux/io_uring.h
> @@ -271,9 +271,14 @@ enum io_uring_op {
>    * sqe->uring_cmd_flags		top 8bits aren't available for userspace
>    * IORING_URING_CMD_FIXED	use registered buffer; pass this flag
>    *				along with setting sqe->buf_index.
> + * IORING_PROVIDE_GROUP_KBUF	this command provides group kernel buffer
> + *				for member requests which can retrieve
> + *				any sub-buffer with offset(sqe->addr) and
> + *				len(sqe->len)

Is there a good reason it needs to be a cmd generic flag instead of
ublk specific?

1. Extra overhead for files / cmds that don't even care about the
feature.

2. As it stands with this patch, the flag is ignored by all other
cmd implementations, which might be quite confusing as an api,
especially so since if we don't set that REQ_F_GROUP_KBUF, member
requests will silently try to import a buffer the "normal way",
i.e. interpret sqe->addr or such as the target buffer.

3. We can't even put some nice semantics on top since it's
still cmd specific and not generic to all other io_uring
requests.

I'd even think that it'd make sense to implement it as a
new cmd opcode, but that's the business of the file implementing
it, i.e. ublk.

>    */
>   #define IORING_URING_CMD_FIXED	(1U << 0)
> -#define IORING_URING_CMD_MASK	IORING_URING_CMD_FIXED
> +#define IORING_PROVIDE_GROUP_KBUF	(1U << 1)
> +#define IORING_URING_CMD_MASK	(IORING_URING_CMD_FIXED | IORING_PROVIDE_GROUP_KBUF)
>   
>   
>   /*
Ming Lei Oct. 6, 2024, 8:46 a.m. UTC | #2
On Fri, Oct 04, 2024 at 04:44:54PM +0100, Pavel Begunkov wrote:
> On 9/12/24 11:49, Ming Lei wrote:
> > Allow uring command to be group leader for providing kernel buffer,
> > and this way can support generic device zero copy over device buffer.
> > 
> > The following patch will use the way to support zero copy for ublk.
> > 
> > Signed-off-by: Ming Lei <ming.lei@redhat.com>
> > ---
> >   include/linux/io_uring/cmd.h  |  7 +++++++
> >   include/uapi/linux/io_uring.h |  7 ++++++-
> >   io_uring/uring_cmd.c          | 28 ++++++++++++++++++++++++++++
> >   3 files changed, 41 insertions(+), 1 deletion(-)
> > 
> > diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
> > index 447fbfd32215..fde3a2ec7d9a 100644
> > --- a/include/linux/io_uring/cmd.h
> > +++ b/include/linux/io_uring/cmd.h
> > @@ -48,6 +48,8 @@ void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
> >   void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
> >   		unsigned int issue_flags);
> > +int io_uring_cmd_provide_kbuf(struct io_uring_cmd *ioucmd,
> > +		const struct io_uring_kernel_buf *grp_kbuf);
> >   #else
> >   static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
> >   			      struct iov_iter *iter, void *ioucmd)
> > @@ -67,6 +69,11 @@ static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
> >   		unsigned int issue_flags)
> >   {
> >   }
> > +static inline int io_uring_cmd_provide_kbuf(struct io_uring_cmd *ioucmd,
> > +		const struct io_uring_kernel_buf *grp_kbuf)
> > +{
> > +	return -EOPNOTSUPP;
> > +}
> >   #endif
> >   /*
> > diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
> > index 2af32745ebd3..11985eeac10e 100644
> > --- a/include/uapi/linux/io_uring.h
> > +++ b/include/uapi/linux/io_uring.h
> > @@ -271,9 +271,14 @@ enum io_uring_op {
> >    * sqe->uring_cmd_flags		top 8bits aren't available for userspace
> >    * IORING_URING_CMD_FIXED	use registered buffer; pass this flag
> >    *				along with setting sqe->buf_index.
> > + * IORING_PROVIDE_GROUP_KBUF	this command provides group kernel buffer
> > + *				for member requests which can retrieve
> > + *				any sub-buffer with offset(sqe->addr) and
> > + *				len(sqe->len)
> 
> Is there a good reason it needs to be a cmd generic flag instead of
> ublk specific?

An io_uring request isn't visible to drivers, so a driver can't know whether
the uring command is a group leader.

Another way is to add a new API, io_uring_cmd_may_provide_buffer(ioucmd),
so the driver can check whether a device buffer can be provided with this
uring_cmd, but I prefer the new uring_cmd flag:

- IORING_PROVIDE_GROUP_KBUF can provide a device buffer in a generic way.
- ->prep() can fail fast in case it isn't a group request (see the sketch
  below)
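
A minimal driver-side sketch (not taken from the posted patches; the
io_uring_kernel_buf setup comes from earlier patches in this series, and
the function name is made up for illustration):

#include <linux/io_uring/cmd.h>

/* Sketch only: lend a driver-owned buffer to the group's member requests */
static int demo_provide_group_kbuf(struct io_uring_cmd *ioucmd,
				   const struct io_uring_kernel_buf *kbuf)
{
	/* fail fast unless userspace marked this command as a provider */
	if (!(ioucmd->flags & IORING_PROVIDE_GROUP_KBUF))
		return -EINVAL;

	/* make the buffer available to member requests in the same group */
	return io_uring_cmd_provide_kbuf(ioucmd, kbuf);
}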

> 
> 1. Extra overhead for files / cmds that don't even care about the
> feature.

It is just checking ioucmd->flags in ->prep(), and basically zero cost.

> 
> 2. As it stands with this patch, the flag is ignored by all other
> cmd implementations, which might be quite confusing as an api,
> especially so since if we don't set that REQ_F_GROUP_KBUF, member
> requests will silently try to import a buffer the "normal way",

The usage is the same as with buffer select or fixed buffers: the consumer
has to check the flag.

And it is the same with IORING_URING_CMD_FIXED, which is ignored by other
implementations except for nvme, :-)

I can understand the concern, but it has existed since uring_cmd was born.

> i.e. interpret sqe->addr or such as the target buffer.

> 3. We can't even put some nice semantics on top since it's
> still cmd specific and not generic to all other io_uring
> requests.
> 
> I'd even think that it'd make sense to implement it as a
> new cmd opcode, but that's the business of the file implementing
> it, i.e. ublk.
> 
> >    */
> >   #define IORING_URING_CMD_FIXED	(1U << 0)
> > -#define IORING_URING_CMD_MASK	IORING_URING_CMD_FIXED
> > +#define IORING_PROVIDE_GROUP_KBUF	(1U << 1)
> > +#define IORING_URING_CMD_MASK	(IORING_URING_CMD_FIXED | IORING_PROVIDE_GROUP_KBUF)

It would need one new file operation, and we shouldn't move in that
direction.



Thanks,
Ming
Pavel Begunkov Oct. 9, 2024, 3:14 p.m. UTC | #3
On 10/6/24 09:46, Ming Lei wrote:
> On Fri, Oct 04, 2024 at 04:44:54PM +0100, Pavel Begunkov wrote:
>> On 9/12/24 11:49, Ming Lei wrote:
>>> Allow uring command to be group leader for providing kernel buffer,
>>> and this way can support generic device zero copy over device buffer.
>>>
>>> The following patch will use the way to support zero copy for ublk.
>>>
>>> Signed-off-by: Ming Lei <ming.lei@redhat.com>
>>> ---
>>>    include/linux/io_uring/cmd.h  |  7 +++++++
>>>    include/uapi/linux/io_uring.h |  7 ++++++-
>>>    io_uring/uring_cmd.c          | 28 ++++++++++++++++++++++++++++
>>>    3 files changed, 41 insertions(+), 1 deletion(-)
>>>
>>> diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
>>> index 447fbfd32215..fde3a2ec7d9a 100644
>>> --- a/include/linux/io_uring/cmd.h
>>> +++ b/include/linux/io_uring/cmd.h
>>> @@ -48,6 +48,8 @@ void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
>>>    void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
>>>    		unsigned int issue_flags);
>>> +int io_uring_cmd_provide_kbuf(struct io_uring_cmd *ioucmd,
>>> +		const struct io_uring_kernel_buf *grp_kbuf);
>>>    #else
>>>    static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
>>>    			      struct iov_iter *iter, void *ioucmd)
>>> @@ -67,6 +69,11 @@ static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
>>>    		unsigned int issue_flags)
>>>    {
>>>    }
>>> +static inline int io_uring_cmd_provide_kbuf(struct io_uring_cmd *ioucmd,
>>> +		const struct io_uring_kernel_buf *grp_kbuf)
>>> +{
>>> +	return -EOPNOTSUPP;
>>> +}
>>>    #endif
>>>    /*
>>> diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
>>> index 2af32745ebd3..11985eeac10e 100644
>>> --- a/include/uapi/linux/io_uring.h
>>> +++ b/include/uapi/linux/io_uring.h
>>> @@ -271,9 +271,14 @@ enum io_uring_op {
>>>     * sqe->uring_cmd_flags		top 8bits aren't available for userspace
>>>     * IORING_URING_CMD_FIXED	use registered buffer; pass this flag
>>>     *				along with setting sqe->buf_index.
>>> + * IORING_PROVIDE_GROUP_KBUF	this command provides group kernel buffer
>>> + *				for member requests which can retrieve
>>> + *				any sub-buffer with offset(sqe->addr) and
>>> + *				len(sqe->len)
>>
>> Is there a good reason it needs to be a cmd generic flag instead of
>> ublk specific?
> 
> io_uring request isn't visible for drivers, so driver can't know if the
> uring command is one group leader.

btw, does it have to be in a group at all? Sure, nobody would be
able to consume the buffer, but otherwise should be fine.

> Another way is to add new API of io_uring_cmd_may_provide_buffer(ioucmd)

The checks can be done inside of io_uring_cmd_provide_kbuf()

> so driver can check if device buffer can be provided with this uring_cmd,
> but I prefer to the new uring_cmd flag:
> 
> - IORING_PROVIDE_GROUP_KBUF can provide device buffer in generic way.

Ok, could be.

> - ->prep() can fail fast in case that it isn't one group request

I don't believe that matters, a well-behaved user should never
see that kind of failure.


>> 1. Extra overhead for files / cmds that don't even care about the
>> feature.
> 
> It is just checking ioucmd->flags in ->prep(), and basically zero cost.

It's not if we add extra code for each and every feature, at
which point it becomes a maze of such "ifs".

>> 2. As it stands with this patch, the flag is ignored by all other
>> cmd implementations, which might be quite confusing as an api,
>> especially so since if we don't set that REQ_F_GROUP_KBUF, member
>> requests will silently try to import a buffer the "normal way",
> 
> The usage is same with buffer select or fixed buffer, and consumer
> has to check the flag.

We fail requests when they ask to use a feature that isn't
supported, at least for non-cmd requests.

> And same with IORING_URING_CMD_FIXED which is ignored by other
> implementations except for nvme, :-)

Oh, that's bad. If you'd try to implement the flag in the
future it might break the uapi. It might be worth patching it
up on the ublk side, i.e. reject the flag, + backport, and hope
nobody tried to use them together, hmm?

> I can understand the concern, but it exits since uring cmd is born.
> 
>> i.e. interpret sqe->addr or such as the target buffer.
> 
>> 3. We can't even put some nice semantics on top since it's
>> still cmd specific and not generic to all other io_uring
>> requests.
>>
>> I'd even think that it'd make sense to implement it as a
>> new cmd opcode, but that's the business of the file implementing
>> it, i.e. ublk.
>>
>>>     */
>>>    #define IORING_URING_CMD_FIXED	(1U << 0)
>>> -#define IORING_URING_CMD_MASK	IORING_URING_CMD_FIXED
>>> +#define IORING_PROVIDE_GROUP_KBUF	(1U << 1)
>>> +#define IORING_URING_CMD_MASK	(IORING_URING_CMD_FIXED | IORING_PROVIDE_GROUP_KBUF)
> 
> It needs one new file operation, and we shouldn't work toward
> this way.

Not a new io_uring request, I rather meant sqe->cmd_op,
like UBLK_U_IO_FETCH_REQ_PROVIDER_BUFFER.
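
For illustration, a hedged sketch of what that could look like on the ublk
side; UBLK_U_IO_FETCH_REQ_PROVIDER_BUFFER and ublk_fetch() are hypothetical
names, and this stands in for ublk's real ->uring_cmd() handler rather than
reproducing it:

static int ublk_uring_cmd_dispatch(struct io_uring_cmd *cmd,
				   unsigned int issue_flags)
{
	switch (cmd->cmd_op) {
	case UBLK_U_IO_FETCH_REQ:
		/* existing path: no kernel buffer is provided */
		return ublk_fetch(cmd, issue_flags, false);
	case UBLK_U_IO_FETCH_REQ_PROVIDER_BUFFER:
		/* same path, but also publish the request pages as the
		 * group kernel buffer via io_uring_cmd_provide_kbuf()
		 */
		return ublk_fetch(cmd, issue_flags, true);
	default:
		return -EOPNOTSUPP;
	}
}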
Ming Lei Oct. 10, 2024, 3:28 a.m. UTC | #4
On Wed, Oct 09, 2024 at 04:14:33PM +0100, Pavel Begunkov wrote:
> On 10/6/24 09:46, Ming Lei wrote:
> > On Fri, Oct 04, 2024 at 04:44:54PM +0100, Pavel Begunkov wrote:
> > > On 9/12/24 11:49, Ming Lei wrote:
> > > > Allow uring command to be group leader for providing kernel buffer,
> > > > and this way can support generic device zero copy over device buffer.
> > > > 
> > > > The following patch will use the way to support zero copy for ublk.
> > > > 
> > > > Signed-off-by: Ming Lei <ming.lei@redhat.com>
> > > > ---
> > > >    include/linux/io_uring/cmd.h  |  7 +++++++
> > > >    include/uapi/linux/io_uring.h |  7 ++++++-
> > > >    io_uring/uring_cmd.c          | 28 ++++++++++++++++++++++++++++
> > > >    3 files changed, 41 insertions(+), 1 deletion(-)
> > > > 
> > > > diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
> > > > index 447fbfd32215..fde3a2ec7d9a 100644
> > > > --- a/include/linux/io_uring/cmd.h
> > > > +++ b/include/linux/io_uring/cmd.h
> > > > @@ -48,6 +48,8 @@ void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
> > > >    void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
> > > >    		unsigned int issue_flags);
> > > > +int io_uring_cmd_provide_kbuf(struct io_uring_cmd *ioucmd,
> > > > +		const struct io_uring_kernel_buf *grp_kbuf);
> > > >    #else
> > > >    static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
> > > >    			      struct iov_iter *iter, void *ioucmd)
> > > > @@ -67,6 +69,11 @@ static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
> > > >    		unsigned int issue_flags)
> > > >    {
> > > >    }
> > > > +static inline int io_uring_cmd_provide_kbuf(struct io_uring_cmd *ioucmd,
> > > > +		const struct io_uring_kernel_buf *grp_kbuf)
> > > > +{
> > > > +	return -EOPNOTSUPP;
> > > > +}
> > > >    #endif
> > > >    /*
> > > > diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
> > > > index 2af32745ebd3..11985eeac10e 100644
> > > > --- a/include/uapi/linux/io_uring.h
> > > > +++ b/include/uapi/linux/io_uring.h
> > > > @@ -271,9 +271,14 @@ enum io_uring_op {
> > > >     * sqe->uring_cmd_flags		top 8bits aren't available for userspace
> > > >     * IORING_URING_CMD_FIXED	use registered buffer; pass this flag
> > > >     *				along with setting sqe->buf_index.
> > > > + * IORING_PROVIDE_GROUP_KBUF	this command provides group kernel buffer
> > > > + *				for member requests which can retrieve
> > > > + *				any sub-buffer with offset(sqe->addr) and
> > > > + *				len(sqe->len)
> > > 
> > > Is there a good reason it needs to be a cmd generic flag instead of
> > > ublk specific?
> > 
> > io_uring request isn't visible for drivers, so driver can't know if the
> > uring command is one group leader.
> 
> btw, does it have to be in a group at all? Sure, nobody would be
> able to consume the buffer, but otherwise should be fine.
> 
> > Another way is to add new API of io_uring_cmd_may_provide_buffer(ioucmd)
> 
> The checks can be done inside of io_uring_cmd_provide_kbuf()

Yeah.

Now the difference is just:

- whether the user knows it explicitly (UAPI flag) or implicitly (via the driver's ->cmd_op),
- whether the driver knows this uring_cmd is a group leader

I am fine with either way.

> 
> > so driver can check if device buffer can be provided with this uring_cmd,
> > but I prefer to the new uring_cmd flag:
> > 
> > - IORING_PROVIDE_GROUP_KBUF can provide device buffer in generic way.
> 
> Ok, could be.
> 
> > - ->prep() can fail fast in case that it isn't one group request
> 
> I don't believe that matters, a behaving user should never
> see that kind of failure.
> 
> 
> > > 1. Extra overhead for files / cmds that don't even care about the
> > > feature.
> > 
> > It is just checking ioucmd->flags in ->prep(), and basically zero cost.
> 
> It's not if we add extra code for each every feature, at
> which point it becomes a maze of such "ifs".

Yeah, I guess it can't be avoided in the current uring_cmd design, which
serves different subsystems now, and more in the future.

And the situation is similar to ioctl.

> 
> > > 2. As it stands with this patch, the flag is ignored by all other
> > > cmd implementations, which might be quite confusing as an api,
> > > especially so since if we don't set that REQ_F_GROUP_KBUF, member
> > > requests will silently try to import a buffer the "normal way",
> > 
> > The usage is same with buffer select or fixed buffer, and consumer
> > has to check the flag.
> 
> We fails requests when it's asked to use the feature but
> those are not supported, at least non-cmd requests.
> 
> > And same with IORING_URING_CMD_FIXED which is ignored by other
> > implementations except for nvme, :-)
> 
> Oh, that's bad. If you'd try to implement the flag in the
> future it might break the uapi. It might be worth to patch it
> up on the ublk side, i.e. reject the flag, + backport, and hope
> nobody tried to use them together, hmm?
> 
> > I can understand the concern, but it exits since uring cmd is born.
> > 
> > > i.e. interpret sqe->addr or such as the target buffer.
> > 
> > > 3. We can't even put some nice semantics on top since it's
> > > still cmd specific and not generic to all other io_uring
> > > requests.
> > > 
> > > I'd even think that it'd make sense to implement it as a
> > > new cmd opcode, but that's the business of the file implementing
> > > it, i.e. ublk.
> > > 
> > > >     */
> > > >    #define IORING_URING_CMD_FIXED	(1U << 0)
> > > > -#define IORING_URING_CMD_MASK	IORING_URING_CMD_FIXED
> > > > +#define IORING_PROVIDE_GROUP_KBUF	(1U << 1)
> > > > +#define IORING_URING_CMD_MASK	(IORING_URING_CMD_FIXED | IORING_PROVIDE_GROUP_KBUF)
> > 
> > It needs one new file operation, and we shouldn't work toward
> > this way.
> 
> Not a new io_uring request, I rather meant sqe->cmd_op,
> like UBLK_U_IO_FETCH_REQ_PROVIDER_BUFFER.

`cmd_op` is supposed to be defined by subsystems, but maybe we can
reserve some for generic uring_cmd. Anyway this shouldn't be a big
deal; we can do that in the future if there are more such uses.


Thanks,
Ming
Pavel Begunkov Oct. 10, 2024, 3:48 p.m. UTC | #5
On 10/10/24 04:28, Ming Lei wrote:
> On Wed, Oct 09, 2024 at 04:14:33PM +0100, Pavel Begunkov wrote:
>> On 10/6/24 09:46, Ming Lei wrote:
>>> On Fri, Oct 04, 2024 at 04:44:54PM +0100, Pavel Begunkov wrote:
>>>> On 9/12/24 11:49, Ming Lei wrote:
...
>>> so driver can check if device buffer can be provided with this uring_cmd,
>>> but I prefer to the new uring_cmd flag:
>>>
>>> - IORING_PROVIDE_GROUP_KBUF can provide device buffer in generic way.
>>
>> Ok, could be.
>>
>>> - ->prep() can fail fast in case that it isn't one group request
>>
>> I don't believe that matters, a behaving user should never
>> see that kind of failure.
>>
>>
>>>> 1. Extra overhead for files / cmds that don't even care about the
>>>> feature.
>>>
>>> It is just checking ioucmd->flags in ->prep(), and basically zero cost.
>>
>> It's not if we add extra code for each every feature, at
>> which point it becomes a maze of such "ifs".
> 
> Yeah, I guess it can't be avoided in current uring_cmd design, which

It can't be avoided only if we keep putting all custom / specific
command features into the common path. And, for example, I just
named how this one could be avoided.

The real question is whether we deem that buffer providing
feature applicable widely enough so that it could be useful
to many potential command implementations and therefore is
worth of partially handling it generically in the common path.

> serves for different subsystems now, and more in future.
> 
> And the situation is similar with ioctl.

Well, commands look too much like ioctl for my taste, but even
then I naively hope we can avoid regressing to that.

>>>> 2. As it stands with this patch, the flag is ignored by all other
>>>> cmd implementations, which might be quite confusing as an api,
>>>> especially so since if we don't set that REQ_F_GROUP_KBUF, member
>>>> requests will silently try to import a buffer the "normal way",
>>>
>>> The usage is same with buffer select or fixed buffer, and consumer
>>> has to check the flag.
>>
>> We fails requests when it's asked to use the feature but
>> those are not supported, at least non-cmd requests.
>>
>>> And same with IORING_URING_CMD_FIXED which is ignored by other
>>> implementations except for nvme, :-)
>>
>> Oh, that's bad. If you'd try to implement the flag in the
>> future it might break the uapi. It might be worth to patch it
>> up on the ublk side, i.e. reject the flag, + backport, and hope
>> nobody tried to use them together, hmm?
>>
>>> I can understand the concern, but it exits since uring cmd is born.
>>>
>>>> i.e. interpret sqe->addr or such as the target buffer.
>>>
>>>> 3. We can't even put some nice semantics on top since it's
>>>> still cmd specific and not generic to all other io_uring
>>>> requests.
>>>>
>>>> I'd even think that it'd make sense to implement it as a
>>>> new cmd opcode, but that's the business of the file implementing
>>>> it, i.e. ublk.
>>>>
>>>>>      */
>>>>>     #define IORING_URING_CMD_FIXED	(1U << 0)
>>>>> -#define IORING_URING_CMD_MASK	IORING_URING_CMD_FIXED
>>>>> +#define IORING_PROVIDE_GROUP_KBUF	(1U << 1)
>>>>> +#define IORING_URING_CMD_MASK	(IORING_URING_CMD_FIXED | IORING_PROVIDE_GROUP_KBUF)
>>>
>>> It needs one new file operation, and we shouldn't work toward
>>> this way.
>>
>> Not a new io_uring request, I rather meant sqe->cmd_op,
>> like UBLK_U_IO_FETCH_REQ_PROVIDER_BUFFER.
> 
> `cmd_op` is supposed to be defined by subsystems, but maybe we can
> reserve some for generic uring_cmd. Anyway this shouldn't be one big
> deal, we can do that in future if there are more such uses.

That's if the generic handling is desired, which isn't much
different from a flag; otherwise it can just be a new random
file-specific cmd opcode like any other.
Jens Axboe Oct. 10, 2024, 7:31 p.m. UTC | #6
Hi,

Discussed this with Pavel, and on his suggestion, I tried prototyping a
"buffer update" opcode. Basically it works like
IORING_REGISTER_BUFFERS_UPDATE in that it can update an existing buffer
registration. But it works as an sqe rather than being a sync opcode.

The idea here is that you could do that upfront, or as part of a chain,
and have it be generically available, just like any other buffer that
was registered upfront. You do need an empty table registered first,
which can just be sparse. And since you can pick the slot it goes into,
you can rely on that slot afterwards (either as a link, or just the
following sqe).
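
For reference, a minimal userspace sketch of that flow. It assumes the
prototype below is applied (IORING_OP_BUF_UPDATE is not an upstream opcode)
and that a sparse table was registered up front, e.g. with
io_uring_register_buffers_sparse(ring, 8):

#include <liburing.h>
#include <stdint.h>
#include <string.h>

/* Sketch only: update a registered-buffer slot, then consume it with a
 * linked fixed-buffer read. The SQE layout mirrors io_buf_update_prep()
 * from the prototype below.
 */
static void queue_update_then_read(struct io_uring *ring, int fd,
				   void *buf, unsigned int len, int slot)
{
	static struct iovec iov;
	static struct io_uring_rsrc_update2 up;
	struct io_uring_sqe *sqe;

	iov.iov_base = buf;
	iov.iov_len = len;

	memset(&up, 0, sizeof(up));
	up.offset = slot;			/* table slot to update */
	up.data = (uintptr_t)&iov;		/* one iovec */
	up.nr = 1;

	/* update the slot; link so the read only runs afterwards */
	sqe = io_uring_get_sqe(ring);
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_BUF_UPDATE;	/* prototype-only opcode */
	sqe->addr = (uintptr_t)&up;
	sqe->len = 1;
	sqe->flags = IOSQE_IO_LINK;

	/* consume the freshly updated slot as a fixed buffer */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read_fixed(sqe, fd, buf, len, 0, slot);
}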

Quick'n dirty obviously, but I did write a quick test case too to verify
that:

1) It actually works (it seems to)
2) It's not too slow (it seems not to be, I can get ~2.5M updates per
   second in a vm on my laptop, which isn't too bad).

Not saying this is perfect, but perhaps it's worth entertaining an idea
like that? It has the added benefit of being persistent across system
calls as well, unless you do another IORING_OP_BUF_UPDATE at the end of
your chain to re-set it.

Comments? Could it be useful for this?

diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 86cb385fe0b5..02d4b66267ef 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -259,6 +259,7 @@ enum io_uring_op {
 	IORING_OP_FTRUNCATE,
 	IORING_OP_BIND,
 	IORING_OP_LISTEN,
+	IORING_OP_BUF_UPDATE,
 
 	/* this goes last, obviously */
 	IORING_OP_LAST,
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index a2be3bbca5ff..cda35d22397d 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -515,6 +515,10 @@ const struct io_issue_def io_issue_defs[] = {
 		.prep			= io_eopnotsupp_prep,
 #endif
 	},
+	[IORING_OP_BUF_UPDATE] = {
+		.prep			= io_buf_update_prep,
+		.issue			= io_buf_update,
+	},
 };
 
 const struct io_cold_def io_cold_defs[] = {
@@ -742,6 +746,9 @@ const struct io_cold_def io_cold_defs[] = {
 	[IORING_OP_LISTEN] = {
 		.name			= "LISTEN",
 	},
+	[IORING_OP_BUF_UPDATE] = {
+		.name			= "BUF_UPDATE",
+	},
 };
 
 const char *io_uring_get_opcode(u8 opcode)
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 33a3d156a85b..6f0071733018 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -1236,3 +1236,44 @@ int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg)
 		fput(file);
 	return ret;
 }
+
+struct io_buf_update {
+	struct file *file;
+	struct io_uring_rsrc_update2 up;
+};
+
+int io_buf_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+	struct io_buf_update *ibu = io_kiocb_to_cmd(req, struct io_buf_update);
+	struct io_uring_rsrc_update2 __user *uaddr;
+
+	if (!req->ctx->buf_data)
+		return -ENXIO;
+	if (sqe->ioprio || sqe->fd || sqe->addr2 || sqe->rw_flags ||
+	    sqe->splice_fd_in)
+		return -EINVAL;
+	if (sqe->len != 1)
+		return -EINVAL;
+
+	uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr));
+	if (copy_from_user(&ibu->up, uaddr, sizeof(*uaddr)))
+		return -EFAULT;
+
+	return 0;
+}
+
+int io_buf_update(struct io_kiocb *req, unsigned int issue_flags)
+{
+	struct io_buf_update *ibu = io_kiocb_to_cmd(req, struct io_buf_update);
+	struct io_ring_ctx *ctx = req->ctx;
+	int ret;
+
+	io_ring_submit_lock(ctx, issue_flags);
+	ret = __io_register_rsrc_update(ctx, IORING_RSRC_BUFFER, &ibu->up, ibu->up.nr);
+	io_ring_submit_unlock(ctx, issue_flags);
+
+	if (ret < 0)
+		req_set_fail(req);
+	io_req_set_res(req, ret, 0);
+	return 0;
+}
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index 8ed588036210..d41e75c956ef 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -142,4 +142,7 @@ static inline void __io_unaccount_mem(struct user_struct *user,
 	atomic_long_sub(nr_pages, &user->locked_vm);
 }
 
+int io_buf_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_buf_update(struct io_kiocb *req, unsigned int issue_flags);
+
 #endif
Ming Lei Oct. 11, 2024, 2:30 a.m. UTC | #7
Hi Jens,

On Thu, Oct 10, 2024 at 01:31:21PM -0600, Jens Axboe wrote:
> Hi,
> 
> Discussed this with Pavel, and on his suggestion, I tried prototyping a
> "buffer update" opcode. Basically it works like
> IORING_REGISTER_BUFFERS_UPDATE in that it can update an existing buffer
> registration. But it works as an sqe rather than being a sync opcode.
> 
> The idea here is that you could do that upfront, or as part of a chain,
> and have it be generically available, just like any other buffer that
> was registered upfront. You do need an empty table registered first,
> which can just be sparse. And since you can pick the slot it goes into,
> you can rely on that slot afterwards (either as a link, or just the
> following sqe).
> 
> Quick'n dirty obviously, but I did write a quick test case too to verify
> that:
> 
> 1) It actually works (it seems to)

It doesn't work for ublk zc since ublk needs to provide one kernel buffer
for fs rw & net send/recv to consume, and the kernel buffer is invisible
to userspace. But  __io_register_rsrc_update() only can register userspace
buffer.

Also multiple OPs may consume the buffer concurrently, which can't be
supported by buffer select.


thanks, 
Ming
Jens Axboe Oct. 11, 2024, 2:39 a.m. UTC | #8
On 10/10/24 8:30 PM, Ming Lei wrote:
> Hi Jens,
> 
> On Thu, Oct 10, 2024 at 01:31:21PM -0600, Jens Axboe wrote:
>> Hi,
>>
>> Discussed this with Pavel, and on his suggestion, I tried prototyping a
>> "buffer update" opcode. Basically it works like
>> IORING_REGISTER_BUFFERS_UPDATE in that it can update an existing buffer
>> registration. But it works as an sqe rather than being a sync opcode.
>>
>> The idea here is that you could do that upfront, or as part of a chain,
>> and have it be generically available, just like any other buffer that
>> was registered upfront. You do need an empty table registered first,
>> which can just be sparse. And since you can pick the slot it goes into,
>> you can rely on that slot afterwards (either as a link, or just the
>> following sqe).
>>
>> Quick'n dirty obviously, but I did write a quick test case too to verify
>> that:
>>
>> 1) It actually works (it seems to)
> 
> It doesn't work for ublk zc since ublk needs to provide one kernel buffer
> for fs rw & net send/recv to consume, and the kernel buffer is invisible
> to userspace. But  __io_register_rsrc_update() only can register userspace
> buffer.

I'd be surprised if this simple one was enough! In terms of user vs
kernel buffer, you could certainly use the same mechanism, and just
ensure that buffers are tagged appropriately. I need to think about that
a little bit.

There are certainly many different ways that can get propagated which
would not entail a complicated mechanism. I really like the aspect of
having the identifier be the same thing that we already use, and
hence not needing to be something new on the side.

> Also multiple OPs may consume the buffer concurrently, which can't be
> supported by buffer select.

Why not? You can certainly have multiple ops using the same registered
buffer concurrently right now.
Ming Lei Oct. 11, 2024, 3:07 a.m. UTC | #9
On Thu, Oct 10, 2024 at 08:39:12PM -0600, Jens Axboe wrote:
> On 10/10/24 8:30 PM, Ming Lei wrote:
> > Hi Jens,
> > 
> > On Thu, Oct 10, 2024 at 01:31:21PM -0600, Jens Axboe wrote:
> >> Hi,
> >>
> >> Discussed this with Pavel, and on his suggestion, I tried prototyping a
> >> "buffer update" opcode. Basically it works like
> >> IORING_REGISTER_BUFFERS_UPDATE in that it can update an existing buffer
> >> registration. But it works as an sqe rather than being a sync opcode.
> >>
> >> The idea here is that you could do that upfront, or as part of a chain,
> >> and have it be generically available, just like any other buffer that
> >> was registered upfront. You do need an empty table registered first,
> >> which can just be sparse. And since you can pick the slot it goes into,
> >> you can rely on that slot afterwards (either as a link, or just the
> >> following sqe).
> >>
> >> Quick'n dirty obviously, but I did write a quick test case too to verify
> >> that:
> >>
> >> 1) It actually works (it seems to)
> > 
> > It doesn't work for ublk zc since ublk needs to provide one kernel buffer
> > for fs rw & net send/recv to consume, and the kernel buffer is invisible
> > to userspace. But  __io_register_rsrc_update() only can register userspace
> > buffer.
> 
> I'd be surprised if this simple one was enough! In terms of user vs
> kernel buffer, you could certainly use the same mechanism, and just
> ensure that buffers are tagged appropriately. I need to think about that
> a little bit.

It is actually the same as with IORING_OP_PROVIDE_BUFFERS: the following
consumer OPs have to wait until this OP_BUF_UPDATE is completed.

Suppose we have N consumer OPs which depend on OP_BUF_UPDATE.

1) all N OPs are linked with OP_BUF_UPDATE

Or

2) submit OP_BUF_UPDATE first, wait for its completion, then submit the N
OPs concurrently.

But both 1) and 2) may slow down IO handling: in 1) all N OPs are
serialized, and in 2) one extra syscall is introduced.

The same issue exists for the next OP_BUF_UPDATE, which has to wait until
all the previous buffer consumers are done, so the slowdown is doubled. Not
to mention the application will become more complicated.

Here the provided buffer is only visible among the N OPs, so making it
global isn't necessary and only slows things down. It also raises a kbuf
lifetime issue.

It makes error handling more complicated too: io_uring has to remove the
kernel buffer when the current task exits, which introduces a dependency
or ordering with the buffer provider.

There could be more problems; I will try to recall the related issues I
thought through before.

> 
> There are certainly many different ways that can get propagated which
> would not entail a complicated mechanism. I really like the aspect of
> having the identifier being the same thing that we already use, and
> hence not needing to be something new on the side.
> 
> > Also multiple OPs may consume the buffer concurrently, which can't be
> > supported by buffer select.
> 
> Why not? You can certainly have multiple ops using the same registered
> buffer concurrently right now.

Please see the above problem.

Also I remember that the selected buffer is removed from the buffer list,
see io_provided_buffer_select(), but maybe I am wrong.


Thanks,
Ming
Jens Axboe Oct. 11, 2024, 1:24 p.m. UTC | #10
On 10/10/24 9:07 PM, Ming Lei wrote:
> On Thu, Oct 10, 2024 at 08:39:12PM -0600, Jens Axboe wrote:
>> On 10/10/24 8:30 PM, Ming Lei wrote:
>>> Hi Jens,
>>>
>>> On Thu, Oct 10, 2024 at 01:31:21PM -0600, Jens Axboe wrote:
>>>> Hi,
>>>>
>>>> Discussed this with Pavel, and on his suggestion, I tried prototyping a
>>>> "buffer update" opcode. Basically it works like
>>>> IORING_REGISTER_BUFFERS_UPDATE in that it can update an existing buffer
>>>> registration. But it works as an sqe rather than being a sync opcode.
>>>>
>>>> The idea here is that you could do that upfront, or as part of a chain,
>>>> and have it be generically available, just like any other buffer that
>>>> was registered upfront. You do need an empty table registered first,
>>>> which can just be sparse. And since you can pick the slot it goes into,
>>>> you can rely on that slot afterwards (either as a link, or just the
>>>> following sqe).
>>>>
>>>> Quick'n dirty obviously, but I did write a quick test case too to verify
>>>> that:
>>>>
>>>> 1) It actually works (it seems to)
>>>
>>> It doesn't work for ublk zc since ublk needs to provide one kernel buffer
>>> for fs rw & net send/recv to consume, and the kernel buffer is invisible
>>> to userspace. But  __io_register_rsrc_update() only can register userspace
>>> buffer.
>>
>> I'd be surprised if this simple one was enough! In terms of user vs
>> kernel buffer, you could certainly use the same mechanism, and just
>> ensure that buffers are tagged appropriately. I need to think about that
>> a little bit.
> 
> It is actually same with IORING_OP_PROVIDE_BUFFERS, so the following
> consumer OPs have to wait until this OP_BUF_UPDATE is completed.

See below for the registered vs provided buffer mixup that seems to
be the source of confusion here.

> Suppose we have N consumers OPs which depends on OP_BUF_UPDATE.
> 
> 1) all N OPs are linked with OP_BUF_UPDATE
> 
> Or
> 
> 2) submit OP_BUF_UPDATE first, and wait its completion, then submit N
> OPs concurrently.

Correct

> But 1) and 2) may slow the IO handing.  In 1) all N OPs are serialized,
> and 1 extra syscall is introduced in 2).

Yes you don't want to do #1. But the OP_BUF_UPDATE is cheap enough that
you can just do it upfront. It's not ideal in terms of usage, and I get
where the grouping comes from. But is it possible to do the grouping in
a less intrusive fashion with OP_BUF_UPDATE? Because it won't change any
of the other ops in terms of buffer consumption, they'd just need fixed
buffer support and you'd flag the buffer index in sqe->buf_index. And
the nice thing about that is that while fixed/registered buffers aren't
really used on the networking side yet (as they don't bring any benefit
yet), adding support for them could potentially be useful down the line
anyway.

> The same thing exists in the next OP_BUF_UPDATE which has to wait until
> all the previous buffer consumers are done. So the same slow thing are
> doubled. Not mention the application will become more complicated.

It does not, you can do an update on a buffer that's already inflight.

> Here the provided buffer is only visible among the N OPs wide, and making
> it global isn't necessary, and slow things down. And has kbuf lifetime
> issue.

I was worried about it being too slow too, but the basic testing seems
like it's fine. Yes with updates inflight it'll make it a tad bit
slower, but really should not be a concern. I'd argue that even doing
the very basic of things, which would be:

1) Submit OP_BUF_UPDATE, get completion
2) Do the rest of the ops

would be totally fine in terms of performance. OP_BUF_UPDATE will
_always_ complete immediately and inline, which means that it'll
_always_ be immediately available post submission. The only thing you'd
ever have to worry about in terms of failure is a badly formed request,
which is a programming issue, or running out of memory on the host.
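
In userspace that two-step flow is just a submit plus a wait (sketch only,
same IORING_OP_BUF_UPDATE assumption as above; the update SQE is assumed to
have been queued already by the caller):

static int update_then_submit_consumers(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;
	int ret;

	/* 1) submit the buffer update and reap its inline completion */
	ret = io_uring_submit(ring);
	if (ret < 0)
		return ret;
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	ret = cqe->res;
	io_uring_cqe_seen(ring, cqe);
	if (ret < 0)
		return ret;

	/* 2) queue the consumer SQEs against the updated slot and submit */
	/* ... io_uring_prep_read_fixed() / io_uring_prep_send() etc ... */
	return io_uring_submit(ring);
}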

> Also it makes error handling more complicated, io_uring has to remove
> the kernel buffer when the current task is exit, dependency or order with
> buffer provider is introduced.

Why would that be? They belong to the ring, so should be torn down as
part of the ring anyway? Why would they be task-private, but not
ring-private?

>> There are certainly many different ways that can get propagated which
>> would not entail a complicated mechanism. I really like the aspect of
>> having the identifier being the same thing that we already use, and
>> hence not needing to be something new on the side.
>>
>>> Also multiple OPs may consume the buffer concurrently, which can't be
>>> supported by buffer select.
>>
>> Why not? You can certainly have multiple ops using the same registered
>> buffer concurrently right now.
> 
> Please see the above problem.
> 
> Also I remember that the selected buffer is removed from buffer list,
> see io_provided_buffer_select(), but maybe I am wrong.

You're mixing up provided and registered buffers. Provided buffers are
ones that the application gives to the kernel, and the kernel grabs and
consumes them. Then the application replenishes, repeat.

Registered buffers are entirely different, those are registered with the
kernel and we can do things like pre-gup the pages so we don't have to
do that for every IO. They are entirely persistent, and multiple ops can
keep using them concurrently. They don't get consumed by an IO like
provided buffers, they remain in place until they get unregistered (or
updated, like my patch) at some point.
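
To make the distinction concrete, a small sketch using stock liburing calls
(assumed available in recent liburing releases):

#include <liburing.h>

/* Registered (fixed) buffers: persistent, pre-mapped, and usable by any
 * number of requests concurrently via sqe->buf_index.
 */
static int register_fixed_buffers(struct io_uring *ring,
				  const struct iovec *iov, unsigned int nr)
{
	return io_uring_register_buffers(ring, iov, nr);
}

/* Provided buffers: handed to the kernel in a buffer group, picked and
 * consumed by a single request, then replenished by the application. A
 * consumer selects from the group with IOSQE_BUFFER_SELECT and
 * sqe->buf_group = bgid.
 */
static void provide_one_buffer(struct io_uring *ring, void *buf,
			       int len, int bgid, int bid)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_provide_buffers(sqe, buf, len, 1, bgid, bid);
}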
Ming Lei Oct. 11, 2024, 2:20 p.m. UTC | #11
On Fri, Oct 11, 2024 at 07:24:27AM -0600, Jens Axboe wrote:
> On 10/10/24 9:07 PM, Ming Lei wrote:
> > On Thu, Oct 10, 2024 at 08:39:12PM -0600, Jens Axboe wrote:
> >> On 10/10/24 8:30 PM, Ming Lei wrote:
> >>> Hi Jens,
> >>>
> >>> On Thu, Oct 10, 2024 at 01:31:21PM -0600, Jens Axboe wrote:
> >>>> Hi,
> >>>>
> >>>> Discussed this with Pavel, and on his suggestion, I tried prototyping a
> >>>> "buffer update" opcode. Basically it works like
> >>>> IORING_REGISTER_BUFFERS_UPDATE in that it can update an existing buffer
> >>>> registration. But it works as an sqe rather than being a sync opcode.
> >>>>
> >>>> The idea here is that you could do that upfront, or as part of a chain,
> >>>> and have it be generically available, just like any other buffer that
> >>>> was registered upfront. You do need an empty table registered first,
> >>>> which can just be sparse. And since you can pick the slot it goes into,
> >>>> you can rely on that slot afterwards (either as a link, or just the
> >>>> following sqe).
> >>>>
> >>>> Quick'n dirty obviously, but I did write a quick test case too to verify
> >>>> that:
> >>>>
> >>>> 1) It actually works (it seems to)
> >>>
> >>> It doesn't work for ublk zc since ublk needs to provide one kernel buffer
> >>> for fs rw & net send/recv to consume, and the kernel buffer is invisible
> >>> to userspace. But  __io_register_rsrc_update() only can register userspace
> >>> buffer.
> >>
> >> I'd be surprised if this simple one was enough! In terms of user vs
> >> kernel buffer, you could certainly use the same mechanism, and just
> >> ensure that buffers are tagged appropriately. I need to think about that
> >> a little bit.
> > 
> > It is actually same with IORING_OP_PROVIDE_BUFFERS, so the following
> > consumer OPs have to wait until this OP_BUF_UPDATE is completed.
> 
> See below for the registered vs provided buffer confusion that seems to
> be a confusion issue here.
> 
> > Suppose we have N consumers OPs which depends on OP_BUF_UPDATE.
> > 
> > 1) all N OPs are linked with OP_BUF_UPDATE
> > 
> > Or
> > 
> > 2) submit OP_BUF_UPDATE first, and wait its completion, then submit N
> > OPs concurrently.
> 
> Correct
> 
> > But 1) and 2) may slow the IO handing.  In 1) all N OPs are serialized,
> > and 1 extra syscall is introduced in 2).
> 
> Yes you don't want do do #1. But the OP_BUF_UPDATE is cheap enough that
> you can just do it upfront. It's not ideal in terms of usage, and I get
> where the grouping comes from. But is it possible to do the grouping in
> a less intrusive fashion with OP_BUF_UPDATE? Because it won't change any

Most of the 'intrusive' change is just in patch 4, and Pavel has commented
that it is good enough:

https://lore.kernel.org/linux-block/ZwZzsPcXyazyeZnu@fedora/T/#m551e94f080b80ccbd2561e01da5ea8e17f7ee15d

> of the other ops in terms of buffer consumption, they'd just need fixed
> buffer support and you'd flag the buffer index in sqe->buf_index. And
> the nice thing about that is that while fixed/registered buffers aren't
> really used on the networking side yet (as they don't bring any benefit
> yet), adding support for them could potentially be useful down the line
> anyway.

With 2), two extra syscalls are added for each ublk IO: one to provide the
buffer, another to remove it. The two syscalls have to be synchronized with
the consumer OPs.

I can understand the concern, but if the change can't improve perf, or even
slows things down, it loses its value.

> 
> > The same thing exists in the next OP_BUF_UPDATE which has to wait until
> > all the previous buffer consumers are done. So the same slow thing are
> > doubled. Not mention the application will become more complicated.
> 
> It does not, you can do an update on a buffer that's already inflight.

UPDATE may not match this case; actually two OPs are needed, one to
provide the buffer and the other to remove it, and both have to deal
with the other subsystem (ublk). The remove buffer OP needs to run
immediately after all consumer OPs are done.

I guess you mean the buffer is reference-counted, but what if the remove
buffer OP runs before any consumer OP? The ordering has to be enforced.

That is why I mentioned that two syscalls are added.

> 
> > Here the provided buffer is only visible among the N OPs wide, and making
> > it global isn't necessary, and slow things down. And has kbuf lifetime
> > issue.
> 
> I was worried about it being too slow too, but the basic testing seems
> like it's fine. Yes with updates inflight it'll make it a tad bit
> slower, but really should not be a concern. I'd argue that even doing
> the very basic of things, which would be:
> 
> 1) Submit OP_BUF_UPDATE, get completion
> 2) Do the rest of the ops

The above adds one syscall for each ublk IO, and the following remove
buffer adds another syscall.

Not only does it slow things down, it also makes the application more
complicated, because two wait points are added.

> 
> would be totally fine in terms of performance. OP_BUF_UPDATE will
> _always_ completely immediately and inline, which means that it'll
> _always_ be immediately available post submission. The only think you'd
> ever have to worry about in terms of failure is a badly formed request,
> which is a programming issue, or running out of memory on the host.
> 
> > Also it makes error handling more complicated, io_uring has to remove
> > the kernel buffer when the current task is exit, dependency or order with
> > buffer provider is introduced.
> 
> Why would that be? They belong to the ring, so should be torn down as
> part of the ring anyway? Why would they be task-private, but not
> ring-private?

It is a kernel buffer, which belongs to the provider (such as ublk) instead
of the ring; the application may panic at any time, and then io_uring has to
remove the buffer and notify the buffer owner.

Conceptually, grouping is simpler because:

- buffer lifetime is aligned with the group leader lifetime, so we needn't
worry about buffer leaks caused by an accidental application exit

- the buffer is lent to the consumer OPs and returned after all
consumers are done, which avoids any dependency

Meanwhile OP_BUF_UPDATE (provide buffer OP, remove buffer OP) becomes more
complicated:

- buffer leak because of app panic
- buffer dependency issue: consumer OPs depend on the provide buffer OP,
	and the remove buffer OP depends on the consumer OPs; two syscalls have
	to be added for handling a single ublk IO.

> 
> >> There are certainly many different ways that can get propagated which
> >> would not entail a complicated mechanism. I really like the aspect of
> >> having the identifier being the same thing that we already use, and
> >> hence not needing to be something new on the side.
> >>
> >>> Also multiple OPs may consume the buffer concurrently, which can't be
> >>> supported by buffer select.
> >>
> >> Why not? You can certainly have multiple ops using the same registered
> >> buffer concurrently right now.
> > 
> > Please see the above problem.
> > 
> > Also I remember that the selected buffer is removed from buffer list,
> > see io_provided_buffer_select(), but maybe I am wrong.
> 
> You're mixing up provided and registered buffers. Provided buffers are
> ones that the applications gives to the kernel, and the kernel grabs and
> consumes them. Then the application replenishes, repeat.
> 
> Registered buffers are entirely different, those are registered with the
> kernel and we can do things like pre-gup the pages so we don't have to
> do them for every IO. They are entirely persistent, any multiple ops can
> keep using them, concurrently. They don't get consumed by an IO like
> provided buffers, they remain in place until they get unregistered (or
> updated, like my patch) at some point.

I know the difference.

The thing is that here we can't register the kernel buffer in ->prep();
it has to be provided in ->issue() of the uring command. That is similar
to provided buffers.


Thanks,
Ming
Jens Axboe Oct. 11, 2024, 2:41 p.m. UTC | #12
On 10/11/24 8:20 AM, Ming Lei wrote:
> On Fri, Oct 11, 2024 at 07:24:27AM -0600, Jens Axboe wrote:
>> On 10/10/24 9:07 PM, Ming Lei wrote:
>>> On Thu, Oct 10, 2024 at 08:39:12PM -0600, Jens Axboe wrote:
>>>> On 10/10/24 8:30 PM, Ming Lei wrote:
>>>>> Hi Jens,
>>>>>
>>>>> On Thu, Oct 10, 2024 at 01:31:21PM -0600, Jens Axboe wrote:
>>>>>> Hi,
>>>>>>
>>>>>> Discussed this with Pavel, and on his suggestion, I tried prototyping a
>>>>>> "buffer update" opcode. Basically it works like
>>>>>> IORING_REGISTER_BUFFERS_UPDATE in that it can update an existing buffer
>>>>>> registration. But it works as an sqe rather than being a sync opcode.
>>>>>>
>>>>>> The idea here is that you could do that upfront, or as part of a chain,
>>>>>> and have it be generically available, just like any other buffer that
>>>>>> was registered upfront. You do need an empty table registered first,
>>>>>> which can just be sparse. And since you can pick the slot it goes into,
>>>>>> you can rely on that slot afterwards (either as a link, or just the
>>>>>> following sqe).
>>>>>>
>>>>>> Quick'n dirty obviously, but I did write a quick test case too to verify
>>>>>> that:
>>>>>>
>>>>>> 1) It actually works (it seems to)
>>>>>
>>>>> It doesn't work for ublk zc since ublk needs to provide one kernel buffer
>>>>> for fs rw & net send/recv to consume, and the kernel buffer is invisible
>>>>> to userspace. But  __io_register_rsrc_update() only can register userspace
>>>>> buffer.
>>>>
>>>> I'd be surprised if this simple one was enough! In terms of user vs
>>>> kernel buffer, you could certainly use the same mechanism, and just
>>>> ensure that buffers are tagged appropriately. I need to think about that
>>>> a little bit.
>>>
>>> It is actually same with IORING_OP_PROVIDE_BUFFERS, so the following
>>> consumer OPs have to wait until this OP_BUF_UPDATE is completed.
>>
>> See below for the registered vs provided buffer confusion that seems to
>> be a confusion issue here.
>>
>>> Suppose we have N consumers OPs which depends on OP_BUF_UPDATE.
>>>
>>> 1) all N OPs are linked with OP_BUF_UPDATE
>>>
>>> Or
>>>
>>> 2) submit OP_BUF_UPDATE first, and wait its completion, then submit N
>>> OPs concurrently.
>>
>> Correct
>>
>>> But 1) and 2) may slow the IO handing.  In 1) all N OPs are serialized,
>>> and 1 extra syscall is introduced in 2).
>>
>> Yes you don't want do do #1. But the OP_BUF_UPDATE is cheap enough that
>> you can just do it upfront. It's not ideal in terms of usage, and I get
>> where the grouping comes from. But is it possible to do the grouping in
>> a less intrusive fashion with OP_BUF_UPDATE? Because it won't change any
> 
> The most of 'intrusive' change is just on patch 4, and Pavel has commented
> that it is good enough:
> 
> https://lore.kernel.org/linux-block/ZwZzsPcXyazyeZnu@fedora/T/#m551e94f080b80ccbd2561e01da5ea8e17f7ee15d

At least for me, patch 4 looks fine. The problem occurs when you start
needing to support this different buffer type, which is in patch 6. I'm
not saying we can necessarily solve this with OP_BUF_UPDATE, I just want
to explore that path because if we can, then patch 6 turns into "oh
let's just add registered/fixed buffer support to these ops that don't
currently support it". And that would be much nicer indeed.


>> of the other ops in terms of buffer consumption, they'd just need fixed
>> buffer support and you'd flag the buffer index in sqe->buf_index. And
>> the nice thing about that is that while fixed/registered buffers aren't
>> really used on the networking side yet (as they don't bring any benefit
>> yet), adding support for them could potentially be useful down the line
>> anyway.
> 
> With 2), two extra syscalls are added for each ublk IO, one is provide
> buffer, another is remove buffer. The two syscalls have to be sync with
> consumer OPs.
> 
> I can understand the concern, but if the change can't improve perf or
> even slow things done, it loses its value.

It'd be one extra syscall, as the remove can get bundled with the next
add. But your point still stands, yes it will add extra overhead,
albeit pretty darn minimal. I'm actually more concerned with the
complexity for handling it. While the OP_BUF_UPDATE will always
complete immediately, there's no guarantee it's the next cqe you pull
out when peeking post submission.

>>> The same thing exists in the next OP_BUF_UPDATE which has to wait until
>>> all the previous buffer consumers are done. So the same slow thing are
>>> doubled. Not mention the application will become more complicated.
>>
>> It does not, you can do an update on a buffer that's already inflight.
> 
> UPDATE may not match the case, actually two OPs are needed, one is
> provide buffer OP and the other is remove buffer OP, both have to deal
> with the other subsystem(ublk). Remove buffer needs to be done after all
> consumer OPs are done immediately.

You don't necessarily need the remove. If you always just use the same
slot for these, then the OP_BUF_UPDATE will just update the current
location.

> I guess you mean the buffer is reference-counted, but what if the remove
> buffer OP is run before any consumer OP? The order has to be enhanced.
> 
> That is why I mention two syscalls are added.

See above, you can just update in place, and if you do want remove, it
can get bundled with the next one. But it would be pointless to remove
only then to update right after, a single update would suffice.

>>> Here the provided buffer is only visible among the N OPs wide, and making
>>> it global isn't necessary, and slow things down. And has kbuf lifetime
>>> issue.
>>
>> I was worried about it being too slow too, but the basic testing seems
>> like it's fine. Yes with updates inflight it'll make it a tad bit
>> slower, but really should not be a concern. I'd argue that even doing
>> the very basic of things, which would be:
>>
>> 1) Submit OP_BUF_UPDATE, get completion
>> 2) Do the rest of the ops
> 
> The above adds one syscall for each ublk IO, and the following Remove
> buffer adds another syscall.
> 
> Not only it slows thing down, but also makes application more
> complicated, cause two wait points are added.

I don't think the extra overhead would be noticeable though, but the
extra complication is the main issue here.

>> would be totally fine in terms of performance. OP_BUF_UPDATE will
>> _always_ completely immediately and inline, which means that it'll
>> _always_ be immediately available post submission. The only think you'd
>> ever have to worry about in terms of failure is a badly formed request,
>> which is a programming issue, or running out of memory on the host.
>>
>>> Also it makes error handling more complicated, io_uring has to remove
>>> the kernel buffer when the current task is exit, dependency or order with
>>> buffer provider is introduced.
>>
>> Why would that be? They belong to the ring, so should be torn down as
>> part of the ring anyway? Why would they be task-private, but not
>> ring-private?
> 
> It is kernel buffer, which belongs to provider(such as ublk) instead
> of uring, application may panic any time, then io_uring has to remove
> the buffer for notifying the buffer owner.

But it could be an application buffer, no? You'd just need the
application to provide it to ublk and have it mapped, rather than have
ublk allocate it in-kernel and then use that.

> In concept grouping is simpler because:
> 
> - buffer lifetime is aligned with group leader lifetime, so we needn't
> worry buffer leak because of application accidental exit

But if it was an application buffer, that would not be a concern.

> - the buffer is borrowed to consumer OPs, and returned back after all
> consumers are done, this way avoids any dependency
> 
> Meantime OP_BUF_UPDATE(provide buffer OP, remove buffer OP) becomes more
> complicated:
> 
> - buffer leak because of app panic
> - buffer dependency issue: consumer OPs depend on provide buffer OP,
> 	remove buffer OP depends on consumer OPs; two syscalls has to be
> 	added for handling single ublk IO.

Seems like most of this is because of the kernel buffer too, no?

I do like the concept of the ephemeral buffer, the downside is that we
need per-op support for it too. And while I'm not totally against doing
that, it would be lovely if we could utilize an existing mechanism for
that rather than add another one.

>>>> There are certainly many different ways that can get propagated which
>>>> would not entail a complicated mechanism. I really like the aspect of
>>>> having the identifier being the same thing that we already use, and
>>>> hence not needing to be something new on the side.
>>>>
>>>>> Also multiple OPs may consume the buffer concurrently, which can't be
>>>>> supported by buffer select.
>>>>
>>>> Why not? You can certainly have multiple ops using the same registered
>>>> buffer concurrently right now.
>>>
>>> Please see the above problem.
>>>
>>> Also I remember that the selected buffer is removed from buffer list,
>>> see io_provided_buffer_select(), but maybe I am wrong.
>>
>> You're mixing up provided and registered buffers. Provided buffers are
>> ones that the applications gives to the kernel, and the kernel grabs and
>> consumes them. Then the application replenishes, repeat.
>>
>> Registered buffers are entirely different, those are registered with the
>> kernel and we can do things like pre-gup the pages so we don't have to
>> do them for every IO. They are entirely persistent, any multiple ops can
>> keep using them, concurrently. They don't get consumed by an IO like
>> provided buffers, they remain in place until they get unregistered (or
>> updated, like my patch) at some point.
> 
> I know the difference.

But io_provided_buffer_select() has nothing to do with registered/fixed
buffers or this use case; the above "remove from buffer list" is an
entirely different buffer concept. So there's some confusion here, just
wanted to make that clear.

> The thing is that here we can't register the kernel buffer in ->prep(),
> and it has to be provided in ->issue() of uring command. That is similar
> with provided buffer.

What's preventing it from registering it in ->prep()? It would be a bit
odd, but there would be nothing preventing it codewise, outside of the
oddity of ->prep() not being idempotent at that point. Don't follow why
that would be necessary, though, can you expand?
Ming Lei Oct. 11, 2024, 3:45 p.m. UTC | #13
On Fri, Oct 11, 2024 at 08:41:03AM -0600, Jens Axboe wrote:
> On 10/11/24 8:20 AM, Ming Lei wrote:
> > On Fri, Oct 11, 2024 at 07:24:27AM -0600, Jens Axboe wrote:
> >> On 10/10/24 9:07 PM, Ming Lei wrote:
> >>> On Thu, Oct 10, 2024 at 08:39:12PM -0600, Jens Axboe wrote:
> >>>> On 10/10/24 8:30 PM, Ming Lei wrote:
> >>>>> Hi Jens,
> >>>>>
> >>>>> On Thu, Oct 10, 2024 at 01:31:21PM -0600, Jens Axboe wrote:
> >>>>>> Hi,
> >>>>>>
> >>>>>> Discussed this with Pavel, and on his suggestion, I tried prototyping a
> >>>>>> "buffer update" opcode. Basically it works like
> >>>>>> IORING_REGISTER_BUFFERS_UPDATE in that it can update an existing buffer
> >>>>>> registration. But it works as an sqe rather than being a sync opcode.
> >>>>>>
> >>>>>> The idea here is that you could do that upfront, or as part of a chain,
> >>>>>> and have it be generically available, just like any other buffer that
> >>>>>> was registered upfront. You do need an empty table registered first,
> >>>>>> which can just be sparse. And since you can pick the slot it goes into,
> >>>>>> you can rely on that slot afterwards (either as a link, or just the
> >>>>>> following sqe).
> >>>>>>
> >>>>>> Quick'n dirty obviously, but I did write a quick test case too to verify
> >>>>>> that:
> >>>>>>
> >>>>>> 1) It actually works (it seems to)
> >>>>>
> >>>>> It doesn't work for ublk zc since ublk needs to provide one kernel buffer
> >>>>> for fs rw & net send/recv to consume, and the kernel buffer is invisible
> >>>>> to userspace. But  __io_register_rsrc_update() only can register userspace
> >>>>> buffer.
> >>>>
> >>>> I'd be surprised if this simple one was enough! In terms of user vs
> >>>> kernel buffer, you could certainly use the same mechanism, and just
> >>>> ensure that buffers are tagged appropriately. I need to think about that
> >>>> a little bit.
> >>>
> >>> It is actually same with IORING_OP_PROVIDE_BUFFERS, so the following
> >>> consumer OPs have to wait until this OP_BUF_UPDATE is completed.
> >>
> >> See below for the registered vs provided buffer confusion that seems to
> >> be a confusion issue here.
> >>
> >>> Suppose we have N consumers OPs which depends on OP_BUF_UPDATE.
> >>>
> >>> 1) all N OPs are linked with OP_BUF_UPDATE
> >>>
> >>> Or
> >>>
> >>> 2) submit OP_BUF_UPDATE first, and wait its completion, then submit N
> >>> OPs concurrently.
> >>
> >> Correct
> >>
> >>> But 1) and 2) may slow the IO handing.  In 1) all N OPs are serialized,
> >>> and 1 extra syscall is introduced in 2).
> >>
> >> Yes you don't want do do #1. But the OP_BUF_UPDATE is cheap enough that
> >> you can just do it upfront. It's not ideal in terms of usage, and I get
> >> where the grouping comes from. But is it possible to do the grouping in
> >> a less intrusive fashion with OP_BUF_UPDATE? Because it won't change any
> > 
> > The most of 'intrusive' change is just on patch 4, and Pavel has commented
> > that it is good enough:
> > 
> > https://lore.kernel.org/linux-block/ZwZzsPcXyazyeZnu@fedora/T/#m551e94f080b80ccbd2561e01da5ea8e17f7ee15d
> 
> At least for me, patch 4 looks fine. The problem occurs when you start
> needing to support this different buffer type, which is in patch 6. I'm
> not saying we can necessarily solve this with OP_BUF_UPDATE, I just want
> to explore that path because if we can, then patch 6 turns into "oh
> let's just added registered/fixed buffer support to these ops that don't
> currently support it". And that would be much nicer indeed.

OK, in my local V7 the buffer type is actually aligned with
BUFFER_SELECT from both the interface & usage viewpoint, since member SQEs
still have three unused flag bits available.

I will post V7 for review.

> 
> 
> >> of the other ops in terms of buffer consumption, they'd just need fixed
> >> buffer support and you'd flag the buffer index in sqe->buf_index. And
> >> the nice thing about that is that while fixed/registered buffers aren't
> >> really used on the networking side yet (as they don't bring any benefit
> >> yet), adding support for them could potentially be useful down the line
> >> anyway.
> > 
> > With 2), two extra syscalls are added for each ublk IO, one is provide
> > buffer, another is remove buffer. The two syscalls have to be sync with
> > consumer OPs.
> > 
> > I can understand the concern, but if the change can't improve perf or
> > even slow things done, it loses its value.
> 
> It'd be one extra syscall, as the remove can get bundled with the next
> add. But your point still stands, yes it will add extra overhead,

It can't be bundled.

And the kernel buffer consists of the blk-mq request's pages, which are per tag.

Such as, for a ublk target, an IO comes in on tag 0; after this IO (tag 0) is
handled, how can we know whether a new IO will come in on tag 0 immediately? :-)

> although be it pretty darn minimal. I'm actually more concerned with the
> complexity for handling it. While the OP_BUF_UPDATE will always
> complete immediately, there's no guarantee it's the next cqe you pull
> out when peeking post submission.
> 
> >>> The same thing exists in the next OP_BUF_UPDATE which has to wait until
> >>> all the previous buffer consumers are done. So the same slow thing are
> >>> doubled. Not mention the application will become more complicated.
> >>
> >> It does not, you can do an update on a buffer that's already inflight.
> > 
> > UPDATE may not match the case, actually two OPs are needed, one is
> > provide buffer OP and the other is remove buffer OP, both have to deal
> > with the other subsystem(ublk). Remove buffer needs to be done after all
> > consumer OPs are done immediately.
> 
> You don't necessarily need the remove. If you always just use the same
> slot for these, then the OP_BUF_UPDATE will just update the current
> location.

The buffer is per tag, and it can't be guaranteed to be reused immediately;
otherwise it isn't zero copy any more.

> 
> > I guess you mean the buffer is reference-counted, but what if the remove
> > buffer OP is run before any consumer OP? The order has to be enhanced.
> > 
> > That is why I mention two syscalls are added.
> 
> See above, you can just update in place, and if you do want remove, it
> can get bundled with the next one. But it would be pointless to remove
> only then to update right after, a single update would suffice.
> 
> >>> Here the provided buffer is only visible among the N OPs wide, and making
> >>> it global isn't necessary, and slow things down. And has kbuf lifetime
> >>> issue.
> >>
> >> I was worried about it being too slow too, but the basic testing seems
> >> like it's fine. Yes with updates inflight it'll make it a tad bit
> >> slower, but really should not be a concern. I'd argue that even doing
> >> the very basic of things, which would be:
> >>
> >> 1) Submit OP_BUF_UPDATE, get completion
> >> 2) Do the rest of the ops
> > 
> > The above adds one syscall for each ublk IO, and the following Remove
> > buffer adds another syscall.
> > 
> > Not only it slows thing down, but also makes application more
> > complicated, cause two wait points are added.
> 
> I don't think the extra overhead would be noticeable though, but the
> extra complication is the main issue here.

Can't agree more.

> 
> >> would be totally fine in terms of performance. OP_BUF_UPDATE will
> >> _always_ completely immediately and inline, which means that it'll
> >> _always_ be immediately available post submission. The only think you'd
> >> ever have to worry about in terms of failure is a badly formed request,
> >> which is a programming issue, or running out of memory on the host.
> >>
> >>> Also it makes error handling more complicated, io_uring has to remove
> >>> the kernel buffer when the current task is exit, dependency or order with
> >>> buffer provider is introduced.
> >>
> >> Why would that be? They belong to the ring, so should be torn down as
> >> part of the ring anyway? Why would they be task-private, but not
> >> ring-private?
> > 
> > It is kernel buffer, which belongs to provider(such as ublk) instead
> > of uring, application may panic any time, then io_uring has to remove
> > the buffer for notifying the buffer owner.
> 
> But it could be an application buffer, no? You'd just need the
> application to provide it to ublk and have it mapped, rather than have
> ublk allocate it in-kernel and then use that.

The buffer actually consists of the kernel 'request/bio' pages of /dev/ublkbN, and we
forward and lend it to io_uring OPs (fs rw, net send/recv), so it can't be an
application buffer; this is not the same situation as net rx.
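
Just for illustration (this is not the actual ublk code, and the struct/field
names below are made up), the provider side boils down to describing the
blk-mq request's pages and supplying a callback that is invoked when io_uring
is done borrowing them:

#include <linux/blk-mq.h>

/* illustration only: hypothetical descriptor for the leased kernel buffer */
struct ublk_lease_buf {
	struct bio_vec	*bvec;				/* borrowed request pages */
	unsigned int	 nr_bvecs;
	size_t		 len;
	void		(*ack)(struct ublk_lease_buf *);/* called when io_uring is done */
};

static int ublk_fill_lease_buf(struct request *rq, struct ublk_lease_buf *buf,
			       struct bio_vec *bvecs, unsigned int max_bvecs)
{
	struct req_iterator iter;
	struct bio_vec bv;
	unsigned int idx = 0;

	rq_for_each_bvec(bv, rq, iter) {
		if (idx >= max_bvecs)
			return -EINVAL;
		bvecs[idx++] = bv;	/* borrow the pages, no copy and no pin */
	}

	buf->bvec = bvecs;
	buf->nr_bvecs = idx;
	buf->len = blk_rq_bytes(rq);
	/* buf->ack is filled in by the driver; it is called once all
	 * consumer OPs have dropped their references to the buffer */
	return 0;
}

The member OPs then only ever see an iov_iter built over these bvecs, never a
userspace address.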

> 
> > In concept grouping is simpler because:
> > 
> > - buffer lifetime is aligned with group leader lifetime, so we needn't
> > worry buffer leak because of application accidental exit
> 
> But if it was an application buffer, that would not be a concern.

Yeah, but storage isn't the same as network; here an application buffer can't
support zc.

> 
> > - the buffer is borrowed to consumer OPs, and returned back after all
> > consumers are done, this way avoids any dependency
> > 
> > Meantime OP_BUF_UPDATE(provide buffer OP, remove buffer OP) becomes more
> > complicated:
> > 
> > - buffer leak because of app panic
> > - buffer dependency issue: consumer OPs depend on provide buffer OP,
> > 	remove buffer OP depends on consumer OPs; two syscalls has to be
> > 	added for handling single ublk IO.
> 
> Seems like most of this is because of the kernel buffer too, no?

Yeah.

> 
> I do like the concept of the ephemeral buffer, the downside is that we
> need per-op support for it too. And while I'm not totally against doing

Can you explain per-op support a bit?

Now the buffer has been provided by one single uring command.

> that, it would be lovely if we could utilize and existing mechanism for
> that rather than add another one.

If the existing mechanisms could cover everything, Linux wouldn't progress any
more.

> 
> >>>> There are certainly many different ways that can get propagated which
> >>>> would not entail a complicated mechanism. I really like the aspect of
> >>>> having the identifier being the same thing that we already use, and
> >>>> hence not needing to be something new on the side.
> >>>>
> >>>>> Also multiple OPs may consume the buffer concurrently, which can't be
> >>>>> supported by buffer select.
> >>>>
> >>>> Why not? You can certainly have multiple ops using the same registered
> >>>> buffer concurrently right now.
> >>>
> >>> Please see the above problem.
> >>>
> >>> Also I remember that the selected buffer is removed from buffer list,
> >>> see io_provided_buffer_select(), but maybe I am wrong.
> >>
> >> You're mixing up provided and registered buffers. Provided buffers are
> >> ones that the applications gives to the kernel, and the kernel grabs and
> >> consumes them. Then the application replenishes, repeat.
> >>
> >> Registered buffers are entirely different, those are registered with the
> >> kernel and we can do things like pre-gup the pages so we don't have to
> >> do them for every IO. They are entirely persistent, any multiple ops can
> >> keep using them, concurrently. They don't get consumed by an IO like
> >> provided buffers, they remain in place until they get unregistered (or
> >> updated, like my patch) at some point.
> > 
> > I know the difference.
> 
> But io_provided_buffer_select() has nothing to do with registered/fixed
> buffers or this use case, the above "remove from buffer list" is an
> entirely different buffer concept. So there's some confusion here, just
> wanted to make that clear.
> 
> > The thing is that here we can't register the kernel buffer in ->prep(),
> > and it has to be provided in ->issue() of uring command. That is similar
> > with provided buffer.
> 
> What's preventing it from registering it in ->prep()? It would be a bit
> odd, but there would be nothing preventing it codewise, outside of the
> oddity of ->prep() not being idempotent at that point. Don't follow why
> that would be necessary, though, can you expand?

->prep() isn't exposed to uring cmd, and we may not want to bother
drivers with it.

Also, removing the buffer still can't be done in ->prep().

Without digging in further, one big thing is that dependencies aren't
respected in ->prep().


thanks,
Ming
Jens Axboe Oct. 11, 2024, 4:49 p.m. UTC | #14
On 10/11/24 9:45 AM, Ming Lei wrote:
> On Fri, Oct 11, 2024 at 08:41:03AM -0600, Jens Axboe wrote:
>> On 10/11/24 8:20 AM, Ming Lei wrote:
>>> On Fri, Oct 11, 2024 at 07:24:27AM -0600, Jens Axboe wrote:
>>>> On 10/10/24 9:07 PM, Ming Lei wrote:
>>>>> On Thu, Oct 10, 2024 at 08:39:12PM -0600, Jens Axboe wrote:
>>>>>> On 10/10/24 8:30 PM, Ming Lei wrote:
>>>>>>> Hi Jens,
>>>>>>>
>>>>>>> On Thu, Oct 10, 2024 at 01:31:21PM -0600, Jens Axboe wrote:
>>>>>>>> Hi,
>>>>>>>>
>>>>>>>> Discussed this with Pavel, and on his suggestion, I tried prototyping a
>>>>>>>> "buffer update" opcode. Basically it works like
>>>>>>>> IORING_REGISTER_BUFFERS_UPDATE in that it can update an existing buffer
>>>>>>>> registration. But it works as an sqe rather than being a sync opcode.
>>>>>>>>
>>>>>>>> The idea here is that you could do that upfront, or as part of a chain,
>>>>>>>> and have it be generically available, just like any other buffer that
>>>>>>>> was registered upfront. You do need an empty table registered first,
>>>>>>>> which can just be sparse. And since you can pick the slot it goes into,
>>>>>>>> you can rely on that slot afterwards (either as a link, or just the
>>>>>>>> following sqe).
>>>>>>>>
>>>>>>>> Quick'n dirty obviously, but I did write a quick test case too to verify
>>>>>>>> that:
>>>>>>>>
>>>>>>>> 1) It actually works (it seems to)
>>>>>>>
>>>>>>> It doesn't work for ublk zc since ublk needs to provide one kernel buffer
>>>>>>> for fs rw & net send/recv to consume, and the kernel buffer is invisible
>>>>>>> to userspace. But  __io_register_rsrc_update() only can register userspace
>>>>>>> buffer.
>>>>>>
>>>>>> I'd be surprised if this simple one was enough! In terms of user vs
>>>>>> kernel buffer, you could certainly use the same mechanism, and just
>>>>>> ensure that buffers are tagged appropriately. I need to think about that
>>>>>> a little bit.
>>>>>
>>>>> It is actually same with IORING_OP_PROVIDE_BUFFERS, so the following
>>>>> consumer OPs have to wait until this OP_BUF_UPDATE is completed.
>>>>
>>>> See below for the registered vs provided buffer confusion that seems to
>>>> be a confusion issue here.
>>>>
>>>>> Suppose we have N consumers OPs which depends on OP_BUF_UPDATE.
>>>>>
>>>>> 1) all N OPs are linked with OP_BUF_UPDATE
>>>>>
>>>>> Or
>>>>>
>>>>> 2) submit OP_BUF_UPDATE first, and wait its completion, then submit N
>>>>> OPs concurrently.
>>>>
>>>> Correct
>>>>
>>>>> But 1) and 2) may slow the IO handing.  In 1) all N OPs are serialized,
>>>>> and 1 extra syscall is introduced in 2).
>>>>
>>>> Yes you don't want do do #1. But the OP_BUF_UPDATE is cheap enough that
>>>> you can just do it upfront. It's not ideal in terms of usage, and I get
>>>> where the grouping comes from. But is it possible to do the grouping in
>>>> a less intrusive fashion with OP_BUF_UPDATE? Because it won't change any
>>>
>>> The most of 'intrusive' change is just on patch 4, and Pavel has commented
>>> that it is good enough:
>>>
>>> https://lore.kernel.org/linux-block/ZwZzsPcXyazyeZnu@fedora/T/#m551e94f080b80ccbd2561e01da5ea8e17f7ee15d
>>
>> At least for me, patch 4 looks fine. The problem occurs when you start
>> needing to support this different buffer type, which is in patch 6. I'm
>> not saying we can necessarily solve this with OP_BUF_UPDATE, I just want
>> to explore that path because if we can, then patch 6 turns into "oh
>> let's just added registered/fixed buffer support to these ops that don't
>> currently support it". And that would be much nicer indeed.
> 
> OK, in my local V7, the buffer type is actually aligned with
> BUFFER_SELECT from both interface & use viewpoint, since member SQE
> have three empty flags available.
> 
> I will post V7 for review.

OK, I'll take a look once posted.

>>>> of the other ops in terms of buffer consumption, they'd just need fixed
>>>> buffer support and you'd flag the buffer index in sqe->buf_index. And
>>>> the nice thing about that is that while fixed/registered buffers aren't
>>>> really used on the networking side yet (as they don't bring any benefit
>>>> yet), adding support for them could potentially be useful down the line
>>>> anyway.
>>>
>>> With 2), two extra syscalls are added for each ublk IO, one is provide
>>> buffer, another is remove buffer. The two syscalls have to be sync with
>>> consumer OPs.
>>>
>>> I can understand the concern, but if the change can't improve perf or
>>> even slow things done, it loses its value.
>>
>> It'd be one extra syscall, as the remove can get bundled with the next
>> add. But your point still stands, yes it will add extra overhead,
> 
> It can't be bundled.

Don't see why not, but let's review v7 and see what comes up.

> And the kernel buffer is blk-mq's request pages, which is per tag.
> 
> Such as, for ublk-target, IO comes to tag 0, after this IO(tag 0) is
> handled, how can we know if there is new IO comes to tag 0 immediately? :-)

Gotcha, yeah sounds like that needs to remain a kernel buffer.

>> although be it pretty darn minimal. I'm actually more concerned with the
>> complexity for handling it. While the OP_BUF_UPDATE will always
>> complete immediately, there's no guarantee it's the next cqe you pull
>> out when peeking post submission.
>>
>>>>> The same thing exists in the next OP_BUF_UPDATE which has to wait until
>>>>> all the previous buffer consumers are done. So the same slow thing are
>>>>> doubled. Not mention the application will become more complicated.
>>>>
>>>> It does not, you can do an update on a buffer that's already inflight.
>>>
>>> UPDATE may not match the case, actually two OPs are needed, one is
>>> provide buffer OP and the other is remove buffer OP, both have to deal
>>> with the other subsystem(ublk). Remove buffer needs to be done after all
>>> consumer OPs are done immediately.
>>
>> You don't necessarily need the remove. If you always just use the same
>> slot for these, then the OP_BUF_UPDATE will just update the current
>> location.
> 
> The buffer is per tag, and can't guarantee to be reused immediately,
> otherwise it isn't zero copy any more.

Don't follow this one either. As long as reuse keeps existing IO fine,
then it should be fine? I'm not talking about reusing the buffer, just
the slot it belongs to.

>>>> would be totally fine in terms of performance. OP_BUF_UPDATE will
>>>> _always_ completely immediately and inline, which means that it'll
>>>> _always_ be immediately available post submission. The only think you'd
>>>> ever have to worry about in terms of failure is a badly formed request,
>>>> which is a programming issue, or running out of memory on the host.
>>>>
>>>>> Also it makes error handling more complicated, io_uring has to remove
>>>>> the kernel buffer when the current task is exit, dependency or order with
>>>>> buffer provider is introduced.
>>>>
>>>> Why would that be? They belong to the ring, so should be torn down as
>>>> part of the ring anyway? Why would they be task-private, but not
>>>> ring-private?
>>>
>>> It is kernel buffer, which belongs to provider(such as ublk) instead
>>> of uring, application may panic any time, then io_uring has to remove
>>> the buffer for notifying the buffer owner.
>>
>> But it could be an application buffer, no? You'd just need the
>> application to provide it to ublk and have it mapped, rather than have
>> ublk allocate it in-kernel and then use that.
> 
> The buffer is actually kernel 'request/bio' pages of /dev/ublkbN, and now we
> forward and borrow it to io_uring OPs(fs rw, net send/recv), so it can't be
> application buffer, not same with net rx.

So you borrow the kernel pages, but presumably these are all from
O_DIRECT and have a user mapping?

>>> In concept grouping is simpler because:
>>>
>>> - buffer lifetime is aligned with group leader lifetime, so we needn't
>>> worry buffer leak because of application accidental exit
>>
>> But if it was an application buffer, that would not be a concern.
> 
> Yeah, but storage isn't same with network, here application buffer can't
> support zc.

Maybe I'm dense, but can you expand on why that's the case?

>> I do like the concept of the ephemeral buffer, the downside is that we
>> need per-op support for it too. And while I'm not totally against doing
> 
> Can you explain per-op support a bit?
> 
> Now the buffer has been provided by one single uring command.

I mean the need to do:

+	if (req->flags & REQ_F_GROUP_KBUF) {
+		ret = io_import_group_kbuf(req, rw->addr, rw->len, ITER_SOURCE,
+				&io->iter);
+		if (unlikely(ret))
+			return ret;
+	}

for picking such a buffer.

>> that, it would be lovely if we could utilize and existing mechanism for
>> that rather than add another one.
> 
> If existing mechanism can cover everything, our linux may not progress any
> more.

That's not what I mean at all. We already have essentially three ways to
get a buffer destination for IO:

1) Just pass in an uaddr+len or an iovec
2) Set ->buf_index, the op needs to support this separately to grab a
   registered buffer for IO.
3) For pollable stuff, provided buffers, either via the ring or the
   legacy/classic approach.

This adds a 4th method, which shares with 2 and 3 the characteristic that the
op needs to support it. This is the whole motivation to poke at having a
way to use the normal registered buffer table for this, because then
this falls into method 2 above.
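
Just to make the comparison concrete, here's roughly what method 2 looks like
today with the existing liburing API (nothing new, just a plain registered
buffer plus buf_index):

#include <errno.h>
#include <stdlib.h>
#include <liburing.h>

/* Method 2 today: register a buffer table once, then reference slot 0
 * via buf_index in any op that supports fixed buffers. */
static int read_with_fixed_buf(struct io_uring *ring, int fd, size_t len)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct iovec iov;
	void *buf;
	int ret;

	if (posix_memalign(&buf, 4096, len))
		return -ENOMEM;
	iov.iov_base = buf;
	iov.iov_len = len;

	ret = io_uring_register_buffers(ring, &iov, 1);	/* slot 0 */
	if (ret)
		return ret;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read_fixed(sqe, fd, buf, len, 0, 0 /* buf_index */);
	io_uring_submit(ring);

	ret = io_uring_wait_cqe(ring, &cqe);
	if (!ret) {
		ret = cqe->res;
		io_uring_cqe_seen(ring, cqe);
	}
	return ret;
}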

I'm not at all saying "oh we can't add this new feature"; the only thing
I'm addressing is HOW we do that. I don't think anybody disagrees that
we need zero copy for ublk, and honestly I would love to see that sooner
rather than later!

>>>>>> There are certainly many different ways that can get propagated which
>>>>>> would not entail a complicated mechanism. I really like the aspect of
>>>>>> having the identifier being the same thing that we already use, and
>>>>>> hence not needing to be something new on the side.
>>>>>>
>>>>>>> Also multiple OPs may consume the buffer concurrently, which can't be
>>>>>>> supported by buffer select.
>>>>>>
>>>>>> Why not? You can certainly have multiple ops using the same registered
>>>>>> buffer concurrently right now.
>>>>>
>>>>> Please see the above problem.
>>>>>
>>>>> Also I remember that the selected buffer is removed from buffer list,
>>>>> see io_provided_buffer_select(), but maybe I am wrong.
>>>>
>>>> You're mixing up provided and registered buffers. Provided buffers are
>>>> ones that the applications gives to the kernel, and the kernel grabs and
>>>> consumes them. Then the application replenishes, repeat.
>>>>
>>>> Registered buffers are entirely different, those are registered with the
>>>> kernel and we can do things like pre-gup the pages so we don't have to
>>>> do them for every IO. They are entirely persistent, any multiple ops can
>>>> keep using them, concurrently. They don't get consumed by an IO like
>>>> provided buffers, they remain in place until they get unregistered (or
>>>> updated, like my patch) at some point.
>>>
>>> I know the difference.
>>
>> But io_provided_buffer_select() has nothing to do with registered/fixed
>> buffers or this use case, the above "remove from buffer list" is an
>> entirely different buffer concept. So there's some confusion here, just
>> wanted to make that clear.
>>
>>> The thing is that here we can't register the kernel buffer in ->prep(),
>>> and it has to be provided in ->issue() of uring command. That is similar
>>> with provided buffer.
>>
>> What's preventing it from registering it in ->prep()? It would be a bit
>> odd, but there would be nothing preventing it codewise, outside of the
>> oddity of ->prep() not being idempotent at that point. Don't follow why
>> that would be necessary, though, can you expand?
> 
> ->prep() doesn't export to uring cmd, and we may not want to bother
> drivers.

Sure, we don't want it off ->uring_cmd() or anything like that.

> Also remove buffer still can't be done in ->prep().

I mean, technically it could... Same restrictions as add, however.

> Not dig into further, one big thing could be that dependency isn't
> respected in ->prep().

This is the main thing I was considering, because there's nothing
preventing it from happening outside of the fact that it makes ->prep()
not idempotent. Which is a big enough reason already, but...
Ming Lei Oct. 12, 2024, 3:35 a.m. UTC | #15
On Fri, Oct 11, 2024 at 10:49:10AM -0600, Jens Axboe wrote:
> On 10/11/24 9:45 AM, Ming Lei wrote:
> > On Fri, Oct 11, 2024 at 08:41:03AM -0600, Jens Axboe wrote:
> >> On 10/11/24 8:20 AM, Ming Lei wrote:
> >>> On Fri, Oct 11, 2024 at 07:24:27AM -0600, Jens Axboe wrote:
> >>>> On 10/10/24 9:07 PM, Ming Lei wrote:
> >>>>> On Thu, Oct 10, 2024 at 08:39:12PM -0600, Jens Axboe wrote:
> >>>>>> On 10/10/24 8:30 PM, Ming Lei wrote:
> >>>>>>> Hi Jens,
> >>>>>>>
> >>>>>>> On Thu, Oct 10, 2024 at 01:31:21PM -0600, Jens Axboe wrote:
> >>>>>>>> Hi,
> >>>>>>>>
> >>>>>>>> Discussed this with Pavel, and on his suggestion, I tried prototyping a
> >>>>>>>> "buffer update" opcode. Basically it works like
> >>>>>>>> IORING_REGISTER_BUFFERS_UPDATE in that it can update an existing buffer
> >>>>>>>> registration. But it works as an sqe rather than being a sync opcode.
> >>>>>>>>
> >>>>>>>> The idea here is that you could do that upfront, or as part of a chain,
> >>>>>>>> and have it be generically available, just like any other buffer that
> >>>>>>>> was registered upfront. You do need an empty table registered first,
> >>>>>>>> which can just be sparse. And since you can pick the slot it goes into,
> >>>>>>>> you can rely on that slot afterwards (either as a link, or just the
> >>>>>>>> following sqe).
> >>>>>>>>
> >>>>>>>> Quick'n dirty obviously, but I did write a quick test case too to verify
> >>>>>>>> that:
> >>>>>>>>
> >>>>>>>> 1) It actually works (it seems to)
> >>>>>>>
> >>>>>>> It doesn't work for ublk zc since ublk needs to provide one kernel buffer
> >>>>>>> for fs rw & net send/recv to consume, and the kernel buffer is invisible
> >>>>>>> to userspace. But  __io_register_rsrc_update() only can register userspace
> >>>>>>> buffer.
> >>>>>>
> >>>>>> I'd be surprised if this simple one was enough! In terms of user vs
> >>>>>> kernel buffer, you could certainly use the same mechanism, and just
> >>>>>> ensure that buffers are tagged appropriately. I need to think about that
> >>>>>> a little bit.
> >>>>>
> >>>>> It is actually same with IORING_OP_PROVIDE_BUFFERS, so the following
> >>>>> consumer OPs have to wait until this OP_BUF_UPDATE is completed.
> >>>>
> >>>> See below for the registered vs provided buffer confusion that seems to
> >>>> be a confusion issue here.
> >>>>
> >>>>> Suppose we have N consumers OPs which depends on OP_BUF_UPDATE.
> >>>>>
> >>>>> 1) all N OPs are linked with OP_BUF_UPDATE
> >>>>>
> >>>>> Or
> >>>>>
> >>>>> 2) submit OP_BUF_UPDATE first, and wait its completion, then submit N
> >>>>> OPs concurrently.
> >>>>
> >>>> Correct
> >>>>
> >>>>> But 1) and 2) may slow the IO handing.  In 1) all N OPs are serialized,
> >>>>> and 1 extra syscall is introduced in 2).
> >>>>
> >>>> Yes you don't want do do #1. But the OP_BUF_UPDATE is cheap enough that
> >>>> you can just do it upfront. It's not ideal in terms of usage, and I get
> >>>> where the grouping comes from. But is it possible to do the grouping in
> >>>> a less intrusive fashion with OP_BUF_UPDATE? Because it won't change any
> >>>
> >>> The most of 'intrusive' change is just on patch 4, and Pavel has commented
> >>> that it is good enough:
> >>>
> >>> https://lore.kernel.org/linux-block/ZwZzsPcXyazyeZnu@fedora/T/#m551e94f080b80ccbd2561e01da5ea8e17f7ee15d
> >>
> >> At least for me, patch 4 looks fine. The problem occurs when you start
> >> needing to support this different buffer type, which is in patch 6. I'm
> >> not saying we can necessarily solve this with OP_BUF_UPDATE, I just want
> >> to explore that path because if we can, then patch 6 turns into "oh
> >> let's just added registered/fixed buffer support to these ops that don't
> >> currently support it". And that would be much nicer indeed.
> > 
> > OK, in my local V7, the buffer type is actually aligned with
> > BUFFER_SELECT from both interface & use viewpoint, since member SQE
> > have three empty flags available.
> > 
> > I will post V7 for review.
> 
> OK, I'll take a look once posted.
> 
> >>>> of the other ops in terms of buffer consumption, they'd just need fixed
> >>>> buffer support and you'd flag the buffer index in sqe->buf_index. And
> >>>> the nice thing about that is that while fixed/registered buffers aren't
> >>>> really used on the networking side yet (as they don't bring any benefit
> >>>> yet), adding support for them could potentially be useful down the line
> >>>> anyway.
> >>>
> >>> With 2), two extra syscalls are added for each ublk IO, one is provide
> >>> buffer, another is remove buffer. The two syscalls have to be sync with
> >>> consumer OPs.
> >>>
> >>> I can understand the concern, but if the change can't improve perf or
> >>> even slow things done, it loses its value.
> >>
> >> It'd be one extra syscall, as the remove can get bundled with the next
> >> add. But your point still stands, yes it will add extra overhead,
> > 
> > It can't be bundled.
> 
> Don't see why not, but let's review v7 and see what comes up.
> 
> > And the kernel buffer is blk-mq's request pages, which is per tag.
> > 
> > Such as, for ublk-target, IO comes to tag 0, after this IO(tag 0) is
> > handled, how can we know if there is new IO comes to tag 0 immediately? :-)
> 
> Gotcha, yeah sounds like that needs to remain a kernel buffer.
> 
> >> although be it pretty darn minimal. I'm actually more concerned with the
> >> complexity for handling it. While the OP_BUF_UPDATE will always
> >> complete immediately, there's no guarantee it's the next cqe you pull
> >> out when peeking post submission.
> >>
> >>>>> The same thing exists in the next OP_BUF_UPDATE which has to wait until
> >>>>> all the previous buffer consumers are done. So the same slow thing are
> >>>>> doubled. Not mention the application will become more complicated.
> >>>>
> >>>> It does not, you can do an update on a buffer that's already inflight.
> >>>
> >>> UPDATE may not match the case, actually two OPs are needed, one is
> >>> provide buffer OP and the other is remove buffer OP, both have to deal
> >>> with the other subsystem(ublk). Remove buffer needs to be done after all
> >>> consumer OPs are done immediately.
> >>
> >> You don't necessarily need the remove. If you always just use the same
> >> slot for these, then the OP_BUF_UPDATE will just update the current
> >> location.
> > 
> > The buffer is per tag, and can't guarantee to be reused immediately,
> > otherwise it isn't zero copy any more.
> 
> Don't follow this one either. As long as reuse keeps existing IO fine,
> then it should be fine? I'm not talking about reusing the buffer, just
> the slot it belongs to.

Both the provide-buffer and remove-buffer OPs deal with the IO buffer of the same
unique tag, and the buffer is indexed by a key provided by the user, which
is similar to ->buf_index. It is definitely not possible to remove the
old buffer and add a new buffer in a single command with the same key. Another
reason is that we don't know whether any new IO (with a buffer) will come in at that time.

Also, at any time there is only one inflight IO for a given tag in the storage world,
so no new IO can come in on the current slot, since the old IO can't be
completed until the kernel buffer is removed.

> 
> >>>> would be totally fine in terms of performance. OP_BUF_UPDATE will
> >>>> _always_ completely immediately and inline, which means that it'll
> >>>> _always_ be immediately available post submission. The only think you'd
> >>>> ever have to worry about in terms of failure is a badly formed request,
> >>>> which is a programming issue, or running out of memory on the host.
> >>>>
> >>>>> Also it makes error handling more complicated, io_uring has to remove
> >>>>> the kernel buffer when the current task is exit, dependency or order with
> >>>>> buffer provider is introduced.
> >>>>
> >>>> Why would that be? They belong to the ring, so should be torn down as
> >>>> part of the ring anyway? Why would they be task-private, but not
> >>>> ring-private?
> >>>
> >>> It is kernel buffer, which belongs to provider(such as ublk) instead
> >>> of uring, application may panic any time, then io_uring has to remove
> >>> the buffer for notifying the buffer owner.
> >>
> >> But it could be an application buffer, no? You'd just need the
> >> application to provide it to ublk and have it mapped, rather than have
> >> ublk allocate it in-kernel and then use that.
> > 
> > The buffer is actually kernel 'request/bio' pages of /dev/ublkbN, and now we
> > forward and borrow it to io_uring OPs(fs rw, net send/recv), so it can't be
> > application buffer, not same with net rx.
> 
> So you borrow the kernel pages, but presumably these are all from
> O_DIRECT and have a user mapping?

Yes.

> 
> >>> In concept grouping is simpler because:
> >>>
> >>> - buffer lifetime is aligned with group leader lifetime, so we needn't
> >>> worry buffer leak because of application accidental exit
> >>
> >> But if it was an application buffer, that would not be a concern.
> > 
> > Yeah, but storage isn't same with network, here application buffer can't
> > support zc.
> 
> Maybe I'm dense, but can you expand on why that's the case?

Network data can come in at any time, so I guess the rx buffer has to be provided
beforehand; it is just one buffer, which can be built from application or kernel memory.

Storage follows a client/server model, and data can only come after the request
is sent to the device, so the buffer is prepared together with the request before
sending it; in current Linux that buffer is built in the kernel, so it
has to be a kernel buffer (bio->bi_bvec).

> 
> >> I do like the concept of the ephemeral buffer, the downside is that we
> >> need per-op support for it too. And while I'm not totally against doing
> > 
> > Can you explain per-op support a bit?
> > 
> > Now the buffer has been provided by one single uring command.
> 
> I mean the need to do:
> 
> +	if (req->flags & REQ_F_GROUP_KBUF) {
> +		ret = io_import_group_kbuf(req, rw->addr, rw->len, ITER_SOURCE,
> +				&io->iter);
> +		if (unlikely(ret))
> +			return ret;
> +	}
> 
> for picking such a buffer.

The above is for starting to consume the buffer; its usage is the same as
the buffer_select case, in which the buffer still needs to be imported.

And this patchset provides this buffer (REQ_F_GROUP_KBUF) in a single
uring_cmd.

The usage model is basically that driver-specific commands provide &
remove the kernel buffer, and the buffer is consumed by generic io_uring
OPs through a generic interface in group style.
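
As a rough sketch of that usage model from the application side (pseudo code;
the group flag names/values below are placeholders, not the real uAPI):

#include <string.h>
#include <liburing.h>

/* placeholders only; the real flag bits come from the sqe group patches */
#define SQE_GROUP_LEADER_PLACEHOLDER	(1U << 6)
#define SQE_GROUP_MEMBER_PLACEHOLDER	(1U << 7)

static void queue_one_ublk_io(struct io_uring *ring, int ublk_fd, int sock_fd,
			      unsigned int len)
{
	struct io_uring_sqe *sqe;

	/* group leader: driver-specific uring_cmd that leases the kernel buffer */
	sqe = io_uring_get_sqe(ring);
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = ublk_fd;
	sqe->cmd_op = 0;				/* placeholder for the ublk command */
	sqe->flags |= SQE_GROUP_LEADER_PLACEHOLDER;

	/* group member: generic OP consuming a sub-range of the leased buffer,
	 * addressed by (offset, len) instead of a userspace pointer */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_send(sqe, sock_fd, NULL, len, 0);
	sqe->addr = 0;					/* offset into the leased buffer */
	sqe->flags |= SQE_GROUP_MEMBER_PLACEHOLDER;

	/* one submission covers leader + members; the buffer is returned to the
	 * driver once all members have completed */
	io_uring_submit(ring);
}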

It looks like you and Pavel hope that generic provide/remove kernel
buffer OPs can be added from the beginning.

> 
> >> that, it would be lovely if we could utilize and existing mechanism for
> >> that rather than add another one.
> > 
> > If existing mechanism can cover everything, our linux may not progress any
> > more.
> 
> That's not what I mean at all. We already have essentially three ways to
> get a buffer destination for IO:
> 
> 1) Just pass in an uaddr+len or an iovec
> 2) Set ->buf_index, the op needs to support this separately to grab a
>    registered buffer for IO.
> 3) For pollable stuff, provided buffers, either via the ring or the
>    legacy/classic approach.
> 
> This adds a 4th method, which shares the characteristics of 2+3 that the
> op needs to support it. This is the whole motivation to poke at having a
> way to use the normal registered buffer table for this, because then
> this falls into method 2 above.

Here the kernel buffer has a very short lifetime, aligned with the block IO
request in the ublk use case, which is another big difference from 2); it can
even be shorter-lived than a provided buffer.

As we discussed, there are several disadvantages to the existing mechanisms
for this use case:

1) it is actually the above 3) provided buffer rather than a registered buffer,
because:
- a registered buffer is long-lived and has no dependency problem
- a registered buffer can be imported in ->prep()

here the kernel buffer has a short lifetime and can't be imported in ->prep()
because of the buffer dependency.

2) dependencies between consumer OPs and the provide-buffer & remove-buffer
OPs, which add two extra syscalls for handling each ublk IO and make the
application more complicated.

3) the application may exit abnormally or panic; io_uring has to
remove this kernel buffer from the table when that happens, and removing
the kernel buffer requires returning it to the buffer provider.

4) the existing provide & register buffer code needs big changes to support
providing/removing a kernel buffer, and the model is more complicated than
the group buffer method. The existing buffer-consuming code (buffer import)
has to change too because it isn't a real registered buffer. And if we treat it
as a provided buffer, that is basically what this patch is doing.

It looks like you and Pavel are fine with patch 4, which adds the sqe/IO group
concept; I'm just wondering whether you may take one step further and consider
the group buffer concept, which is valid only group-wide (a local buffer),
doesn't need registration, needn't be global, and is simple to implement & use.

The con could be that one new buffer type is added; even though BUFFER_SELECT
can be reused, it is not as flexible.

> 
> I'm not at all saying "oh we can't add this new feature", the only thing
> I'm addressing is HOW we do that. I don't think anybody disagrees that
> we need zero copy for ublk, and honestly I would love to see that sooner
> rather than later!

If fuse switches to uring_cmd, it may benefit from this too.

Current fuse can only support WRITE zero copy; READ zc has never been
supported because of splice/pipe's limits. I discussed this with Miklos, and
it turns out supporting fuse READ zc with splice is an impossible task.

> 
> >>>>>> There are certainly many different ways that can get propagated which
> >>>>>> would not entail a complicated mechanism. I really like the aspect of
> >>>>>> having the identifier being the same thing that we already use, and
> >>>>>> hence not needing to be something new on the side.
> >>>>>>
> >>>>>>> Also multiple OPs may consume the buffer concurrently, which can't be
> >>>>>>> supported by buffer select.
> >>>>>>
> >>>>>> Why not? You can certainly have multiple ops using the same registered
> >>>>>> buffer concurrently right now.
> >>>>>
> >>>>> Please see the above problem.
> >>>>>
> >>>>> Also I remember that the selected buffer is removed from buffer list,
> >>>>> see io_provided_buffer_select(), but maybe I am wrong.
> >>>>
> >>>> You're mixing up provided and registered buffers. Provided buffers are
> >>>> ones that the applications gives to the kernel, and the kernel grabs and
> >>>> consumes them. Then the application replenishes, repeat.
> >>>>
> >>>> Registered buffers are entirely different, those are registered with the
> >>>> kernel and we can do things like pre-gup the pages so we don't have to
> >>>> do them for every IO. They are entirely persistent, any multiple ops can
> >>>> keep using them, concurrently. They don't get consumed by an IO like
> >>>> provided buffers, they remain in place until they get unregistered (or
> >>>> updated, like my patch) at some point.
> >>>
> >>> I know the difference.
> >>
> >> But io_provided_buffer_select() has nothing to do with registered/fixed
> >> buffers or this use case, the above "remove from buffer list" is an
> >> entirely different buffer concept. So there's some confusion here, just
> >> wanted to make that clear.
> >>
> >>> The thing is that here we can't register the kernel buffer in ->prep(),
> >>> and it has to be provided in ->issue() of uring command. That is similar
> >>> with provided buffer.
> >>
> >> What's preventing it from registering it in ->prep()? It would be a bit
> >> odd, but there would be nothing preventing it codewise, outside of the
> >> oddity of ->prep() not being idempotent at that point. Don't follow why
> >> that would be necessary, though, can you expand?
> > 
> > ->prep() doesn't export to uring cmd, and we may not want to bother
> > drivers.
> 
> Sure, we don't want it off ->uring_cmd() or anything like that.
> 
> > Also remove buffer still can't be done in ->prep().
> 
> I mean, technically it could... Same restrictions as add, however.
> 
> > Not dig into further, one big thing could be that dependency isn't
> > respected in ->prep().
> 
> This is the main thing I was considering, because there's nothing
> preventing it from happening outside of the fact that it makes ->prep()
> not idempotent. Which is a big enough reason already, but...

It depends on whether the OP needs to support IO_LINK; if yes, it can't be
done in ->prep(), otherwise the link rule is broken.

But here IO_LINK is important because the buffer dependency really exists;
IMO, we shouldn't put that limit on this OP from the user's viewpoint.


Thanks
Ming
Pavel Begunkov Oct. 14, 2024, 6:40 p.m. UTC | #16
On 10/11/24 16:45, Ming Lei wrote:
> On Fri, Oct 11, 2024 at 08:41:03AM -0600, Jens Axboe wrote:
>> On 10/11/24 8:20 AM, Ming Lei wrote:
>>> On Fri, Oct 11, 2024 at 07:24:27AM -0600, Jens Axboe wrote:
>>>> On 10/10/24 9:07 PM, Ming Lei wrote:
>>>>> On Thu, Oct 10, 2024 at 08:39:12PM -0600, Jens Axboe wrote:
>>>>>> On 10/10/24 8:30 PM, Ming Lei wrote:
>>>>>>> Hi Jens,
...
>>>>> Suppose we have N consumers OPs which depends on OP_BUF_UPDATE.
>>>>>
>>>>> 1) all N OPs are linked with OP_BUF_UPDATE
>>>>>
>>>>> Or
>>>>>
>>>>> 2) submit OP_BUF_UPDATE first, and wait its completion, then submit N
>>>>> OPs concurrently.
>>>>
>>>> Correct
>>>>
>>>>> But 1) and 2) may slow the IO handing.  In 1) all N OPs are serialized,
>>>>> and 1 extra syscall is introduced in 2).
>>>>
>>>> Yes you don't want do do #1. But the OP_BUF_UPDATE is cheap enough that
>>>> you can just do it upfront. It's not ideal in terms of usage, and I get
>>>> where the grouping comes from. But is it possible to do the grouping in
>>>> a less intrusive fashion with OP_BUF_UPDATE? Because it won't change any
>>>
>>> The most of 'intrusive' change is just on patch 4, and Pavel has commented
>>> that it is good enough:
>>>
>>> https://lore.kernel.org/linux-block/ZwZzsPcXyazyeZnu@fedora/T/#m551e94f080b80ccbd2561e01da5ea8e17f7ee15d

Trying to catch up on the thread. I do think the patch is tolerable and
mergeable, but I do think it adds quite a bit of complication to the path if
you try to map out what states a request can be in and what
dependencies there are, and then the follow-up patches have to go into each
io_uring opcode and add support for leased buffers. And I'm afraid
that we'll also need feedback from the completion of those to let
the buffer know which ranges now have data / are initialised. One typical
problem for page flipping rx, for example, is that you need to have
a full page of data to map it, otherwise it should be prezeroed,
which is too expensive; the same problem exists without mmap'ing
and directly exposing pages to the user.

>> At least for me, patch 4 looks fine. The problem occurs when you start
>> needing to support this different buffer type, which is in patch 6. I'm
>> not saying we can necessarily solve this with OP_BUF_UPDATE, I just want
>> to explore that path because if we can, then patch 6 turns into "oh
>> let's just added registered/fixed buffer support to these ops that don't
>> currently support it". And that would be much nicer indeed.
...
>>>> would be totally fine in terms of performance. OP_BUF_UPDATE will
>>>> _always_ completely immediately and inline, which means that it'll
>>>> _always_ be immediately available post submission. The only think you'd
>>>> ever have to worry about in terms of failure is a badly formed request,
>>>> which is a programming issue, or running out of memory on the host.
>>>>
>>>>> Also it makes error handling more complicated, io_uring has to remove
>>>>> the kernel buffer when the current task is exit, dependency or order with
>>>>> buffer provider is introduced.
>>>>
>>>> Why would that be? They belong to the ring, so should be torn down as
>>>> part of the ring anyway? Why would they be task-private, but not
>>>> ring-private?
>>>
>>> It is kernel buffer, which belongs to provider(such as ublk) instead
>>> of uring, application may panic any time, then io_uring has to remove
>>> the buffer for notifying the buffer owner.
>>
>> But it could be an application buffer, no? You'd just need the
>> application to provide it to ublk and have it mapped, rather than have
>> ublk allocate it in-kernel and then use that.
> 
> The buffer is actually kernel 'request/bio' pages of /dev/ublkbN, and now we
> forward and borrow it to io_uring OPs(fs rw, net send/recv), so it can't be
> application buffer, not same with net rx.

I don't see any problem with dropping buffers from the table
on exit; there is a lot of stuff a thread already does for io_uring
when it exits.


>>> In concept grouping is simpler because:
>>>
>>> - buffer lifetime is aligned with group leader lifetime, so we needn't
>>> worry buffer leak because of application accidental exit
>>
>> But if it was an application buffer, that would not be a concern.
> 
> Yeah, but storage isn't same with network, here application buffer can't
> support zc.

Maybe I missed how it came to app buffers, but the thing I
initially mentioned is about storing the kernel buffer in
the table, without any user pointers and user buffers.

>>> - the buffer is borrowed to consumer OPs, and returned back after all
>>> consumers are done, this way avoids any dependency
>>>
>>> Meantime OP_BUF_UPDATE(provide buffer OP, remove buffer OP) becomes more
>>> complicated:
>>>
>>> - buffer leak because of app panic

Then io_uring dies and releases the buffers. Or we can even add
some code to remove them; as mentioned, any task that has ever
submitted a request already runs some io_uring code on exit.

>>> - buffer dependency issue: consumer OPs depend on provide buffer OP,
>>> 	remove buffer OP depends on consumer OPs; two syscalls has to be
>>> 	added for handling single ublk IO.
>>
>> Seems like most of this is because of the kernel buffer too, no?
> 
> Yeah.
> 
>>
>> I do like the concept of the ephemeral buffer, the downside is that we
>> need per-op support for it too. And while I'm not totally against doing
> 
> Can you explain per-op support a bit?
> 
> Now the buffer has been provided by one single uring command.
> 
>> that, it would be lovely if we could utilize and existing mechanism for
>> that rather than add another one.

That would also be more flexible, as not everything can be
handled by linked request logic, and it wouldn't require hacking
into each request type to support "consuming" leased
buffers.

Overhead-wise, let's say we fix the buffer binding order and delay it
as elaborated on below; then you can provide a buffer and link a
consumer (e.g. a send request or anything else) just as you do
now. You can also link a request returning the buffer to the
same chain if you don't need extra flexibility.

As for groups, they're complicated because of the order inversion,
the notion of a leader and so on. If we get rid of the need to impose
more semantics onto it by mediating buffer transitions through the
table, I think we can do groups if needed but make them simpler.

>> What's preventing it from registering it in ->prep()? It would be a bit
>> odd, but there would be nothing preventing it codewise, outside of the
>> oddity of ->prep() not being idempotent at that point. Don't follow why
>> that would be necessary, though, can you expand?
> 
> ->prep() doesn't export to uring cmd, and we may not want to bother
> drivers.
> 
> Also remove buffer still can't be done in ->prep().
> 
> Not dig into further, one big thing could be that dependency isn't
> respected in ->prep().

And we can just fix that and move the choosing of a buffer
to ->issue(), in which case a buffer provided by one request
will be observable to its linked requests.
Ming Lei Oct. 15, 2024, 11:05 a.m. UTC | #17
On Mon, Oct 14, 2024 at 07:40:40PM +0100, Pavel Begunkov wrote:
> On 10/11/24 16:45, Ming Lei wrote:
> > On Fri, Oct 11, 2024 at 08:41:03AM -0600, Jens Axboe wrote:
> > > On 10/11/24 8:20 AM, Ming Lei wrote:
> > > > On Fri, Oct 11, 2024 at 07:24:27AM -0600, Jens Axboe wrote:
> > > > > On 10/10/24 9:07 PM, Ming Lei wrote:
> > > > > > On Thu, Oct 10, 2024 at 08:39:12PM -0600, Jens Axboe wrote:
> > > > > > > On 10/10/24 8:30 PM, Ming Lei wrote:
> > > > > > > > Hi Jens,
> ...
> > > > > > Suppose we have N consumers OPs which depends on OP_BUF_UPDATE.
> > > > > > 
> > > > > > 1) all N OPs are linked with OP_BUF_UPDATE
> > > > > > 
> > > > > > Or
> > > > > > 
> > > > > > 2) submit OP_BUF_UPDATE first, and wait its completion, then submit N
> > > > > > OPs concurrently.
> > > > > 
> > > > > Correct
> > > > > 
> > > > > > But 1) and 2) may slow the IO handing.  In 1) all N OPs are serialized,
> > > > > > and 1 extra syscall is introduced in 2).
> > > > > 
> > > > > Yes you don't want do do #1. But the OP_BUF_UPDATE is cheap enough that
> > > > > you can just do it upfront. It's not ideal in terms of usage, and I get
> > > > > where the grouping comes from. But is it possible to do the grouping in
> > > > > a less intrusive fashion with OP_BUF_UPDATE? Because it won't change any
> > > > 
> > > > The most of 'intrusive' change is just on patch 4, and Pavel has commented
> > > > that it is good enough:
> > > > 
> > > > https://lore.kernel.org/linux-block/ZwZzsPcXyazyeZnu@fedora/T/#m551e94f080b80ccbd2561e01da5ea8e17f7ee15d
> 
> Trying to catch up on the thread. I do think the patch is tolerable and
> mergeable, but I do it adds quite a bit of complication to the path if
> you try to have a map in what state a request can be and what

I admit that sqe group adds a little complexity to the submission &
completion code, especially on the completion side.

But with your help, patch 4 has become easy to follow and sqe group
is well-defined now, and it does add the new feature of N:M dependency;
otherwise one extra syscall would be required to support N:M dependency,
so this way not only saves one syscall but also simplifies the application.

> dependencies are there, and then patches after has to go to every each
> io_uring opcode and add support for leased buffers. And I'm afraid

Only fast IO (net, fs) needs it; I don't see other OPs needing such support.

> that we'll also need to feedback from completion of those to let
> the buffer know what ranges now has data / initialised. One typical
> problem for page flipping rx, for example, is that you need to have
> a full page of data to map it, otherwise it should be prezeroed,
> which is too expensive, same problem you can have without mmap'ing
> and directly exposing pages to the user.

In the current design, the callback is only for returning the leased
buffer to its owner, and we just need io_uring to do the driver a favor
by running async IO with the leased buffer.
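
For example (purely illustrative, not the real ublk code; the struct and
helper names are invented here), the provider-side ack can stay as small as
dropping a reference and letting the normal request completion continue:

#include <linux/blk-mq.h>
#include <linux/refcount.h>

/* hypothetical lease descriptor, same shape as the one sketched earlier */
struct ublk_lease_buf {
	struct bio_vec	*bvec;
	unsigned int	 nr_bvecs;
	size_t		 len;
	void		(*ack)(struct ublk_lease_buf *);
};

/* illustration only: stand-in for whatever per-tag state the driver keeps */
struct ublk_io_stub {
	struct ublk_lease_buf	lease;	/* the leased request pages */
	refcount_t		ref;
	struct request		*rq;	/* the blk-mq request owning the pages */
};

static void ublk_lease_buf_ack(struct ublk_lease_buf *buf)
{
	struct ublk_io_stub *io = container_of(buf, struct ublk_io_stub, lease);

	/* all group member OPs are done with the borrowed pages, so the
	 * driver may now complete (or re-queue) the underlying request */
	if (refcount_dec_and_test(&io->ref))
		blk_mq_end_request(io->rq, BLK_STS_OK);
}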

It can become quite complicated if we add feedback from completion.

Your catch on short read/recv is a good one: it may leak kernel
data. The problem exists with any other approach (providing a kbuf) too;
the point is that it is a kernel buffer. What do you think of the
following approach?

diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index d72a6bbbbd12..c1bc4179b390 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -242,4 +242,14 @@ static inline void io_drop_leased_grp_kbuf(struct io_kiocb *req)
 	if (gbuf)
 		gbuf->grp_kbuf_ack(gbuf);
 }
+
+/* zero remained bytes of kernel buffer for avoiding to leak data */
+static inline void io_req_zero_remained(struct io_kiocb *req, struct iov_iter *iter)
+{
+	size_t left = iov_iter_count(iter);
+
+	printk("iter type %d, left %lu\n", iov_iter_rw(iter), left);
+	if (iov_iter_rw(iter) == READ && left > 0)
+		iov_iter_zero(left, iter);
+}
 #endif
diff --git a/io_uring/net.c b/io_uring/net.c
index 6c32be92646f..022d81b6fc65 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -899,6 +899,8 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 		*ret = IOU_STOP_MULTISHOT;
 	else
 		*ret = IOU_OK;
+	if (io_use_leased_grp_kbuf(req))
+		io_req_zero_remained(req, &kmsg->msg.msg_iter);
 	io_req_msg_cleanup(req, issue_flags);
 	return true;
 }
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 76a443fa593c..565b0e742ee5 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -479,6 +479,11 @@ static bool __io_complete_rw_common(struct io_kiocb *req, long res)
 		}
 		req_set_fail(req);
 		req->cqe.res = res;
+		if (io_use_leased_grp_kbuf(req)) {
+			struct io_async_rw *io = req->async_data;
+
+			io_req_zero_remained(req, &io->iter);
+		}
 	}
 	return false;
 }

> 
> > > At least for me, patch 4 looks fine. The problem occurs when you start
> > > needing to support this different buffer type, which is in patch 6. I'm
> > > not saying we can necessarily solve this with OP_BUF_UPDATE, I just want
> > > to explore that path because if we can, then patch 6 turns into "oh
> > > let's just added registered/fixed buffer support to these ops that don't
> > > currently support it". And that would be much nicer indeed.
> ...
> > > > > would be totally fine in terms of performance. OP_BUF_UPDATE will
> > > > > _always_ completely immediately and inline, which means that it'll
> > > > > _always_ be immediately available post submission. The only think you'd
> > > > > ever have to worry about in terms of failure is a badly formed request,
> > > > > which is a programming issue, or running out of memory on the host.
> > > > > 
> > > > > > Also it makes error handling more complicated, io_uring has to remove
> > > > > > the kernel buffer when the current task is exit, dependency or order with
> > > > > > buffer provider is introduced.
> > > > > 
> > > > > Why would that be? They belong to the ring, so should be torn down as
> > > > > part of the ring anyway? Why would they be task-private, but not
> > > > > ring-private?
> > > > 
> > > > It is kernel buffer, which belongs to provider(such as ublk) instead
> > > > of uring, application may panic any time, then io_uring has to remove
> > > > the buffer for notifying the buffer owner.
> > > 
> > > But it could be an application buffer, no? You'd just need the
> > > application to provide it to ublk and have it mapped, rather than have
> > > ublk allocate it in-kernel and then use that.
> > 
> > The buffer is actually kernel 'request/bio' pages of /dev/ublkbN, and now we
> > forward and borrow it to io_uring OPs(fs rw, net send/recv), so it can't be
> > application buffer, not same with net rx.
> 
> I don't see any problem in dropping buffers from the table
> on exit, we have a lot of stuff a thread does for io_uring
> when it exits.

io_uring cancel handling is already complicated enough; now uring
command would have two cancel code paths if providing a kernel buffer is
added:

1) io_uring_try_cancel_uring_cmd()

2) the kernel buffer cancel code path

There might be dependencies between the two.

> 
> 
> > > > In concept grouping is simpler because:
> > > > 
> > > > - buffer lifetime is aligned with group leader lifetime, so we needn't
> > > > worry buffer leak because of application accidental exit
> > > 
> > > But if it was an application buffer, that would not be a concern.
> > 
> > Yeah, but storage isn't same with network, here application buffer can't
> > support zc.
> 
> Maybe I missed how it came to app buffers, but the thing I
> initially mentioned is about storing the kernel buffer in
> the table, without any user pointers and user buffers.

Yeah, just some random words, please ignore it.

> 
> > > > - the buffer is borrowed to consumer OPs, and returned back after all
> > > > consumers are done, this way avoids any dependency
> > > > 
> > > > Meantime OP_BUF_UPDATE(provide buffer OP, remove buffer OP) becomes more
> > > > complicated:
> > > > 
> > > > - buffer leak because of app panic
> 
> Then io_uring dies and releases buffers. Or we can even add
> some code removing it, as mentioned, any task that has ever
> submitted a request already runs some io_uring code on exit.
> 
> > > > - buffer dependency issue: consumer OPs depend on the provide buffer OP,
> > > > 	the remove buffer OP depends on consumer OPs; two syscalls have to be
> > > > 	added for handling a single ublk IO.
> > > 
> > > Seems like most of this is because of the kernel buffer too, no?
> > 
> > Yeah.
> > 
> > > 
> > > I do like the concept of the ephemeral buffer, the downside is that we
> > > need per-op support for it too. And while I'm not totally against doing
> > 
> > Can you explain per-op support a bit?
> > 
> > Now the buffer has been provided by one single uring command.
> > 
> > > that, it would be lovely if we could utilize an existing mechanism for
> > > that rather than add another one.
> 
> That would also be more flexible as not everything can be
> handled by linked request logic, and wouldn't require hacking
> into each and every request type to support "consuming" leased
> buffers.

I guess you mean the 'consuming' code added in net.c and rw.c, which
can't be avoided because it is a kernel buffer and this is the first
time we are supporting one:

- there isn't a userspace address, unlike buffer select & fixed buffers
- the kernel buffer has to be returned to the provider
- the buffer has to be imported in ->issue(); it can't be done in ->prep()
- short reads/recvs have to be dealt with
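
As a minimal sketch of that consuming side (helper, field and flag
names here are illustrative, not necessarily what this series uses),
a member request would build its iov_iter from the leased buffer's
bvec table in ->issue():

static int io_import_group_kbuf(struct io_kiocb *req, u64 offset,
				size_t len, int dir, struct iov_iter *iter)
{
	/* only visible at ->issue() time, after the leader provided it */
	const struct io_uring_kernel_buf *kbuf = req->grp_kbuf;

	if (!(req->flags & REQ_F_GROUP_KBUF))
		return -EINVAL;
	if (unlikely(offset + len > kbuf->len))
		return -EFAULT;

	/* no userspace address: build the iterator from the bvecs */
	iov_iter_bvec(iter, dir, kbuf->bvec, kbuf->nr_bvecs, kbuf->len);
	iov_iter_advance(iter, offset);
	iov_iter_truncate(iter, len);
	return 0;
}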

> 
> Overhead-wise, let's say we fix the buffer binding order and delay it
> as elaborated on below; then you can provide a buffer and link a
> consumer (e.g. a send request or anything else) just as you do
> it now. You can also link a request returning the buffer to the
> same chain if you don't need extra flexibility.
> 
> As for groups, they're complicated because of the order inversion,

IMO, the group complication only exists on the completion side, and
fortunately that is well defined now.

The provide-buffer/table approach leads to a more complicated
application, with worse performance:

- two syscalls (io_uring_enter() trips) are added for each ublk IO
- one extra request is added (a group needs 2 requests, while the
add-buffer approach needs 3 requests for the simplest case), so
bigger SQ & CQ sizes are required
- extra cancel handling

Grouping simplifies buffer lifetime a lot, since io_uring needn't
care about it at all.
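
As a rough userspace sketch of the group model (liburing style;
IOSQE_SQE_GROUP, IORING_PROVIDE_GROUP_KBUF and the ublk command opcode
below come from this series or are hypothetical), one ublk IO needs a
single submission with two SQEs:

#include <stdint.h>
#include <liburing.h>

static void queue_ublk_io(struct io_uring *ring, int ublk_char_fd,
			  int backing_fd, uint64_t kbuf_off,
			  unsigned int io_len, uint64_t file_off)
{
	/* leader: ublk uring_cmd leasing the request pages as group kbuf */
	struct io_uring_sqe *lead = io_uring_get_sqe(ring);

	io_uring_prep_rw(IORING_OP_URING_CMD, lead, ublk_char_fd, NULL, 0, 0);
	lead->cmd_op = UBLK_U_IO_PROVIDE_IO_BUF;	/* hypothetical opcode */
	lead->uring_cmd_flags = IORING_PROVIDE_GROUP_KBUF;
	lead->flags |= IOSQE_SQE_GROUP;			/* group leader */

	/* member: sqe->addr/len select a sub-buffer of the leased kbuf */
	struct io_uring_sqe *mem = io_uring_get_sqe(ring);

	io_uring_prep_write(mem, backing_fd, (void *)(uintptr_t)kbuf_off,
			    io_len, file_off);

	io_uring_submit(ring);	/* one io_uring_enter() trip for both SQEs */
}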

> the notion of a leader and so on. If we get rid of the need to impose
> more semantics onto it by mediating buffer transition through the
> table, I think we can do groups if needed but make it simpler.

The situation is simply that the driver leases the buffer to io_uring;
it doesn't have to transfer it to io_uring. Once it is added to the
table, it also has to be removed from the table.

It is just like a local variable vs a global variable: the latter is
more complicated to use.

> 
> > > What's preventing it from registering it in ->prep()? It would be a bit
> > > odd, but there would be nothing preventing it codewise, outside of the
> > > oddity of ->prep() not being idempotent at that point. I don't follow why
> > > that would be necessary, though, can you expand?
> > 
> > ->prep() isn't exposed to uring cmd, and we may not want to bother
> > drivers with it.
> > 
> > Also, removing the buffer still can't be done in ->prep().
> > 
> > Without digging in further, one big thing could be that dependencies
> > aren't respected in ->prep().
> 
> And we can just fix that and move the choosing of a buffer
> to ->issue(), in which case a buffer provided by one request
> will be observable to its linked requests.

This patch does import the buffer in ->issue(), as I explained to Jens:

- either all OPs are linked together with add_kbuf & remove_kbuf, in
which case the OPs can't be issued concurrently

- or two syscalls are added for handling a single ublk IO

Neither is great from a performance viewpoint, and both complicate the
application.
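
For contrast with the group sketch above (reusing its includes; the
add_kbuf/remove_kbuf command opcodes are hypothetical), a rough sketch
of the first alternative: chaining everything with IOSQE_IO_LINK keeps
it to one submission but serializes the consumer OPs, while splitting
the remove step into its own submission is what adds the second
syscall:

static void queue_ublk_io_linked(struct io_uring *ring, int ublk_char_fd,
				 int backing_fd, unsigned int io_len,
				 uint64_t file_off)
{
	struct io_uring_sqe *add = io_uring_get_sqe(ring);
	struct io_uring_sqe *io = io_uring_get_sqe(ring);
	struct io_uring_sqe *del = io_uring_get_sqe(ring);

	/* 1) provide the kernel buffer into the table (hypothetical cmd) */
	io_uring_prep_rw(IORING_OP_URING_CMD, add, ublk_char_fd, NULL, 0, 0);
	add->cmd_op = UBLK_U_IO_ADD_KBUF;
	add->flags |= IOSQE_IO_LINK;

	/* 2) consumer OP; the link prevents concurrent issue of consumers */
	io_uring_prep_write(io, backing_fd, NULL, io_len, file_off);
	io->flags |= IOSQE_IO_LINK;

	/* 3) remove the buffer again once the consumer has completed */
	io_uring_prep_rw(IORING_OP_URING_CMD, del, ublk_char_fd, NULL, 0, 0);
	del->cmd_op = UBLK_U_IO_DEL_KBUF;

	io_uring_submit(ring);	/* 3 SQEs instead of 2 for every ublk IO */
}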

I don't think the above two can be avoided, or can you explain how to
do it?


thanks,
Ming
diff mbox series

Patch

diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
index 447fbfd32215..fde3a2ec7d9a 100644
--- a/include/linux/io_uring/cmd.h
+++ b/include/linux/io_uring/cmd.h
@@ -48,6 +48,8 @@  void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
 void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
 		unsigned int issue_flags);
 
+int io_uring_cmd_provide_kbuf(struct io_uring_cmd *ioucmd,
+		const struct io_uring_kernel_buf *grp_kbuf);
 #else
 static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
 			      struct iov_iter *iter, void *ioucmd)
@@ -67,6 +69,11 @@  static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
 		unsigned int issue_flags)
 {
 }
+static inline int io_uring_cmd_provide_kbuf(struct io_uring_cmd *ioucmd,
+		const struct io_uring_kernel_buf *grp_kbuf)
+{
+	return -EOPNOTSUPP;
+}
 #endif
 
 /*
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 2af32745ebd3..11985eeac10e 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -271,9 +271,14 @@  enum io_uring_op {
  * sqe->uring_cmd_flags		top 8bits aren't available for userspace
  * IORING_URING_CMD_FIXED	use registered buffer; pass this flag
  *				along with setting sqe->buf_index.
+ * IORING_PROVIDE_GROUP_KBUF	this command provides group kernel buffer
+ *				for member requests which can retrieve
+ *				any sub-buffer with offset(sqe->addr) and
+ *				len(sqe->len)
  */
 #define IORING_URING_CMD_FIXED	(1U << 0)
-#define IORING_URING_CMD_MASK	IORING_URING_CMD_FIXED
+#define IORING_PROVIDE_GROUP_KBUF	(1U << 1)
+#define IORING_URING_CMD_MASK	(IORING_URING_CMD_FIXED | IORING_PROVIDE_GROUP_KBUF)
 
 
 /*
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index 8391c7c7c1ec..ac92ba70de9d 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -15,6 +15,7 @@ 
 #include "alloc_cache.h"
 #include "rsrc.h"
 #include "uring_cmd.h"
+#include "kbuf.h"
 
 static struct uring_cache *io_uring_async_get(struct io_kiocb *req)
 {
@@ -175,6 +176,26 @@  void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_done);
 
+/*
+ * Provide kernel buffer for sqe group members to consume, and the caller
+ * has to guarantee that the provided buffer and the callback are valid
+ * until the callback is called.
+ */
+int io_uring_cmd_provide_kbuf(struct io_uring_cmd *ioucmd,
+		const struct io_uring_kernel_buf *grp_kbuf)
+{
+	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+
+	if (unlikely(!(ioucmd->flags & IORING_PROVIDE_GROUP_KBUF)))
+		return -EINVAL;
+
+	if (unlikely(!req_support_group_dep(req)))
+		return -EINVAL;
+
+	return io_provide_group_kbuf(req, grp_kbuf);
+}
+EXPORT_SYMBOL_GPL(io_uring_cmd_provide_kbuf);
+
 static int io_uring_cmd_prep_setup(struct io_kiocb *req,
 				   const struct io_uring_sqe *sqe)
 {
@@ -207,6 +228,13 @@  int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (ioucmd->flags & ~IORING_URING_CMD_MASK)
 		return -EINVAL;
 
+	if (ioucmd->flags & IORING_PROVIDE_GROUP_KBUF) {
+		/* LEADER flag isn't set yet, so check GROUP only */
+		if (!(req->flags & REQ_F_SQE_GROUP))
+			return -EINVAL;
+		req->flags |= REQ_F_SQE_GROUP_DEP;
+	}
+
 	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
 		struct io_ring_ctx *ctx = req->ctx;
 		u16 index;