
[v4,1/3] vduse: avoid using __GFP_NOFAIL

Message ID 20240830202823.21478-2-21cnbao@gmail.com (mailing list archive)
State New
Series mm/vdpa: correct misuse of non-direct-reclaim __GFP_NOFAIL and improve related doc and warn

Commit Message

Barry Song Aug. 30, 2024, 8:28 p.m. UTC
From: Jason Wang <jasowang@redhat.com>

mm doesn't support non-blockable __GFP_NOFAIL allocation: persisting
in providing __GFP_NOFAIL service to users that cannot perform
direct memory reclaim would only result in an endless busy loop.

Therefore, in such cases, the current mm-core may directly return
a NULL pointer:

static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                                                struct alloc_context *ac)
{
        ...
        if (gfp_mask & __GFP_NOFAIL) {
                /*
                 * All existing users of the __GFP_NOFAIL are blockable, so warn
                 * of any new users that actually require GFP_NOWAIT
                 */
                if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
                        goto fail;
                ...
        }
        ...
fail:
        warn_alloc(gfp_mask, ac->nodemask,
                        "page allocation failure: order:%u", order);
got_pg:
        return page;
}

Unfortunately, vdpa does that nofail allocation under a non-sleepable
lock. A possible way to fix that is to move the page allocation out
of the lock into the caller, but having to allocate a huge number of
pages and an auxiliary page array seems to be problematic as well, per
Tetsuo: "You should implement proper error handling instead of
using __GFP_NOFAIL if count can become large."
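
For illustration, moving the allocation out of the lock would mean
something like the hypothetical sketch below (not what this patch
does, and the identifiers are made up): the page array and every
page would have to be preallocated, and unwound on failure, before
the non-sleepable lock is taken:

	struct page **pages;
	unsigned long i, count = bounce_size >> PAGE_SHIFT;

	/* may sleep, so must run before taking the spinlock */
	pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;
	for (i = 0; i < count; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto err_free; /* unwind pages[0..i-1], free the array */
	}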

So I chose another way, which does not release the kernel bounce
pages when the user registers userspace bounce pages. Then we can
avoid allocating in paths where failure is not an option (e.g. in
the release path). We pay for this with higher memory usage, as the
kernel bounce pages are not released, but further optimizations
could be done on top.
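
The resulting layout is sketched below; the comments are explanatory
annotations, not part of the patch. The kernel page is allocated once
and kept until domain teardown, while a registered userspace page
merely shadows it:

	struct vduse_bounce_map {
		struct page *bounce_page;      /* kernel page, kept for the domain's lifetime */
		struct page *user_bounce_page; /* pinned userspace page, NULL if none registered */
		u64 orig_phys;
	};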

Fixes: 6c77ed22880d ("vduse: Support using userspace pages as bounce buffer")
Reviewed-by: Xie Yongji <xieyongji@bytedance.com>
Tested-by: Xie Yongji <xieyongji@bytedance.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
[v-songbaohua@oppo.com: Refine the changelog]
Signed-off-by: Barry Song <v-songbaohua@oppo.com>
---
 drivers/vdpa/vdpa_user/iova_domain.c | 19 +++++++++++--------
 drivers/vdpa/vdpa_user/iova_domain.h |  1 +
 2 files changed, 12 insertions(+), 8 deletions(-)

Comments

David Hildenbrand Sept. 2, 2024, 7:33 a.m. UTC | #1
On 30.08.24 22:28, Barry Song wrote:
> From: Jason Wang <jasowang@redhat.com>
>
> [...]
>
> diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
> index 791d38d6284c..58116f89d8da 100644
> --- a/drivers/vdpa/vdpa_user/iova_domain.c
> +++ b/drivers/vdpa/vdpa_user/iova_domain.c
> @@ -162,6 +162,7 @@ static void vduse_domain_bounce(struct vduse_iova_domain *domain,
>   				enum dma_data_direction dir)
>   {
>   	struct vduse_bounce_map *map;
> +	struct page *page;
>   	unsigned int offset;
>   	void *addr;
>   	size_t sz;
> @@ -178,7 +179,10 @@ static void vduse_domain_bounce(struct vduse_iova_domain *domain,
>   			    map->orig_phys == INVALID_PHYS_ADDR))
>   			return;
>   
> -		addr = kmap_local_page(map->bounce_page);
> +		page = domain->user_bounce_pages ?
> +		       map->user_bounce_page : map->bounce_page;
> +
> +		addr = kmap_local_page(page);
>   		do_bounce(map->orig_phys + offset, addr + offset, sz, dir);
>   		kunmap_local(addr);
>   		size -= sz;
> @@ -270,9 +274,8 @@ int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
>   				memcpy_to_page(pages[i], 0,
>   					       page_address(map->bounce_page),
>   					       PAGE_SIZE);
> -			__free_page(map->bounce_page);
>   		}
> -		map->bounce_page = pages[i];
> +		map->user_bounce_page = pages[i];
>   		get_page(pages[i]);
>   	}
>   	domain->user_bounce_pages = true;
> @@ -297,17 +300,17 @@ void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
>   		struct page *page = NULL;
>   
>   		map = &domain->bounce_maps[i];
> -		if (WARN_ON(!map->bounce_page))
> +		if (WARN_ON(!map->user_bounce_page))
>   			continue;
>   
>   		/* Copy user page to kernel page if it's in use */
>   		if (map->orig_phys != INVALID_PHYS_ADDR) {
> -			page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
> +			page = map->bounce_page;

Why don't we need a kmap_local_page(map->bounce_page) here, but we might 
perform one / have performed one in vduse_domain_bounce?

Maybe we should simply use

memcpy_page(map->bounce_page, 0, map->user_bounce_page, 0, PAGE_SIZE)

?
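
memcpy_page() maps both pages with kmap_local_page() internally, so the
copy could be reduced to roughly the following (an illustrative sketch,
not the applied patch):

		/* Copy user page to kernel page if it's in use */
		if (map->orig_phys != INVALID_PHYS_ADDR)
			memcpy_page(map->bounce_page, 0,
				    map->user_bounce_page, 0, PAGE_SIZE);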
Jason Wang Sept. 2, 2024, 7:58 a.m. UTC | #2
On Mon, Sep 2, 2024 at 3:33 PM David Hildenbrand <david@redhat.com> wrote:
>
> On 30.08.24 22:28, Barry Song wrote:
> > [...]
> >
> > @@ -297,17 +300,17 @@ void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
> >               struct page *page = NULL;
> >
> >               map = &domain->bounce_maps[i];
> > -             if (WARN_ON(!map->bounce_page))
> > +             if (WARN_ON(!map->user_bounce_page))
> >                       continue;
> >
> >               /* Copy user page to kernel page if it's in use */
> >               if (map->orig_phys != INVALID_PHYS_ADDR) {
> > -                     page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
> > +                     page = map->bounce_page;
>
> Why don't we need a kmap_local_page(map->bounce_page) here, but we might
> perform one / have performed one in vduse_domain_bounce?

I think it's another bug that needs to be fixed.

Yongji, do you want to fix this?

Thanks

David Hildenbrand Sept. 2, 2024, 8:30 a.m. UTC | #3
On 02.09.24 09:58, Jason Wang wrote:
> On Mon, Sep 2, 2024 at 3:33 PM David Hildenbrand <david@redhat.com> wrote:
>>
>> On 30.08.24 22:28, Barry Song wrote:
>>> [...]
>>>
>>>                /* Copy user page to kernel page if it's in use */
>>>                if (map->orig_phys != INVALID_PHYS_ADDR) {
>>> -                     page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
>>> +                     page = map->bounce_page;
>>
>> Why don't we need a kmap_local_page(map->bounce_page) here, but we might
>> perform one / have performed one in vduse_domain_bounce?
> 
> I think it's another bug that needs to be fixed.
> 
> Yongji, do you want to fix this?

Or maybe it works because "map->bounce_page" is now always a kernel 
page, and never one from user space that might reside in highmem.
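
For reference, memcpy_from_page() on that path already maps the source
page with kmap_local_page() internally; roughly (simplified from
include/linux/highmem.h, details may vary by kernel version):

static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	memcpy(to, from + offset, len);
	kunmap_local(from);
}

So only the destination, page_address(map->bounce_page), relies on the
page being a lowmem kernel page.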
Jason Wang Sept. 3, 2024, 12:35 a.m. UTC | #4
On Mon, Sep 2, 2024 at 4:30 PM David Hildenbrand <david@redhat.com> wrote:
>
> On 02.09.24 09:58, Jason Wang wrote:
> > On Mon, Sep 2, 2024 at 3:33 PM David Hildenbrand <david@redhat.com> wrote:
> >>
> >> On 30.08.24 22:28, Barry Song wrote:
> >>> [...]
> >>>
> >>>                /* Copy user page to kernel page if it's in use */
> >>>                if (map->orig_phys != INVALID_PHYS_ADDR) {
> >>> -                     page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
> >>> +                     page = map->bounce_page;
> >>
> >> Why don't we need a kmap_local_page(map->bounce_page) here, but we might
> >> perform one / have performed one in vduse_domain_bounce?
> >
> > I think it's another bug that needs to be fixed.
> >
> > Yongji, do you want to fix this?
>
> Or maybe it works because "map->bounce_page" is now always a kernel
> page,

Yes, the userspace bounce page is now map->user_bounce_page.

> and never one from user space that might reside in highmem.

Right. So we are actually fine :)

Thanks


Patch

diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 791d38d6284c..58116f89d8da 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -162,6 +162,7 @@  static void vduse_domain_bounce(struct vduse_iova_domain *domain,
 				enum dma_data_direction dir)
 {
 	struct vduse_bounce_map *map;
+	struct page *page;
 	unsigned int offset;
 	void *addr;
 	size_t sz;
@@ -178,7 +179,10 @@  static void vduse_domain_bounce(struct vduse_iova_domain *domain,
 			    map->orig_phys == INVALID_PHYS_ADDR))
 			return;
 
-		addr = kmap_local_page(map->bounce_page);
+		page = domain->user_bounce_pages ?
+		       map->user_bounce_page : map->bounce_page;
+
+		addr = kmap_local_page(page);
 		do_bounce(map->orig_phys + offset, addr + offset, sz, dir);
 		kunmap_local(addr);
 		size -= sz;
@@ -270,9 +274,8 @@  int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
 				memcpy_to_page(pages[i], 0,
 					       page_address(map->bounce_page),
 					       PAGE_SIZE);
-			__free_page(map->bounce_page);
 		}
-		map->bounce_page = pages[i];
+		map->user_bounce_page = pages[i];
 		get_page(pages[i]);
 	}
 	domain->user_bounce_pages = true;
@@ -297,17 +300,17 @@  void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
 		struct page *page = NULL;
 
 		map = &domain->bounce_maps[i];
-		if (WARN_ON(!map->bounce_page))
+		if (WARN_ON(!map->user_bounce_page))
 			continue;
 
 		/* Copy user page to kernel page if it's in use */
 		if (map->orig_phys != INVALID_PHYS_ADDR) {
-			page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
+			page = map->bounce_page;
 			memcpy_from_page(page_address(page),
-					 map->bounce_page, 0, PAGE_SIZE);
+					 map->user_bounce_page, 0, PAGE_SIZE);
 		}
-		put_page(map->bounce_page);
-		map->bounce_page = page;
+		put_page(map->user_bounce_page);
+		map->user_bounce_page = NULL;
 	}
 	domain->user_bounce_pages = false;
 out:
diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
index f92f22a7267d..7f3f0928ec78 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.h
+++ b/drivers/vdpa/vdpa_user/iova_domain.h
@@ -21,6 +21,7 @@ 
 
 struct vduse_bounce_map {
 	struct page *bounce_page;
+	struct page *user_bounce_page;
 	u64 orig_phys;
 };