Message ID | 20201103153329.531942-6-trondmy@kernel.org
---|---
State | New, archived
Series | Readdir enhancements
On 3 Nov 2020, at 10:33, trondmy@kernel.org wrote:

> From: Trond Myklebust <trond.myklebust@hammerspace.com>
>
> If a readdir call returns more data than we can fit into one page
> cache page, then allocate a new one for that data rather than
> discarding the data.
>
> Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
> ---
>  fs/nfs/dir.c | 46 ++++++++++++++++++++++++++++++++++++++++++----
>  1 file changed, 42 insertions(+), 4 deletions(-)
>
> diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
> index b4861a33ad60..788c2a2eeaa3 100644
> --- a/fs/nfs/dir.c
> +++ b/fs/nfs/dir.c
> @@ -321,6 +321,26 @@ static void nfs_readdir_page_set_eof(struct page *page)
>  	kunmap_atomic(array);
>  }
>
> +static void nfs_readdir_page_unlock_and_put(struct page *page)
> +{
> +	unlock_page(page);
> +	put_page(page);
> +}
> +
> +static struct page *nfs_readdir_page_get_next(struct address_space *mapping,
> +					      pgoff_t index, u64 cookie)
> +{
> +	struct page *page;
> +
> +	page = nfs_readdir_page_get_locked(mapping, index, cookie);
> +	if (page) {
> +		if (nfs_readdir_page_last_cookie(page) == cookie)
> +			return page;
> +		nfs_readdir_page_unlock_and_put(page);
> +	}
> +	return NULL;
> +}
> +
>  static inline
>  int is_32bit_api(void)
>  {
> @@ -638,13 +658,15 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry,
>  }
>
>  /* Perform conversion from xdr to cache array */
> -static
> -int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry,
> -				struct page **xdr_pages, struct page *page, unsigned int buflen)
> +static int nfs_readdir_page_filler(struct nfs_readdir_descriptor *desc,
> +				   struct nfs_entry *entry,
> +				   struct page **xdr_pages,
> +				   struct page *fillme, unsigned int buflen)
>  {
> +	struct address_space *mapping = desc->file->f_mapping;
>  	struct xdr_stream stream;
>  	struct xdr_buf buf;
> -	struct page *scratch;
> +	struct page *scratch, *new, *page = fillme;
>  	int status;
>
>  	scratch = alloc_page(GFP_KERNEL);
> @@ -667,6 +689,19 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
>  					desc->dir_verifier);
>
>  		status = nfs_readdir_add_to_array(entry, page);
> +		if (status != -ENOSPC)
> +			continue;
> +
> +		if (page->mapping != mapping)
> +			break;

^^ How can this happen?

Ben
On Tue, 2020-11-03 at 10:55 -0500, Benjamin Coddington wrote:
> On 3 Nov 2020, at 10:33, trondmy@kernel.org wrote:
>
> > From: Trond Myklebust <trond.myklebust@hammerspace.com>
> >
> > If a readdir call returns more data than we can fit into one page
> > cache page, then allocate a new one for that data rather than
> > discarding the data.
> >
> > [...]
> >
> >  		status = nfs_readdir_add_to_array(entry, page);
> > +		if (status != -ENOSPC)
> > +			continue;
> > +
> > +		if (page->mapping != mapping)
> > +			break;
>
> ^^ How can this happen?

We call the same routines from uncached_readdir(), so the above is
really there in order to exit when we see those anonymous pages.
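[Editor's note: below is a minimal, hypothetical sketch, not part of the patch and with an invented helper name, of why the page->mapping comparison is enough to tell the two callers apart. uncached_readdir() hands the filler an anonymous page obtained straight from alloc_page(), which never gets a ->mapping, whereas a page obtained through the directory's page cache has ->mapping pointing at the file's address_space.]

/*
 * Illustration only -- the patch open-codes this comparison.  An
 * anonymous page from alloc_page() has page->mapping == NULL, so it can
 * never match the directory's address_space; only pages that actually
 * live in the page cache pass this test and may chain further pages.
 */
static bool nfs_readdir_page_is_cached(struct page *page,
				       struct address_space *mapping)
{
	return page->mapping == mapping;
}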
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index b4861a33ad60..788c2a2eeaa3 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -321,6 +321,26 @@ static void nfs_readdir_page_set_eof(struct page *page)
 	kunmap_atomic(array);
 }
 
+static void nfs_readdir_page_unlock_and_put(struct page *page)
+{
+	unlock_page(page);
+	put_page(page);
+}
+
+static struct page *nfs_readdir_page_get_next(struct address_space *mapping,
+					      pgoff_t index, u64 cookie)
+{
+	struct page *page;
+
+	page = nfs_readdir_page_get_locked(mapping, index, cookie);
+	if (page) {
+		if (nfs_readdir_page_last_cookie(page) == cookie)
+			return page;
+		nfs_readdir_page_unlock_and_put(page);
+	}
+	return NULL;
+}
+
 static inline
 int is_32bit_api(void)
 {
@@ -638,13 +658,15 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry,
 }
 
 /* Perform conversion from xdr to cache array */
-static
-int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry,
-				struct page **xdr_pages, struct page *page, unsigned int buflen)
+static int nfs_readdir_page_filler(struct nfs_readdir_descriptor *desc,
+				   struct nfs_entry *entry,
+				   struct page **xdr_pages,
+				   struct page *fillme, unsigned int buflen)
 {
+	struct address_space *mapping = desc->file->f_mapping;
 	struct xdr_stream stream;
 	struct xdr_buf buf;
-	struct page *scratch;
+	struct page *scratch, *new, *page = fillme;
 	int status;
 
 	scratch = alloc_page(GFP_KERNEL);
@@ -667,6 +689,19 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
 					desc->dir_verifier);
 
 		status = nfs_readdir_add_to_array(entry, page);
+		if (status != -ENOSPC)
+			continue;
+
+		if (page->mapping != mapping)
+			break;
+		new = nfs_readdir_page_get_next(mapping, page->index + 1,
+						entry->prev_cookie);
+		if (!new)
+			break;
+		if (page != fillme)
+			nfs_readdir_page_unlock_and_put(page);
+		page = new;
+		status = nfs_readdir_add_to_array(entry, page);
 	} while (!status && !entry->eof);
 
 	switch (status) {
@@ -682,6 +717,9 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
 		break;
 	}
 
+	if (page != fillme)
+		nfs_readdir_page_unlock_and_put(page);
+
 	put_page(scratch);
 	return status;
 }
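[Editor's note: for readers skimming the patch, here is a condensed restatement of the new -ENOSPC path in nfs_readdir_page_filler(), written as a hypothetical helper; the name nfs_readdir_array_overflow and the factoring are invented, and the patch itself keeps this logic inline. It uses only helpers introduced by this series: on overflow, continue into the page-cache page at index + 1, but only if that page begins at entry->prev_cookie, and drop the reference on any intermediate overflow page that was being held.]

/*
 * Sketch of the overflow handling added above, factored out for clarity.
 * Returns the result of retrying the entry on the next page, or -ENOSPC
 * if the filler should stop (uncached readdir, or the next page-cache
 * page already starts at a different cookie).
 */
static int nfs_readdir_array_overflow(struct nfs_readdir_descriptor *desc,
				      struct nfs_entry *entry,
				      struct page **pagep, struct page *fillme)
{
	struct address_space *mapping = desc->file->f_mapping;
	struct page *page = *pagep, *new;

	/* Anonymous page from uncached_readdir(): nothing to grow into. */
	if (page->mapping != mapping)
		return -ENOSPC;
	new = nfs_readdir_page_get_next(mapping, page->index + 1,
					entry->prev_cookie);
	if (!new)
		return -ENOSPC;
	/* Release the previous overflow page; the caller still owns fillme. */
	if (page != fillme)
		nfs_readdir_page_unlock_and_put(page);
	*pagep = new;
	return nfs_readdir_add_to_array(entry, new);
}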