--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -131,11 +131,9 @@ static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)
struct folio_queue *tail = rreq->buffer_tail, *new;
size_t added;
- new = kmalloc(sizeof(*new), GFP_NOFS);
+ new = netfs_folioq_alloc(GFP_NOFS);
if (!new)
return -ENOMEM;
- netfs_stat(&netfs_n_folioq);
- folioq_init(new);
new->prev = tail;
tail->next = new;
rreq->buffer_tail = new;
@@ -363,11 +361,9 @@ static int netfs_prime_buffer(struct netfs_io_request *rreq)
struct folio_batch put_batch;
size_t added;
- folioq = kmalloc(sizeof(*folioq), GFP_KERNEL);
+ folioq = netfs_folioq_alloc(GFP_KERNEL);
if (!folioq)
return -ENOMEM;
- netfs_stat(&netfs_n_folioq);
- folioq_init(folioq);
rreq->buffer = folioq;
rreq->buffer_tail = folioq;
rreq->submitted = rreq->start;
@@ -440,12 +436,10 @@ static int netfs_create_singular_buffer(struct netfs_io_request *rreq, struct fo
{
struct folio_queue *folioq;
- folioq = kmalloc(sizeof(*folioq), GFP_KERNEL);
+ folioq = netfs_folioq_alloc(GFP_KERNEL);
if (!folioq)
return -ENOMEM;
- netfs_stat(&netfs_n_folioq);
- folioq_init(folioq);
folioq_append(folioq, folio);
BUG_ON(folioq_folio(folioq, 0) != folio);
BUG_ON(folioq_folio_order(folioq, 0) != folio_order(folio));
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -8,6 +8,38 @@
#include <linux/swap.h>
#include "internal.h"
+/**
+ * netfs_folioq_alloc - Allocate a folio_queue struct
+ * @gfp: Allocation constraints
+ *
+ * Allocate, initialise and account the folio_queue struct.
+ */
+struct folio_queue *netfs_folioq_alloc(gfp_t gfp)
+{
+ struct folio_queue *fq;
+
+ fq = kmalloc(sizeof(*fq), gfp);
+ if (fq) {
+ netfs_stat(&netfs_n_folioq);
+ folioq_init(fq);
+ }
+ return fq;
+}
+EXPORT_SYMBOL(netfs_folioq_alloc);
+
+/**
+ * netfs_folioq_free - Free a folio_queue struct
+ * @folioq: The object to free
+ *
+ * Free and unaccount the folio_queue struct.
+ */
+void netfs_folioq_free(struct folio_queue *folioq)
+{
+ netfs_stat_d(&netfs_n_folioq);
+ kfree(folioq);
+}
+EXPORT_SYMBOL(netfs_folioq_free);
+
/*
* Make sure there's space in the rolling queue.
*/
@@ -87,8 +119,7 @@ struct folio_queue *netfs_delete_buffer_head(struct netfs_io_request *wreq)
if (next)
next->prev = NULL;
- netfs_stat_d(&netfs_n_folioq);
- kfree(head);
+ netfs_folioq_free(head);
wreq->buffer = next;
return next;
}
@@ -111,8 +142,7 @@ void netfs_clear_buffer(struct netfs_io_request *rreq)
folio_put(folio);
}
}
- netfs_stat_d(&netfs_n_folioq);
- kfree(p);
+ netfs_folioq_free(p);
}
}
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -21,6 +21,7 @@
enum netfs_sreq_ref_trace;
typedef struct mempool_s mempool_t;
+struct folio_queue;
/**
* folio_start_private_2 - Start an fscache write on a folio. [DEPRECATED]
@@ -453,6 +454,10 @@ void netfs_end_io_write(struct inode *inode);
int netfs_start_io_direct(struct inode *inode);
void netfs_end_io_direct(struct inode *inode);
+/* Miscellaneous APIs. */
+struct folio_queue *netfs_folioq_alloc(gfp_t gfp);
+void netfs_folioq_free(struct folio_queue *folioq);
+
/**
* netfs_inode - Get the netfs inode context from the inode
* @inode: The inode to query
Provide and use folio_queue allocation and free functions to combine the
allocation, initialisation and stat (un)accounting steps that are repeated
in several places.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
---
 fs/netfs/buffered_read.c | 12 +++---------
 fs/netfs/misc.c          | 38 ++++++++++++++++++++++++++++++++++----
 include/linux/netfs.h    |  5 +++++
 3 files changed, 42 insertions(+), 13 deletions(-)
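For illustration only (not part of the patch), here is a minimal sketch of the
call pattern the new helpers give callers.  The function example_use_folioq()
is hypothetical; only netfs_folioq_alloc() and netfs_folioq_free() come from
this change, and the error/usage handling is just an assumption about a
typical caller:

	#include <linux/netfs.h>

	/* Hypothetical caller, sketching the pattern this patch applies. */
	static int example_use_folioq(void)
	{
		struct folio_queue *fq;

		/* One call now covers the kmalloc(), the netfs_n_folioq stat
		 * increment and the folioq_init() that were open-coded at
		 * each allocation site before.
		 */
		fq = netfs_folioq_alloc(GFP_KERNEL);
		if (!fq)
			return -ENOMEM;

		/* ... append folios and consume the queue here ... */

		/* The free helper pairs the kfree() with the stat decrement. */
		netfs_folioq_free(fq);
		return 0;
	}

Keeping the stat accounting inside the alloc/free pair means netfs_n_folioq
stays balanced automatically as new call sites are added, rather than relying
on each site remembering both halves.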