Index: linux/drivers/md/dm-crypt.c
===================================================================
--- linux.orig/drivers/md/dm-crypt.c
+++ linux/drivers/md/dm-crypt.c
@@ -1657,6 +1657,9 @@ static void crypt_free_buffer_pages(stru
* In order to not degrade performance with excessive locking, we try
* non-blocking allocations without a mutex first but on failure we fallback
* to blocking allocations with a mutex.
+ *
+ * In order to reduce allocation overhead, we try to allocate compound pages in
+ * the first pass. If they are not available, we fall back to the mempool.
*/
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
@@ -1664,8 +1667,8 @@ static struct bio *crypt_alloc_buffer(st
struct bio *clone;
unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
- unsigned i, len, remaining_size;
- struct page *page;
+ unsigned remaining_size;
+ unsigned order = MAX_ORDER - 1;

retry:
if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
@@ -1678,20 +1681,34 @@ retry:

remaining_size = size;

- for (i = 0; i < nr_iovecs; i++) {
- page = mempool_alloc(&cc->page_pool, gfp_mask);
- if (!page) {
+ while (remaining_size) {
+ struct page *pages;
+ unsigned size_to_add;
+ unsigned remaining_order = __fls((remaining_size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+ order = min(order, remaining_order);
+
+ while (order > 0) {
+ pages = alloc_pages(gfp_mask
+ | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | __GFP_COMP,
+ order);
+ if (likely(pages != NULL))
+ goto have_pages;
+ order--;
+ }
+
+ pages = mempool_alloc(&cc->page_pool, gfp_mask);
+ if (!pages) {
crypt_free_buffer_pages(cc, clone);
bio_put(clone);
gfp_mask |= __GFP_DIRECT_RECLAIM;
+ order = 0;
goto retry;
}

- len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
-
- bio_add_page(clone, page, len, 0);
-
- remaining_size -= len;
+have_pages:
+ size_to_add = min((unsigned)PAGE_SIZE << order, remaining_size);
+ bio_add_page(clone, pages, size_to_add, 0);
+ remaining_size -= size_to_add;
}

/* Allocate space for integrity tags */
@@ -1709,12 +1726,13 @@ retry:
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
- struct bio_vec *bv;
- struct bvec_iter_all iter_all;
+ struct folio_iter fi;

- bio_for_each_segment_all(bv, clone, iter_all) {
- BUG_ON(!bv->bv_page);
- mempool_free(bv->bv_page, &cc->page_pool);
+ bio_for_each_folio_all(fi, clone) {
+ if (folio_test_large(fi.folio))
+ folio_put(fi.folio);
+ else
+ mempool_free(&fi.folio->page, &cc->page_pool);
}
}
It was reported that allocating pages for the write buffer in dm-crypt
causes measurable overhead [1]. This patch changes dm-crypt to allocate
compound pages if they are available. If not, we fall back to the
mempool.

[1] https://listman.redhat.com/archives/dm-devel/2023-February/053284.html

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>

---
 drivers/md/dm-crypt.c | 48 +++++++++++++++++++++++++++++++++---------------
 1 file changed, 33 insertions(+), 15 deletions(-)
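To make the first-pass strategy easier to follow, here is the allocation step restated as a standalone helper. This is a minimal sketch, not code from the patch: the helper name alloc_chunk and the pool parameter are hypothetical stand-ins for dm-crypt's cc->page_pool, while the GFP flags are the ones crypt_alloc_buffer() actually passes to alloc_pages().

#include <linux/gfp.h>
#include <linux/mempool.h>
#include <linux/mm.h>

/*
 * Try progressively smaller compound pages first; fall back to a single
 * order-0 page from the mempool. On return, *order is the order of the
 * pages actually allocated (0 if they came from the mempool).
 */
static struct page *alloc_chunk(mempool_t *pool, gfp_t gfp_mask,
				unsigned int *order)
{
	struct page *pages;

	while (*order > 0) {
		/*
		 * Opportunistic attempt: stay out of emergency reserves
		 * (__GFP_NOMEMALLOC), do not retry or warn on failure, and
		 * request a compound page (__GFP_COMP) so the whole chunk
		 * can later be released with a single folio_put().
		 */
		pages = alloc_pages(gfp_mask | __GFP_NOMEMALLOC |
				    __GFP_NORETRY | __GFP_NOWARN | __GFP_COMP,
				    *order);
		if (pages)
			return pages;
		(*order)--;
	}

	/*
	 * Order 0: once __GFP_DIRECT_RECLAIM is set, mempool_alloc() waits
	 * for an element to be returned, guaranteeing forward progress.
	 */
	return mempool_alloc(pool, gfp_mask);
}

The caller clamps order with __fls() so that no attempt is larger than what remains: __fls() returns the index of the highest set bit, so a 120 KiB buffer (30 pages of 4 KiB) first tries order __fls(30) = 4, i.e. one 64 KiB compound page, and covers the shortfall on later iterations. order starts at MAX_ORDER - 1, the largest order alloc_pages() accepts, and the patch resets it to 0 before the blocking retry, presumably because high-order allocations are not worth attempting once even GFP_NOWAIT order-0 allocations have failed.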
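The release path relies on an invariant rather than explicit bookkeeping: cc->page_pool only ever holds order-0 pages, so any large folio found in the bio must have come straight from alloc_pages() and can simply be dropped. A sketch of that counterpart, again with a hypothetical pool parameter in place of cc->page_pool:

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/mm.h>

/*
 * Counterpart to the allocation sketch above: large (compound) folios
 * were taken directly from the page allocator; order-0 pages must go
 * back to the mempool they were drawn from.
 */
static void free_buffer_folios(mempool_t *pool, struct bio *clone)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, clone) {
		if (folio_test_large(fi.folio))
			folio_put(fi.folio);	/* compound page */
		else
			mempool_free(&fi.folio->page, pool);
	}
}

Iterating by folio rather than by segment also means a compound page added as one large bio_vec is visited once, instead of once per PAGE_SIZE chunk as bio_for_each_segment_all() would do.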