Message ID | 20230913165648.2570623-2-dhowells@redhat.com (mailing list archive)
---|---
State | New, archived
Series | iov_iter: Convert the iterator macros into inline funcs
On 13.09.23 18:58, David Howells wrote:
> ---
>  lib/kunit_iov_iter.c | 181 +++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 181 insertions(+)

Hi David,

#1 this is missing a SoB

#2 looks like all the KUNIT_ASSERT_NOT_NULL() macros are indented wrong

#3 a bit more of a commit message would be nice

Byte,
Johannes
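For reference on #2: kernel coding style puts these assertions at the same
single-tab indent as the calls around them. A minimal sketch of that layout,
lifted from the hunks below and shown only for illustration (not the patch as
actually posted):

	/* Allocate a page and tile it repeatedly in the buffer. */
	page = alloc_page(GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, page);
	kunit_add_action_or_reset(test, iov_kunit_free_page, page);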
diff --git a/lib/kunit_iov_iter.c b/lib/kunit_iov_iter.c
index 859b67c4d697..478fea956f58 100644
--- a/lib/kunit_iov_iter.c
+++ b/lib/kunit_iov_iter.c
@@ -756,6 +756,184 @@ static void __init iov_kunit_extract_pages_xarray(struct kunit *test)
 	KUNIT_SUCCEED();
 }
 
+static void iov_kunit_free_page(void *data)
+{
+	__free_page(data);
+}
+
+static void __init iov_kunit_benchmark_print_stats(struct kunit *test,
+						    unsigned int *samples)
+{
+	unsigned long total = 0;
+	int i;
+
+	for (i = 0; i < 16; i++) {
+		total += samples[i];
+		kunit_info(test, "run %x: %u uS\n", i, samples[i]);
+	}
+
+	kunit_info(test, "avg %lu uS\n", total / 16);
+}
+
+/*
+ * Time copying 256MiB through an ITER_BVEC.
+ */
+static void __init iov_kunit_benchmark_bvec(struct kunit *test)
+{
+	struct iov_iter iter;
+	struct bio_vec *bvec;
+	struct page *page, **pages;
+	unsigned int samples[16];
+	ktime_t a, b;
+	ssize_t copied;
+	size_t size = 256 * 1024 * 1024, npages = size / PAGE_SIZE;
+	void *scratch;
+	int i;
+
+	/* Allocate a page and tile it repeatedly in the buffer. */
+	page = alloc_page(GFP_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, page);
+	kunit_add_action_or_reset(test, iov_kunit_free_page, page);
+
+	bvec = kunit_kmalloc_array(test, npages, sizeof(bvec[0]), GFP_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, bvec);
+	for (i = 0; i < npages; i++)
+		bvec_set_page(&bvec[i], page, PAGE_SIZE, 0);
+
+	/* Create a single large buffer to copy to/from. */
+	pages = kunit_kmalloc_array(test, npages, sizeof(pages[0]), GFP_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, pages);
+	for (i = 0; i < npages; i++)
+		pages[i] = page;
+
+	scratch = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, scratch);
+	kunit_add_action_or_reset(test, iov_kunit_unmap, scratch);
+
+	/* Perform and time a bunch of copies. */
+	kunit_info(test, "Benchmarking copy_to_iter() over BVEC:\n");
+	for (i = 0; i < 16; i++) {
+		iov_iter_bvec(&iter, ITER_DEST, bvec, npages, size);
+		a = ktime_get_real();
+		copied = copy_to_iter(scratch, size, &iter);
+		b = ktime_get_real();
+		KUNIT_EXPECT_EQ(test, copied, size);
+		samples[i] = ktime_to_us(ktime_sub(b, a));
+	}
+
+	iov_kunit_benchmark_print_stats(test, samples);
+	KUNIT_SUCCEED();
+}
+
+/*
+ * Time copying 256MiB through an ITER_BVEC in 256 page chunks.
+ */
+static void __init iov_kunit_benchmark_bvec_split(struct kunit *test)
+{
+	struct iov_iter iter;
+	struct bio_vec *bvec;
+	struct page *page, **pages;
+	unsigned int samples[16];
+	ktime_t a, b;
+	ssize_t copied;
+	size_t size, npages = 64;
+	void *scratch;
+	int i, j;
+
+	/* Allocate a page and tile it repeatedly in the buffer. */
+	page = alloc_page(GFP_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, page);
+	kunit_add_action_or_reset(test, iov_kunit_free_page, page);
+
+	/* Create a single large buffer to copy to/from. */
+	pages = kunit_kmalloc_array(test, npages, sizeof(pages[0]), GFP_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, pages);
+	for (i = 0; i < npages; i++)
+		pages[i] = page;
+
+	scratch = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, scratch);
+	kunit_add_action_or_reset(test, iov_kunit_unmap, scratch);
+
+	/* Perform and time a bunch of copies. */
+	kunit_info(test, "Benchmarking copy_to_iter() over BVEC:\n");
+	for (i = 0; i < 16; i++) {
+		size = 256 * 1024 * 1024;
+		a = ktime_get_real();
+		do {
+			size_t part = min(size, npages * PAGE_SIZE);
+
+			bvec = kunit_kmalloc_array(test, npages, sizeof(bvec[0]), GFP_KERNEL);
+			KUNIT_ASSERT_NOT_NULL(test, bvec);
+			for (j = 0; j < npages; j++)
+				bvec_set_page(&bvec[j], page, PAGE_SIZE, 0);
+
+			iov_iter_bvec(&iter, ITER_DEST, bvec, npages, part);
+			copied = copy_to_iter(scratch, part, &iter);
+			KUNIT_EXPECT_EQ(test, copied, part);
+			size -= part;
+		} while (size > 0);
+		b = ktime_get_real();
+		samples[i] = ktime_to_us(ktime_sub(b, a));
+	}
+
+	iov_kunit_benchmark_print_stats(test, samples);
+	KUNIT_SUCCEED();
+}
+
+/*
+ * Time copying 256MiB through an ITER_XARRAY.
+ */
+static void __init iov_kunit_benchmark_xarray(struct kunit *test)
+{
+	struct iov_iter iter;
+	struct xarray *xarray;
+	struct page *page, **pages;
+	unsigned int samples[16];
+	ktime_t a, b;
+	ssize_t copied;
+	size_t size = 256 * 1024 * 1024, npages = size / PAGE_SIZE;
+	void *scratch;
+	int i;
+
+	/* Allocate a page and tile it repeatedly in the buffer. */
+	page = alloc_page(GFP_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, page);
+	kunit_add_action_or_reset(test, iov_kunit_free_page, page);
+
+	xarray = iov_kunit_create_xarray(test);
+
+	for (i = 0; i < npages; i++) {
+		void *x = xa_store(xarray, i, page, GFP_KERNEL);
+
+		KUNIT_ASSERT_FALSE(test, xa_is_err(x));
+	}
+
+	/* Create a single large buffer to copy to/from. */
+	pages = kunit_kmalloc_array(test, npages, sizeof(pages[0]), GFP_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, pages);
+	for (i = 0; i < npages; i++)
+		pages[i] = page;
+
+	scratch = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, scratch);
+	kunit_add_action_or_reset(test, iov_kunit_unmap, scratch);
+
+	/* Perform and time a bunch of copies. */
+	kunit_info(test, "Benchmarking copy_to_iter() over XARRAY:\n");
+	for (i = 0; i < 16; i++) {
+		iov_iter_xarray(&iter, ITER_DEST, xarray, 0, size);
+		a = ktime_get_real();
+		copied = copy_to_iter(scratch, size, &iter);
+		b = ktime_get_real();
+		KUNIT_EXPECT_EQ(test, copied, size);
+		samples[i] = ktime_to_us(ktime_sub(b, a));
+	}
+
+	iov_kunit_benchmark_print_stats(test, samples);
+	KUNIT_SUCCEED();
+}
+
 static struct kunit_case __refdata iov_kunit_cases[] = {
 	KUNIT_CASE(iov_kunit_copy_to_kvec),
 	KUNIT_CASE(iov_kunit_copy_from_kvec),
@@ -766,6 +944,9 @@ static struct kunit_case __refdata iov_kunit_cases[] = {
 	KUNIT_CASE(iov_kunit_extract_pages_kvec),
 	KUNIT_CASE(iov_kunit_extract_pages_bvec),
 	KUNIT_CASE(iov_kunit_extract_pages_xarray),
+	KUNIT_CASE(iov_kunit_benchmark_bvec),
+	KUNIT_CASE(iov_kunit_benchmark_bvec_split),
+	KUNIT_CASE(iov_kunit_benchmark_xarray),
 	{}
 };
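As a usage sketch only: once the test's Kconfig option is enabled, the new
benchmark cases can be run through the KUnit wrapper. The Kconfig symbol and
suite glob below are assumptions, not taken from this patch; check
lib/Kconfig.debug and the suite definition in lib/kunit_iov_iter.c for the
exact names.

	# Kconfig symbol and suite name are assumed, not part of the posted patch
	./tools/testing/kunit/kunit.py run \
		--kconfig_add CONFIG_IOV_ITER_KUNIT_TEST=y 'iov_iter*'

The per-run timings printed via kunit_info() may only be visible in the raw
test output (kunit.py's --raw_output option) rather than the parsed summary.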