@@ -76,6 +76,19 @@ struct workqueue_struct *xfsconvertd_workqueue;
#define xfs_buf_deallocate(bp) \
kmem_zone_free(xfs_buf_zone, (bp));
+STATIC int
+xfs_bp_is_vmapped(
+	xfs_buf_t		*bp)
+{
+	/*
+	 * Return true if the buffer is vmapped. The XBF_MAPPED flag is set
+	 * if the buffer should be mapped, but the code is clever enough to
+	 * know it doesn't have to map a single page, so the check has to be
+	 * both for XBF_MAPPED and bp->b_page_count > 1.
+	 */
+	return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
+}
+
/*
* Page Region interfaces.
*
@@ -314,7 +327,7 @@ xfs_buf_free(
if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
uint i;
- if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
+ if (xfs_bp_is_vmapped(bp))
free_address(bp->b_addr - bp->b_offset);
for (i = 0; i < bp->b_page_count; i++) {
@@ -1107,6 +1120,15 @@ xfs_buf_bio_end_io(
xfs_buf_ioerror(bp, -error);
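+	/*
+	 * On virtually indexed CPUs the vmap alias of the buffer pages may
+	 * hold stale cache lines; invalidate it so that data the device
+	 * has just written into the pages is seen through bp->b_addr.
+	 */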
+	if (!error && xfs_bp_is_vmapped(bp))
+		invalidate_kernel_vmap_range(bp->b_addr,
+				(bp->b_page_count * PAGE_SIZE) - bp->b_offset);
+
do {
struct page *page = bvec->bv_page;
@@ -1216,6 +1232,13 @@ next_chunk:
submit_io:
if (likely(bio->bi_size)) {
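+		/*
+		 * Make any data modified through the vmap alias visible to
+		 * the underlying pages before the bio is submitted.
+		 */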
+		if (xfs_bp_is_vmapped(bp))
+			flush_kernel_vmap_range(bp->b_addr,
+				(bp->b_page_count * PAGE_SIZE) - bp->b_offset);
submit_bio(rw, bio);
if (size)
goto next_chunk;