@@ -1361,7 +1361,11 @@ void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
unsigned long end_index = end >> PAGE_SHIFT;
struct page *page;
- while (index <= end_index) {
+ /* Don't miss unaligned end */
+ if (!IS_ALIGNED(end, PAGE_SIZE))
+ end_index++;
+
+ while (index < end_index) {
page = find_get_page(inode->i_mapping, index);
BUG_ON(!page); /* Pages should be in the extent_io_tree */
clear_page_dirty_for_io(page);
@@ -1376,7 +1380,11 @@ void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
unsigned long end_index = end >> PAGE_SHIFT;
struct page *page;
- while (index <= end_index) {
+ /* Don't miss unaligned end */
+ if (!IS_ALIGNED(end, PAGE_SIZE))
+ end_index++;
+
+ while (index < end_index) {
page = find_get_page(inode->i_mapping, index);
BUG_ON(!page); /* Pages should be in the extent_io_tree */
__set_page_dirty_nobuffers(page);
@@ -10710,7 +10710,11 @@ void btrfs_set_range_writeback(void *private_data, u64 start, u64 end)
unsigned long end_index = end >> PAGE_SHIFT;
struct page *page;
- while (index <= end_index) {
+ /* Don't miss unaligned end */
+ if (!IS_ALIGNED(end, PAGE_SIZE))
+ end_index++;
+
+ while (index < end_index) {
page = find_get_page(inode->i_mapping, index);
ASSERT(page); /* Pages should be in the extent_io_tree */
set_page_writeback(page);
Currently, when switching page bits in data ranges, we always handle +1 page to cover the case where the end of the data range is not page aligned. Let's handle that case in a more obvious and efficient way: check the end alignment directly and touch the +1 page only when needed. Signed-off-by: Timofey Titovets <nefelim4ag@gmail.com> --- fs/btrfs/extent_io.c | 12 ++++++++++-- fs/btrfs/inode.c | 6 +++++- 2 files changed, 15 insertions(+), 3 deletions(-) -- 2.14.1 -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html