@@ -958,11 +958,18 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
struct page *page = NULL;
shmem_getpage(inode, start - 1, &page, SGP_READ);
if (page) {
+ struct page *head = thp_head(page);
unsigned int top = PAGE_SIZE;
if (start > end) {
top = partial_end;
partial_end = 0;
}
+ if (head != page) {
+ unsigned int diff = start - 1 - head->index;
+ partial_start += diff << PAGE_SHIFT;
+ top += diff << PAGE_SHIFT;
+ page = head;
+ }
zero_user_segment(page, partial_start, top);
set_page_dirty(page);
unlock_page(page);
@@ -374,12 +374,19 @@ void truncate_inode_pages_range(struct address_space *mapping,
if (partial_start) {
struct page *page = find_lock_page(mapping, start - 1);
if (page) {
+ struct page *head = thp_head(page);
unsigned int top = PAGE_SIZE;
if (start > end) {
/* Truncation within a single page */
top = partial_end;
partial_end = 0;
}
+ if (head != page) {
+ unsigned int diff = start - 1 - head->index;
+ partial_start += diff << PAGE_SHIFT;
+ top += diff << PAGE_SHIFT;
+ page = head;
+ }
wait_on_page_writeback(page);
zero_user_segment(page, partial_start, top);
cleancache_invalidate_page(mapping, page);
Pass the head page to zero_user_segment(), not the tail page, and adjust
the byte offsets appropriately.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/shmem.c    | 7 +++++++
 mm/truncate.c | 7 +++++++
 2 files changed, 14 insertions(+)
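
For illustration, the byte-offset adjustment can be sketched as a small
standalone userspace program (not kernel code; PAGE_SHIFT, adjust_to_head()
and the example indices are assumptions made up for the demonstration):

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed 4KB base pages for the example */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/*
 * Hypothetical sketch of the adjustment above: partial_start and top arrive
 * as byte offsets within the tail page that contains index (start - 1);
 * adding that tail's distance from the head page, in bytes, turns them into
 * offsets within the whole compound page.
 */
static void adjust_to_head(unsigned long head_index, unsigned long tail_index,
			   unsigned int *partial_start, unsigned int *top)
{
	unsigned int diff = tail_index - head_index;	/* pages from head to tail */

	*partial_start += diff << PAGE_SHIFT;
	*top += diff << PAGE_SHIFT;
}

int main(void)
{
	/* Example: a 2MB THP whose head is at index 512; truncation falls in index 515. */
	unsigned int partial_start = 0x123;	/* first byte to zero, within the tail page */
	unsigned int top = PAGE_SIZE;		/* zero up to the end of that tail page */

	adjust_to_head(512, 515, &partial_start, &top);

	/* zero_user_segment(head, 12579, 16384) would now clear the intended bytes. */
	printf("zero [%u, %u) within the head page\n", partial_start, top);
	return 0;
}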