@@ -143,9 +143,9 @@ static inline void invalidate_kernel_vma
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
- void *addr = kmap_atomic(page);
+ void *addr = kmap_local_page(page);
clear_user_page(addr, vaddr, page);
- kunmap_atomic(addr);
+ kunmap_local(addr);
}
#endif

@@ -177,9 +177,9 @@ alloc_zeroed_user_highpage_movable(struc

static inline void clear_highpage(struct page *page)
{
- void *kaddr = kmap_atomic(page);
+ void *kaddr = kmap_local_page(page);
clear_page(kaddr);
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
}

#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
@@ -202,7 +202,7 @@ static inline void zero_user_segments(st
unsigned start1, unsigned end1,
unsigned start2, unsigned end2)
{
- void *kaddr = kmap_atomic(page);
+ void *kaddr = kmap_local_page(page);
unsigned int i;

BUG_ON(end1 > page_size(page) || end2 > page_size(page));
@@ -213,7 +213,7 @@ static inline void zero_user_segments(st
if (end2 > start2)
memset(kaddr + start2, 0, end2 - start2);
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
for (i = 0; i < compound_nr(page); i++)
flush_dcache_page(page + i);
}
@@ -238,11 +238,11 @@ static inline void copy_user_highpage(st
{
char *vfrom, *vto;

- vfrom = kmap_atomic(from);
- vto = kmap_atomic(to);
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
copy_user_page(vto, vfrom, vaddr, to);
- kunmap_atomic(vto);
- kunmap_atomic(vfrom);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
}

#endif
@@ -253,11 +253,11 @@ static inline void copy_highpage(struct
{
char *vfrom, *vto;

- vfrom = kmap_atomic(from);
- vto = kmap_atomic(to);
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
copy_page(vto, vfrom);
- kunmap_atomic(vto);
- kunmap_atomic(vfrom);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
}

#endif
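
A note on the unmap ordering kept by the two copy helpers above: kmap_local_page() mappings nest like a stack and are only valid in the thread that took them, so kunmap_local() must release them in the reverse order in which they were mapped, which is why vto is unmapped before vfrom. Unlike kmap_atomic(), kmap_local_page() also leaves preemption and page faults enabled. A minimal sketch of a caller following the same discipline (the helper name is made up for illustration and is not part of this patch):

#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative only: copy one (possibly highmem) page via local mappings. */
static void copy_one_page_example(struct page *dst, struct page *src)
{
	char *from, *to;

	from = kmap_local_page(src);	/* first mapping taken */
	to = kmap_local_page(dst);	/* second mapping taken */
	memcpy(to, from, PAGE_SIZE);
	kunmap_local(to);		/* release in reverse (LIFO) order */
	kunmap_local(from);
	/*
	 * Callers that relied on kmap_atomic()'s implicit
	 * pagefault_disable()/preempt_disable() must add those calls
	 * explicitly after a conversion like the one in this patch.
	 */
}
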
@@ -383,7 +383,7 @@ void zero_user_segments(struct page *pag
unsigned this_end = min_t(unsigned, end1, PAGE_SIZE);

if (end1 > start1) {
- kaddr = kmap_atomic(page + i);
+ kaddr = kmap_local_page(page + i);
memset(kaddr + start1, 0, this_end - start1);
}
end1 -= this_end;
@@ -398,7 +398,7 @@ void zero_user_segments(struct page *pag

if (end2 > start2) {
if (!kaddr)
- kaddr = kmap_atomic(page + i);
+ kaddr = kmap_local_page(page + i);
memset(kaddr + start2, 0, this_end - start2);
}
end2 -= this_end;
@@ -406,7 +406,7 @@ void zero_user_segments(struct page *pag
}

if (kaddr) {
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
flush_dcache_page(page + i);
}