@@ -912,7 +912,7 @@ static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src
if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
int err;
- VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, vma);
+ VM_BUG_ON_VMA(next - addr != HPAGE_PUD_SIZE, vma);
err = copy_huge_pud(dst_mm, src_mm,
dst_pud, src_pud, addr, vma);
if (err == -ENOMEM)
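
For readers less familiar with the VM_BUG_ON_*() family used throughout this
patch: unlike a bare BUG_ON(), the object-aware variants dump the relevant
vma, mm or page before panicking, and the whole family compiles away when
CONFIG_DEBUG_VM is not set. A rough sketch of the include/linux/mmdebug.h
definitions (paraphrased; exact wording varies between kernel versions):

#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON(cond)			BUG_ON(cond)
#define VM_BUG_ON_VMA(cond, vma)				\
	do {							\
		if (unlikely(cond)) {				\
			dump_vma(vma);				\
			BUG();					\
		}						\
	} while (0)
/* VM_BUG_ON_PAGE() and VM_BUG_ON_MM() follow the same pattern,
 * calling dump_page() and dump_mm() respectively. */
#else
#define VM_BUG_ON(cond)			BUILD_BUG_ON_INVALID(cond)
#define VM_BUG_ON_VMA(cond, vma)	VM_BUG_ON(cond)
#endif
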
@@ -1245,7 +1245,7 @@ void unmap_page_range(struct mmu_gather *tlb,
pgd_t *pgd;
unsigned long next;
- BUG_ON(addr >= end);
+ VM_BUG_ON(addr >= end);
tlb_start_vma(tlb, vma);
pgd = pgd_offset(vma->vm_mm, addr);
do {
@@ -1507,8 +1507,8 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
if (!page_count(page))
return -EINVAL;
if (!(vma->vm_flags & VM_MIXEDMAP)) {
- BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
- BUG_ON(vma->vm_flags & VM_PFNMAP);
+ VM_BUG_ON_VMA(down_read_trylock(&vma->vm_mm->mmap_sem), vma);
+ VM_BUG_ON_VMA(vma->vm_flags & VM_PFNMAP, vma);
vma->vm_flags |= VM_MIXEDMAP;
}
return insert_page(vma, addr, page, vma->vm_page_prot);
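
One behavioural note on the mmap_sem assertion converted above, assuming the
stock mmdebug.h definitions: with CONFIG_DEBUG_VM=n the macro reduces to
BUILD_BUG_ON_INVALID(), which only type-checks its argument, so the
down_read_trylock() probe is no longer evaluated at runtime on non-debug
kernels (the old BUG_ON() always attempted it). Roughly, from
include/linux/build_bug.h:

/* sizeof() forces the compiler to type-check the expression without
 * evaluating it, so side effects such as a trylock are compiled out. */
#define BUILD_BUG_ON_INVALID(e)	((void)(sizeof((__force long)(e))))
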
@@ -1679,11 +1679,12 @@ vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
* consistency in testing and feature parity among all, so we should
* try to keep these invariants in place for everybody.
*/
- BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
- BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
- (VM_PFNMAP|VM_MIXEDMAP));
- BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
- BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
+ VM_BUG_ON_VMA(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)), vma);
+ VM_BUG_ON_VMA((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
+ (VM_PFNMAP|VM_MIXEDMAP), vma);
+ VM_BUG_ON_VMA((vma->vm_flags & VM_PFNMAP) &&
+ is_cow_mapping(vma->vm_flags), vma);
+ VM_BUG_ON_VMA((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn), vma);
if (addr < vma->vm_start || addr >= vma->vm_end)
return VM_FAULT_SIGBUS;
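
The four checks above encode the invariants spelled out in the preceding
comment. As a purely illustrative sketch (this helper does not exist in the
tree), they amount to the following predicate:

/* Hypothetical: exactly one of VM_PFNMAP / VM_MIXEDMAP must be set, a
 * VM_PFNMAP mapping must not be copy-on-write, and a VM_MIXEDMAP
 * mapping must not be handed a pfn that has a struct page behind it. */
static bool vmf_insert_pfn_invariants_ok(struct vm_area_struct *vma,
					 unsigned long pfn)
{
	unsigned long flags = vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP);

	if (flags != VM_PFNMAP && flags != VM_MIXEDMAP)
		return false;	/* neither or both of the flags set */
	if ((flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags))
		return false;
	if ((flags & VM_MIXEDMAP) && pfn_valid(pfn))
		return false;
	return true;
}
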
@@ -1987,7 +1988,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
- BUG_ON(addr >= end);
+ VM_BUG_ON(addr >= end);
pfn -= addr >> PAGE_SHIFT;
pgd = pgd_offset(mm, addr);
flush_cache_range(vma, addr, end);
@@ -2075,7 +2076,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
pte_offset_map_lock(mm, pmd, addr, &ptl);
}
- BUG_ON(pmd_huge(*pmd));
+ VM_BUG_ON(pmd_huge(*pmd));
arch_enter_lazy_mmu_mode();
@@ -2102,7 +2103,7 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
unsigned long next;
int err = 0;
- BUG_ON(pud_huge(*pud));
+ VM_BUG_ON(pud_huge(*pud));
if (create) {
pmd = pmd_alloc(mm, pud, addr);
@@ -3192,7 +3192,7 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
* Similarly in do_mmap_pgoff and in do_brk.
*/
if (vma_is_anonymous(vma)) {
- BUG_ON(vma->anon_vma);
+ VM_BUG_ON_VMA(vma->anon_vma, vma);
vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
}
@@ -3550,7 +3550,7 @@ int mm_take_all_locks(struct mm_struct *mm)
struct vm_area_struct *vma;
struct anon_vma_chain *avc;
- BUG_ON(down_read_trylock(&mm->mmap_sem));
+ VM_BUG_ON_MM(down_read_trylock(&mm->mmap_sem), mm);
mutex_lock(&mm_all_locks_mutex);
@@ -3630,8 +3630,8 @@ void mm_drop_all_locks(struct mm_struct *mm)
struct vm_area_struct *vma;
struct anon_vma_chain *avc;
- BUG_ON(down_read_trylock(&mm->mmap_sem));
- BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
+ VM_BUG_ON_MM(down_read_trylock(&mm->mmap_sem), mm);
+ VM_BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (vma->anon_vma)
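
In both functions above the down_read_trylock() probe acts as a "caller must
hold mmap_sem for write" assertion: the trylock can only succeed when no
writer owns the semaphore, so succeeding here means the locking contract was
violated. Written out as a hypothetical helper (named purely for
illustration):

/* Hypothetical: assert that the caller holds mm->mmap_sem for write.
 * On a debug kernel a successful trylock dumps the mm and panics;
 * with CONFIG_DEBUG_VM=n the probe is not evaluated at all. */
static inline void assert_mmap_sem_write_locked(struct mm_struct *mm)
{
	VM_BUG_ON_MM(down_read_trylock(&mm->mmap_sem), mm);
}
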
@@ -999,7 +999,7 @@ int page_mkclean(struct page *page)
.invalid_vma = invalid_mkclean_vma,
};
- BUG_ON(!PageLocked(page));
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
if (!page_mapped(page))
return 0;
@@ -1054,7 +1054,7 @@ static void __page_set_anon_rmap(struct page *page,
{
struct anon_vma *anon_vma = vma->anon_vma;
- BUG_ON(!anon_vma);
+ VM_BUG_ON_VMA(!anon_vma, vma);
if (PageAnon(page))
return;
@@ -1965,8 +1965,8 @@ void hugepage_add_anon_rmap(struct page *page,
struct anon_vma *anon_vma = vma->anon_vma;
int first;
- BUG_ON(!PageLocked(page));
- BUG_ON(!anon_vma);
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_VMA(!anon_vma, vma);
/* address might be in next vma when migration races vma_adjust */
first = atomic_inc_and_test(compound_mapcount_ptr(page));
if (first)
@@ -1976,7 +1976,7 @@ void hugepage_add_anon_rmap(struct page *page,
void hugepage_add_new_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
- BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+ VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
atomic_set(compound_mapcount_ptr(page), 0);
if (hpage_pincount_available(page))
atomic_set(compound_pincount_ptr(page), 0);
@@ -2326,7 +2326,7 @@ static void destroy_swap_extents(struct swap_info_struct *sis)
if (parent) {
se = rb_entry(parent, struct swap_extent, rb_node);
- BUG_ON(se->start_page + se->nr_pages != start_page);
+ VM_BUG_ON(se->start_page + se->nr_pages != start_page);
if (se->start_block + se->nr_pages == start_block) {
/* Merge it */
se->nr_pages += nr_pages;
@@ -2528,7 +2528,7 @@ bool has_usable_swap(void)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- BUG_ON(!current->mm);
+ VM_BUG_ON(!current->mm);
pathname = getname(specialfile);
if (IS_ERR(pathname))
@@ -3586,7 +3586,7 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
* but it does always reset its private field.
*/
if (!page_private(head)) {
- BUG_ON(count & COUNT_CONTINUED);
+ VM_BUG_ON(count & COUNT_CONTINUED);
INIT_LIST_HEAD(&head->lru);
set_page_private(head, SWP_CONTINUED);
si->flags |= SWP_CONTINUED;
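
For context on this and the following hunks: the reference count of a swap
entry lives in a single byte of si->swap_map[]. Once that byte can no longer
hold the count, COUNT_CONTINUED is set and the overflow is kept in
"continuation" pages chained off the page that backs that part of
swap_map[]; page_private() of that head page being SWP_CONTINUED is what
marks an initialised chain. A hypothetical predicate capturing the check
used here and in swap_count_continued():

/* Hypothetical (not in the tree): does this swap offset already have a
 * continuation chain?  The head page is the vmalloc'ed page of
 * swap_map[] holding the offset's count byte; continuation pages hang
 * off head->lru. */
static bool swap_offset_has_continuation(struct swap_info_struct *si,
					 pgoff_t offset)
{
	struct page *head = vmalloc_to_page(si->swap_map + offset);

	return page_private(head) == SWP_CONTINUED;
}
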
@@ -3647,7 +3647,7 @@ static bool swap_count_continued(struct swap_info_struct *si,
head = vmalloc_to_page(si->swap_map + offset);
if (page_private(head) != SWP_CONTINUED) {
- BUG_ON(count & COUNT_CONTINUED);
+ VM_BUG_ON_PAGE(count & COUNT_CONTINUED, head);
return false; /* need to add count continuation */
}
@@ -3666,7 +3666,7 @@ static bool swap_count_continued(struct swap_info_struct *si,
while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
kunmap_atomic(map);
page = list_entry(page->lru.next, struct page, lru);
- BUG_ON(page == head);
+ VM_BUG_ON_PAGE(page == head, page);
map = kmap_atomic(page) + offset;
}
if (*map == SWAP_CONT_MAX) {
@@ -3694,14 +3694,14 @@ static bool swap_count_continued(struct swap_info_struct *si,
/*
* Think of how you subtract 1 from 1000
*/
- BUG_ON(count != COUNT_CONTINUED);
+ VM_BUG_ON(count != COUNT_CONTINUED);
while (*map == COUNT_CONTINUED) {
kunmap_atomic(map);
page = list_entry(page->lru.next, struct page, lru);
- BUG_ON(page == head);
+ VM_BUG_ON_PAGE(page == head, page);
map = kmap_atomic(page) + offset;
}
- BUG_ON(*map == 0);
+ VM_BUG_ON(*map == 0);
*map -= 1;
if (*map == 0)
count = 0;
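
The "subtract 1 from 1000" comment describes the borrow walk over the
continuation pages; the increment hunk just above does the matching carry
walk. Each continuation byte behaves like a saturating digit: incrementing
skips the digits that are already full and bumps the first one with room,
decrementing skips the digits that are already empty and drops the first
non-zero one. A simplified, self-contained userspace analogue of that walk
(an array of bytes instead of per-page maps, a made-up DIGIT_MAX, and
without the COUNT_CONTINUED flag bit the kernel keeps in each byte):

#include <assert.h>
#include <stdbool.h>

#define DIGIT_MAX 0x7f		/* stand-in for SWAP_CONT_MAX */

static bool chain_inc(unsigned char *d, int n)
{
	int i = 0;

	while (i < n && d[i] == DIGIT_MAX)	/* "add 1 to 999" */
		i++;
	if (i == n)
		return false;	/* caller must grow the chain first */
	d[i]++;
	return true;
}

static void chain_dec(unsigned char *d, int n)
{
	int i = 0;

	while (i < n && d[i] == 0)		/* "subtract 1 from 1000" */
		i++;
	assert(i < n);		/* the count must not already be zero */
	d[i]--;
}
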
@@ -231,7 +231,7 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
{
int id = shrinker->id;
- BUG_ON(id < 0);
+ VM_BUG_ON(id < 0);
down_write(&shrinker_rwsem);
idr_remove(&shrinker_idr, id);
@@ -854,8 +854,8 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
unsigned long flags;
int refcount;
- BUG_ON(!PageLocked(page));
- BUG_ON(mapping != page_mapping(page));
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_PAGE(mapping != page_mapping(page), page);
xa_lock_irqsave(&mapping->i_pages, flags);
/*