@@ -50,7 +50,7 @@ static inline unsigned int pe_order(enum page_entry_size pe_size)
#define PG_PMD_NR (PMD_SIZE >> PAGE_SHIFT)
/* The order of a PMD entry */
-#define PMD_ORDER (PMD_SHIFT - PAGE_SHIFT)
+#define PMD_PAGE_ORDER (PMD_SHIFT - PAGE_SHIFT)
static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
@@ -98,7 +98,7 @@ static bool dax_is_locked(void *entry)
static unsigned int dax_entry_order(void *entry)
{
if (xa_to_value(entry) & DAX_PMD)
- return PMD_ORDER;
+ return PMD_PAGE_ORDER;
return 0;
}
@@ -1456,7 +1456,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
{
struct vm_area_struct *vma = vmf->vma;
struct address_space *mapping = vma->vm_file->f_mapping;
- XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
+ XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_PAGE_ORDER);
unsigned long pmd_addr = vmf->address & PMD_MASK;
bool write = vmf->flags & FAULT_FLAG_WRITE;
bool sync;
@@ -1515,7 +1515,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
* entry is already in the array, for instance), it will return
* VM_FAULT_FALLBACK.
*/
- entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
+ entry = grab_mapping_entry(&xas, mapping, PMD_PAGE_ORDER);
if (xa_is_internal(entry)) {
result = xa_to_internal(entry);
goto fallback;
@@ -1681,7 +1681,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
if (order == 0)
ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
#ifdef CONFIG_FS_DAX_PMD
- else if (order == PMD_ORDER)
+ else if (order == PMD_PAGE_ORDER)
ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
#endif
else
@@ -28,6 +28,9 @@
#define USER_PGTABLES_CEILING 0UL
#endif
+/* Order of a second level (PMD) leaf page: log2 of the base pages it maps */
+#define PMD_PAGE_ORDER (PMD_SHIFT - PAGE_SHIFT)
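+/*
+ * Worked example (assuming a configuration with 4K base pages and 2M
+ * PMD leaf pages, e.g. x86-64 defaults): PMD_SHIFT = 21 and
+ * PAGE_SHIFT = 12, so PMD_PAGE_ORDER = 9 and a PMD leaf page maps
+ * 2^9 = 512 base pages.
+ */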
+
/*
* A page table page can be thought of an array like this: pXd_t[PTRS_PER_PxD]
*