@@ -50,7 +50,8 @@ config FS_DAX_PMD
 	bool
 	default FS_DAX
 	depends on FS_DAX
-	depends on BROKEN
+	depends on ZONE_DEVICE
+	depends on TRANSPARENT_HUGEPAGE
 
 endif # BLOCK
@@ -592,7 +592,7 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 	sector_t block;
 	int result = 0;
 
-	/* dax pmd mappings are broken wrt gup and fork */
+	/* dax pmd mappings require pfn_t_devmap() */
 	if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
 		return VM_FAULT_FALLBACK;
 
@@ -717,11 +717,7 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 			goto fallback;
 		}
 
-		/*
-		 * TODO: teach vmf_insert_pfn_pmd() to support
-		 * 'pte_special' for pmds
-		 */
-		if (pfn_t_has_page(dax.pfn)) {
+		if (!pfn_t_devmap(dax.pfn)) {
 			dax_unmap_atomic(bdev, &dax);
 			dax_pmd_dbg(&bh, address, "pfn not in memmap");
 			goto fallback;
Now that the get_user_pages() path knows how to handle dax-pmd mappings,
remove the protections that disabled dax-pmd support.

Tests available from github.com/pmem/ndctl:

    make TESTS="lib/test-dax.sh lib/test-mmap.sh" check

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 fs/Kconfig |    3 ++-
 fs/dax.c   |    8 ++------
 2 files changed, 4 insertions(+), 7 deletions(-)
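
For anyone without the ndctl harness handy, below is a hypothetical,
minimal userspace sketch (not part of this patch) of the
fork-plus-2MiB-shared-mapping case that lib/test-mmap.sh exercises.
The path /mnt/dax/testfile is an assumption; any file on a filesystem
mounted with -o dax on pmem will do.

/*
 * Hypothetical sketch: map 2 MiB of a file on a DAX filesystem
 * MAP_SHARED, dirty it, fork(), and verify the child sees the data
 * through the inherited mapping.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

#define SZ_2M (2UL << 20)

int main(void)
{
	int fd = open("/mnt/dax/testfile", O_CREAT | O_RDWR, 0644);

	if (fd < 0 || ftruncate(fd, SZ_2M) < 0) {
		perror("open/ftruncate");
		return EXIT_FAILURE;
	}

	/* a 2 MiB-sized, 2 MiB-aligned extent is what lets DAX use a pmd */
	unsigned char *buf = mmap(NULL, SZ_2M, PROT_READ | PROT_WRITE,
				  MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	memset(buf, 0xab, SZ_2M);	/* fault the mapping in the parent */

	pid_t pid = fork();
	if (pid == 0) {
		/* child: the inherited dax mapping must still read back */
		for (size_t i = 0; i < SZ_2M; i++)
			if (buf[i] != 0xab)
				_exit(1);
		_exit(0);
	}

	int status;
	waitpid(pid, &status, 0);
	printf("child %s\n", WEXITSTATUS(status) == 0 ? "ok" : "mismatch");

	munmap(buf, SZ_2M);
	close(fd);
	return WEXITSTATUS(status) == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
}

Note that this only actually lands on a pmd mapping when the virtual
address mmap() returns happens to be 2 MiB aligned; the ndctl tests
arrange that alignment explicitly, this sketch does not.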