@@ -66,20 +66,6 @@ void dax_remove_host(struct gendisk *disk)
}
EXPORT_SYMBOL_GPL(dax_remove_host);
-int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
- pgoff_t *pgoff)
-{
- sector_t start_sect = bdev ? get_start_sect(bdev) : 0;
- phys_addr_t phys_off = (start_sect + sector) * 512;
-
- if (pgoff)
- *pgoff = PHYS_PFN(phys_off);
- if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
- return -EINVAL;
- return 0;
-}
-EXPORT_SYMBOL(bdev_dax_pgoff);
-
/**
* fs_dax_get_by_bdev() - temporary lookup mechanism for filesystem-dax
* @bdev: block device to find a dax_device for
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -709,23 +709,22 @@ int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
return __dax_invalidate_entry(mapping, index, false);
}
-static sector_t dax_iomap_sector(const struct iomap *iomap, loff_t pos)
+static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos)
{
- return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
+ phys_addr_t paddr = iomap->addr + (pos & PAGE_MASK) - iomap->offset;
+
+ if (iomap->bdev)
+ paddr += (get_start_sect(iomap->bdev) << SECTOR_SHIFT);
+ return PHYS_PFN(paddr);
}
static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
{
- sector_t sector = dax_iomap_sector(&iter->iomap, iter->pos);
+ pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos);
void *vto, *kaddr;
- pgoff_t pgoff;
long rc;
int id;
- rc = bdev_dax_pgoff(iter->iomap.bdev, sector, PAGE_SIZE, &pgoff);
- if (rc)
- return rc;
-
id = dax_read_lock();
rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, &kaddr, NULL);
if (rc < 0) {
@@ -1013,14 +1012,10 @@ EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
static int dax_iomap_pfn(const struct iomap *iomap, loff_t pos, size_t size,
pfn_t *pfnp)
{
- const sector_t sector = dax_iomap_sector(iomap, pos);
- pgoff_t pgoff;
+ pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
int id, rc;
long length;
- rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
- if (rc)
- return rc;
id = dax_read_lock();
length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
NULL, pfnp);
@@ -1129,7 +1124,7 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
{
sector_t sector = iomap_sector(iomap, pos & PAGE_MASK);
- pgoff_t pgoff;
+ pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
long rc, id;
void *kaddr;
bool page_aligned = false;
@@ -1140,10 +1135,6 @@ s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
(size == PAGE_SIZE))
page_aligned = true;
- rc = bdev_dax_pgoff(iomap->bdev, sector, PAGE_SIZE, &pgoff);
- if (rc)
- return rc;
-
id = dax_read_lock();
 	if (page_aligned)
 		rc = dax_zero_page_range(iomap->dax_dev, pgoff,
@@ -1169,7 +1160,6 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
const struct iomap *iomap = &iomi->iomap;
loff_t length = iomap_length(iomi);
loff_t pos = iomi->pos;
- struct block_device *bdev = iomap->bdev;
struct dax_device *dax_dev = iomap->dax_dev;
loff_t end = pos + length, done = 0;
ssize_t ret = 0;
@@ -1203,9 +1193,8 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
while (pos < end) {
unsigned offset = pos & (PAGE_SIZE - 1);
const size_t size = ALIGN(length + offset, PAGE_SIZE);
- const sector_t sector = dax_iomap_sector(iomap, pos);
+ pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
ssize_t map_len;
- pgoff_t pgoff;
void *kaddr;
if (fatal_signal_pending(current)) {
@@ -1213,10 +1202,6 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
break;
}
- ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
- if (ret)
- break;
-
map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
&kaddr, NULL);
if (map_len < 0) {
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -107,7 +107,6 @@ static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
#endif
struct writeback_control;
-int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
#if IS_ENABLED(CONFIG_FS_DAX)
int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk);
void dax_remove_host(struct gendisk *disk);
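
Not part of the patch: a standalone, user-space sketch of the arithmetic the new
dax_iomap_pgoff() helper folds together, for anyone reviewing the offset math.
The struct and constants below are illustrative stand-ins, not kernel
definitions; the real helper operates on struct iomap and uses the kernel's
get_start_sect() and PHYS_PFN().

#include <stdint.h>
#include <stdio.h>

#define EX_SECTOR_SHIFT	9
#define EX_PAGE_SHIFT	12
#define EX_PAGE_SIZE	(1ULL << EX_PAGE_SHIFT)
#define EX_PAGE_MASK	(~(EX_PAGE_SIZE - 1))

/* Illustrative stand-in for the few struct iomap fields the helper reads. */
struct ex_iomap {
	uint64_t addr;		/* disk address of the mapped extent, in bytes */
	uint64_t offset;	/* file offset the extent starts at, in bytes */
	uint64_t start_sect;	/* partition start (get_start_sect()), in 512-byte sectors */
};

/* Mirrors the new helper: byte address on the DAX device, expressed in pages. */
static uint64_t ex_dax_iomap_pgoff(const struct ex_iomap *iomap, uint64_t pos)
{
	uint64_t paddr = iomap->addr + (pos & EX_PAGE_MASK) - iomap->offset;

	/* Fold in the partition offset that bdev_dax_pgoff() used to add. */
	paddr += iomap->start_sect << EX_SECTOR_SHIFT;
	return paddr >> EX_PAGE_SHIFT;	/* PHYS_PFN() */
}

int main(void)
{
	struct ex_iomap iomap = {
		.addr = 8 * EX_PAGE_SIZE,	/* extent starts 32KiB into the partition */
		.offset = 0,
		.start_sect = 2048,		/* partition starts 1MiB into the device */
	};

	/* pos 5 pages into the extent: 256 (partition) + 8 (extent) + 5 = pgoff 269 */
	printf("pgoff = %llu\n",
	       (unsigned long long)ex_dax_iomap_pgoff(&iomap, 5 * EX_PAGE_SIZE + 123));
	return 0;
}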