@@ -168,15 +168,23 @@ static blk_status_t pmem_do_read(struct pmem_device *pmem,

static blk_status_t pmem_do_write(struct pmem_device *pmem,
struct page *page, unsigned int page_off,
- sector_t sector, unsigned int len)
+ u64 pmem_off, unsigned int len)
{
blk_status_t rc = BLK_STS_OK;
bool bad_pmem = false;
- phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
- void *pmem_addr = pmem->virt_addr + pmem_off;
-
- if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
- bad_pmem = true;
+ phys_addr_t pmem_real_off = pmem_off + pmem->data_offset;
+ void *pmem_addr = pmem->virt_addr + pmem_real_off;
+ sector_t sector_start, sector_end;
+ unsigned nr_sectors;
+
+ sector_start = DIV_ROUND_UP(pmem_off, SECTOR_SIZE);
+ sector_end = (pmem_off + len) >> SECTOR_SHIFT;
+ if (sector_end > sector_start) {
+ nr_sectors = sector_end - sector_start;
+ if (is_bad_pmem(&pmem->bb, sector_start,
+ nr_sectors << SECTOR_SHIFT))
+ bad_pmem = true;
+ }

/*
* Note that we write the data both before and after
@@ -195,7 +203,7 @@ static blk_status_t pmem_do_write(struct pmem_device *pmem,
flush_dcache_page(page);
write_pmem(pmem_addr, page, page_off, len);
if (unlikely(bad_pmem)) {
- rc = pmem_clear_poison(pmem, pmem_off, len);
+ rc = pmem_clear_poison(pmem, pmem_real_off, len);
write_pmem(pmem_addr, page, page_off, len);
}

@@ -220,7 +228,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
bio_for_each_segment(bvec, bio, iter) {
if (op_is_write(bio_op(bio)))
rc = pmem_do_write(pmem, bvec.bv_page, bvec.bv_offset,
- iter.bi_sector, bvec.bv_len);
+ iter.bi_sector << SECTOR_SHIFT, bvec.bv_len);
else
rc = pmem_do_read(pmem, bvec.bv_page, bvec.bv_offset,
iter.bi_sector, bvec.bv_len);
@@ -249,7 +257,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
blk_status_t rc;

if (op_is_write(op))
- rc = pmem_do_write(pmem, page, 0, sector,
+ rc = pmem_do_write(pmem, page, 0, sector << SECTOR_SHIFT,
hpage_nr_pages(page) * PAGE_SIZE);
else
rc = pmem_do_read(pmem, page, 0, sector,
Currently pmem_do_write() is written with the assumption that all I/O is
sector aligned. Soon I want to use this function in zero_page_range(),
where the range passed in does not have to be sector aligned. Modify this
function so that it can deal with an arbitrary range, specified by
pmem_off and len.

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
---
 drivers/nvdimm/pmem.c | 26 +++++++++++++++++---------
 1 file changed, 17 insertions(+), 9 deletions(-)
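
As an aside (not part of the patch above): below is a minimal userspace sketch
of the sector_start/sector_end arithmetic the patch introduces, showing which
complete sectors of an arbitrary (pmem_off, len) range end up being checked
against the badblocks list. SECTOR_SHIFT, SECTOR_SIZE and DIV_ROUND_UP are
redefined locally with the kernel's usual values (9, 512); the badblocks
lookup itself is not modeled.

/*
 * Userspace sketch only: mirrors the sector_start/sector_end arithmetic
 * used in pmem_do_write() above. Local stand-ins for the kernel macros;
 * is_bad_pmem()/badblocks_check() are not modeled.
 */
#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT	9
#define SECTOR_SIZE	(1u << SECTOR_SHIFT)
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static void show_full_sectors(uint64_t pmem_off, unsigned int len)
{
	/* First sector boundary at or after the start of the range. */
	uint64_t sector_start = DIV_ROUND_UP(pmem_off, SECTOR_SIZE);
	/* Last sector boundary at or before the end of the range. */
	uint64_t sector_end = (pmem_off + len) >> SECTOR_SHIFT;

	if (sector_end > sector_start)
		printf("off=%llu len=%u -> check sectors [%llu, %llu), %llu bytes\n",
		       (unsigned long long)pmem_off, len,
		       (unsigned long long)sector_start,
		       (unsigned long long)sector_end,
		       (unsigned long long)((sector_end - sector_start) << SECTOR_SHIFT));
	else
		printf("off=%llu len=%u -> no complete sector covered, skip the check\n",
		       (unsigned long long)pmem_off, len);
}

int main(void)
{
	show_full_sectors(0, 4096);	/* aligned: sectors [0, 8) */
	show_full_sectors(100, 1000);	/* unaligned: only sector [1, 2) is fully covered */
	show_full_sectors(100, 200);	/* sub-sector write: nothing to check */
	return 0;
}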