Message ID | 20180831133019.27579-3-pagupta@redhat.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | kvm "fake DAX" device | expand |
Hi Pankaj,
Thank you for the patch! Perhaps something to improve:
[auto build test WARNING on linux-nvdimm/libnvdimm-for-next]
[also build test WARNING on v4.19-rc2 next-20180831]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]
url: https://github.com/0day-ci/linux/commits/Pankaj-Gupta/kvm-fake-DAX-device/20180903-160032
base: https://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm.git libnvdimm-for-next
reproduce:
# apt-get install sparse
make ARCH=x86_64 allmodconfig
make C=1 CF=-D__CHECK_ENDIAN__
:::::: branch date: 7 hours ago
:::::: commit date: 7 hours ago
drivers/nvdimm/pmem.c:116:25: sparse: expression using sizeof(void)
drivers/nvdimm/pmem.c:135:25: sparse: expression using sizeof(void)
>> drivers/nvdimm/pmem.c:204:32: sparse: incorrect type in assignment (different base types) @@ expected restricted blk_status_t [usertype] bi_status @@ got int @@
drivers/nvdimm/pmem.c:204:32: expected restricted blk_status_t [usertype] bi_status
drivers/nvdimm/pmem.c:204:32: got int
drivers/nvdimm/pmem.c:208:9: sparse: expression using sizeof(void)
drivers/nvdimm/pmem.c:208:9: sparse: expression using sizeof(void)
include/linux/bvec.h:82:37: sparse: expression using sizeof(void)
include/linux/bvec.h:82:37: sparse: expression using sizeof(void)
include/linux/bvec.h:83:32: sparse: expression using sizeof(void)
include/linux/bvec.h:83:32: sparse: expression using sizeof(void)
drivers/nvdimm/pmem.c:220:32: sparse: incorrect type in assignment (different base types) @@ expected restricted blk_status_t [usertype] bi_status @@ got int @@
drivers/nvdimm/pmem.c:220:32: expected restricted blk_status_t [usertype] bi_status
drivers/nvdimm/pmem.c:220:32: got int
# https://github.com/0day-ci/linux/commit/69b95edd2a1f4676361988fa36866b59427e2cfa
git remote add linux-review https://github.com/0day-ci/linux
git remote update linux-review
git checkout 69b95edd2a1f4676361988fa36866b59427e2cfa
vim +204 drivers/nvdimm/pmem.c
59e647398 drivers/nvdimm/pmem.c Dan Williams 2016-03-08 107
bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 108 static void write_pmem(void *pmem_addr, struct page *page,
bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 109 unsigned int off, unsigned int len)
bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 110 {
98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 111 unsigned int chunk;
98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 112 void *mem;
bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 113
98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 114 while (len) {
98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 115 mem = kmap_atomic(page);
98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 @116 chunk = min_t(unsigned int, len, PAGE_SIZE);
98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 117 memcpy_flushcache(pmem_addr, mem + off, chunk);
bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 118 kunmap_atomic(mem);
98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 119 len -= chunk;
98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 120 off = 0;
98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 121 page++;
98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 122 pmem_addr += PAGE_SIZE;
98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 123 }
bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 124 }
bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 125
4e4cbee93 drivers/nvdimm/pmem.c Christoph Hellwig 2017-06-03 126 static blk_status_t read_pmem(struct page *page, unsigned int off,
bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 127 void *pmem_addr, unsigned int len)
bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 128 {
98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 129 unsigned int chunk;
60622d682 drivers/nvdimm/pmem.c Dan Williams 2018-05-03 130 unsigned long rem;
98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 131 void *mem;
bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 132
98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 133 while (len) {
98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 134 mem = kmap_atomic(page);
98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 135 chunk = min_t(unsigned int, len, PAGE_SIZE);
60622d682 drivers/nvdimm/pmem.c Dan Williams 2018-05-03 136 rem = memcpy_mcsafe(mem + off, pmem_addr, chunk);
bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 137 kunmap_atomic(mem);
60622d682 drivers/nvdimm/pmem.c Dan Williams 2018-05-03 138 if (rem)
4e4cbee93 drivers/nvdimm/pmem.c Christoph Hellwig 2017-06-03 139 return BLK_STS_IOERR;
98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 140 len -= chunk;
98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 141 off = 0;
98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 142 page++;
98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 143 pmem_addr += PAGE_SIZE;
98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 144 }
4e4cbee93 drivers/nvdimm/pmem.c Christoph Hellwig 2017-06-03 145 return BLK_STS_OK;
bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 146 }
bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 147
4e4cbee93 drivers/nvdimm/pmem.c Christoph Hellwig 2017-06-03 148 static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
3f289dcb4 drivers/nvdimm/pmem.c Tejun Heo 2018-07-18 149 unsigned int len, unsigned int off, unsigned int op,
9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 150 sector_t sector)
9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 151 {
4e4cbee93 drivers/nvdimm/pmem.c Christoph Hellwig 2017-06-03 152 blk_status_t rc = BLK_STS_OK;
59e647398 drivers/nvdimm/pmem.c Dan Williams 2016-03-08 153 bool bad_pmem = false;
32ab0a3f5 drivers/nvdimm/pmem.c Dan Williams 2015-08-01 154 phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
7a9eb2066 drivers/nvdimm/pmem.c Dan Williams 2016-06-03 155 void *pmem_addr = pmem->virt_addr + pmem_off;
9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 156
e10624f8c drivers/nvdimm/pmem.c Dan Williams 2016-01-06 157 if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
59e647398 drivers/nvdimm/pmem.c Dan Williams 2016-03-08 158 bad_pmem = true;
59e647398 drivers/nvdimm/pmem.c Dan Williams 2016-03-08 159
3f289dcb4 drivers/nvdimm/pmem.c Tejun Heo 2018-07-18 160 if (!op_is_write(op)) {
59e647398 drivers/nvdimm/pmem.c Dan Williams 2016-03-08 161 if (unlikely(bad_pmem))
4e4cbee93 drivers/nvdimm/pmem.c Christoph Hellwig 2017-06-03 162 rc = BLK_STS_IOERR;
b5ebc8ec6 drivers/nvdimm/pmem.c Dan Williams 2016-03-06 163 else {
bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 164 rc = read_pmem(page, off, pmem_addr, len);
9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 165 flush_dcache_page(page);
b5ebc8ec6 drivers/nvdimm/pmem.c Dan Williams 2016-03-06 166 }
9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 167 } else {
0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 168 /*
0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 169 * Note that we write the data both before and after
0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 170 * clearing poison. The write before clear poison
0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 171 * handles situations where the latest written data is
0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 172 * preserved and the clear poison operation simply marks
0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 173 * the address range as valid without changing the data.
0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 174 * In this case application software can assume that an
0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 175 * interrupted write will either return the new good
0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 176 * data or an error.
0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 177 *
0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 178 * However, if pmem_clear_poison() leaves the data in an
0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 179 * indeterminate state we need to perform the write
0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 180 * after clear poison.
0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 181 */
9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 182 flush_dcache_page(page);
bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 183 write_pmem(pmem_addr, page, off, len);
59e647398 drivers/nvdimm/pmem.c Dan Williams 2016-03-08 184 if (unlikely(bad_pmem)) {
3115bb02b drivers/nvdimm/pmem.c Toshi Kani 2016-10-13 185 rc = pmem_clear_poison(pmem, pmem_off, len);
bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 186 write_pmem(pmem_addr, page, off, len);
59e647398 drivers/nvdimm/pmem.c Dan Williams 2016-03-08 187 }
9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 188 }
9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 189
b5ebc8ec6 drivers/nvdimm/pmem.c Dan Williams 2016-03-06 190 return rc;
9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 191 }
9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 192
dece16353 drivers/nvdimm/pmem.c Jens Axboe 2015-11-05 193 static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 194 {
4e4cbee93 drivers/nvdimm/pmem.c Christoph Hellwig 2017-06-03 195 blk_status_t rc = 0;
f0dc089ce drivers/nvdimm/pmem.c Dan Williams 2015-05-16 196 bool do_acct;
f0dc089ce drivers/nvdimm/pmem.c Dan Williams 2015-05-16 197 unsigned long start;
9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 198 struct bio_vec bvec;
9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 199 struct bvec_iter iter;
bd842b8ca drivers/nvdimm/pmem.c Dan Williams 2016-03-18 200 struct pmem_device *pmem = q->queuedata;
7e267a8c7 drivers/nvdimm/pmem.c Dan Williams 2016-06-01 201 struct nd_region *nd_region = to_region(pmem);
7e267a8c7 drivers/nvdimm/pmem.c Dan Williams 2016-06-01 202
d2d6364dc drivers/nvdimm/pmem.c Ross Zwisler 2018-06-06 203 if (bio->bi_opf & REQ_PREFLUSH)
69b95edd2 drivers/nvdimm/pmem.c Pankaj Gupta 2018-08-31 @204 bio->bi_status = nd_region->flush(nd_region);
69b95edd2 drivers/nvdimm/pmem.c Pankaj Gupta 2018-08-31 205
9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 206
f0dc089ce drivers/nvdimm/pmem.c Dan Williams 2015-05-16 207 do_acct = nd_iostat_start(bio, &start);
e10624f8c drivers/nvdimm/pmem.c Dan Williams 2016-01-06 208 bio_for_each_segment(bvec, bio, iter) {
e10624f8c drivers/nvdimm/pmem.c Dan Williams 2016-01-06 209 rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
3f289dcb4 drivers/nvdimm/pmem.c Tejun Heo 2018-07-18 210 bvec.bv_offset, bio_op(bio), iter.bi_sector);
e10624f8c drivers/nvdimm/pmem.c Dan Williams 2016-01-06 211 if (rc) {
4e4cbee93 drivers/nvdimm/pmem.c Christoph Hellwig 2017-06-03 212 bio->bi_status = rc;
e10624f8c drivers/nvdimm/pmem.c Dan Williams 2016-01-06 213 break;
e10624f8c drivers/nvdimm/pmem.c Dan Williams 2016-01-06 214 }
e10624f8c drivers/nvdimm/pmem.c Dan Williams 2016-01-06 215 }
f0dc089ce drivers/nvdimm/pmem.c Dan Williams 2015-05-16 216 if (do_acct)
f0dc089ce drivers/nvdimm/pmem.c Dan Williams 2015-05-16 217 nd_iostat_end(bio, start);
61031952f drivers/nvdimm/pmem.c Ross Zwisler 2015-06-25 218
1eff9d322 drivers/nvdimm/pmem.c Jens Axboe 2016-08-05 219 if (bio->bi_opf & REQ_FUA)
69b95edd2 drivers/nvdimm/pmem.c Pankaj Gupta 2018-08-31 220 bio->bi_status = nd_region->flush(nd_region);
61031952f drivers/nvdimm/pmem.c Ross Zwisler 2015-06-25 221
4246a0b63 drivers/nvdimm/pmem.c Christoph Hellwig 2015-07-20 222 bio_endio(bio);
dece16353 drivers/nvdimm/pmem.c Jens Axboe 2015-11-05 223 return BLK_QC_T_NONE;
9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 224 }
9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 225
---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation
Hello, Thanks for the report. > > Hi Pankaj, > > Thank you for the patch! Perhaps something to improve: > > [auto build test WARNING on linux-nvdimm/libnvdimm-for-next] > [also build test WARNING on v4.19-rc2 next-20180831] > [if your patch is applied to the wrong git tree, please drop us a note to > help improve the system] > > url: > https://github.com/0day-ci/linux/commits/Pankaj-Gupta/kvm-fake-DAX-device/20180903-160032 > base: https://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm.git > libnvdimm-for-next > reproduce: > # apt-get install sparse > make ARCH=x86_64 allmodconfig > make C=1 CF=-D__CHECK_ENDIAN__ > :::::: branch date: 7 hours ago > :::::: commit date: 7 hours ago > > drivers/nvdimm/pmem.c:116:25: sparse: expression using sizeof(void) > drivers/nvdimm/pmem.c:135:25: sparse: expression using sizeof(void) > >> drivers/nvdimm/pmem.c:204:32: sparse: incorrect type in assignment > >> (different base types) @@ expected restricted blk_status_t [usertype] > >> bi_status @@ got e] bi_status @@ I will fix this in v2. I will wait for any review comments and address them in v2. 
Thanks, Pankaj > drivers/nvdimm/pmem.c:204:32: expected restricted blk_status_t > [usertype] bi_status > drivers/nvdimm/pmem.c:204:32: got int > drivers/nvdimm/pmem.c:208:9: sparse: expression using sizeof(void) > drivers/nvdimm/pmem.c:208:9: sparse: expression using sizeof(void) > include/linux/bvec.h:82:37: sparse: expression using sizeof(void) > include/linux/bvec.h:82:37: sparse: expression using sizeof(void) > include/linux/bvec.h:83:32: sparse: expression using sizeof(void) > include/linux/bvec.h:83:32: sparse: expression using sizeof(void) > drivers/nvdimm/pmem.c:220:32: sparse: incorrect type in assignment > (different base types) @@ expected restricted blk_status_t [usertype] > bi_status @@ got e] bi_status @@ > drivers/nvdimm/pmem.c:220:32: expected restricted blk_status_t > [usertype] bi_status > drivers/nvdimm/pmem.c:220:32: got int > > # > https://github.com/0day-ci/linux/commit/69b95edd2a1f4676361988fa36866b59427e2cfa > git remote add linux-review https://github.com/0day-ci/linux > git remote update linux-review > git checkout 69b95edd2a1f4676361988fa36866b59427e2cfa > vim +204 drivers/nvdimm/pmem.c > > 59e647398 drivers/nvdimm/pmem.c Dan Williams 2016-03-08 107 > bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 108 static > void write_pmem(void *pmem_addr, struct page *page, > bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 109 unsigned > int off, unsigned int len) > bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 110 { > 98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 111 unsigned > int chunk; > 98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 112 void > *mem; > bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 113 > 98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 114 while > (len) { > 98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 115 mem = > kmap_atomic(page); > 98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 @116 chunk = > min_t(unsigned int, len, PAGE_SIZE); > 98cc093cb 
drivers/nvdimm/pmem.c Huang Ying 2017-09-06 117 > memcpy_flushcache(pmem_addr, mem + off, chunk); > bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 118 > kunmap_atomic(mem); > 98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 119 len -= > chunk; > 98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 120 off = 0; > 98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 121 page++; > 98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 122 > pmem_addr += PAGE_SIZE; > 98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 123 } > bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 124 } > bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 125 > 4e4cbee93 drivers/nvdimm/pmem.c Christoph Hellwig 2017-06-03 126 static > blk_status_t read_pmem(struct page *page, unsigned int off, > bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 127 void > *pmem_addr, unsigned int len) > bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 128 { > 98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 129 unsigned > int chunk; > 60622d682 drivers/nvdimm/pmem.c Dan Williams 2018-05-03 130 unsigned > long rem; > 98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 131 void > *mem; > bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 132 > 98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 133 while > (len) { > 98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 134 mem = > kmap_atomic(page); > 98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 135 chunk = > min_t(unsigned int, len, PAGE_SIZE); > 60622d682 drivers/nvdimm/pmem.c Dan Williams 2018-05-03 136 rem = > memcpy_mcsafe(mem + off, pmem_addr, chunk); > bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 137 > kunmap_atomic(mem); > 60622d682 drivers/nvdimm/pmem.c Dan Williams 2018-05-03 138 if (rem) > 4e4cbee93 drivers/nvdimm/pmem.c Christoph Hellwig 2017-06-03 139 return > BLK_STS_IOERR; > 98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 140 len -= > chunk; > 98cc093cb 
drivers/nvdimm/pmem.c Huang Ying 2017-09-06 141 off = 0; > 98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 142 page++; > 98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 143 > pmem_addr += PAGE_SIZE; > 98cc093cb drivers/nvdimm/pmem.c Huang Ying 2017-09-06 144 } > 4e4cbee93 drivers/nvdimm/pmem.c Christoph Hellwig 2017-06-03 145 return > BLK_STS_OK; > bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 146 } > bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 147 > 4e4cbee93 drivers/nvdimm/pmem.c Christoph Hellwig 2017-06-03 148 static > blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page, > 3f289dcb4 drivers/nvdimm/pmem.c Tejun Heo 2018-07-18 149 > unsigned int len, unsigned int off, unsigned int op, > 9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 150 > sector_t sector) > 9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 151 { > 4e4cbee93 drivers/nvdimm/pmem.c Christoph Hellwig 2017-06-03 152 > blk_status_t rc = BLK_STS_OK; > 59e647398 drivers/nvdimm/pmem.c Dan Williams 2016-03-08 153 bool > bad_pmem = false; > 32ab0a3f5 drivers/nvdimm/pmem.c Dan Williams 2015-08-01 154 > phys_addr_t pmem_off = sector * 512 + pmem->data_offset; > 7a9eb2066 drivers/nvdimm/pmem.c Dan Williams 2016-06-03 155 void > *pmem_addr = pmem->virt_addr + pmem_off; > 9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 156 > e10624f8c drivers/nvdimm/pmem.c Dan Williams 2016-01-06 157 if > (unlikely(is_bad_pmem(&pmem->bb, sector, len))) > 59e647398 drivers/nvdimm/pmem.c Dan Williams 2016-03-08 158 bad_pmem > = true; > 59e647398 drivers/nvdimm/pmem.c Dan Williams 2016-03-08 159 > 3f289dcb4 drivers/nvdimm/pmem.c Tejun Heo 2018-07-18 160 if > (!op_is_write(op)) { > 59e647398 drivers/nvdimm/pmem.c Dan Williams 2016-03-08 161 if > (unlikely(bad_pmem)) > 4e4cbee93 drivers/nvdimm/pmem.c Christoph Hellwig 2017-06-03 162 rc = > BLK_STS_IOERR; > b5ebc8ec6 drivers/nvdimm/pmem.c Dan Williams 2016-03-06 163 else { > bd697a80c drivers/nvdimm/pmem.c Vishal 
Verma 2016-09-30 164 rc = > read_pmem(page, off, pmem_addr, len); > 9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 165 > flush_dcache_page(page); > b5ebc8ec6 drivers/nvdimm/pmem.c Dan Williams 2016-03-06 166 } > 9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 167 } else { > 0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 168 /* > 0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 169 * Note > that we write the data both before and after > 0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 170 * > clearing poison. The write before clear poison > 0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 171 * > handles situations where the latest written data is > 0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 172 * > preserved and the clear poison operation simply marks > 0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 173 * the > address range as valid without changing the data. > 0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 174 * In > this case application software can assume that an > 0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 175 * > interrupted write will either return the new good > 0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 176 * data > or an error. > 0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 177 * > 0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 178 * > However, if pmem_clear_poison() leaves the data in an > 0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 179 * > indeterminate state we need to perform the write > 0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 180 * after > clear poison. 
> 0a370d261 drivers/nvdimm/pmem.c Dan Williams 2016-04-14 181 */ > 9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 182 > flush_dcache_page(page); > bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 183 > write_pmem(pmem_addr, page, off, len); > 59e647398 drivers/nvdimm/pmem.c Dan Williams 2016-03-08 184 if > (unlikely(bad_pmem)) { > 3115bb02b drivers/nvdimm/pmem.c Toshi Kani 2016-10-13 185 rc = > pmem_clear_poison(pmem, pmem_off, len); > bd697a80c drivers/nvdimm/pmem.c Vishal Verma 2016-09-30 186 > write_pmem(pmem_addr, page, off, len); > 59e647398 drivers/nvdimm/pmem.c Dan Williams 2016-03-08 187 } > 9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 188 } > 9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 189 > b5ebc8ec6 drivers/nvdimm/pmem.c Dan Williams 2016-03-06 190 return > rc; > 9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 191 } > 9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 192 > dece16353 drivers/nvdimm/pmem.c Jens Axboe 2015-11-05 193 static > blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio) > 9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 194 { > 4e4cbee93 drivers/nvdimm/pmem.c Christoph Hellwig 2017-06-03 195 > blk_status_t rc = 0; > f0dc089ce drivers/nvdimm/pmem.c Dan Williams 2015-05-16 196 bool > do_acct; > f0dc089ce drivers/nvdimm/pmem.c Dan Williams 2015-05-16 197 unsigned > long start; > 9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 198 struct > bio_vec bvec; > 9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 199 struct > bvec_iter iter; > bd842b8ca drivers/nvdimm/pmem.c Dan Williams 2016-03-18 200 struct > pmem_device *pmem = q->queuedata; > 7e267a8c7 drivers/nvdimm/pmem.c Dan Williams 2016-06-01 201 struct > nd_region *nd_region = to_region(pmem); > 7e267a8c7 drivers/nvdimm/pmem.c Dan Williams 2016-06-01 202 > d2d6364dc drivers/nvdimm/pmem.c Ross Zwisler 2018-06-06 203 if > (bio->bi_opf & REQ_PREFLUSH) > 69b95edd2 drivers/nvdimm/pmem.c Pankaj Gupta 2018-08-31 
@204 > bio->bi_status = nd_region->flush(nd_region); > 69b95edd2 drivers/nvdimm/pmem.c Pankaj Gupta 2018-08-31 205 > 9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 206 > f0dc089ce drivers/nvdimm/pmem.c Dan Williams 2015-05-16 207 do_acct = > nd_iostat_start(bio, &start); > e10624f8c drivers/nvdimm/pmem.c Dan Williams 2016-01-06 208 > bio_for_each_segment(bvec, bio, iter) { > e10624f8c drivers/nvdimm/pmem.c Dan Williams 2016-01-06 209 rc = > pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len, > 3f289dcb4 drivers/nvdimm/pmem.c Tejun Heo 2018-07-18 210 > bvec.bv_offset, bio_op(bio), iter.bi_sector); > e10624f8c drivers/nvdimm/pmem.c Dan Williams 2016-01-06 211 if (rc) > { > 4e4cbee93 drivers/nvdimm/pmem.c Christoph Hellwig 2017-06-03 212 > bio->bi_status = rc; > e10624f8c drivers/nvdimm/pmem.c Dan Williams 2016-01-06 213 break; > e10624f8c drivers/nvdimm/pmem.c Dan Williams 2016-01-06 214 } > e10624f8c drivers/nvdimm/pmem.c Dan Williams 2016-01-06 215 } > f0dc089ce drivers/nvdimm/pmem.c Dan Williams 2015-05-16 216 if > (do_acct) > f0dc089ce drivers/nvdimm/pmem.c Dan Williams 2015-05-16 217 > nd_iostat_end(bio, start); > 61031952f drivers/nvdimm/pmem.c Ross Zwisler 2015-06-25 218 > 1eff9d322 drivers/nvdimm/pmem.c Jens Axboe 2016-08-05 219 if > (bio->bi_opf & REQ_FUA) > 69b95edd2 drivers/nvdimm/pmem.c Pankaj Gupta 2018-08-31 220 > bio->bi_status = nd_region->flush(nd_region); > 61031952f drivers/nvdimm/pmem.c Ross Zwisler 2015-06-25 221 > 4246a0b63 drivers/nvdimm/pmem.c Christoph Hellwig 2015-07-20 222 > bio_endio(bio); > dece16353 drivers/nvdimm/pmem.c Jens Axboe 2015-11-05 223 return > BLK_QC_T_NONE; > 9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 224 } > 9e853f231 drivers/block/pmem.c Ross Zwisler 2015-04-01 225 > > --- > 0-DAY kernel test infrastructure Open Source Technology Center > https://lists.01.org/pipermail/kbuild-all Intel Corporation >
On Fri, Aug 31, 2018 at 6:32 AM Pankaj Gupta <pagupta@redhat.com> wrote: > > This patch adds functionality to perform flush from guest > to host over VIRTIO. We are registering a callback based > on 'nd_region' type. virtio_pmem driver requires this special > flush function. For rest of the region types we are registering > existing flush function. Report error returned by host fsync > failure to userspace. > > Signed-off-by: Pankaj Gupta <pagupta@redhat.com> This looks ok to me, just some nits below. > --- > drivers/acpi/nfit/core.c | 7 +++++-- > drivers/nvdimm/claim.c | 3 ++- > drivers/nvdimm/pmem.c | 12 ++++++++---- > drivers/nvdimm/region_devs.c | 12 ++++++++++-- > include/linux/libnvdimm.h | 4 +++- > 5 files changed, 28 insertions(+), 10 deletions(-) > > diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c > index b072cfc..cd63b69 100644 > --- a/drivers/acpi/nfit/core.c > +++ b/drivers/acpi/nfit/core.c > @@ -2216,6 +2216,7 @@ static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, > { > u64 cmd, offset; > struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; > + struct nd_region *nd_region = nfit_blk->nd_region; > > enum { > BCW_OFFSET_MASK = (1ULL << 48)-1, > @@ -2234,7 +2235,7 @@ static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, > offset = to_interleave_offset(offset, mmio); > > writeq(cmd, mmio->addr.base + offset); > - nvdimm_flush(nfit_blk->nd_region); > + nd_region->flush(nd_region); I would keep the indirect function call override inside of nvdimm_flush. Then this hunk can go away... 
> > if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH) > readq(mmio->addr.base + offset); > @@ -2245,6 +2246,7 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, > unsigned int lane) > { > struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; > + struct nd_region *nd_region = nfit_blk->nd_region; > unsigned int copied = 0; > u64 base_offset; > int rc; > @@ -2283,7 +2285,8 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, > } > > if (rw) > - nvdimm_flush(nfit_blk->nd_region); > + nd_region->flush(nd_region); > + > ...ditto, no need to touch this code. > rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0; > return rc; > diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c > index fb667bf..49dce9c 100644 > --- a/drivers/nvdimm/claim.c > +++ b/drivers/nvdimm/claim.c > @@ -262,6 +262,7 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns, > { > struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); > unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512); > + struct nd_region *nd_region = to_nd_region(ndns->dev.parent); > sector_t sector = offset >> 9; > int rc = 0; > > @@ -301,7 +302,7 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns, > } > > memcpy_flushcache(nsio->addr + offset, buf, size); > - nvdimm_flush(to_nd_region(ndns->dev.parent)); > + nd_region->flush(nd_region); For this you would need to teach nsio_rw_bytes() that the flush can fail. 
> > return rc; > } > diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c > index 6071e29..ba57cfa 100644 > --- a/drivers/nvdimm/pmem.c > +++ b/drivers/nvdimm/pmem.c > @@ -201,7 +201,8 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio) > struct nd_region *nd_region = to_region(pmem); > > if (bio->bi_opf & REQ_PREFLUSH) > - nvdimm_flush(nd_region); > + bio->bi_status = nd_region->flush(nd_region); > + Let's have nvdimm_flush() return 0 or -EIO if it fails since thats what nsio_rw_bytes() expects, and you'll need to translate that to: BLK_STS_IOERR > > do_acct = nd_iostat_start(bio, &start); > bio_for_each_segment(bvec, bio, iter) { > @@ -216,7 +217,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio) > nd_iostat_end(bio, start); > > if (bio->bi_opf & REQ_FUA) > - nvdimm_flush(nd_region); > + bio->bi_status = nd_region->flush(nd_region); Same comment. > > bio_endio(bio); > return BLK_QC_T_NONE; > @@ -517,6 +518,7 @@ static int nd_pmem_probe(struct device *dev) > static int nd_pmem_remove(struct device *dev) > { > struct pmem_device *pmem = dev_get_drvdata(dev); > + struct nd_region *nd_region = to_region(pmem); > > if (is_nd_btt(dev)) > nvdimm_namespace_detach_btt(to_nd_btt(dev)); > @@ -528,14 +530,16 @@ static int nd_pmem_remove(struct device *dev) > sysfs_put(pmem->bb_state); > pmem->bb_state = NULL; > } > - nvdimm_flush(to_nd_region(dev->parent)); > + nd_region->flush(nd_region); Not needed if the indirect function call moves inside nvdimm_flush(). 
> > return 0; > } > > static void nd_pmem_shutdown(struct device *dev) > { > - nvdimm_flush(to_nd_region(dev->parent)); > + struct nd_region *nd_region = to_nd_region(dev->parent); > + > + nd_region->flush(nd_region); > } > > static void nd_pmem_notify(struct device *dev, enum nvdimm_event event) > diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c > index fa37afc..a170a6b 100644 > --- a/drivers/nvdimm/region_devs.c > +++ b/drivers/nvdimm/region_devs.c > @@ -290,7 +290,7 @@ static ssize_t deep_flush_store(struct device *dev, struct device_attribute *att > return rc; > if (!flush) > return -EINVAL; > - nvdimm_flush(nd_region); > + nd_region->flush(nd_region); Let's pass the error code through if the flush fails. > > return len; > } > @@ -1065,6 +1065,11 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus, > dev->of_node = ndr_desc->of_node; > nd_region->ndr_size = resource_size(ndr_desc->res); > nd_region->ndr_start = ndr_desc->res->start; > + if (ndr_desc->flush) > + nd_region->flush = ndr_desc->flush; > + else > + nd_region->flush = nvdimm_flush; > + We'll need to rename the existing nvdimm_flush() to generic_nvdimm_flush(). > nd_device_register(dev); > > return nd_region; > @@ -1109,7 +1114,7 @@ EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create); > * nvdimm_flush - flush any posted write queues between the cpu and pmem media > * @nd_region: blk or interleaved pmem region > */ > -void nvdimm_flush(struct nd_region *nd_region) > +int nvdimm_flush(struct nd_region *nd_region) > { > struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev); > int i, idx; > @@ -1133,7 +1138,10 @@ void nvdimm_flush(struct nd_region *nd_region) > if (ndrd_get_flush_wpq(ndrd, i, 0)) > writeq(1, ndrd_get_flush_wpq(ndrd, i, idx)); > wmb(); > + > + return 0; > } > + Needless newline. 
> EXPORT_SYMBOL_GPL(nvdimm_flush); > > /** > diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h > index 097072c..3af7177 100644 > --- a/include/linux/libnvdimm.h > +++ b/include/linux/libnvdimm.h > @@ -115,6 +115,7 @@ struct nd_mapping_desc { > int position; > }; > > +struct nd_region; > struct nd_region_desc { > struct resource *res; > struct nd_mapping_desc *mapping; > @@ -126,6 +127,7 @@ struct nd_region_desc { > int numa_node; > unsigned long flags; > struct device_node *of_node; > + int (*flush)(struct nd_region *nd_region); > }; > > struct device; > @@ -201,7 +203,7 @@ unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr); > unsigned int nd_region_acquire_lane(struct nd_region *nd_region); > void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane); > u64 nd_fletcher64(void *addr, size_t len, bool le); > -void nvdimm_flush(struct nd_region *nd_region); > +int nvdimm_flush(struct nd_region *nd_region); > int nvdimm_has_flush(struct nd_region *nd_region); > int nvdimm_has_cache(struct nd_region *nd_region); > > -- > 2.9.3 >
> > This patch adds functionality to perform flush from guest > > to host over VIRTIO. We are registering a callback based > > on 'nd_region' type. virtio_pmem driver requires this special > > flush function. For rest of the region types we are registering > > existing flush function. Report error returned by host fsync > > failure to userspace. > > > > Signed-off-by: Pankaj Gupta <pagupta@redhat.com> > > This looks ok to me, just some nits below. > > > --- > > drivers/acpi/nfit/core.c | 7 +++++-- > > drivers/nvdimm/claim.c | 3 ++- > > drivers/nvdimm/pmem.c | 12 ++++++++---- > > drivers/nvdimm/region_devs.c | 12 ++++++++++-- > > include/linux/libnvdimm.h | 4 +++- > > 5 files changed, 28 insertions(+), 10 deletions(-) > > > > diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c > > index b072cfc..cd63b69 100644 > > --- a/drivers/acpi/nfit/core.c > > +++ b/drivers/acpi/nfit/core.c > > @@ -2216,6 +2216,7 @@ static void write_blk_ctl(struct nfit_blk *nfit_blk, > > unsigned int bw, > > { > > u64 cmd, offset; > > struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; > > + struct nd_region *nd_region = nfit_blk->nd_region; > > > > enum { > > BCW_OFFSET_MASK = (1ULL << 48)-1, > > @@ -2234,7 +2235,7 @@ static void write_blk_ctl(struct nfit_blk *nfit_blk, > > unsigned int bw, > > offset = to_interleave_offset(offset, mmio); > > > > writeq(cmd, mmio->addr.base + offset); > > - nvdimm_flush(nfit_blk->nd_region); > > + nd_region->flush(nd_region); > > I would keep the indirect function call override inside of > nvdimm_flush. Then this hunk can go away... Sure. Will change. 
> > > > > if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH) > > readq(mmio->addr.base + offset); > > @@ -2245,6 +2246,7 @@ static int acpi_nfit_blk_single_io(struct nfit_blk > > *nfit_blk, > > unsigned int lane) > > { > > struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; > > + struct nd_region *nd_region = nfit_blk->nd_region; > > unsigned int copied = 0; > > u64 base_offset; > > int rc; > > @@ -2283,7 +2285,8 @@ static int acpi_nfit_blk_single_io(struct nfit_blk > > *nfit_blk, > > } > > > > if (rw) > > - nvdimm_flush(nfit_blk->nd_region); > > + nd_region->flush(nd_region); > > + > > > > ...ditto, no need to touch this code. Sure. > > > rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0; > > return rc; > > diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c > > index fb667bf..49dce9c 100644 > > --- a/drivers/nvdimm/claim.c > > +++ b/drivers/nvdimm/claim.c > > @@ -262,6 +262,7 @@ static int nsio_rw_bytes(struct nd_namespace_common > > *ndns, > > { > > struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); > > unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512); > > + struct nd_region *nd_region = to_nd_region(ndns->dev.parent); > > sector_t sector = offset >> 9; > > int rc = 0; > > > > @@ -301,7 +302,7 @@ static int nsio_rw_bytes(struct nd_namespace_common > > *ndns, > > } > > > > memcpy_flushcache(nsio->addr + offset, buf, size); > > - nvdimm_flush(to_nd_region(ndns->dev.parent)); > > + nd_region->flush(nd_region); > > For this you would need to teach nsio_rw_bytes() that the flush can fail. Sure. 
> > > > > return rc; > > } > > diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c > > index 6071e29..ba57cfa 100644 > > --- a/drivers/nvdimm/pmem.c > > +++ b/drivers/nvdimm/pmem.c > > @@ -201,7 +201,8 @@ static blk_qc_t pmem_make_request(struct request_queue > > *q, struct bio *bio) > > struct nd_region *nd_region = to_region(pmem); > > > > if (bio->bi_opf & REQ_PREFLUSH) > > - nvdimm_flush(nd_region); > > + bio->bi_status = nd_region->flush(nd_region); > > + > > Let's have nvdimm_flush() return 0 or -EIO if it fails since thats > what nsio_rw_bytes() expects, and you'll need to translate that to: > BLK_STS_IOERR o.k. Will change it as per suggestion. > > > > > do_acct = nd_iostat_start(bio, &start); > > bio_for_each_segment(bvec, bio, iter) { > > @@ -216,7 +217,7 @@ static blk_qc_t pmem_make_request(struct request_queue > > *q, struct bio *bio) > > nd_iostat_end(bio, start); > > > > if (bio->bi_opf & REQ_FUA) > > - nvdimm_flush(nd_region); > > + bio->bi_status = nd_region->flush(nd_region); > > Same comment. Sure. > > > > > bio_endio(bio); > > return BLK_QC_T_NONE; > > @@ -517,6 +518,7 @@ static int nd_pmem_probe(struct device *dev) > > static int nd_pmem_remove(struct device *dev) > > { > > struct pmem_device *pmem = dev_get_drvdata(dev); > > + struct nd_region *nd_region = to_region(pmem); > > > > if (is_nd_btt(dev)) > > nvdimm_namespace_detach_btt(to_nd_btt(dev)); > > @@ -528,14 +530,16 @@ static int nd_pmem_remove(struct device *dev) > > sysfs_put(pmem->bb_state); > > pmem->bb_state = NULL; > > } > > - nvdimm_flush(to_nd_region(dev->parent)); > > + nd_region->flush(nd_region); > > Not needed if the indirect function call moves inside nvdimm_flush(). 
o.k > > > > > return 0; > > } > > > > static void nd_pmem_shutdown(struct device *dev) > > { > > - nvdimm_flush(to_nd_region(dev->parent)); > > + struct nd_region *nd_region = to_nd_region(dev->parent); > > + > > + nd_region->flush(nd_region); > > } > > > > static void nd_pmem_notify(struct device *dev, enum nvdimm_event event) > > diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c > > index fa37afc..a170a6b 100644 > > --- a/drivers/nvdimm/region_devs.c > > +++ b/drivers/nvdimm/region_devs.c > > @@ -290,7 +290,7 @@ static ssize_t deep_flush_store(struct device *dev, > > struct device_attribute *att > > return rc; > > if (!flush) > > return -EINVAL; > > - nvdimm_flush(nd_region); > > + nd_region->flush(nd_region); > > Let's pass the error code through if the flush fails. o.k > > > > > return len; > > } > > @@ -1065,6 +1065,11 @@ static struct nd_region *nd_region_create(struct > > nvdimm_bus *nvdimm_bus, > > dev->of_node = ndr_desc->of_node; > > nd_region->ndr_size = resource_size(ndr_desc->res); > > nd_region->ndr_start = ndr_desc->res->start; > > + if (ndr_desc->flush) > > + nd_region->flush = ndr_desc->flush; > > + else > > + nd_region->flush = nvdimm_flush; > > + > > We'll need to rename the existing nvdimm_flush() to generic_nvdimm_flush(). Sure. > > > nd_device_register(dev); > > > > return nd_region; > > @@ -1109,7 +1114,7 @@ EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create); > > * nvdimm_flush - flush any posted write queues between the cpu and pmem > > media > > * @nd_region: blk or interleaved pmem region > > */ > > -void nvdimm_flush(struct nd_region *nd_region) > > +int nvdimm_flush(struct nd_region *nd_region) > > { > > struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev); > > int i, idx; > > @@ -1133,7 +1138,10 @@ void nvdimm_flush(struct nd_region *nd_region) > > if (ndrd_get_flush_wpq(ndrd, i, 0)) > > writeq(1, ndrd_get_flush_wpq(ndrd, i, idx)); > > wmb(); > > + > > + return 0; > > } > > + > > Needless newline. 
Will remove this. > > > EXPORT_SYMBOL_GPL(nvdimm_flush); > > > > /** > > diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h > > index 097072c..3af7177 100644 > > --- a/include/linux/libnvdimm.h > > +++ b/include/linux/libnvdimm.h > > @@ -115,6 +115,7 @@ struct nd_mapping_desc { > > int position; > > }; > > > > +struct nd_region; > > struct nd_region_desc { > > struct resource *res; > > struct nd_mapping_desc *mapping; > > @@ -126,6 +127,7 @@ struct nd_region_desc { > > int numa_node; > > unsigned long flags; > > struct device_node *of_node; > > + int (*flush)(struct nd_region *nd_region); > > }; > > > > struct device; > > @@ -201,7 +203,7 @@ unsigned long nd_blk_memremap_flags(struct > > nd_blk_region *ndbr); > > unsigned int nd_region_acquire_lane(struct nd_region *nd_region); > > void nd_region_release_lane(struct nd_region *nd_region, unsigned int > > lane); > > u64 nd_fletcher64(void *addr, size_t len, bool le); > > -void nvdimm_flush(struct nd_region *nd_region); > > +int nvdimm_flush(struct nd_region *nd_region); > > int nvdimm_has_flush(struct nd_region *nd_region); > > int nvdimm_has_cache(struct nd_region *nd_region); > > > > -- > > 2.9.3 > > > Thanks, Pankaj
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index b072cfc..cd63b69 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -2216,6 +2216,7 @@ static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, { u64 cmd, offset; struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; + struct nd_region *nd_region = nfit_blk->nd_region; enum { BCW_OFFSET_MASK = (1ULL << 48)-1, @@ -2234,7 +2235,7 @@ static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, offset = to_interleave_offset(offset, mmio); writeq(cmd, mmio->addr.base + offset); - nvdimm_flush(nfit_blk->nd_region); + nd_region->flush(nd_region); if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH) readq(mmio->addr.base + offset); @@ -2245,6 +2246,7 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, unsigned int lane) { struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; + struct nd_region *nd_region = nfit_blk->nd_region; unsigned int copied = 0; u64 base_offset; int rc; @@ -2283,7 +2285,8 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, } if (rw) - nvdimm_flush(nfit_blk->nd_region); + nd_region->flush(nd_region); + rc = read_blk_stat(nfit_blk, lane) ? 
-EIO : 0; return rc; diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c index fb667bf..49dce9c 100644 --- a/drivers/nvdimm/claim.c +++ b/drivers/nvdimm/claim.c @@ -262,6 +262,7 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns, { struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512); + struct nd_region *nd_region = to_nd_region(ndns->dev.parent); sector_t sector = offset >> 9; int rc = 0; @@ -301,7 +302,7 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns, } memcpy_flushcache(nsio->addr + offset, buf, size); - nvdimm_flush(to_nd_region(ndns->dev.parent)); + nd_region->flush(nd_region); return rc; } diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 6071e29..ba57cfa 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -201,7 +201,8 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio) struct nd_region *nd_region = to_region(pmem); if (bio->bi_opf & REQ_PREFLUSH) - nvdimm_flush(nd_region); + bio->bi_status = nd_region->flush(nd_region); + do_acct = nd_iostat_start(bio, &start); bio_for_each_segment(bvec, bio, iter) { @@ -216,7 +217,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio) nd_iostat_end(bio, start); if (bio->bi_opf & REQ_FUA) - nvdimm_flush(nd_region); + bio->bi_status = nd_region->flush(nd_region); bio_endio(bio); return BLK_QC_T_NONE; @@ -517,6 +518,7 @@ static int nd_pmem_probe(struct device *dev) static int nd_pmem_remove(struct device *dev) { struct pmem_device *pmem = dev_get_drvdata(dev); + struct nd_region *nd_region = to_region(pmem); if (is_nd_btt(dev)) nvdimm_namespace_detach_btt(to_nd_btt(dev)); @@ -528,14 +530,16 @@ static int nd_pmem_remove(struct device *dev) sysfs_put(pmem->bb_state); pmem->bb_state = NULL; } - nvdimm_flush(to_nd_region(dev->parent)); + nd_region->flush(nd_region); return 0; } static void nd_pmem_shutdown(struct device *dev) { - 
nvdimm_flush(to_nd_region(dev->parent)); + struct nd_region *nd_region = to_nd_region(dev->parent); + + nd_region->flush(nd_region); } static void nd_pmem_notify(struct device *dev, enum nvdimm_event event) diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index fa37afc..a170a6b 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -290,7 +290,7 @@ static ssize_t deep_flush_store(struct device *dev, struct device_attribute *att return rc; if (!flush) return -EINVAL; - nvdimm_flush(nd_region); + nd_region->flush(nd_region); return len; } @@ -1065,6 +1065,11 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus, dev->of_node = ndr_desc->of_node; nd_region->ndr_size = resource_size(ndr_desc->res); nd_region->ndr_start = ndr_desc->res->start; + if (ndr_desc->flush) + nd_region->flush = ndr_desc->flush; + else + nd_region->flush = nvdimm_flush; + nd_device_register(dev); return nd_region; @@ -1109,7 +1114,7 @@ EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create); * nvdimm_flush - flush any posted write queues between the cpu and pmem media * @nd_region: blk or interleaved pmem region */ -void nvdimm_flush(struct nd_region *nd_region) +int nvdimm_flush(struct nd_region *nd_region) { struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev); int i, idx; @@ -1133,7 +1138,10 @@ void nvdimm_flush(struct nd_region *nd_region) if (ndrd_get_flush_wpq(ndrd, i, 0)) writeq(1, ndrd_get_flush_wpq(ndrd, i, idx)); wmb(); + + return 0; } + EXPORT_SYMBOL_GPL(nvdimm_flush); /** diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 097072c..3af7177 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -115,6 +115,7 @@ struct nd_mapping_desc { int position; }; +struct nd_region; struct nd_region_desc { struct resource *res; struct nd_mapping_desc *mapping; @@ -126,6 +127,7 @@ struct nd_region_desc { int numa_node; unsigned long flags; struct device_node *of_node; + int 
(*flush)(struct nd_region *nd_region); }; struct device; @@ -201,7 +203,7 @@ unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr); unsigned int nd_region_acquire_lane(struct nd_region *nd_region); void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane); u64 nd_fletcher64(void *addr, size_t len, bool le); -void nvdimm_flush(struct nd_region *nd_region); +int nvdimm_flush(struct nd_region *nd_region); int nvdimm_has_flush(struct nd_region *nd_region); int nvdimm_has_cache(struct nd_region *nd_region);
This patch adds functionality to perform a flush from the guest to the host over VIRTIO. We are registering a callback based on the 'nd_region' type. The virtio_pmem driver requires this special flush function. For the rest of the region types we are registering the existing flush function. Report the error returned by a host fsync failure to userspace. Signed-off-by: Pankaj Gupta <pagupta@redhat.com> --- drivers/acpi/nfit/core.c | 7 +++++-- drivers/nvdimm/claim.c | 3 ++- drivers/nvdimm/pmem.c | 12 ++++++++---- drivers/nvdimm/region_devs.c | 12 ++++++++++-- include/linux/libnvdimm.h | 4 +++- 5 files changed, 28 insertions(+), 10 deletions(-)