Message ID | 20241206015308.3342386-11-kbusch@meta.com (mailing list archive) |
---|---|
State | New |
Series | block write streams with nvme fdp |
Hi Keith,

kernel test robot noticed the following build warnings:

[auto build test WARNING on axboe-block/for-next]
[also build test WARNING on next-20241205]
[cannot apply to brauner-vfs/vfs.all hch-configfs/for-next linus/master v6.13-rc1]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting the patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Keith-Busch/fs-add-a-write-stream-field-to-the-kiocb/20241206-095707
base:   https://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git for-next
patch link:    https://lore.kernel.org/r/20241206015308.3342386-11-kbusch%40meta.com
patch subject: [PATCHv11 10/10] nvme: use fdp streams if write stream is provided
config: i386-randconfig-061 (https://download.01.org/0day-ci/archive/20241206/202412062116.SzYvrv5L-lkp@intel.com/config)
compiler: gcc-12 (Debian 12.2.0-14) 12.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20241206/202412062116.SzYvrv5L-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version
of the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202412062116.SzYvrv5L-lkp@intel.com/

sparse warnings: (new ones prefixed by >>)
   drivers/nvme/host/core.c: note: in included file (through drivers/nvme/host/nvme.h):
   include/linux/nvme.h:790:44: sparse: sparse: array of flexible structures
>> drivers/nvme/host/core.c:2261:34: sparse: sparse: cast to restricted __le16
   drivers/nvme/host/core.c: note: in included file (through include/linux/async.h):
   include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
   include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
   include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
   include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
   include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
   include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
   include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true

vim +2261 drivers/nvme/host/core.c

  2209	
  2210	static int nvme_query_fdp_info(struct nvme_ns *ns, struct nvme_ns_info *info)
  2211	{
  2212		struct nvme_fdp_ruh_status_desc *ruhsd;
  2213		struct nvme_ns_head *head = ns->head;
  2214		struct nvme_fdp_ruh_status *ruhs;
  2215		struct nvme_command c = {};
  2216		u32 fdp, fdp_idx;
  2217		int size, ret, i;
  2218	
  2219		ret = nvme_get_features(ns->ctrl, NVME_FEAT_FDP, info->endgid, NULL, 0,
  2220				&fdp);
  2221		if (ret)
  2222			goto err;
  2223	
  2224		if (!(fdp & NVME_FDP_FDPE))
  2225			goto err;
  2226	
  2227		fdp_idx = (fdp >> NVME_FDP_FDPCIDX_SHIFT) & NVME_FDP_FDPCIDX_MASK;
  2228		ret = nvme_check_fdp(ns, info, fdp_idx);
  2229		if (ret || !info->runs)
  2230			goto err;
  2231	
  2232		size = struct_size(ruhs, ruhsd, NVME_MAX_PLIDS);
  2233		ruhs = kzalloc(size, GFP_KERNEL);
  2234		if (!ruhs) {
  2235			ret = -ENOMEM;
  2236			goto err;
  2237		}
  2238	
  2239		c.imr.opcode = nvme_cmd_io_mgmt_recv;
  2240		c.imr.nsid = cpu_to_le32(head->ns_id);
  2241		c.imr.mo = NVME_IO_MGMT_RECV_MO_RUHS;
  2242		c.imr.numd = cpu_to_le32(nvme_bytes_to_numd(size));
  2243		ret = nvme_submit_sync_cmd(ns->queue, &c, ruhs, size);
  2244		if (ret)
  2245			goto free;
  2246	
  2247		head->nr_plids = le16_to_cpu(ruhs->nruhsd);
  2248		if (!head->nr_plids)
  2249			goto free;
  2250	
  2251		head->nr_plids = min(head->nr_plids, NVME_MAX_PLIDS);
  2252		head->plids = kcalloc(head->nr_plids, sizeof(head->plids),
  2253				GFP_KERNEL);
  2254		if (!head->plids) {
  2255			ret = -ENOMEM;
  2256			goto free;
  2257		}
  2258	
  2259		for (i = 0; i < head->nr_plids; i++) {
  2260			ruhsd = &ruhs->ruhsd[i];
> 2261		head->plids[i] = le16_to_cpu(ruhsd->pid);
  2262		}
  2263	
  2264		kfree(ruhs);
  2265		return 0;
  2266	
  2267	free:
  2268		kfree(ruhs);
  2269	err:
  2270		head->nr_plids = 0;
  2271		info->runs = 0;
  2272		return ret;
  2273	}
  2274	
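The ">>" warning at core.c:2261 is sparse's endianness type check: le16_to_cpu()
expects a value annotated as __le16, so calling it on a field declared as a plain
u16 yields "cast to restricted __le16". A minimal illustration of the pattern
(demo struct only, not the series' actual structures; this assumes the
descriptor's pid field is a plain u16 in this revision, and the usual fix is to
annotate the wire-format field as __le16):

/* Illustrative sketch, not the series' actual code. */
#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_desc {
	u16	pid;		/* plain u16: conversion below is flagged */
	__le16	pid_le;		/* annotated __le16: conversion is clean */
};

static u16 demo_read_pid(const struct demo_desc *d)
{
	u16 a = le16_to_cpu(d->pid);	/* sparse: cast to restricted __le16 */
	u16 b = le16_to_cpu(d->pid_le);	/* no warning: annotation matches */

	return a + b;
}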
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 410a77de92f88..c6f48403fc51c 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -997,6 +997,18 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
 	if (req->cmd_flags & REQ_RAHEAD)
 		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
 
+	if (op == nvme_cmd_write && ns->head->nr_plids) {
+		u16 write_stream = req->bio->bi_write_stream;
+
+		if (WARN_ON_ONCE(write_stream > ns->head->nr_plids))
+			return BLK_STS_INVAL;
+
+		if (write_stream) {
+			dsmgmt |= ns->head->plids[write_stream - 1] << 16;
+			control |= NVME_RW_DTYPE_DPLCMT;
+		}
+	}
+
 	if (req->cmd_flags & REQ_ATOMIC && !nvme_valid_atomic_write(req))
 		return BLK_STS_INVAL;
 
@@ -2197,11 +2209,12 @@ static int nvme_check_fdp(struct nvme_ns *ns, struct nvme_ns_info *info,
 
 static int nvme_query_fdp_info(struct nvme_ns *ns, struct nvme_ns_info *info)
 {
+	struct nvme_fdp_ruh_status_desc *ruhsd;
 	struct nvme_ns_head *head = ns->head;
 	struct nvme_fdp_ruh_status *ruhs;
 	struct nvme_command c = {};
 	u32 fdp, fdp_idx;
-	int size, ret;
+	int size, ret, i;
 
 	ret = nvme_get_features(ns->ctrl, NVME_FEAT_FDP, info->endgid, NULL, 0,
 			&fdp);
@@ -2235,6 +2248,19 @@ static int nvme_query_fdp_info(struct nvme_ns *ns, struct nvme_ns_info *info)
 	if (!head->nr_plids)
 		goto free;
 
+	head->nr_plids = min(head->nr_plids, NVME_MAX_PLIDS);
+	head->plids = kcalloc(head->nr_plids, sizeof(head->plids),
+			GFP_KERNEL);
+	if (!head->plids) {
+		ret = -ENOMEM;
+		goto free;
+	}
+
+	for (i = 0; i < head->nr_plids; i++) {
+		ruhsd = &ruhs->ruhsd[i];
+		head->plids[i] = le16_to_cpu(ruhsd->pid);
+	}
+
 	kfree(ruhs);
 	return 0;
 
@@ -2289,6 +2315,10 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
 			"FDP failure status:0x%x\n", ret);
 		if (ret < 0)
 			goto out;
+	} else {
+		ns->head->nr_plids = 0;
+		kfree(ns->head->plids);
+		ns->head->plids = NULL;
 	}
 
 	blk_mq_freeze_queue(ns->disk->queue);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 5c8bdaa2c8824..4c12d35b3e39e 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -495,6 +495,7 @@ struct nvme_ns_head {
 	struct gendisk		*disk;
 
 	u16			nr_plids;
+	u16			*plids;
 #ifdef CONFIG_NVME_MULTIPATH
 	struct bio_list		requeue_list;
 	spinlock_t		requeue_lock;
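For context on the nvme_setup_rw() hunk: write streams are 1-based at the block
layer (0 means "no stream"), stream N selects plids[N - 1], and the 16-bit
placement ID is carried in bits 31:16 of command DWORD 13 (the DSPEC field)
while the directive type in the control word is set to "data placement". A
hedged sketch of that encoding follows; the helper name is hypothetical, and
nvme_command, cpu_to_le16()/cpu_to_le32(), and NVME_RW_DTYPE_DPLCMT are taken
as used in the diff above:

/* Sketch only: mirrors how the hunk folds a placement ID into the command. */
static void demo_encode_write_stream(struct nvme_command *cmnd, u16 *plids,
				     u16 write_stream, u16 control, u32 dsmgmt)
{
	if (write_stream) {
		dsmgmt |= (u32)plids[write_stream - 1] << 16; /* DSPEC: cdw13[31:16] */
		control |= NVME_RW_DTYPE_DPLCMT;	/* directive type: data placement */
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
}

Note that both words are accumulated in CPU byte order and swapped once at the
end, matching how nvme_setup_rw() builds control and dsmgmt before the final
cpu_to_le16()/cpu_to_le32() stores.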