From patchwork Thu Apr 8 01:45:06 2021
X-Patchwork-Submitter: Douglas Gilbert
X-Patchwork-Id: 12189671
From: Douglas Gilbert
To: linux-scsi@vger.kernel.org
Cc: martin.petersen@oracle.com, jejb@linux.vnet.ibm.com, hare@suse.de
Subject: [PATCH v17 20/45] sg: sg_find_srp_by_id
Date: Wed, 7 Apr 2021 21:45:06 -0400
Message-Id: <20210408014531.248890-21-dgilbert@interlog.com>
In-Reply-To: <20210408014531.248890-1-dgilbert@interlog.com>
References: <20210408014531.248890-1-dgilbert@interlog.com>

Replace sg_get_rq_mark() with sg_find_srp_by_id() and sg_get_ready_srp().
Add sg_chk_mmap() to check the flags and the availability of the reserve
buffer for mmap() based requests. Add sg_copy_sense() and
sg_rec_state_v3(), which are pure refactoring. Add sg_calc_rq_dur() and
sg_get_dur() in preparation for optional nanosecond duration timing.

Reviewed-by: Hannes Reinecke
Signed-off-by: Douglas Gilbert
---
 drivers/scsi/sg.c | 286 +++++++++++++++++++++++++++++++---------------
 1 file changed, 197 insertions(+), 89 deletions(-)

diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 81de8cf5ef4b..74df15255a18 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -153,16 +153,19 @@ struct sg_fd;
 struct sg_request {	/* SG_MAX_QUEUE requests outstanding per file */
 	struct list_head entry;	/* list entry */
-	struct sg_fd *parentfp;	/* NULL -> not in use */
 	struct sg_scatter_hold data;	/* hold buffer, perhaps scatter list */
 	struct sg_io_hdr header;	/* scsi command+info, see <scsi/sg.h> */
 	u8 sense_b[SCSI_SENSE_BUFFERSIZE];
+	u32 duration;		/* cmd duration in milliseconds */
 	char res_used;		/* 1 -> using reserve buffer, 0 -> not ... */
 	char orphan;		/* 1 -> drop on sight, 0 -> normal */
 	char sg_io_owned;	/* 1 -> packet belongs to SG_IO */
 	/* done protected by rq_list_lock */
 	char done;		/* 0->before bh, 1->before read, 2->read */
 	atomic_t rq_st;		/* request state, holds a enum sg_rq_state */
+	u64 start_ns;		/* starting point of command duration calc */
+	unsigned long frq_bm[1];	/* see SG_FRQ_* defines above */
+	struct sg_fd *parentfp;	/* pointer to owning fd, even when on fl */
 	struct request *rq;	/* released in sg_rq_end_io(), bio kept */
 	struct bio *bio;	/* kept until this req -->SG_RS_INACTIVE */
 	struct execute_work ew_orph;	/* harvest orphan request */
@@ -228,7 +231,7 @@ static ssize_t sg_submit(struct sg_fd *sfp, struct file *filp,
 			 const char __user *buf, size_t count, bool blocking,
 			 bool read_only, bool sg_io_owned,
 			 struct sg_request **o_srp);
-static int sg_common_write(struct sg_fd *sfp, struct sg_comm_wr_t *cwp);
+static int sg_common_write(struct sg_fd *sfp, struct sg_comm_wr_t *cwrp);
 static int sg_read_append(struct sg_request *srp, void __user *outp,
 			  int num_xfer);
 static void sg_remove_sgat(struct sg_fd *sfp, struct sg_scatter_hold *schp);
@@ -238,6 +241,7 @@ static void sg_link_reserve(struct sg_fd *sfp, struct sg_request *srp,
 			    int size);
 static void sg_unlink_reserve(struct sg_fd *sfp, struct sg_request *srp);
 static struct sg_fd *sg_add_sfp(struct sg_device *sdp);
 static void sg_remove_sfp(struct kref *);
+static struct sg_request *sg_find_srp_by_id(struct sg_fd *sfp, int pack_id);
 static struct sg_request *sg_setup_req(struct sg_fd *sfp);
 static void sg_deact_request(struct sg_fd *sfp, struct sg_request *srp);
 static struct sg_device *sg_get_dev(int dev);
@@ -449,7 +453,8 @@ sg_open(struct inode *inode, struct file *filp)
 	sfp = sg_add_sfp(sdp);	/* increments sdp->d_ref */
 	if (IS_ERR(sfp)) {
 		res = PTR_ERR(sfp);
-		goto out_undo; }
+		goto out_undo;
+	}
 
 	filp->private_data = sfp;
 	atomic_inc(&sdp->open_cnt);
@@ -512,7 +517,6 @@ sg_release(struct inode *inode, struct file *filp)
 static ssize_t
 sg_write(struct file *filp, const char __user *p, size_t count, loff_t *ppos)
 {
-	bool blocking = !(filp->f_flags & O_NONBLOCK);
 	int mxsize, cmd_size, input_size, res;
 	u8 opcode;
 	struct sg_device *sdp;
@@ -613,21 +617,19 @@ sg_write(struct file *filp, const char __user *p, size_t count, loff_t *ppos)
 	}
 	/*
 	 * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
-	 * but is is possible that the app intended SG_DXFER_TO_DEV, because there
-	 * is a non-zero input_size, so emit a warning.
+	 * but it is possible that the app intended SG_DXFER_TO_DEV, because
+	 * there is a non-zero input_size, so emit a warning.
 	 */
 	if (h3p->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
-		printk_ratelimited(KERN_WARNING
-				   "sg_write: data in/out %d/%d bytes "
-				   "for SCSI command 0x%x-- guessing "
-				   "data in;\n   program %s not setting "
-				   "count and/or reply_len properly\n",
-				   ohp->reply_len - (int)SZ_SG_HEADER,
-				   input_size, (unsigned int) cmnd[0],
-				   current->comm);
+		printk_ratelimited
+			(KERN_WARNING
+			 "%s: data in/out %d/%d bytes for SCSI command 0x%x-- guessing data in;\n"
+			 "   program %s not setting count and/or reply_len properly\n",
+			 __func__, ohp->reply_len - (int)SZ_SG_HEADER,
+			 input_size, (unsigned int)cmnd[0], current->comm);
 	}
 	cwr.timeout = sfp->timeout;
-	cwr.blocking = blocking;
+	cwr.blocking = !(filp->f_flags & O_NONBLOCK);
 	cwr.srp = srp;
 	cwr.cmnd = cmnd;
 	res = sg_common_write(sfp, &cwr);
@@ -655,6 +657,18 @@ sg_fetch_cmnd(struct file *filp, struct sg_fd *sfp, const u8 __user *u_cdbp,
 	return 0;
 }
 
+static inline int
+sg_chk_mmap(struct sg_fd *sfp, int rq_flags, int len)
+{
+	if (len > sfp->reserve.buflen)
+		return -ENOMEM;	/* MMAP_IO size must fit in reserve buffer */
+	if (rq_flags & SG_FLAG_DIRECT_IO)
+		return -EINVAL;	/* either MMAP_IO or DIRECT_IO (not both) */
+	if (sfp->res_in_use)
+		return -EBUSY;	/* reserve buffer already being used */
+	return 0;
+}
+
 static ssize_t
 sg_submit(struct sg_fd *sfp, struct file *filp, const char __user *buf,
 	  size_t count, bool blocking, bool read_only, bool sg_io_owned,
@@ -687,17 +701,10 @@ sg_submit(struct sg_fd *sfp, struct file *filp, const char __user *buf,
 		return -ENOSYS;
 	}
 	if (hp->flags & SG_FLAG_MMAP_IO) {
-		if (hp->dxfer_len > sfp->reserve.buflen) {
-			sg_deact_request(sfp, srp);
-			return -ENOMEM;	/* MMAP_IO size must fit in reserve buffer */
-		}
-		if (hp->flags & SG_FLAG_DIRECT_IO) {
+		res = sg_chk_mmap(sfp, hp->flags, hp->dxfer_len);
+		if (res) {
 			sg_deact_request(sfp, srp);
-			return -EINVAL;	/* either MMAP_IO or DIRECT_IO (not both) */
-		}
-		if (sfp->res_in_use) {
-			sg_deact_request(sfp, srp);
-			return -EBUSY;	/* reserve buffer already being used */
+			return res;
 		}
 	}
 	ul_timeout = msecs_to_jiffies(srp->header.timeout);
@@ -719,6 +726,12 @@ sg_submit(struct sg_fd *sfp, struct file *filp, const char __user *buf,
 	return count;
 }
 
+/*
+ * All writes and submits converge on this function to launch the SCSI
+ * command/request (via blk_execute_rq_nowait). Returns 0 on success
+ * else a negated errno value.
+ */
 static int
 sg_common_write(struct sg_fd *sfp, struct sg_comm_wr_t *cwrp)
 {
@@ -799,36 +812,58 @@ sg_rstate_chg(struct sg_request *srp, enum sg_rq_state old_st,
  * returns true (or an event like a signal (e.g. control-C) occurs).
  */
-static struct sg_request *
-sg_get_rq_mark(struct sg_fd *sfp, int pack_id)
+static inline bool
+sg_get_ready_srp(struct sg_fd *sfp, struct sg_request **srpp, int pack_id)
 {
-	struct sg_request *resp;
-	unsigned long iflags;
+	struct sg_request *srp;
 
-	spin_lock_irqsave(&sfp->rq_list_lock, iflags);
-	list_for_each_entry(resp, &sfp->rq_list, entry) {
-		/* look for requests that are ready + not SG_IO owned */
-		if (resp->done == 1 && !resp->sg_io_owned &&
-		    (-1 == pack_id || resp->header.pack_id == pack_id)) {
-			resp->done = 2;	/* guard against other readers */
-			spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
-			return resp;
-		}
+	if (unlikely(SG_IS_DETACHING(sfp->parentdp))) {
+		*srpp = NULL;
+		return true;
 	}
-	spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
-	return NULL;
+	srp = sg_find_srp_by_id(sfp, pack_id);
+	*srpp = srp;
+	return !!srp;
+}
+
+/*
+ * Returns number of bytes copied to user space provided sense buffer or
+ * negated errno value.
+ */
+static int
+sg_copy_sense(struct sg_request *srp)
+{
+	int sb_len_ret = 0;
+	struct sg_io_hdr *hp = &srp->header;
+
+	/* If need be, copy the sense buffer to the user space */
+	if ((CHECK_CONDITION & hp->masked_status) ||
+	    (DRIVER_SENSE & hp->driver_status)) {
+		int sb_len = SCSI_SENSE_BUFFERSIZE;
+		void __user *up = hp->sbp;
+
+		sb_len = min_t(int, hp->mx_sb_len, sb_len);
+		/* Additional sense length field */
+		sb_len_ret = 8 + (int)srp->sense_b[7];
+		sb_len_ret = min_t(int, sb_len_ret, sb_len);
+		if (copy_to_user(up, srp->sense_b, sb_len_ret))
+			return -EFAULT;
+		hp->sb_len_wr = sb_len_ret;
+	}
+	return sb_len_ret;
 }
 
 static int
-srp_done(struct sg_fd *sfp, struct sg_request *srp)
+sg_rec_state_v3(struct sg_fd *sfp, struct sg_request *srp)
 {
-	unsigned long flags;
-	int ret;
+	int sb_len_wr;
 
-	spin_lock_irqsave(&sfp->rq_list_lock, flags);
-	ret = srp->done;
-	spin_unlock_irqrestore(&sfp->rq_list_lock, flags);
-	return ret;
+	sb_len_wr = sg_copy_sense(srp);
+	if (sb_len_wr < 0)
+		return sb_len_wr;
+	if (unlikely(SG_IS_DETACHING(sfp->parentdp)))
+		return -ENODEV;
+	return 0;
 }
 
 #if IS_ENABLED(CONFIG_SCSI_LOGGING)
@@ -858,12 +893,11 @@ sg_rep_rq_state_fail(struct sg_fd *sfp, enum sg_rq_state exp_old_st,
 #endif
 
 static ssize_t
-sg_receive_v3(struct sg_fd *sfp, char __user *buf, size_t count,
-	      struct sg_request *srp)
+sg_receive_v3(struct sg_fd *sfp, struct sg_request *srp, size_t count,
+	      void __user *p)
 {
-	struct sg_io_hdr *hp = &srp->header;
 	int err = 0;
-	int len;
+	struct sg_io_hdr *hp = &srp->header;
 
 	if (in_compat_syscall()) {
 		if (count < sizeof(struct compat_sg_io_hdr)) {
@@ -874,27 +908,11 @@ sg_receive_v3(struct sg_fd *sfp, char __user *buf, size_t count,
 		err = -EINVAL;
 		goto err_out;
 	}
-	hp->sb_len_wr = 0;
-	if (hp->mx_sb_len > 0 && hp->sbp) {
-		if ((CHECK_CONDITION & hp->masked_status) ||
-		    (DRIVER_SENSE & hp->driver_status)) {
-			int sb_len = SCSI_SENSE_BUFFERSIZE;
-
-			sb_len = (hp->mx_sb_len > sb_len) ? sb_len :
-							    hp->mx_sb_len;
-			/* Additional sense length field */
-			len = 8 + (int)srp->sense_b[7];
-			len = (len > sb_len) ? sb_len : len;
-			if (copy_to_user(hp->sbp, srp->sense_b, len)) {
-				err = -EFAULT;
-				goto err_out;
-			}
-			hp->sb_len_wr = len;
-		}
-	}
+	SG_LOG(3, sfp, "%s: srp=0x%p\n", __func__, srp);
+	err = sg_rec_state_v3(sfp, srp);
 	if (hp->masked_status || hp->host_status || hp->driver_status)
 		hp->info |= SG_INFO_CHECK;
-	err = put_sg_io_hdr(hp, buf);
+	err = put_sg_io_hdr(hp, p);
 err_out:
 	sg_finish_scsi_blk_rq(srp);
 	sg_deact_request(sfp, srp);
@@ -975,16 +993,22 @@ sg_read_v1v2(void __user *buf, int count, struct sg_fd *sfp,
 	return res;
 }
 
+/*
+ * This is the read(2) system call entry point (see sg_fops) for this driver.
+ * Accepts v1, v2 or v3 type headers (not v4). Returns count or negated
+ * errno; if count is 0 then v3: returns -EINVAL; v1+v2: 0 when no other
+ * error detected or -EIO.
+ */
 static ssize_t
 sg_read(struct file *filp, char __user *p, size_t count, loff_t *ppos)
 {
 	bool could_be_v3;
 	bool non_block = !!(filp->f_flags & O_NONBLOCK);
-	int want_id = -1;
+	int want_id = SG_PACK_ID_WILDCARD;
 	int hlen, ret;
-	struct sg_device *sdp;
+	struct sg_device *sdp = NULL;
 	struct sg_fd *sfp;
-	struct sg_request *srp;
+	struct sg_request *srp = NULL;
 	struct sg_header *h2p = NULL;
 	struct sg_io_hdr a_sg_io_hdr;
@@ -999,7 +1023,7 @@ sg_read(struct file *filp, char __user *p, size_t count, loff_t *ppos)
 	sfp = filp->private_data;
 	sdp = sfp->parentdp;
 	SG_LOG(3, sfp, "%s: read() count=%d\n", __func__, (int)count);
-	ret = sg_allow_if_err_recovery(sdp, false);
+	ret = sg_allow_if_err_recovery(sdp, non_block);
 	if (ret)
 		return ret;
@@ -1018,17 +1042,13 @@ sg_read(struct file *filp, char __user *p, size_t count, loff_t *ppos)
 		if (h2p->reply_len < 0 && could_be_v3) {
 			struct sg_io_hdr *v3_hdr = (struct sg_io_hdr *)h2p;
 
-			if (likely(v3_hdr->interface_id == 'S')) {
+			if (v3_hdr->interface_id == 'S') {
 				struct sg_io_hdr __user *h3_up;
 
 				h3_up = (struct sg_io_hdr __user *)p;
 				ret = get_user(want_id, &h3_up->pack_id);
-				if (unlikely(ret))
+				if (ret)
 					return ret;
-			} else if (v3_hdr->interface_id == 'Q') {
-				pr_info_once("sg: %s: v4 interface%s here\n",
-					     __func__, " disallowed");
-				return -EPERM;
 			} else {
 				return -EPERM;
 			}
@@ -1036,25 +1056,25 @@ sg_read(struct file *filp, char __user *p, size_t count, loff_t *ppos)
 			want_id = h2p->pack_id;
 		}
 	}
-	srp = sg_get_rq_mark(sfp, want_id);
-	if (!srp) {		/* now wait on packet to arrive */
+	srp = sg_find_srp_by_id(sfp, want_id);
+	if (!srp) {	/* nothing available so wait on packet to arrive or */
 		if (SG_IS_DETACHING(sdp))
 			return -ENODEV;
 		if (non_block)	/* O_NONBLOCK or v3::flags & SGV4_FLAG_IMMED */
 			return -EAGAIN;
-		ret = wait_event_interruptible
-			(sfp->read_wait,
-			 (SG_IS_DETACHING(sdp) ||
-			  (srp = sg_get_rq_mark(sfp, want_id))));
+		ret = wait_event_interruptible(sfp->read_wait,
					       sg_get_ready_srp(sfp, &srp,
								want_id));
 		if (SG_IS_DETACHING(sdp))
 			return -ENODEV;
 		if (ret)	/* -ERESTARTSYS as signal hit process */
 			return ret;
+		/* otherwise srp should be valid */
 	}
 	if (srp->header.interface_id == '\0')
 		ret = sg_read_v1v2(p, (int)count, sfp, srp);
 	else
-		ret = sg_receive_v3(sfp, p, count, srp);
+		ret = sg_receive_v3(sfp, srp, count, p);
 	if (ret < 0)
 		SG_LOG(1, sfp, "%s: negated errno: %d\n", __func__, ret);
 	return ret < 0 ? ret : (int)count;
 }
@@ -1103,6 +1123,52 @@ sg_calc_sgat_param(struct sg_device *sdp)
 	sdp->max_sgat_sz = sz;
 }
 
+static u32
+sg_calc_rq_dur(const struct sg_request *srp)
+{
+	ktime_t ts0 = ns_to_ktime(srp->start_ns);
+	ktime_t now_ts;
+	s64 diff;
+
+	if (ts0 == 0)
+		return 0;
+	if (unlikely(ts0 == S64_MAX))	/* _prior_ to issuing req */
+		return 999999999;	/* eye catching */
+	now_ts = ktime_get_boottime();
+	if (unlikely(ts0 > now_ts))
+		return 999999998;
+	/* unlikely req duration will exceed 2**32 milliseconds */
+	diff = ktime_ms_delta(now_ts, ts0);
+	return (diff > (s64)U32_MAX) ? 3999999999U : (u32)diff;
+}
+
+/* Return of U32_MAX means srp is inactive */
+static u32
+sg_get_dur(struct sg_request *srp, const enum sg_rq_state *sr_stp,
+	   bool *is_durp)
+{
+	bool is_dur = false;
+	u32 res = U32_MAX;
+
+	switch (sr_stp ? *sr_stp : atomic_read(&srp->rq_st)) {
+	case SG_RS_INFLIGHT:
+	case SG_RS_BUSY:
+		res = sg_calc_rq_dur(srp);
+		break;
+	case SG_RS_AWAIT_RCV:
+	case SG_RS_RCV_DONE:
+	case SG_RS_INACTIVE:
+		res = srp->duration;
+		is_dur = true;	/* completion has occurred, timing finished */
+		break;
+	default:
+		break;
+	}
+	if (is_durp)
+		*is_durp = is_dur;
+	return res;
+}
+
 static void
 sg_fill_request_table(struct sg_fd *sfp, struct sg_req_info *rinfo)
 {
@@ -1119,6 +1185,7 @@ sg_fill_request_table(struct sg_fd *sfp, struct sg_req_info *rinfo)
 			srp->header.masked_status &
 			srp->header.host_status &
 			srp->header.driver_status;
+		rinfo[val].duration = sg_get_dur(srp, NULL, NULL); /* dummy */
 		if (srp->done)
 			rinfo[val].duration =
 				srp->header.duration;
@@ -1136,6 +1203,18 @@ sg_fill_request_table(struct sg_fd *sfp, struct sg_req_info *rinfo)
 	}
 }
 
+static int
+srp_done(struct sg_fd *sfp, struct sg_request *srp)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&sfp->rq_list_lock, flags);
+	ret = srp->done;
+	spin_unlock_irqrestore(&sfp->rq_list_lock, flags);
+	return ret;
+}
+
 /*
  * Handles ioctl(SG_IO) for blocking (sync) usage of v3 or v4 interface.
  * Returns 0 on success else a negated errno.
  */
@@ -1163,7 +1242,7 @@ sg_ctl_sg_io(struct file *filp, struct sg_device *sdp, struct sg_fd *sfp,
 	if (srp->done) {
 		srp->done = 2;
 		spin_unlock_irq(&sfp->rq_list_lock);
-		res = sg_receive_v3(sfp, p, SZ_SG_IO_HDR, srp);
+		res = sg_receive_v3(sfp, srp, SZ_SG_IO_HDR, p);
 		return (res < 0) ? res : 0;
 	}
 	srp->orphan = 1;
@@ -1391,7 +1470,9 @@ sg_ioctl_common(struct file *filp, struct sg_device *sdp, struct sg_fd *sfp,
 			return result;
 		if (val > SG_MAX_CDB_SIZE)
 			return -ENOMEM;
-		sfp->next_cmd_len = (val > 0) ? val : 0;
+		mutex_lock(&sfp->f_mutex);
+		sfp->next_cmd_len = max_t(int, val, 0);
+		mutex_unlock(&sfp->f_mutex);
 		return 0;
 	case SG_GET_ACCESS_COUNT:
 		SG_LOG(3, sfp, "%s: SG_GET_ACCESS_COUNT\n", __func__);
@@ -2351,6 +2432,33 @@ sg_read_append(struct sg_request *srp, void __user *outp, int num_xfer)
 	return res;
 }
 
+/*
+ * If there are multiple requests outstanding, the speed of this function is
+ * important. SG_PACK_ID_WILDCARD is -1 and that case is typically
+ * the fast path. This function is only used in the non-blocking cases.
+ * Returns pointer to (first) matching sg_request or NULL. If found,
+ * sg_request state is moved from SG_RS_AWAIT_RCV to SG_RS_BUSY.
+ */
+static struct sg_request *
+sg_find_srp_by_id(struct sg_fd *sfp, int pack_id)
+{
+	unsigned long iflags;
+	struct sg_request *resp;
+
+	spin_lock_irqsave(&sfp->rq_list_lock, iflags);
+	list_for_each_entry(resp, &sfp->rq_list, entry) {
+		/* look for requests that are ready + not SG_IO owned */
+		if (resp->done == 1 && !resp->sg_io_owned &&
+		    (-1 == pack_id || resp->header.pack_id == pack_id)) {
+			resp->done = 2;	/* guard against other readers */
+			spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+			return resp;
+		}
+	}
+	spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+	return NULL;
+}
+
 static void
 sg_link_reserve(struct sg_fd *sfp, struct sg_request *srp, int size)
 {
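
For readers unfamiliar with the pack_id matching that sg_find_srp_by_id()
serves, the following user-space sketch (not part of the patch; the device
node /dev/sg0, the INQUIRY command and the buffer sizes are illustrative
assumptions) queues two commands with distinct pack_id tags through the v3
sg_io_hdr write() interface, then reads them back out of order. The first
read() exercises the exact-match path (want_id == 2); the second uses the
-1 wildcard (SG_PACK_ID_WILDCARD) fast path.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = {0x12, 0, 0, 0, 96, 0};	/* INQUIRY */
	unsigned char inq[96], sense[32];
	struct sg_io_hdr hdr;
	int k, one = 1;
	int fd = open("/dev/sg0", O_RDWR);	/* device node is an assumption */

	if (fd < 0)
		return 1;
	/* make read() match on the pack_id given in the passed header */
	if (ioctl(fd, SG_SET_FORCE_PACK_ID, &one) < 0)
		return 1;
	for (k = 1; k <= 2; ++k) {	/* submit two tagged requests */
		memset(&hdr, 0, sizeof(hdr));
		hdr.interface_id = 'S';	/* v3 interface */
		hdr.dxfer_direction = SG_DXFER_FROM_DEV;
		hdr.cmd_len = sizeof(cdb);
		hdr.cmdp = cdb;
		hdr.dxfer_len = sizeof(inq);
		hdr.dxferp = inq;
		hdr.mx_sb_len = sizeof(sense);
		hdr.sbp = sense;
		hdr.timeout = 20000;	/* milliseconds */
		hdr.pack_id = k;	/* tag for later matching */
		if (write(fd, &hdr, sizeof(hdr)) < 0)
			return 1;
	}
	hdr.pack_id = 2;	/* fetch the second submission first */
	if (read(fd, &hdr, sizeof(hdr)) < 0)
		return 1;
	hdr.pack_id = -1;	/* wildcard: any ready request */
	if (read(fd, &hdr, sizeof(hdr)) < 0)
		return 1;
	close(fd);
	return 0;
}

In the v3 path shown in the patch, sg_read() fetches want_id from the
header's pack_id field and hands it to sg_find_srp_by_id(); the list walk
then returns the first ready, non-SG_IO-owned request whose
header.pack_id matches, or any ready request when the wildcard is given.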