From patchwork Tue Oct 12 11:31:17 2010
X-Patchwork-Submitter: Dan Carpenter
X-Patchwork-Id: 247451
Date: Tue, 12 Oct 2010 13:31:17 +0200
From: Dan Carpenter
To: Jason Gunthorpe
Cc: Roland Dreier, Sean Hefty, Hal Rosenstock, linux-rdma@vger.kernel.org,
	kernel-janitors@vger.kernel.org
Subject: [patch v3] infiniband: uverbs: handle large number of entries
Message-ID: <20101012113117.GB6742@bicker>
References: <20101007071610.GC11681@bicker>
	<20101007161649.GD21206@obsidianresearch.com>
	<20101007165947.GD11681@bicker>
	<20101009231607.GA24649@obsidianresearch.com>
In-Reply-To: <20101009231607.GA24649@obsidianresearch.com>
User-Agent: Mutt/1.5.18 (2008-05-17)
X-Mailing-List: linux-rdma@vger.kernel.org

diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 6fcfbeb..b0788b6 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -891,68 +891,90 @@ out:
 	return ret ? ret : in_len;
 }
 
+static int copy_header_to_user(void __user *dest, u32 count)
+{
+	u32 header[2]; /* the second u32 is reserved */
+
+	memset(header, 0, sizeof(header));
+	header[0] = count;
+	if (copy_to_user(dest, header, sizeof(header)))
+		return -EFAULT;
+	return 0;
+}
+
+static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
+{
+	struct ib_uverbs_wc tmp;
+
+	memset(&tmp, 0, sizeof(tmp));
+
+	tmp.wr_id		= wc->wr_id;
+	tmp.status		= wc->status;
+	tmp.opcode		= wc->opcode;
+	tmp.vendor_err		= wc->vendor_err;
+	tmp.byte_len		= wc->byte_len;
+	tmp.ex.imm_data		= (__u32 __force) wc->ex.imm_data;
+	tmp.qp_num		= wc->qp->qp_num;
+	tmp.src_qp		= wc->src_qp;
+	tmp.wc_flags		= wc->wc_flags;
+	tmp.pkey_index		= wc->pkey_index;
+	tmp.slid		= wc->slid;
+	tmp.sl			= wc->sl;
+	tmp.dlid_path_bits	= wc->dlid_path_bits;
+	tmp.port_num		= wc->port_num;
+
+	if (copy_to_user(dest, &tmp, sizeof(tmp)))
+		return -EFAULT;
+	return 0;
+}
+
 ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
 			  const char __user *buf,
 			  int in_len, int out_len)
 {
 	struct ib_uverbs_poll_cq       cmd;
-	struct ib_uverbs_poll_cq_resp *resp;
+	u8 __user                     *header_ptr;
+	u8 __user                     *data_ptr;
 	struct ib_cq                  *cq;
-	struct ib_wc                  *wc;
-	int                            ret = 0;
+	struct ib_wc                   wc;
+	u32                            count = 0;
+	int                            ret;
 	int                            i;
-	int                            rsize;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
-	wc = kmalloc(cmd.ne * sizeof *wc, GFP_KERNEL);
-	if (!wc)
-		return -ENOMEM;
-
-	rsize = sizeof *resp + cmd.ne * sizeof(struct ib_uverbs_wc);
-	resp = kmalloc(rsize, GFP_KERNEL);
-	if (!resp) {
-		ret = -ENOMEM;
-		goto out_wc;
-	}
-
 	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
-	if (!cq) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (!cq)
+		return -EINVAL;
 
-	resp->count = ib_poll_cq(cq, cmd.ne, wc);
+	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
+	header_ptr = (void __user *)(unsigned long)cmd.response;
+	data_ptr = header_ptr + sizeof(u32) * 2;
 
-	put_cq_read(cq);
+	for (i = 0; i < cmd.ne; i++) {
+		ret = ib_poll_cq(cq, 1, &wc);
+		if (ret < 0)
+			goto out_put;
+		if (!ret)
+			break;
 
-	for (i = 0; i < resp->count; i++) {
-		resp->wc[i].wr_id          = wc[i].wr_id;
-		resp->wc[i].status         = wc[i].status;
-		resp->wc[i].opcode         = wc[i].opcode;
-		resp->wc[i].vendor_err     = wc[i].vendor_err;
-		resp->wc[i].byte_len       = wc[i].byte_len;
-		resp->wc[i].ex.imm_data    = (__u32 __force) wc[i].ex.imm_data;
-		resp->wc[i].qp_num         = wc[i].qp->qp_num;
-		resp->wc[i].src_qp         = wc[i].src_qp;
-		resp->wc[i].wc_flags       = wc[i].wc_flags;
-		resp->wc[i].pkey_index     = wc[i].pkey_index;
-		resp->wc[i].slid           = wc[i].slid;
-		resp->wc[i].sl             = wc[i].sl;
-		resp->wc[i].dlid_path_bits = wc[i].dlid_path_bits;
-		resp->wc[i].port_num       = wc[i].port_num;
+		ret = copy_wc_to_user(data_ptr, &wc);
+		if (ret)
+			goto out_put;
+		data_ptr += sizeof(struct ib_uverbs_wc);
+		count++;
 	}
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response, resp, rsize))
-		ret = -EFAULT;
+	ret = copy_header_to_user(header_ptr, count);
+	if (ret)
+		goto out_put;
 
-out:
-	kfree(resp);
+	ret = in_len;
 
-out_wc:
-	kfree(wc);
-	return ret ? ret : in_len;
+out_put:
+	put_cq_read(cq);
+	return ret;
 }
 
 ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
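For context, the user-visible layout is unchanged by this approach: the response
buffer still starts with a struct ib_uverbs_poll_cq_resp header (a u32 count plus
a reserved u32), followed directly by count work completions, each
sizeof(struct ib_uverbs_wc) bytes apart; the patch just fills that buffer one
entry at a time instead of building the whole thing in a kmalloc'ed copy. The
snippet below is a rough user-space sketch of walking such a buffer;
UVERBS_WC_SIZE, the struct names and dump_poll_cq_resp() are illustrative
stand-ins, not definitions from <rdma/ib_user_verbs.h> or libibverbs.

/*
 * Sketch only: decode a poll-CQ response buffer laid out as an 8-byte
 * header (u32 count + u32 reserved) followed by `count` work completion
 * entries.  Real code should take the entry stride and field offsets from
 * the kernel ABI header rather than the local mirrors used here.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed stride; in real code use sizeof(struct ib_uverbs_wc). */
#define UVERBS_WC_SIZE 48

struct poll_cq_resp_header {
	uint32_t count;
	uint32_t reserved;
};

/* Only the leading fields of each work completion are decoded here. */
struct wc_prefix {
	uint64_t wr_id;
	uint32_t status;
	uint32_t opcode;
};

static void dump_poll_cq_resp(const void *resp_buf)
{
	struct poll_cq_resp_header hdr;
	const uint8_t *entry = (const uint8_t *)resp_buf + sizeof(hdr);
	uint32_t i;

	memcpy(&hdr, resp_buf, sizeof(hdr));

	for (i = 0; i < hdr.count; i++, entry += UVERBS_WC_SIZE) {
		struct wc_prefix wc;

		memcpy(&wc, entry, sizeof(wc));
		printf("wc[%u]: wr_id=%llu status=%u opcode=%u\n", i,
		       (unsigned long long)wc.wr_id, wc.status, wc.opcode);
	}
}

Since only the header and the entries that were actually polled are copied, a
caller that passes a huge cmd.ne no longer forces a matching kernel allocation;
it just gets back however many completions were available.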