From patchwork Tue Jan 19 08:08:06 2016
X-Patchwork-Submitter: "Yan, Zheng" <zyan@redhat.com>
X-Patchwork-Id: 8058801
From: "Yan, Zheng" <zyan@redhat.com>
To: ceph-devel@vger.kernel.org
Cc: idryomov@gmail.com, "Yan, Zheng" <zyan@redhat.com>
Subject: [PATCH V2 1/6] libceph: enlarge max number of operations in OSD request
Date: Tue, 19 Jan 2016 16:08:06 +0800
Message-Id: <1453190891-40937-2-git-send-email-zyan@redhat.com>
In-Reply-To: <1453190891-40937-1-git-send-email-zyan@redhat.com>
References: <1453190891-40937-1-git-send-email-zyan@redhat.com>

Each operation requires a 'ceph_osd_req_op' structure. To avoid
increasing the memory usage of 'struct ceph_osd_request' in ordinary
cases, we dynamically allocate the 'ceph_osd_req_op' structures only
when the number of operations in an OSD request is larger than 3.
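
The pattern is the familiar inline-array-with-heap-fallback idiom: a
small fixed array ('r_inline_ops') embedded in the request covers the
common case, and a separate heap allocation is used only for larger op
counts. A minimal userspace sketch of the idiom (all names here are
hypothetical; the patch itself uses kzalloc()/kfree() and the real
'struct ceph_osd_req_op'):

#include <stdlib.h>

#define INLINE_OPS 3	/* plays the role of CEPH_OSD_INITIAL_OP */
#define MAX_OPS    16	/* plays the role of CEPH_OSD_MAX_OP */

struct op {
	int opcode;	/* stand-in for struct ceph_osd_req_op */
};

struct request {
	unsigned int num_ops;
	struct op *ops;				/* inline_ops or heap */
	struct op inline_ops[INLINE_OPS];	/* the common case */
};

static struct request *request_alloc(unsigned int num_ops)
{
	struct request *req;

	if (num_ops > MAX_OPS)
		return NULL;
	req = calloc(1, sizeof(*req));
	if (!req)
		return NULL;
	req->num_ops = num_ops;
	if (num_ops <= INLINE_OPS) {
		req->ops = req->inline_ops;	/* no extra allocation */
	} else {
		req->ops = calloc(num_ops, sizeof(*req->ops));
		if (!req->ops) {
			free(req);
			return NULL;
		}
	}
	return req;
}

static void request_free(struct request *req)
{
	if (req->ops != req->inline_ops)	/* heap fallback only */
		free(req->ops);
	free(req);
}

The release path has to compare the pointer against the inline array so
that only the heap-allocated case is freed, which is exactly what the
ceph_osdc_release_request() hunk below does.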
Signed-off-by: Yan, Zheng <zyan@redhat.com>
---
 include/linux/ceph/osd_client.h |  6 ++++--
 net/ceph/osd_client.c           | 46 +++++++++++++++++++++++++++++------------
 2 files changed, 37 insertions(+), 15 deletions(-)

diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 7506b48..5bf428a 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -43,7 +43,8 @@ struct ceph_osd {
 };
 
-#define CEPH_OSD_MAX_OP	3
+#define CEPH_OSD_MAX_OP	16
+#define CEPH_OSD_INITIAL_OP	3
 
 enum ceph_osd_data_type {
 	CEPH_OSD_DATA_TYPE_NONE = 0,
@@ -136,7 +137,8 @@ struct ceph_osd_request {
 
 	/* request osd ops array  */
 	unsigned int		r_num_ops;
-	struct ceph_osd_req_op	r_ops[CEPH_OSD_MAX_OP];
+	struct ceph_osd_req_op	*r_ops;
+	struct ceph_osd_req_op	r_inline_ops[CEPH_OSD_INITIAL_OP];
 
 	/* these are updated on each send */
 	__le32           *r_request_osdmap_epoch;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index f8f2359..b93752e 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -335,12 +335,14 @@ static void ceph_osdc_release_request(struct kref *kref)
 	for (which = 0; which < req->r_num_ops; which++)
 		osd_req_op_data_release(req, which);
 
+	if (req->r_ops != req->r_inline_ops)
+		kfree(req->r_ops);
+
 	ceph_put_snap_context(req->r_snapc);
 	if (req->r_mempool)
 		mempool_free(req, req->r_osdc->req_mempool);
 	else
 		kmem_cache_free(ceph_osd_request_cache, req);
-
 }
 
 void ceph_osdc_get_request(struct ceph_osd_request *req)
@@ -372,16 +374,6 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
 	BUILD_BUG_ON(CEPH_OSD_MAX_OP > U16_MAX);
 	BUG_ON(num_ops > CEPH_OSD_MAX_OP);
 
-	msg_size = 4 + 4 + 8 + 8 + 4+8;
-	msg_size += 2 + 4 + 8 + 4 + 4;    /* oloc */
-	msg_size += 1 + 8 + 4 + 4;        /* pg_t */
-	msg_size += 4 + CEPH_MAX_OID_NAME_LEN;   /* oid */
-	msg_size += 2 + num_ops*sizeof(struct ceph_osd_op);
-	msg_size += 8;  /* snapid */
-	msg_size += 8;  /* snap_seq */
-	msg_size += 8 * (snapc ? snapc->num_snaps : 0);  /* snaps */
-	msg_size += 4;
-
 	if (use_mempool) {
 		req = mempool_alloc(osdc->req_mempool, gfp_flags);
 		memset(req, 0, sizeof(*req));
@@ -395,6 +387,17 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
 	req->r_mempool = use_mempool;
 	req->r_num_ops = num_ops;
 
+	if (num_ops <= CEPH_OSD_INITIAL_OP) {
+		req->r_ops = req->r_inline_ops;
+	} else {
+		BUG_ON(use_mempool);
+		req->r_ops = kzalloc(sizeof(*req->r_ops) * num_ops, gfp_flags);
+		if (!req->r_ops) {
+			ceph_osdc_put_request(req);
+			return NULL;
+		}
+	}
+
 	kref_init(&req->r_kref);
 	init_completion(&req->r_completion);
 	init_completion(&req->r_safe_completion);
@@ -409,11 +412,18 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
 	req->r_target_oloc.pool = -1;
 
 	/* create reply message */
+	msg_size = OSD_OPREPLY_FRONT_LEN;
+	if (num_ops > CEPH_OSD_INITIAL_OP) {
+		/* ceph_osd_op and op_result */
+		msg_size += (num_ops - CEPH_OSD_INITIAL_OP) *
+			    (sizeof(struct ceph_osd_op) + 4);
+	}
+
 	if (use_mempool)
 		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
 	else
-		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
-				   OSD_OPREPLY_FRONT_LEN, gfp_flags, true);
+		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size,
+				   gfp_flags, true);
 	if (!msg) {
 		ceph_osdc_put_request(req);
 		return NULL;
@@ -421,6 +431,16 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
 	req->r_reply = msg;
 
 	/* create request message; allow space for oid */
+	msg_size = 4 + 4 + 8 + 8 + 4 + 8;
+	msg_size += 2 + 4 + 8 + 4 + 4;    /* oloc */
+	msg_size += 1 + 8 + 4 + 4;        /* pg_t */
+	msg_size += 4 + CEPH_MAX_OID_NAME_LEN;   /* oid */
+	msg_size += 2 + num_ops * sizeof(struct ceph_osd_op);
+	msg_size += 8;  /* snapid */
+	msg_size += 8;  /* snap_seq */
+	msg_size += 8 * (snapc ? snapc->num_snaps : 0);  /* snaps */
+	msg_size += 4;
+
 	if (use_mempool)
 		msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
 	else
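
One consequence worth noting: requests with more than
CEPH_OSD_INITIAL_OP ops cannot be mempool-backed (hence the
BUG_ON(use_mempool) in ceph_osdc_alloc_request() above). The mempool
exists to guarantee forward progress under memory pressure, and that
guarantee would be lost if completing the allocation also required a
second, failable kzalloc(). A hypothetical caller-side sketch of how
this constraint might be honored (alloc_big_request() is illustrative
and not part of the patch):

static struct ceph_osd_request *
alloc_big_request(struct ceph_osd_client *osdc,
		  struct ceph_snap_context *snapc,
		  unsigned int num_ops)
{
	/* mempool-backed requests are limited to the inline op array */
	bool use_mempool = (num_ops <= CEPH_OSD_INITIAL_OP);

	return ceph_osdc_alloc_request(osdc, snapc, num_ops,
				       use_mempool, GFP_NOFS);
}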