From patchwork Wed May  1 21:36:15 2013
X-Patchwork-Submitter: Alex Elder
X-Patchwork-Id: 2508961
Message-ID: <51818ACF.5010202@inktank.com>
Date: Wed, 01 May 2013 16:36:15 -0500
From: Alex Elder
To: ceph-devel@vger.kernel.org
Subject: [PATCH 4/4] rbd: allocate image object names with a slab allocator
References: <51818A5C.1080903@inktank.com>
In-Reply-To: <51818A5C.1080903@inktank.com>
X-Mailing-List: ceph-devel@vger.kernel.org

The names of objects used for image object requests are always
fixed size.  So create a slab cache to manage them.  Define a new
function rbd_segment_name_free() to match rbd_segment_name()
(which is what supplies the dynamically-allocated name buffer).
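(As an aside, not part of the patch itself: the pattern being adopted
is the standard kmem_cache API for objects of a single fixed size.  A
minimal sketch of the same create/alloc/free cycle, using made-up
names and a made-up size, would look like:

	#include <linux/slab.h>

	#define EXAMPLE_NAME_SIZE	64	/* hypothetical size */

	static struct kmem_cache *example_name_cache;

	/* Create a cache whose objects are exactly EXAMPLE_NAME_SIZE + 1
	 * bytes, byte-aligned, with no special flags and no constructor. */
	static int example_name_cache_init(void)
	{
		example_name_cache = kmem_cache_create("example_name",
					EXAMPLE_NAME_SIZE + 1, 1, 0, NULL);
		return example_name_cache ? 0 : -ENOMEM;
	}

	static char *example_name_alloc(void)
	{
		/* GFP_NOIO because this runs on the block I/O path */
		return kmem_cache_alloc(example_name_cache, GFP_NOIO);
	}

	static void example_name_free(const char *name)
	{
		/* cast away const, as rbd_segment_name_free() does below */
		kmem_cache_free(example_name_cache, (void *)name);
	}

Every object handed out by the cache has the same size, which is what
makes a slab cache a better fit than kmalloc() here.)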
This is part of:
    http://tracker.ceph.com/issues/3926

Signed-off-by: Alex Elder
---
 drivers/block/rbd.c | 34 ++++++++++++++++++++++++++++++----
 1 file changed, 30 insertions(+), 4 deletions(-)

diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 28a5ea3..8d9aeef 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -345,8 +345,11 @@ static DEFINE_SPINLOCK(rbd_dev_list_lock);
 static LIST_HEAD(rbd_client_list);	/* clients */
 static DEFINE_SPINLOCK(rbd_client_list_lock);
 
+/* Slab caches for frequently-allocated structures */
+
 static struct kmem_cache	*rbd_img_request_cache;
 static struct kmem_cache	*rbd_obj_request_cache;
+static struct kmem_cache	*rbd_segment_name_cache;
 
 static int rbd_img_request_submit(struct rbd_img_request *img_request);
 
@@ -985,7 +988,7 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
 	u64 segment;
 	int ret;
 
-	name = kmalloc(MAX_OBJ_NAME_SIZE + 1, GFP_NOIO);
+	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
 	if (!name)
 		return NULL;
 	segment = offset >> rbd_dev->header.obj_order;
@@ -1001,6 +1004,13 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
 	return name;
 }
 
+static void rbd_segment_name_free(const char *name)
+{
+	/* The explicit cast here is needed to drop the const qualifier */
+
+	kmem_cache_free(rbd_segment_name_cache, (void *)name);
+}
+
 static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
 {
 	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
@@ -2033,7 +2043,8 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
 		length = rbd_segment_length(rbd_dev, img_offset, resid);
 		obj_request = rbd_obj_request_create(object_name,
 						offset, length, type);
-		kfree(object_name);	/* object request has its own copy */
+		/* object request has its own copy of the object name */
+		rbd_segment_name_free(object_name);
 		if (!obj_request)
 			goto out_unwind;
 
@@ -5018,17 +5029,32 @@ static int rbd_slab_init(void)
 					sizeof (struct rbd_obj_request),
 					__alignof__(struct rbd_obj_request),
 					0, NULL);
-	if (rbd_obj_request_cache)
-		return 0;
+	if (!rbd_obj_request_cache)
+		goto out_err;
+
+	rbd_assert(!rbd_segment_name_cache);
+	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
+					MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL);
+	if (rbd_segment_name_cache)
+		return 0;
+out_err:
+	if (rbd_obj_request_cache) {
+		kmem_cache_destroy(rbd_obj_request_cache);
+		rbd_obj_request_cache = NULL;
+	}
 
 	kmem_cache_destroy(rbd_img_request_cache);
 	rbd_img_request_cache = NULL;
 
 	return -ENOMEM;
 }
 
 static void rbd_slab_exit(void)
 {
+	rbd_assert(rbd_segment_name_cache);
+	kmem_cache_destroy(rbd_segment_name_cache);
+	rbd_segment_name_cache = NULL;
+
+	rbd_assert(rbd_obj_request_cache);
 	kmem_cache_destroy(rbd_obj_request_cache);
 	rbd_obj_request_cache = NULL;
 
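(For reference, the error handling in rbd_slab_init() above follows
the usual kernel unwind idiom: each cache that was created
successfully is destroyed, in reverse order, when a later creation
fails, and rbd_slab_exit() tears the caches down newest-first.  A
stripped-down sketch of that shape, with hypothetical cache names and
sizes:

	#include <linux/slab.h>

	static struct kmem_cache *cache_a;	/* hypothetical */
	static struct kmem_cache *cache_b;	/* hypothetical */

	static int example_slab_init(void)
	{
		cache_a = kmem_cache_create("cache_a", 64, 8, 0, NULL);
		if (!cache_a)
			return -ENOMEM;

		cache_b = kmem_cache_create("cache_b", 128, 8, 0, NULL);
		if (cache_b)
			return 0;

		/* cache_b failed; unwind cache_a before reporting the error */
		kmem_cache_destroy(cache_a);
		cache_a = NULL;

		return -ENOMEM;
	}

	/* assumes example_slab_init() succeeded */
	static void example_slab_exit(void)
	{
		/* destroy in reverse order of creation */
		kmem_cache_destroy(cache_b);
		cache_b = NULL;

		kmem_cache_destroy(cache_a);
		cache_a = NULL;
	}

Destroying in reverse creation order mirrors how rbd_slab_exit()
handles the three rbd caches.)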