From patchwork Thu Nov 11 09:05:35 2010
X-Patchwork-Submitter: "Chuanxiao.Dong"
X-Patchwork-Id: 316892
Date: Thu, 11 Nov 2010 17:05:35 +0800
From: "Chuanxiao.Dong"
To: cjb@laptop.org
Cc: adrian.hunter@nokia.com, linux-mmc@vger.kernel.org
Subject: [PATCH v1 2/3] set a suitable max_discard_sectors value for mmc queue
Message-ID: <20101111090535.GC18170@intel.com>

diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 4e42d03..f665c62 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -131,7 +131,13 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
 	if (mmc_can_erase(card)) {
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
-		mq->queue->limits.max_discard_sectors = UINT_MAX;
+		/* Get a suitable max_discard_sectors limit */
+		ret = mmc_set_discard_limit(card);
+		if (ret > 0)
+			mq->queue->limits.max_discard_sectors = ret;
+		else
+			mq->queue->limits.max_discard_sectors = UINT_MAX;
+
 		if (card->erased_byte == 0)
 			mq->queue->limits.discard_zeroes_data = 1;
 		if (!mmc_can_trim(card) && is_power_of_2(card->erase_size)) {
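For illustration (not part of the patch): max_discard_sectors is the cap
the block layer applies when splitting discard requests, so the value set
here bounds how much work a single discard command can hand to the card.
A minimal user-space sketch that exercises this path follows; the device
path and discard size are examples only:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* BLKDISCARD */

int main(void)
{
	/* { start offset, length }, both in bytes */
	uint64_t range[2] = { 0, 64ULL * 1024 * 1024 };
	int fd = open("/dev/mmcblk0", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The block layer splits this into requests of at most
	 * max_discard_sectors before they reach the MMC driver. */
	if (ioctl(fd, BLKDISCARD, &range) < 0)
		perror("BLKDISCARD");
	close(fd);
	return 0;
}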
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index d48bb26..46b66f8 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1160,6 +1160,41 @@ void mmc_init_erase(struct mmc_card *card)
 	}
 }
 
+static unsigned int mmc_calc_mmc_erase_timeout(struct mmc_card *card)
+{
+	unsigned int erase_timeout = 0;
+
+	/* CSD Erase Group Size uses write timeout */
+	unsigned int mult = (10 << card->csd.r2w_factor);
+	unsigned int timeout_clks = card->csd.tacc_clks * mult;
+	unsigned int timeout_us;
+
+	/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
+	if (card->csd.tacc_ns < 1000000)
+		timeout_us = (card->csd.tacc_ns * mult) / 1000;
+	else
+		timeout_us = (card->csd.tacc_ns / 1000) * mult;
+
+	/*
+	 * ios.clock is only a target.  The real clock rate might be
+	 * less but not that much less, so fudge it by multiplying by 2.
+	 */
+	timeout_clks <<= 1;
+	timeout_us += (timeout_clks * 1000) /
+		      (card->host->ios.clock / 1000);
+
+	erase_timeout = timeout_us / 1000;
+
+	/*
+	 * Theoretically, the calculation could underflow so round up
+	 * to 1ms in that case.
+	 */
+	if (!erase_timeout)
+		erase_timeout = 1;
+
+	return erase_timeout;
+}
+
 static void mmc_set_mmc_erase_timeout(struct mmc_card *card,
 				      struct mmc_command *cmd,
 				      unsigned int arg, unsigned int qty)
@@ -1172,35 +1207,8 @@ static void mmc_set_mmc_erase_timeout(struct mmc_card *card,
 			erase_timeout = card->ext_csd.trim_timeout;
 		else
 			erase_timeout = card->ext_csd.hc_erase_timeout;
-	} else {
-		/* CSD Erase Group Size uses write timeout */
-		unsigned int mult = (10 << card->csd.r2w_factor);
-		unsigned int timeout_clks = card->csd.tacc_clks * mult;
-		unsigned int timeout_us;
-
-		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
-		if (card->csd.tacc_ns < 1000000)
-			timeout_us = (card->csd.tacc_ns * mult) / 1000;
-		else
-			timeout_us = (card->csd.tacc_ns / 1000) * mult;
-
-		/*
-		 * ios.clock is only a target.  The real clock rate might be
-		 * less but not that much less, so fudge it by multiplying by 2.
-		 */
-		timeout_clks <<= 1;
-		timeout_us += (timeout_clks * 1000) /
-			      (card->host->ios.clock / 1000);
-
-		erase_timeout = timeout_us / 1000;
-
-		/*
-		 * Theoretically, the calculation could underflow so round up
-		 * to 1ms in that case.
-		 */
-		if (!erase_timeout)
-			erase_timeout = 1;
-	}
+	} else
+		erase_timeout = mmc_calc_mmc_erase_timeout(card);
 
 	/* Multiplier for secure operations */
 	if (arg & MMC_SECURE_ARGS) {
@@ -1458,6 +1466,85 @@ int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
 }
 EXPORT_SYMBOL(mmc_erase_group_aligned);
 
+/*
+ * mmc_set_discard_limit - set max_discard_sectors according to
+ * the host controller's timeout capability.
+ */
+int mmc_set_discard_limit(struct mmc_card *card)
+{
+	struct mmc_host *host = card->host;
+	unsigned int max, nr = 0;
+	unsigned int host_timeout;
+
+	/* Get the host controller timeout clock to calculate
+	 * a suitable erase timeout value.
+	 */
+	if (host->ops && host->ops->get_tmclk)
+		host_timeout = host->ops->get_tmclk(host);
+	else
+		return nr;
+	/* Maximum timeout, in ms */
+	max = (1 << 27) / host_timeout;
+	if (max == 0)
+		goto out;
+
+	if (mmc_card_sd(card)) {
+		/* Time in ms */
+		max -= card->ssr.erase_offset;
+		if (card->ssr.erase_timeout && max > 0) {
+			nr = max / card->ssr.erase_timeout;
+			if (card->erase_shift)
+				nr <<= card->erase_shift;
+		}
+	}
+
+	if (mmc_card_mmc(card)) {
+		unsigned int erase_timeout, mult;
+		if (card->ext_csd.erase_group_def & 1) {
+			/* High Capacity erase timeouts are in use,
+			 * so choose the larger of the two values.
+			 */
+			erase_timeout = (card->ext_csd.trim_timeout >
+					card->ext_csd.hc_erase_timeout) ?
+					card->ext_csd.trim_timeout :
+					card->ext_csd.hc_erase_timeout;
+		} else
+			erase_timeout = mmc_calc_mmc_erase_timeout(card);
+
+		/* If the driver sends a secure trim/erase command,
+		 * the timeout value must be larger.
+		 */
+		mult = (card->ext_csd.sec_trim_mult >
+			card->ext_csd.sec_erase_mult) ?
+			card->ext_csd.sec_trim_mult :
+			card->ext_csd.sec_erase_mult;
+		erase_timeout *= mult;
+		if (erase_timeout) {
+			nr = max / erase_timeout;
+			if (card->erase_shift)
+				nr <<= card->erase_shift;
+			else
+				nr *= card->erase_size;
+		}
+	}
+out:
+	if (nr == 0) {
+		/* Fall back to a small limit, one erase block, so
+		 * that the host controller won't generate a timeout
+		 * interrupt while waiting. Note that this makes
+		 * TRIM/ERASE performance much lower.
+		 */
+		nr = 1;
+		if (card->erase_shift)
+			nr <<= card->erase_shift;
+		else
+			nr *= card->erase_size;
+	}
+	return nr;
+}
+EXPORT_SYMBOL(mmc_set_discard_limit);
+
 int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
 {
 	struct mmc_command cmd;
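A back-of-the-envelope check of the arithmetic above, as a standalone
program. The numbers are made up, and it assumes get_tmclk() reports the
timeout clock in kHz, which is what makes (1 << 27) / host_timeout come
out in milliseconds (2^27 is the largest cycle count a typical SDHCI data
timeout counter can wait for):

#include <stdio.h>

int main(void)
{
	unsigned int tmclk_khz = 48000;		/* hypothetical 48 MHz timeout clock */
	unsigned int max_ms = (1U << 27) / tmclk_khz;
	unsigned int erase_timeout_ms = 300;	/* assumed per-erase-group timeout */
	unsigned int erase_size = 1024;		/* sectors per erase group (512 KiB) */
	unsigned int nr = (max_ms / erase_timeout_ms) * erase_size;

	/* ~2796 ms of countable timeout -> 9 erase groups per discard */
	printf("max timeout %u ms -> max_discard_sectors = %u (%u KiB)\n",
	       max_ms, nr, nr / 2);
	return 0;
}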
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 64e013f..ffddd1f 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -152,6 +152,7 @@ extern int mmc_can_trim(struct mmc_card *card);
 extern int mmc_can_secure_erase_trim(struct mmc_card *card);
 extern int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
 				   unsigned int nr);
+extern int mmc_set_discard_limit(struct mmc_card *card);
 
 extern int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen);
 
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index f108cee..63f1c9e 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -117,6 +117,9 @@ struct mmc_host_ops {
 
 	/* optional callback for HC quirks */
 	void	(*init_card)(struct mmc_host *host, struct mmc_card *card);
+
+	/* Get the host controller timeout clock */
+	unsigned int	(*get_tmclk)(struct mmc_host *host);
 };
 
 struct mmc_card;
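A host driver opts in by implementing the new callback. A hypothetical
sketch follows (the driver names and the clock rate are illustrative,
and the other, mandatory mmc_host_ops members are omitted); returning
the rate in kHz matches the millisecond-based math in
mmc_set_discard_limit():

#include <linux/mmc/host.h>

/* Report the controller's timeout clock so the core can size discards */
static unsigned int my_host_get_tmclk(struct mmc_host *mmc)
{
	return 48000;	/* e.g. a 48 MHz timeout clock, in kHz */
}

static const struct mmc_host_ops my_host_ops = {
	/* .request, .set_ios, ... omitted for brevity */
	.get_tmclk	= my_host_get_tmclk,
};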