Message ID | 1422522030-17793-12-git-send-email-adrian.hunter@intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
On 29 January 2015 at 10:00, Adrian Hunter <adrian.hunter@intel.com> wrote: > Make use of mmc core support for re-tuning instead > of doing it all in the sdhci driver. > > This patch also changes to flag the need for re-tuning > always after runtime suspend when tuning has been used > at initialization. Previously it was only done if > the re-tuning timer was in use. > > Signed-off-by: Adrian Hunter <adrian.hunter@intel.com> > --- > drivers/mmc/host/sdhci.c | 113 ++++++---------------------------------------- > include/linux/mmc/sdhci.h | 3 -- > 2 files changed, 14 insertions(+), 102 deletions(-) > > diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c > index c9881ca..dd0be76 100644 > --- a/drivers/mmc/host/sdhci.c > +++ b/drivers/mmc/host/sdhci.c > @@ -51,7 +51,6 @@ static void sdhci_finish_data(struct sdhci_host *); > > static void sdhci_finish_command(struct sdhci_host *); > static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode); > -static void sdhci_tuning_timer(unsigned long data); > static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable); > static int sdhci_pre_dma_transfer(struct sdhci_host *host, > struct mmc_data *data, > @@ -252,17 +251,6 @@ static void sdhci_init(struct sdhci_host *host, int soft) > static void sdhci_reinit(struct sdhci_host *host) > { > sdhci_init(host, 0); > - /* > - * Retuning stuffs are affected by different cards inserted and only > - * applicable to UHS-I cards. So reset these fields to their initial > - * value when card is removed. 
> - */ > - if (host->flags & SDHCI_USING_RETUNING_TIMER) { > - host->flags &= ~SDHCI_USING_RETUNING_TIMER; > - > - del_timer_sync(&host->tuning_timer); > - host->flags &= ~SDHCI_NEEDS_RETUNING; > - } > sdhci_enable_card_detection(host); > } > > @@ -1350,7 +1338,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) > struct sdhci_host *host; > int present; > unsigned long flags; > - u32 tuning_opcode; > > host = mmc_priv(mmc); > > @@ -1399,39 +1386,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) > host->mrq->cmd->error = -ENOMEDIUM; > tasklet_schedule(&host->finish_tasklet); > } else { > - u32 present_state; > - > - present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); > - /* > - * Check if the re-tuning timer has already expired and there > - * is no on-going data transfer and DAT0 is not busy. If so, > - * we need to execute tuning procedure before sending command. > - */ > - if ((host->flags & SDHCI_NEEDS_RETUNING) && > - !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ)) && > - (present_state & SDHCI_DATA_0_LVL_MASK)) { > - if (mmc->card) { > - /* eMMC uses cmd21 but sd and sdio use cmd19 */ > - tuning_opcode = > - mmc->card->type == MMC_TYPE_MMC ? > - MMC_SEND_TUNING_BLOCK_HS200 : > - MMC_SEND_TUNING_BLOCK; > - > - /* Here we need to set the host->mrq to NULL, > - * in case the pending finish_tasklet > - * finishes it incorrectly. 
> - */ > - host->mrq = NULL; > - > - spin_unlock_irqrestore(&host->lock, flags); > - sdhci_execute_tuning(mmc, tuning_opcode); > - spin_lock_irqsave(&host->lock, flags); > - > - /* Restore original mmc_request structure */ > - host->mrq = mrq; > - } > - } > - > if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23)) > sdhci_send_command(host, mrq->sbc); > else > @@ -2077,23 +2031,19 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) > } > > out: > - host->flags &= ~SDHCI_NEEDS_RETUNING; > - > if (tuning_count) { > - host->flags |= SDHCI_USING_RETUNING_TIMER; > - mod_timer(&host->tuning_timer, jiffies + tuning_count * HZ); > + /* > + * In case tuning fails, host controllers which support > + * re-tuning can try tuning again at a later time, when the > + * re-tuning timer expires. So for these controllers, we > + * return 0. Since there might be other controllers who do not > + * have this capability, we return error for them. > + */ > + err = 0; > } > > - /* > - * In case tuning fails, host controllers which support re-tuning can > - * try tuning again at a later time, when the re-tuning timer expires. > - * So for these controllers, we return 0. Since there might be other > - * controllers who do not have this capability, we return error for > - * them. SDHCI_USING_RETUNING_TIMER means the host is currently using > - * a retuning timer to do the retuning for the card. 
> - */ > - if (err && (host->flags & SDHCI_USING_RETUNING_TIMER)) > - err = 0; > + if (!err) > + mmc_retune_enable(host->mmc, tuning_count); > > sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); > sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); > @@ -2349,20 +2299,6 @@ static void sdhci_timeout_timer(unsigned long data) > spin_unlock_irqrestore(&host->lock, flags); > } > > -static void sdhci_tuning_timer(unsigned long data) > -{ > - struct sdhci_host *host; > - unsigned long flags; > - > - host = (struct sdhci_host *)data; > - > - spin_lock_irqsave(&host->lock, flags); > - > - host->flags |= SDHCI_NEEDS_RETUNING; > - > - spin_unlock_irqrestore(&host->lock, flags); > -} > - > /*****************************************************************************\ > * * > * Interrupt handling * > @@ -2740,11 +2676,8 @@ int sdhci_suspend_host(struct sdhci_host *host) > { > sdhci_disable_card_detection(host); > > - /* Disable tuning since we are suspending */ > - if (host->flags & SDHCI_USING_RETUNING_TIMER) { > - del_timer_sync(&host->tuning_timer); > - host->flags &= ~SDHCI_NEEDS_RETUNING; > - } > + mmc_retune_timer_stop(host->mmc); > + mmc_retune_needed(host->mmc); > > if (!device_may_wakeup(mmc_dev(host->mmc))) { > host->ier = 0; > @@ -2794,10 +2727,6 @@ int sdhci_resume_host(struct sdhci_host *host) > > sdhci_enable_card_detection(host); > > - /* Set the re-tuning expiration flag */ > - if (host->flags & SDHCI_USING_RETUNING_TIMER) > - host->flags |= SDHCI_NEEDS_RETUNING; > - > return ret; > } > > @@ -2834,11 +2763,8 @@ int sdhci_runtime_suspend_host(struct sdhci_host *host) > { > unsigned long flags; > > - /* Disable tuning since we are suspending */ > - if (host->flags & SDHCI_USING_RETUNING_TIMER) { > - del_timer_sync(&host->tuning_timer); > - host->flags &= ~SDHCI_NEEDS_RETUNING; > - } > + mmc_retune_timer_stop(host->mmc); I think this could give a deadlock. 
What if the retuning is just about to start and thus sdhci's ->execute_tuning() callback has been invoked, which is waiting for the pm_runtime_get_sync() to return. > + mmc_retune_needed(host->mmc); This seems racy. What if a new request has already been started from the mmc core (waiting for sdhci's ->request() callback to return). That would mean the mmc core won't detect that a retune was needed. > > spin_lock_irqsave(&host->lock, flags); > host->ier &= SDHCI_INT_CARD_INT; > @@ -2881,10 +2807,6 @@ int sdhci_runtime_resume_host(struct sdhci_host *host) > spin_unlock_irqrestore(&host->lock, flags); > } > > - /* Set the re-tuning expiration flag */ > - if (host->flags & SDHCI_USING_RETUNING_TIMER) > - host->flags |= SDHCI_NEEDS_RETUNING; > - > spin_lock_irqsave(&host->lock, flags); > > host->runtime_suspended = false; > @@ -3419,13 +3341,6 @@ int sdhci_add_host(struct sdhci_host *host) > > init_waitqueue_head(&host->buf_ready_int); > > - if (host->version >= SDHCI_SPEC_300) { > - /* Initialize re-tuning timer */ > - init_timer(&host->tuning_timer); > - host->tuning_timer.data = (unsigned long)host; > - host->tuning_timer.function = sdhci_tuning_timer; > - } > - > sdhci_init(host, 0); > > ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq, > diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h > index c3e3db1..c5b00be 100644 > --- a/include/linux/mmc/sdhci.h > +++ b/include/linux/mmc/sdhci.h > @@ -138,13 +138,11 @@ struct sdhci_host { > #define SDHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req. 
*/ > #define SDHCI_DEVICE_DEAD (1<<3) /* Device unresponsive */ > #define SDHCI_SDR50_NEEDS_TUNING (1<<4) /* SDR50 needs tuning */ > -#define SDHCI_NEEDS_RETUNING (1<<5) /* Host needs retuning */ > #define SDHCI_AUTO_CMD12 (1<<6) /* Auto CMD12 support */ > #define SDHCI_AUTO_CMD23 (1<<7) /* Auto CMD23 support */ > #define SDHCI_PV_ENABLED (1<<8) /* Preset value enabled */ > #define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */ > #define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */ > -#define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */ > #define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */ > #define SDHCI_HS400_TUNING (1<<13) /* Tuning for HS400 */ > > @@ -210,7 +208,6 @@ struct sdhci_host { > unsigned int tuning_count; /* Timer count for re-tuning */ > unsigned int tuning_mode; /* Re-tuning mode supported by host */ > #define SDHCI_TUNING_MODE_1 0 > - struct timer_list tuning_timer; /* Timer for tuning */ > > struct sdhci_host_next next_data; > unsigned long private[0] ____cacheline_aligned; > -- > 1.9.1 > Kind regards Uffe -- To unsubscribe from this list: send the line "unsubscribe linux-mmc" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
On 06/03/15 14:51, Ulf Hansson wrote: > On 29 January 2015 at 10:00, Adrian Hunter <adrian.hunter@intel.com> wrote: >> Make use of mmc core support for re-tuning instead >> of doing it all in the sdhci driver. >> >> This patch also changes to flag the need for re-tuning >> always after runtime suspend when tuning has been used >> at initialization. Previously it was only done if >> the re-tuning timer was in use. >> >> Signed-off-by: Adrian Hunter <adrian.hunter@intel.com> >> --- >> drivers/mmc/host/sdhci.c | 113 ++++++---------------------------------------- >> include/linux/mmc/sdhci.h | 3 -- >> 2 files changed, 14 insertions(+), 102 deletions(-) >> >> diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c >> index c9881ca..dd0be76 100644 >> --- a/drivers/mmc/host/sdhci.c >> +++ b/drivers/mmc/host/sdhci.c >> @@ -51,7 +51,6 @@ static void sdhci_finish_data(struct sdhci_host *); >> >> static void sdhci_finish_command(struct sdhci_host *); >> static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode); >> -static void sdhci_tuning_timer(unsigned long data); >> static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable); >> static int sdhci_pre_dma_transfer(struct sdhci_host *host, >> struct mmc_data *data, >> @@ -252,17 +251,6 @@ static void sdhci_init(struct sdhci_host *host, int soft) >> static void sdhci_reinit(struct sdhci_host *host) >> { >> sdhci_init(host, 0); >> - /* >> - * Retuning stuffs are affected by different cards inserted and only >> - * applicable to UHS-I cards. So reset these fields to their initial >> - * value when card is removed. 
>> - */ >> - if (host->flags & SDHCI_USING_RETUNING_TIMER) { >> - host->flags &= ~SDHCI_USING_RETUNING_TIMER; >> - >> - del_timer_sync(&host->tuning_timer); >> - host->flags &= ~SDHCI_NEEDS_RETUNING; >> - } >> sdhci_enable_card_detection(host); >> } >> >> @@ -1350,7 +1338,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) >> struct sdhci_host *host; >> int present; >> unsigned long flags; >> - u32 tuning_opcode; >> >> host = mmc_priv(mmc); >> >> @@ -1399,39 +1386,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) >> host->mrq->cmd->error = -ENOMEDIUM; >> tasklet_schedule(&host->finish_tasklet); >> } else { >> - u32 present_state; >> - >> - present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); >> - /* >> - * Check if the re-tuning timer has already expired and there >> - * is no on-going data transfer and DAT0 is not busy. If so, >> - * we need to execute tuning procedure before sending command. >> - */ >> - if ((host->flags & SDHCI_NEEDS_RETUNING) && >> - !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ)) && >> - (present_state & SDHCI_DATA_0_LVL_MASK)) { >> - if (mmc->card) { >> - /* eMMC uses cmd21 but sd and sdio use cmd19 */ >> - tuning_opcode = >> - mmc->card->type == MMC_TYPE_MMC ? >> - MMC_SEND_TUNING_BLOCK_HS200 : >> - MMC_SEND_TUNING_BLOCK; >> - >> - /* Here we need to set the host->mrq to NULL, >> - * in case the pending finish_tasklet >> - * finishes it incorrectly. 
>> - */ >> - host->mrq = NULL; >> - >> - spin_unlock_irqrestore(&host->lock, flags); >> - sdhci_execute_tuning(mmc, tuning_opcode); >> - spin_lock_irqsave(&host->lock, flags); >> - >> - /* Restore original mmc_request structure */ >> - host->mrq = mrq; >> - } >> - } >> - >> if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23)) >> sdhci_send_command(host, mrq->sbc); >> else >> @@ -2077,23 +2031,19 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) >> } >> >> out: >> - host->flags &= ~SDHCI_NEEDS_RETUNING; >> - >> if (tuning_count) { >> - host->flags |= SDHCI_USING_RETUNING_TIMER; >> - mod_timer(&host->tuning_timer, jiffies + tuning_count * HZ); >> + /* >> + * In case tuning fails, host controllers which support >> + * re-tuning can try tuning again at a later time, when the >> + * re-tuning timer expires. So for these controllers, we >> + * return 0. Since there might be other controllers who do not >> + * have this capability, we return error for them. >> + */ >> + err = 0; >> } >> >> - /* >> - * In case tuning fails, host controllers which support re-tuning can >> - * try tuning again at a later time, when the re-tuning timer expires. >> - * So for these controllers, we return 0. Since there might be other >> - * controllers who do not have this capability, we return error for >> - * them. SDHCI_USING_RETUNING_TIMER means the host is currently using >> - * a retuning timer to do the retuning for the card. 
>> - */ >> - if (err && (host->flags & SDHCI_USING_RETUNING_TIMER)) >> - err = 0; >> + if (!err) >> + mmc_retune_enable(host->mmc, tuning_count); >> >> sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); >> sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); >> @@ -2349,20 +2299,6 @@ static void sdhci_timeout_timer(unsigned long data) >> spin_unlock_irqrestore(&host->lock, flags); >> } >> >> -static void sdhci_tuning_timer(unsigned long data) >> -{ >> - struct sdhci_host *host; >> - unsigned long flags; >> - >> - host = (struct sdhci_host *)data; >> - >> - spin_lock_irqsave(&host->lock, flags); >> - >> - host->flags |= SDHCI_NEEDS_RETUNING; >> - >> - spin_unlock_irqrestore(&host->lock, flags); >> -} >> - >> /*****************************************************************************\ >> * * >> * Interrupt handling * >> @@ -2740,11 +2676,8 @@ int sdhci_suspend_host(struct sdhci_host *host) >> { >> sdhci_disable_card_detection(host); >> >> - /* Disable tuning since we are suspending */ >> - if (host->flags & SDHCI_USING_RETUNING_TIMER) { >> - del_timer_sync(&host->tuning_timer); >> - host->flags &= ~SDHCI_NEEDS_RETUNING; >> - } >> + mmc_retune_timer_stop(host->mmc); >> + mmc_retune_needed(host->mmc); >> >> if (!device_may_wakeup(mmc_dev(host->mmc))) { >> host->ier = 0; >> @@ -2794,10 +2727,6 @@ int sdhci_resume_host(struct sdhci_host *host) >> >> sdhci_enable_card_detection(host); >> >> - /* Set the re-tuning expiration flag */ >> - if (host->flags & SDHCI_USING_RETUNING_TIMER) >> - host->flags |= SDHCI_NEEDS_RETUNING; >> - >> return ret; >> } >> >> @@ -2834,11 +2763,8 @@ int sdhci_runtime_suspend_host(struct sdhci_host *host) >> { >> unsigned long flags; >> >> - /* Disable tuning since we are suspending */ >> - if (host->flags & SDHCI_USING_RETUNING_TIMER) { >> - del_timer_sync(&host->tuning_timer); >> - host->flags &= ~SDHCI_NEEDS_RETUNING; >> - } >> + mmc_retune_timer_stop(host->mmc); > > I think this could give a deadlock. 
> > What if the retuning is just about to start and thus sdhci's > ->execute_tuning() callback has been invoked, which is waiting for the > pm_runtime_get_sync() to return. The re-tune timer is mmc_retune_timer() and it does not take any locks so it can't deadlock. > >> + mmc_retune_needed(host->mmc); > > This seems racy. > > What if a new request has already been started from the mmc core > (waiting for sdhci's ->request() callback to return). That would mean > the mmc core won't detect that a retune was needed. That is a good point. The host controller must not runtime suspend after re-tuning until retuning is released. I can think of a couple of options: - move the retuning call into the ->request function - add extra host ops for the host to runtime resume/suspend -- To unsubscribe from this list: send the line "unsubscribe linux-mmc" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
[...] >>> @@ -2834,11 +2763,8 @@ int sdhci_runtime_suspend_host(struct sdhci_host *host) >>> { >>> unsigned long flags; >>> >>> - /* Disable tuning since we are suspending */ >>> - if (host->flags & SDHCI_USING_RETUNING_TIMER) { >>> - del_timer_sync(&host->tuning_timer); >>> - host->flags &= ~SDHCI_NEEDS_RETUNING; >>> - } >>> + mmc_retune_timer_stop(host->mmc); >> >> I think this could give a deadlock. >> >> What if the retuning is just about to start and thus sdhci's >> ->execute_tuning() callback has been invoked, which is waiting for the >> pm_runtime_get_sync() to return. > > The re-tune timer is mmc_retune_timer() and it does not take any locks > so it can't deadlock. > You missed my point. The problem is related to runtime PM. Here the sequence I think will cause the deadlock. mmc_retune_timer_stop() ->del_timer_sync() ... Wait for timer-handler to finish. If the timer-handler is running, it has invoked the ->execute_tuning() callback and is thus waiting for a pm_runtime_get_sync() to return. Now, waiting for a pm_runtime_get_sync() to return from a runtime PM suspend callback will deadlock! >> >>> + mmc_retune_needed(host->mmc); >> >> This seems racy. >> >> What if a new request has already been started from the mmc core >> (waiting for sdhci's ->request() callback to return). That would mean >> the mmc core won't detect that a retune was needed. > > That is a good point. The host controller must not runtime suspend after > re-tuning until retuning is released. I can think of a couple of options: > - move the retuning call into the ->request function > - add extra host ops for the host to runtime resume/suspend > I am not sure which approach I prefer yet. Need some more time to think. For your information, Neil Brown is having a similar issue which he is trying to address [1]. I think we need an generic approach to deal with the runtime PM synchronization issue described above. 
More precisely in those scenarios when mmc hosts need to notify the mmc core
On 10/03/15 15:55, Ulf Hansson wrote: > [...] > >>>> @@ -2834,11 +2763,8 @@ int sdhci_runtime_suspend_host(struct sdhci_host *host) >>>> { >>>> unsigned long flags; >>>> >>>> - /* Disable tuning since we are suspending */ >>>> - if (host->flags & SDHCI_USING_RETUNING_TIMER) { >>>> - del_timer_sync(&host->tuning_timer); >>>> - host->flags &= ~SDHCI_NEEDS_RETUNING; >>>> - } >>>> + mmc_retune_timer_stop(host->mmc); >>> >>> I think this could give a deadlock. >>> >>> What if the retuning is just about to start and thus sdhci's >>> ->execute_tuning() callback has been invoked, which is waiting for the >>> pm_runtime_get_sync() to return. >> >> The re-tune timer is mmc_retune_timer() and it does not take any locks >> so it can't deadlock. >> > > You missed my point. The problem is related to runtime PM. > > Here the sequence I think will cause the deadlock. > mmc_retune_timer_stop() > ->del_timer_sync() > ... > Wait for timer-handler to finish. > > If the timer-handler is running, it has invoked the ->execute_tuning() No, the timer handler does not invoke anything. It just sets a flag. > callback and is thus waiting for a pm_runtime_get_sync() to return. > > Now, waiting for a pm_runtime_get_sync() to return from a runtime PM > suspend callback will deadlock! > >>> >>>> + mmc_retune_needed(host->mmc); >>> >>> This seems racy. >>> >>> What if a new request has already been started from the mmc core >>> (waiting for sdhci's ->request() callback to return). That would mean >>> the mmc core won't detect that a retune was needed. >> >> That is a good point. The host controller must not runtime suspend after >> re-tuning until retuning is released. I can think of a couple of options: >> - move the retuning call into the ->request function >> - add extra host ops for the host to runtime resume/suspend >> > > I am not sure which approach I prefer yet. Need some more time to think. > > For your information, Neil Brown is having a similar issue which he is > trying to address [1]. 
It is a bit different. Re-tuning is about doing something before a request, rather than before a suspend. > I think we need an generic approach to deal with the runtime PM > synchronization issue described above. More precisely in those > scenarios when mmc hosts needs to notify the mmc core to take some > specific actions, from a mmc host's runtime PM callback. For the re-tune case I did not want to assume what the host driver needed to do, so I added ->hold_tuning() and ->release_tuning() host operations. Please see V3 of the patches I sent earlier today. Thanks very much for looking at the patches by the way! :-) -- To unsubscribe from this list: send the line "unsubscribe linux-mmc" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
On 10 March 2015 at 15:20, Adrian Hunter <adrian.hunter@intel.com> wrote: > On 10/03/15 15:55, Ulf Hansson wrote: >> [...] >> >>>>> @@ -2834,11 +2763,8 @@ int sdhci_runtime_suspend_host(struct sdhci_host *host) >>>>> { >>>>> unsigned long flags; >>>>> >>>>> - /* Disable tuning since we are suspending */ >>>>> - if (host->flags & SDHCI_USING_RETUNING_TIMER) { >>>>> - del_timer_sync(&host->tuning_timer); >>>>> - host->flags &= ~SDHCI_NEEDS_RETUNING; >>>>> - } >>>>> + mmc_retune_timer_stop(host->mmc); >>>> >>>> I think this could give a deadlock. >>>> >>>> What if the retuning is just about to start and thus sdhci's >>>> ->execute_tuning() callback has been invoked, which is waiting for the >>>> pm_runtime_get_sync() to return. >>> >>> The re-tune timer is mmc_retune_timer() and it does not take any locks >>> so it can't deadlock. >>> >> >> You missed my point. The problem is related to runtime PM. >> >> Here the sequence I think will cause the deadlock. >> mmc_retune_timer_stop() >> ->del_timer_sync() >> ... >> Wait for timer-handler to finish. >> >> If the timer-handler is running, it has invoked the ->execute_tuning() > > No, the timer handler does not invoke anything. It just sets a flag. Ah, yes. I say that now. > >> callback and is thus waiting for a pm_runtime_get_sync() to return. >> >> Now, waiting for a pm_runtime_get_sync() to return from a runtime PM >> suspend callback will deadlock! >> >>>> >>>>> + mmc_retune_needed(host->mmc); >>>> >>>> This seems racy. >>>> >>>> What if a new request has already been started from the mmc core >>>> (waiting for sdhci's ->request() callback to return). That would mean >>>> the mmc core won't detect that a retune was needed. >>> >>> That is a good point. The host controller must not runtime suspend after >>> re-tuning until retuning is released. 
I can think of a couple of options: >>> - move the retuning call into the ->request function >>> - add extra host ops for the host to runtime resume/suspend >>> >> >> I am not sure which approach I prefer yet. Need some more time to think. >> >> For your information, Neil Brown is having a similar issue which he is >> trying to address [1]. > > It is a bit different. Re-tuning is about doing something before a > request, rather than before a suspend. That's true, but we are still have the same locking issues to consider for runtime PM. > >> I think we need an generic approach to deal with the runtime PM >> synchronization issue described above. More precisely in those >> scenarios when mmc hosts needs to notify the mmc core to take some >> specific actions, from a mmc host's runtime PM callback. > > For the re-tune case I did not want to assume what the host driver > needed to do, so I added ->hold_tuning() and ->release_tuning() > host operations. I have thought a bit more on how I would like this to be implemented. It's a bit closer to what Neil's suggests in his approach [1]. First, I would like only one API provided from the mmc core. This API shall be invoked from the host driver's runtime PM suspend callback. Something along the lines, "mmc_host_runtime_suspend()". This function will return -EBUSY if the host isn't allowed to be runtime PM suspended. To make sure this function don't deadlock, it must not do any requests towards the host (since that would trigger a pm_runtime_get_sync() of the host's device). Also, it need to claim the mmc host in a non-blocking manner (mmc_claim_host() needs to be extended to support that) since that prevents deadlocking and also tells us whether there are new requests prepared by the mmc core. If that's the case, there are no need to complete the runtime PM suspend operation, but instead immediately return -EBUSY. 
In your case, mmc_host_runtime_suspend() also needs to assign a flag indicating that a re-tune is needed at the next request. That flag shall be set while the host is claimed, to prevent the host_ops->request() callback from being invoked, which removes the race condition.
On 23/03/15 14:54, Ulf Hansson wrote: > On 10 March 2015 at 15:20, Adrian Hunter <adrian.hunter@intel.com> wrote: >> On 10/03/15 15:55, Ulf Hansson wrote: >>> [...] >>> >>>>>> @@ -2834,11 +2763,8 @@ int sdhci_runtime_suspend_host(struct sdhci_host *host) >>>>>> { >>>>>> unsigned long flags; >>>>>> >>>>>> - /* Disable tuning since we are suspending */ >>>>>> - if (host->flags & SDHCI_USING_RETUNING_TIMER) { >>>>>> - del_timer_sync(&host->tuning_timer); >>>>>> - host->flags &= ~SDHCI_NEEDS_RETUNING; >>>>>> - } >>>>>> + mmc_retune_timer_stop(host->mmc); >>>>> >>>>> I think this could give a deadlock. >>>>> >>>>> What if the retuning is just about to start and thus sdhci's >>>>> ->execute_tuning() callback has been invoked, which is waiting for the >>>>> pm_runtime_get_sync() to return. >>>> >>>> The re-tune timer is mmc_retune_timer() and it does not take any locks >>>> so it can't deadlock. >>>> >>> >>> You missed my point. The problem is related to runtime PM. >>> >>> Here the sequence I think will cause the deadlock. >>> mmc_retune_timer_stop() >>> ->del_timer_sync() >>> ... >>> Wait for timer-handler to finish. >>> >>> If the timer-handler is running, it has invoked the ->execute_tuning() >> >> No, the timer handler does not invoke anything. It just sets a flag. > > Ah, yes. I say that now. > >> >>> callback and is thus waiting for a pm_runtime_get_sync() to return. >>> >>> Now, waiting for a pm_runtime_get_sync() to return from a runtime PM >>> suspend callback will deadlock! >>> >>>>> >>>>>> + mmc_retune_needed(host->mmc); >>>>> >>>>> This seems racy. >>>>> >>>>> What if a new request has already been started from the mmc core >>>>> (waiting for sdhci's ->request() callback to return). That would mean >>>>> the mmc core won't detect that a retune was needed. >>>> >>>> That is a good point. The host controller must not runtime suspend after >>>> re-tuning until retuning is released. 
I can think of a couple of options: >>>> - move the retuning call into the ->request function >>>> - add extra host ops for the host to runtime resume/suspend >>>> >>> >>> I am not sure which approach I prefer yet. Need some more time to think. >>> >>> For your information, Neil Brown is having a similar issue which he is >>> trying to address [1]. >> >> It is a bit different. Re-tuning is about doing something before a >> request, rather than before a suspend. > > That's true, but we are still have the same locking issues to consider > for runtime PM. I have no locking issues, so I am not sure what you mean here. > >> >>> I think we need an generic approach to deal with the runtime PM >>> synchronization issue described above. More precisely in those >>> scenarios when mmc hosts needs to notify the mmc core to take some >>> specific actions, from a mmc host's runtime PM callback. >> >> For the re-tune case I did not want to assume what the host driver >> needed to do, so I added ->hold_tuning() and ->release_tuning() >> host operations. > > I have thought a bit more on how I would like this to be implemented. > It's a bit closer to what Neil's suggests in his approach [1]. I am not sure it is valuable to mix up the two issues. For Neil's problem I would do something quiet different: 1. The host driver already knows the bus width so can easily "get/put" runtime pm to prevent suspend when the bus width does not permit it. 2. The need to do things when the card is idle comes up a lot (e.g. bkops, sleep notification, cache flush etc etc). In Neil's case he wants to switch to 1-bit mode, but that just seems another of these "idle" operations. So I would investigate the requirements of supporting idle operations in general. > > First, I would like only one API provided from the mmc core. This API > shall be invoked from the host driver's runtime PM suspend callback. > > Something along the lines, "mmc_host_runtime_suspend()". 
This function > will return -EBUSY if the host isn't allowed to be runtime PM > suspended. > > To make sure this function don't deadlock, it must not do any requests > towards the host (since that would trigger a pm_runtime_get_sync() of > the host's device). > > Also, it need to claim the mmc host in a non-blocking manner > (mmc_claim_host() needs to be extended to support that) since that > prevents deadlocking and also tells us whether there are new requests > prepared by the mmc core. If that's the case, there are no need to > complete the runtime PM suspend operation, but instead immediately > return -EBUSY. > > In your case mmc_host_runtime_suspend(), also needs to assign a flag > indicating that a re-tune is needed at next request. That flag shall > be set when the host is claimed to prevent the host_ops->request() > callback to be invoked, which removes the race condition. > > Kind regards > Uffe > > -- To unsubscribe from this list: send the line "unsubscribe linux-mmc" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
[...] > > I have no locking issues, so I am not sure what you mean here. Okay, I should have stated race conditions. > >> >>> >>>> I think we need an generic approach to deal with the runtime PM >>>> synchronization issue described above. More precisely in those >>>> scenarios when mmc hosts needs to notify the mmc core to take some >>>> specific actions, from a mmc host's runtime PM callback. >>> >>> For the re-tune case I did not want to assume what the host driver >>> needed to do, so I added ->hold_tuning() and ->release_tuning() >>> host operations. >> >> I have thought a bit more on how I would like this to be implemented. >> It's a bit closer to what Neil's suggests in his approach [1]. > > I am not sure it is valuable to mix up the two issues. > > For Neil's problem I would do something quiet different: > > 1. The host driver already knows the bus width so can easily "get/put" > runtime pm to prevent suspend when the bus width does not permit it. To me the problem is the other way around. The host driver don't want prevent runtime PM suspend. Instead it want to notify the core that it's ready to be runtime PM suspended. Due to restrictions by the SDIO spec, the mmc core first need to switch to 1-bit data, before the host can do clock gating in runtime PM suspend. > > 2. The need to do things when the card is idle comes up a lot (e.g. bkops, > sleep notification, cache flush etc etc). In Neil's case he wants to switch > to 1-bit mode, but that just seems another of these "idle" operations. So I > would investigate the requirements of supporting idle operations in general. That won't work for the SDIO case, since runtime PM is being deployed differently for SDIO than for MMC/SD. In the SDIO case it's the SDIO func drivers that handles the get/put. For the MMC/SD case it's handled by the block device layer. 
Still, yes I agree that it would be of great interest to look into "idle" operations of eMMC/SD, especially for those other things you mention, but I don't see it as the solution for Neil's SDIO issue. Kind regards Uffe -- To unsubscribe from this list: send the line "unsubscribe linux-mmc" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
On 23/03/2015 5:02 p.m., Ulf Hansson wrote: > [...] > >> >> I have no locking issues, so I am not sure what you mean here. > > Okay, I should have stated race conditions. Which I resolved using runtime get / put calls. Returning -EBUSY from runtime suspend, on the other hand, seems less than ideal. First, reading the hold count from runtime suspend is a new race. Secondly, returning -EBUSY leaves the host controller active until the host controller driver is accessed again, which breaks runtime pm. > >> >>> >>>> >>>>> I think we need an generic approach to deal with the runtime PM >>>>> synchronization issue described above. More precisely in those >>>>> scenarios when mmc hosts needs to notify the mmc core to take some >>>>> specific actions, from a mmc host's runtime PM callback. >>>> >>>> For the re-tune case I did not want to assume what the host driver >>>> needed to do, so I added ->hold_tuning() and ->release_tuning() >>>> host operations. >>> >>> I have thought a bit more on how I would like this to be implemented. >>> It's a bit closer to what Neil's suggests in his approach [1]. >> >> I am not sure it is valuable to mix up the two issues. >> >> For Neil's problem I would do something quiet different: >> >> 1. The host driver already knows the bus width so can easily "get/put" >> runtime pm to prevent suspend when the bus width does not permit it. > > To me the problem is the other way around. > > The host driver don't want prevent runtime PM suspend. Instead it want > to notify the core that it's ready to be runtime PM suspended. > > Due to restrictions by the SDIO spec, the mmc core first need to > switch to 1-bit data, before the host can do clock gating in runtime > PM suspend. That makes two things dependent instead of decoupling them. > >> >> 2. The need to do things when the card is idle comes up a lot (e.g. bkops, >> sleep notification, cache flush etc etc). 
In Neil's case he wants to switch >> to 1-bit mode, but that just seems another of these "idle" operations. So I >> would investigate the requirements of supporting idle operations in general. > > That won't work for the SDIO case, since runtime PM is being deployed > differently for SDIO than for MMC/SD. > > In the SDIO case it's the SDIO func drivers that handles the get/put. > For the MMC/SD case it's handled by the block device layer. It doesn't need to have anything to do with runtime pm. It just needs to be a way the block or SDIO function drivers can inform the core that other stuff can be done. -- To unsubscribe from this list: send the line "unsubscribe linux-mmc" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
On Mon, 23 Mar 2015 16:26:07 +0200 Adrian Hunter <adrian.hunter@intel.com> wrote: > For Neil's problem I would do something quiet different: > > 1. The host driver already knows the bus width so can easily "get/put" > runtime pm to prevent suspend when the bus width does not permit it. > > 2. The need to do things when the card is idle comes up a lot (e.g. bkops, > sleep notification, cache flush etc etc). In Neil's case he wants to switch > to 1-bit mode, but that just seems another of these "idle" operations. So I > would investigate the requirements of supporting idle operations in general. > Hi, I agree that what I want to achieve could be characterised as an "idle" operation. I also happen to think that runtime PM is designed to support "idle" operations - it can track when a device goes 'idle' and "do the right thing". It handles all the needed ref counting and races with resume etc. So I think it should be used to manage these "idle" operations. The difficulty is that in the naive approach, we want to do something in the "runtime_suspend" operation which actually uses the device. So it has to be non-idle in order to go idle. I agree that this can appear clumsy. What we effectively have here is two levels of "idle". First the "host" goes idle in that no new commands are arriving and no interrupts have happened. In response to this the host sends a few final commands to the controller and then the controller goes idle. This two-stage process should be able to be modelled with the two levels of device: the mmc_host class device and the platform device which controls the hardware. e.g. mmc1 and omap_hsmmc.1 on my board. So this is (roughly) what I would do if I wanted to implement fully general "idle" operations for mmc cards. 1/ enable runtime_pm on the host device - in mmc_add_host. I think pm_suspend_ignore_children() would be needed so that the host can go to sleep while the card is still active. 
2/ Use pm_runtime_get / pm_runtime_put_autosuspend in mmc_claim_host and mmc_release_host. 3/ Remove the ->enable and ->disable calls from mmc_{claim,release}_host. I think they only affect omap_hsmmc and it only uses them for runtime PM, which now will happen automatically. 4/ In the 'runtime_suspend' method for mmc_host, take a runtime_pm reference on the platform device and schedule a work item to run the "idle" operations When the "idle" operations complete, the runtime_pm reference will be dropped and then - having no active children or references - the platform device will go to sleep (stop its clocks or whatever). When something then calls mmc_claim_host(), the "idle" operations can be undone, either directly or via the runtime_resume operation. This would be a fairly intrusive change. I'm happy to try to find time to write bits of it if there is general agreement, but I cannot test anything other than omap_hsmmc, and I don't know any details about any other possible "idle" operations so I would need input from someone who does. NeilBrown
On 24 March 2015 at 03:49, NeilBrown <neilb@suse.de> wrote: > On Mon, 23 Mar 2015 16:26:07 +0200 Adrian Hunter <adrian.hunter@intel.com> > wrote: > >> For Neil's problem I would do something quiet different: >> >> 1. The host driver already knows the bus width so can easily "get/put" >> runtime pm to prevent suspend when the bus width does not permit it. >> >> 2. The need to do things when the card is idle comes up a lot (e.g. bkops, >> sleep notification, cache flush etc etc). In Neil's case he wants to switch >> to 1-bit mode, but that just seems another of these "idle" operations. So I >> would investigate the requirements of supporting idle operations in general. >> > > Hi, > I agree that what I want to achieve could be characterised as an "idle" > operation. > I also happen to think that runtime PM is designed to support "idle" > operations - it can track when a device goes 'idle' and "do the right thing". > It handles all the needed ref counting and races with resume etc. So I > think it should be used to manage these "idle" operations. > > The difficulty is that in the naive approach, we want to do something in the > "runtime_suspend" operation which actually uses the device. So it has to be > non-idle in order to go idle. I agree that this can appear clumsy. > > What we effectively have here is two levels of "idle". First the "host" > goes idle in that no new commands are arriving and no interrupts have > happened. In response to this the host sends a few final commands to the > controller and then the controller goes idle. > > This two-stage process should be able to be modelled with the two levels of > device: the mmc_host class device and the platform device which controls the > hardware. e.g. mmc1 and omap_hsmmc.1 on my board. > > So this is (roughly) what I would do if I wanted to implement fully general > "idle" operations for mmc cards. > > 1/ enable runtime_pm on the host device - in mmc_add_host. 
> I think pm_suspend_ignore_children() would be needed so that the host > can go to sleep while the card is still active. > 2/ Use pm_runtime_get / pm_runtime_put_autosuspend in mmc_claim_host > and mmc_release_host. > 3/ Remove the ->enable and ->disable calls from mmc_{claim,release}_host. > I think they only affect omap_hsmmc and it only uses them for runtime > PM, which now will happen automatically. > 4/ In the 'runtime_suspend' method for mmc_host, take a runtime_pm reference > on the platform device and schedule a work item to run the "idle" > operations > > > When the "idle" operations complete, the runtime_pm reference will be > dropped and then - having no active children or references - the > platform device will go to sleep (stop its clocks or whatever). > When something then calls mmc_claim_host(), the "idle" operations can be > undone, either directly or via the runtime_resume operation. I don't think this will work. The problem is that the scheduled work which performs the idle operations will invoke mmc_claim|release_host() when it's about to execute the "idle commands/requests". > > This would be a fairly intrusive change. I'm happy to try to find time to > write bits of it if there is general agreement, but I cannot test anything > other than omap_hsmmc, and I don't know any details about any other possible > "idle" operations so I would need input from someone who does. > > NeilBrown Kind regards Uffe -- To unsubscribe from this list: send the line "unsubscribe linux-mmc" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
On 23 March 2015 at 22:11, Adrian Hunter <adrian.hunter@intel.com> wrote: > On 23/03/2015 5:02 p.m., Ulf Hansson wrote: >> >> [...] >> >>> >>> I have no locking issues, so I am not sure what you mean here. >> >> >> Okay, I should have stated race conditions. > > > Which I resolved using runtime get / put calls. Yes, I noticed that and it works! Though, I would rather avoid to add yet another pair of host ops callbacks for this. What do you think if these options instead? 1) Do runtime PM get/put from the host ops ->enable|disable() callbacks. As omap_hsmmc does. 2) Or even better, do runtime PM get/put directly of the host device from mmc_claim|release_host(). > > Returning -EBUSY from runtime suspend, on the other hand, seems > less than ideal. Agree, if we can avoid it, we certainly should! > > First, reading the hold count from runtime suspend is a new race. I am not sure I understand, could you please elaborate why? > > Secondly, returning -EBUSY leaves the host controller active until the > host controller driver is accessed again, which breaks runtime pm. Returning -EBUSY is "allowed" for any runtime PM suspend callback and it's supported by the runtime PM core. We won't be breaking anything. Anyway, I suggest we put this idea on hold and try other options. > >> >>> >>>> >>>>> >>>>>> I think we need an generic approach to deal with the runtime PM >>>>>> synchronization issue described above. More precisely in those >>>>>> scenarios when mmc hosts needs to notify the mmc core to take some >>>>>> specific actions, from a mmc host's runtime PM callback. >>>>> >>>>> >>>>> For the re-tune case I did not want to assume what the host driver >>>>> needed to do, so I added ->hold_tuning() and ->release_tuning() >>>>> host operations. >>>> >>>> >>>> I have thought a bit more on how I would like this to be implemented. >>>> It's a bit closer to what Neil's suggests in his approach [1]. >>> >>> >>> I am not sure it is valuable to mix up the two issues. 
>>> >>> For Neil's problem I would do something quiet different: >>> >>> 1. The host driver already knows the bus width so can easily "get/put" >>> runtime pm to prevent suspend when the bus width does not permit it. >> >> >> To me the problem is the other way around. >> >> The host driver don't want prevent runtime PM suspend. Instead it want >> to notify the core that it's ready to be runtime PM suspended. >> >> Due to restrictions by the SDIO spec, the mmc core first need to >> switch to 1-bit data, before the host can do clock gating in runtime >> PM suspend. > > > That makes two things dependent instead of decoupling them. Good point! I don't like it either, let's try do better! > >> >>> >>> 2. The need to do things when the card is idle comes up a lot (e.g. >>> bkops, >>> sleep notification, cache flush etc etc). In Neil's case he wants to >>> switch >>> to 1-bit mode, but that just seems another of these "idle" operations. So >>> I >>> would investigate the requirements of supporting idle operations in >>> general. >> >> >> That won't work for the SDIO case, since runtime PM is being deployed >> differently for SDIO than for MMC/SD. >> >> In the SDIO case it's the SDIO func drivers that handles the get/put. >> For the MMC/SD case it's handled by the block device layer. > > > It doesn't need to have anything to do with runtime pm. It just needs > to be a way the block or SDIO function drivers can inform the core > that other stuff can be done. I have thought more about this since yesterday and I somewhat agree with your suggestion. Especially in that sense that I think we should consider Neil's issue as an "idle operation" for SDIO. For "idle operations" for MMC/SD cards, runtime PM is already deployed. So, I think it's just a matter of extending that support to cover more "idle operations" besides the MMC_CAP_AGGRESSIVE_PM thing. What we need to figure out is how to make this work for SDIO - and that's when it becomes more complex. 
I really would like us to avoid exporting new SDIO APIs, unless really needed. Today runtime PM is deployed by the following SDIO func drivers: drivers/net/wireless/libertas/if_sdio.c drivers/net/wireless/ti/wl1251/sdio.c drivers/net/wireless/ti/wlcore/sdio.c We _could_ consider converting the above drivers to use something other than runtime PM to control the power to the card. I think that would be the ideal solution, since then we can deploy runtime PM for SDIO fairly similarly to how it is done for MMC/SD. That means doing runtime PM get/put of the device for the struct mmc_card, inside the mmc core while handling SDIO requests. The above requires some work, both in the mmc core and in the SDIO func drivers. The nice thing is that we don't need to export new APIs to the SDIO func drivers and we can keep the complexity of dealing with "idle operations" inside the mmc core. Thoughts? Kind regards Uffe -- To unsubscribe from this list: send the line "unsubscribe linux-mmc" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
On 24/03/15 23:12, Ulf Hansson wrote: > On 23 March 2015 at 22:11, Adrian Hunter <adrian.hunter@intel.com> wrote: >> On 23/03/2015 5:02 p.m., Ulf Hansson wrote: >>> >>> [...] >>> >>>> >>>> I have no locking issues, so I am not sure what you mean here. >>> >>> >>> Okay, I should have stated race conditions. >> >> >> Which I resolved using runtime get / put calls. > > Yes, I noticed that and it works! Though, I would rather avoid to add > yet another pair of host ops callbacks for this. > > What do you think if these options instead? > 1) Do runtime PM get/put from the host ops ->enable|disable() > callbacks. As omap_hsmmc does. > 2) Or even better, do runtime PM get/put directly of the host device > from mmc_claim|release_host(). Claiming the host is not directly related to host controller runtime pm. It is possible to imagine cases for claiming the host for synchronization reasons that do not need the host controller powered up. And card drivers could reasonably assume that they can claim the host for long periods without affecting the runtime pm of the host controller. Currently we have that the host controller driver is the exclusive owner of runtime pm for the host controller. So the first thing is to decide if we want to keep that or let the core be involved as well. I would argue for sticking with the status quo. Let the host controller driver know what is going on and leave it to do the right thing with its own power management. That means the callbacks, which are > >> >> Returning -EBUSY from runtime suspend, on the other hand, seems >> less than ideal. > > Agree, if we can avoid it, we certainly should! > >> >> First, reading the hold count from runtime suspend is a new race. > > I am not sure I understand, could you please elaborate why? Just in the sense that there is no synchronization between the suspend callback and updating the hold count. 
So the upper layers could finish a request and become idle but the suspend callback might have seen the hold count before it was reduced to zero, leaving the host controller active when it should be runtime suspended. > >> >> Secondly, returning -EBUSY leaves the host controller active until the >> host controller driver is accessed again, which breaks runtime pm. > > Returning -EBUSY is "allowed" for any runtime PM suspend callback and > it's supported by the runtime PM core. We won't be breaking anything. Yes, I agree it is not a violation of the API. I just meant that if the host controller remains active when it should be runtime suspended then power management is, in a sense, "broken". > > Anyway, I suggest we put this idea on hold and try other options. > >> >>> >>>> >>>>> >>>>>> >>>>>>> I think we need an generic approach to deal with the runtime PM >>>>>>> synchronization issue described above. More precisely in those >>>>>>> scenarios when mmc hosts needs to notify the mmc core to take some >>>>>>> specific actions, from a mmc host's runtime PM callback. >>>>>> >>>>>> >>>>>> For the re-tune case I did not want to assume what the host driver >>>>>> needed to do, so I added ->hold_tuning() and ->release_tuning() >>>>>> host operations. >>>>> >>>>> >>>>> I have thought a bit more on how I would like this to be implemented. >>>>> It's a bit closer to what Neil's suggests in his approach [1]. >>>> >>>> >>>> I am not sure it is valuable to mix up the two issues. >>>> >>>> For Neil's problem I would do something quiet different: >>>> >>>> 1. The host driver already knows the bus width so can easily "get/put" >>>> runtime pm to prevent suspend when the bus width does not permit it. >>> >>> >>> To me the problem is the other way around. >>> >>> The host driver don't want prevent runtime PM suspend. Instead it want >>> to notify the core that it's ready to be runtime PM suspended. 
>>> >>> Due to restrictions by the SDIO spec, the mmc core first need to >>> switch to 1-bit data, before the host can do clock gating in runtime >>> PM suspend. >> >> >> That makes two things dependent instead of decoupling them. > > Good point! > > I don't like it either, let's try do better! > >> >>> >>>> >>>> 2. The need to do things when the card is idle comes up a lot (e.g. >>>> bkops, >>>> sleep notification, cache flush etc etc). In Neil's case he wants to >>>> switch >>>> to 1-bit mode, but that just seems another of these "idle" operations. So >>>> I >>>> would investigate the requirements of supporting idle operations in >>>> general. >>> >>> >>> That won't work for the SDIO case, since runtime PM is being deployed >>> differently for SDIO than for MMC/SD. >>> >>> In the SDIO case it's the SDIO func drivers that handles the get/put. >>> For the MMC/SD case it's handled by the block device layer. >> >> >> It doesn't need to have anything to do with runtime pm. It just needs >> to be a way the block or SDIO function drivers can inform the core >> that other stuff can be done. > > I have thought more about this since yesterday and I somewhat agree > with your suggestion. Especially in that sense that I think we should > consider Neil's issue as an "idle operation" for SDIO. > > For "idle operations" for MMC/SD cards, runtime PM is already > deployed. So, I think it's just a matter of extending that support to > cover more "idle operations" besides the MMC_CAP_AGGRESSIVE_PM thing. > > What we need to figure out is how to make this work for SDIO - and > that's when it becomes more complex. I really would like us to avoid > exporting new SDIO APIs, unless really needed. 
> > Today runtime PM is deployed by the following SDIO func drivers: > drivers/net/wireless/libertas/if_sdio.c > drivers/net/wireless/ti/wl1251/sdio.c > drivers/net/wireless/ti/wlcore/sdio.c > > We _could_ consider to convert above drivers to use something else > than runtime PM to control the power to the card. I think that would > be the ideal solution, since then we can deploy runtime PM for SDIO > fairly similar to how MMC/SDIO is done. That means doing runtime PM > get/put of the device for the struct mmc_card, inside the mmc core > while handling SDIO requests. > > The above requires some work, both in the mmc core and in the SDIO > func drivers. The nice thing is that we don't need to export new APIs > to the SDIO func drivers and we can keep the complexity of dealing > with "idle operations" inside the mmc core. > > Thoughts? There isn't necessarily any link between "idle operations" and runtime pm. For example for eMMC background operations I would expect to see: - queue is empty so block driver enables "idle operations". - core waits (delayed work?) a few milliseconds and then starts bkops. - a new I/O request arrives, block driver wakes up and tells the core to stop "idle operations" ASAP, and waits until it does. - or, the core notifies (callback perhaps) the block driver that "idle operations" are finished, so the block driver can runtime-put the card Also need to stop "idle operations" for system suspend, maybe other places. Now in Neil's case there is a relation to runtime pm in that it would be nice if the switch to 1-bit mode was delayed by the host controllers autosuspend_delay. But potentially the host controller driver could routinely set the delay so that it matches the autosuspend_delay anyway. Currently the SDIO function drivers all use sdio_claim_host(). 
So for Neil's case I would see: - host controller driver sees the switch to 4-bit mode and does a runtime "get" to prevent runtime suspend - sdio_release_host enables "idle operations" - the core sees that someone has requested a switch to 1-bit mode after a certain delay, so it waits that delay (delayed work?) and does the switch - host controller driver sees the switch to 1-bit mode and runtime suspends via a pm_runtime_put - sdio_claim_host tells the core to stop "idle operations" ASAP and waits until it has - host controller driver sees the switch to 4-bit mode and does a runtime "get" to prevent runtime suspend -- To unsubscribe from this list: send the line "unsubscribe linux-mmc" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
On 25 March 2015 at 14:48, Adrian Hunter <adrian.hunter@intel.com> wrote: > > On 24/03/15 23:12, Ulf Hansson wrote: > > On 23 March 2015 at 22:11, Adrian Hunter <adrian.hunter@intel.com> wrote: > >> On 23/03/2015 5:02 p.m., Ulf Hansson wrote: > >>> > >>> [...] > >>> > >>>> > >>>> I have no locking issues, so I am not sure what you mean here. > >>> > >>> > >>> Okay, I should have stated race conditions. > >> > >> > >> Which I resolved using runtime get / put calls. > > > > Yes, I noticed that and it works! Though, I would rather avoid to add > > yet another pair of host ops callbacks for this. > > > > What do you think if these options instead? > > 1) Do runtime PM get/put from the host ops ->enable|disable() > > callbacks. As omap_hsmmc does. > > 2) Or even better, do runtime PM get/put directly of the host device > > from mmc_claim|release_host(). > > Claiming the host is not directly related to host controller runtime pm. It > is possible to imagine cases for claiming the host for synchronization > reasons that do not need the host controller powered up. And card drivers > could reasonably assume that they can claim the host for long periods > without affecting the runtime pm of the host controller. Yes, it _may_ power up the host controller sometimes when not needed. I don't think this will be an issue, mostly because it should be rare and not happening frequently - if ever. The only users of mmc_claim_host() for SD/MMC is the core itself, so I don't see any issue with misbehaving "card drivers" here. SDIO is again different, since it's up to the SDIO func drivers to behave - as you stated. But, if they don't they anyway need to be fixed, right!? > > Currently we have that the host controller driver is the exclusive owner of > runtime pm for the host controller. So the first thing is to decide if we > want to keep that or let the core be involved as well. I would argue for > sticking with the status quo. 
Let the host controller driver know what is > going on and leave it to do the right thing with its own power management. The problem I see with the current solution, is that we will be scheduling a runtime PM suspend for each an every request towards the host. It's ineffective, due to that we will unnecessary involve the runtime PM core, thus increasing CPU utilization - when we actually don't need to. I will send a patch for this tomorrow, let's discuss it separately. [...] > > > > I have thought more about this since yesterday and I somewhat agree > > with your suggestion. Especially in that sense that I think we should > > consider Neil's issue as an "idle operation" for SDIO. > > > > For "idle operations" for MMC/SD cards, runtime PM is already > > deployed. So, I think it's just a matter of extending that support to > > cover more "idle operations" besides the MMC_CAP_AGGRESSIVE_PM thing. > > > > What we need to figure out is how to make this work for SDIO - and > > that's when it becomes more complex. I really would like us to avoid > > exporting new SDIO APIs, unless really needed. > > > > Today runtime PM is deployed by the following SDIO func drivers: > > drivers/net/wireless/libertas/if_sdio.c > > drivers/net/wireless/ti/wl1251/sdio.c > > drivers/net/wireless/ti/wlcore/sdio.c > > > > We _could_ consider to convert above drivers to use something else > > than runtime PM to control the power to the card. I think that would > > be the ideal solution, since then we can deploy runtime PM for SDIO > > fairly similar to how MMC/SDIO is done. That means doing runtime PM > > get/put of the device for the struct mmc_card, inside the mmc core > > while handling SDIO requests. > > > > The above requires some work, both in the mmc core and in the SDIO > > func drivers. The nice thing is that we don't need to export new APIs > > to the SDIO func drivers and we can keep the complexity of dealing > > with "idle operations" inside the mmc core. > > > > Thoughts? 
> > There isn't necessarily any link between "idle" operations and runtime pm. I think this is exactly what runtime PM is designed for, so I don't want us to re-invent something that is specific for mmc. > > For example for eMMC background operations I would expect to see: > > - queue is empty so block driver enables "idle operations". > - core waits (delayed work?) a few milliseconds and then starts bkops. > - a new I/O request arrives, block driver wakes up and tells the core to > stop "idle operations" ASAP, and waits until it does. > - or, the core notifies (callback perhaps) the block driver that "idle > operations" are finished, so the block driver can runtime-put the card > > Also need to stop "idle operations" for system suspend, maybe other places. Conceptually, I fully agree. Though, I want to make use of runtime PM. For the eMMC/SD case, the runtime PM suspend callbacks (specified per bus_ops) should be able to act as the "trigger" point to kick off "idle operations". Now, the thing to figure out is how to execute "idle operations" while at the same time being able to interrupt/abort them from another context. An option for how to deal with this could be to schedule a "delayed work" from the runtime PM suspend callback. But if we can avoid it, I think we should.
and does the switch > - host controller driver sees the switch to 1-bit mode and runtime suspends > via a pm_runtime_put > - sdio_claim_host tells the core to stop "idle operations" ASAP and waits > until it has > - host controller driver sees the switch to 4-bit mode and does a runtime > "get" to prevent runtime suspend That seems like a way forward! Still, I would rather like the above-mentioned SDIO func drivers to use something other than runtime PM to control the power to the cards, as I suggested. But that might be too much to fix!? Kind regards Uffe -- To unsubscribe from this list: send the line "unsubscribe linux-mmc" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
On 26/03/15 18:06, Ulf Hansson wrote: > On 25 March 2015 at 14:48, Adrian Hunter <adrian.hunter@intel.com> wrote: >> >> On 24/03/15 23:12, Ulf Hansson wrote: >>> On 23 March 2015 at 22:11, Adrian Hunter <adrian.hunter@intel.com> wrote: >>>> On 23/03/2015 5:02 p.m., Ulf Hansson wrote: >>>>> >>>>> [...] >>>>> >>>>>> >>>>>> I have no locking issues, so I am not sure what you mean here. >>>>> >>>>> >>>>> Okay, I should have stated race conditions. >>>> >>>> >>>> Which I resolved using runtime get / put calls. >>> >>> Yes, I noticed that and it works! Though, I would rather avoid to add >>> yet another pair of host ops callbacks for this. >>> >>> What do you think if these options instead? >>> 1) Do runtime PM get/put from the host ops ->enable|disable() >>> callbacks. As omap_hsmmc does. >>> 2) Or even better, do runtime PM get/put directly of the host device >>> from mmc_claim|release_host(). >> >> Claiming the host is not directly related to host controller runtime pm. It >> is possible to imagine cases for claiming the host for synchronization >> reasons that do not need the host controller powered up. And card drivers >> could reasonably assume that they can claim the host for long periods >> without affecting the runtime pm of the host controller. > > > Yes, it _may_ power up the host controller sometimes when not needed. > I don't think this will be an issue, mostly because it should be rare > and not happening frequently - if ever. > > The only users of mmc_claim_host() for SD/MMC is the core itself, so I > don't see any issue with misbehaving "card drivers" here. > > SDIO is again different, since it's up to the SDIO func drivers to > behave - as you stated. But, if they don't they anyway need to be > fixed, right!? Be aware that that links the holding of re-tuning with claiming the host. It will then not be allowed for re-tuning to be held when the host is released. Will we have to add to mmc_release_host(): WARN_ON(host->hold_count); That could be a problem. 
Say you wanted to start bkops and then release the host so that a different context could issue an HPI. That wouldn't be allowed anymore. Generally it is impossible to see all ends, which begs the question: why link things if you don't have to? > >> >> Currently we have that the host controller driver is the exclusive owner of >> runtime pm for the host controller. So the first thing is to decide if we >> want to keep that or let the core be involved as well. I would argue for >> sticking with the status quo. Let the host controller driver know what is >> going on and leave it to do the right thing with its own power management. > > The problem I see with the current solution, is that we will be > scheduling a runtime PM suspend for each an every request towards the > host. > > It's ineffective, due to that we will unnecessary involve the runtime > PM core, thus increasing CPU utilization - when we actually don't need > to. > > I will send a patch for this tomorrow, let's discuss it separately. Yes please let's keep that separate. > > [...] > >>> >>> I have thought more about this since yesterday and I somewhat agree >>> with your suggestion. Especially in that sense that I think we should >>> consider Neil's issue as an "idle operation" for SDIO. >>> >>> For "idle operations" for MMC/SD cards, runtime PM is already >>> deployed. So, I think it's just a matter of extending that support to >>> cover more "idle operations" besides the MMC_CAP_AGGRESSIVE_PM thing. >>> >>> What we need to figure out is how to make this work for SDIO - and >>> that's when it becomes more complex. I really would like us to avoid >>> exporting new SDIO APIs, unless really needed. 
>>> >>> Today runtime PM is deployed by the following SDIO func drivers: >>> drivers/net/wireless/libertas/if_sdio.c >>> drivers/net/wireless/ti/wl1251/sdio.c >>> drivers/net/wireless/ti/wlcore/sdio.c >>> >>> We _could_ consider to convert above drivers to use something else >>> than runtime PM to control the power to the card. I think that would >>> be the ideal solution, since then we can deploy runtime PM for SDIO >>> fairly similar to how MMC/SDIO is done. That means doing runtime PM >>> get/put of the device for the struct mmc_card, inside the mmc core >>> while handling SDIO requests. >>> >>> The above requires some work, both in the mmc core and in the SDIO >>> func drivers. The nice thing is that we don't need to export new APIs >>> to the SDIO func drivers and we can keep the complexity of dealing >>> with "idle operations" inside the mmc core. >>> >>> Thoughts? >> >> There isn't necessarily any link between "idle operations" and runtime pm. > > I think this is exactly what runtime PM is designed for so I don't > want us to re-invent something that is specific for mmc. Need to remember that PM can theoretically be configured out, which makes using it for bkops seem inappropriate. > >> >> For example for eMMC background operations I would expect to see: >> >> - queue is empty so block driver enables "idle operations". >> - core waits (delayed work?) a few milliseconds and then starts bkops. >> - a new I/O request arrives, block driver wakes up and tells the core to >> stop "idle operations" ASAP, and waits until it does. >> - or, the core notifies (callback perhaps) the block driver that "idle >> operations" are finished, so the block driver can runtime-put the card >> >> Also need to stop "idle operations" for system suspend, maybe other places. > > Conceptual wise, I fully agree. Though, I want to make use of runtime PM. So long as it is power management. I am not sure bkops falls into that category. 
> > For the eMMC/SD case, the runtime PM suspend callbacks (specified per > bus_ops) should be able to act as the "trigger" point to kick off > "idle operations". > > Now, the thing to figure out, is how to execute "idle operations" and > at the same time being able to interrupt/abort them from another > context. An option for how to deal with this, could be to schedule a > "delayed work" from the runtime PM suspend callback. But if we can > avoid it, I think we should. Whatever the context that runs the idle operations, for the case where a driver wants to stop the idle operations ASAP, it would be nice if it could simply take over control - particularly if the idle operation is anyway waiting for something. So the HPI or whatever is done in the driver context directly and the idle operation context (if there even is one at that stage) just exits. > >> >> Now in Neil's case there is a relation to runtime pm in that it would be >> nice if the switch to 1-bit mode was delayed by the host controllers >> autosuspend_delay. But potentially the host controller driver could >> routinely set the delay so that it matches the autosuspend_delay anyway. >> >> Currently the SDIO function drivers all use sdio_claim_host(). So for Neil's >> case I would see: >> >> - host controller driver sees the switch to 4-bit mode and does a runtime >> "get" to prevent runtime suspend >> - sdio_release_host enables "idle operations" >> - the core sees that someone has requested a switch to 1-bit mode after a >> certain delay, so it waits that delay (delayed work?) and does the switch >> - host controller driver sees the switch to 1-bit mode and runtime suspends >> via a pm_runtime_put >> - sdio_claim_host tells the core to stop "idle operations" ASAP and waits >> until it has >> - host controller driver sees the switch to 4-bit mode and does a runtime >> "get" to prevent runtime suspend > > That seems like a way forward! 
> > Still I rather would like the above mentioned SDIO func drivers to use > something else than runtime PM to control the power to the cards, as I > suggested. But that might be too much to fix!? Let's keep that a separate issue. -- To unsubscribe from this list: send the line "unsubscribe linux-mmc" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
On 27/03/15 11:54, Adrian Hunter wrote: > On 26/03/15 18:06, Ulf Hansson wrote: >> On 25 March 2015 at 14:48, Adrian Hunter <adrian.hunter@intel.com> wrote: >>> >>> On 24/03/15 23:12, Ulf Hansson wrote: >>>> On 23 March 2015 at 22:11, Adrian Hunter <adrian.hunter@intel.com> wrote: >>>>> On 23/03/2015 5:02 p.m., Ulf Hansson wrote: >>>>>> >>>>>> [...] >>>>>> >>>>>>> >>>>>>> I have no locking issues, so I am not sure what you mean here. >>>>>> >>>>>> >>>>>> Okay, I should have stated race conditions. >>>>> >>>>> >>>>> Which I resolved using runtime get / put calls. >>>> >>>> Yes, I noticed that and it works! Though, I would rather avoid to add >>>> yet another pair of host ops callbacks for this. >>>> >>>> What do you think of these options instead? >>>> 1) Do runtime PM get/put from the host ops ->enable|disable() >>>> callbacks. As omap_hsmmc does. >>>> 2) Or even better, do runtime PM get/put directly of the host device >>>> from mmc_claim|release_host(). >>> >>> Claiming the host is not directly related to host controller runtime pm. It >>> is possible to imagine cases for claiming the host for synchronization >>> reasons that do not need the host controller powered up. And card drivers >>> could reasonably assume that they can claim the host for long periods >>> without affecting the runtime pm of the host controller. >> >> >> Yes, it _may_ power up the host controller sometimes when not needed. >> I don't think this will be an issue, mostly because it should be rare >> and not happening frequently - if ever. >> >> The only users of mmc_claim_host() for SD/MMC is the core itself, so I >> don't see any issue with misbehaving "card drivers" here. >> >> SDIO is again different, since it's up to the SDIO func drivers to >> behave - as you stated. But, if they don't they anyway need to be >> fixed, right!? > > Be aware that that links the holding of re-tuning with claiming the host. > It will then not be allowed for re-tuning to be held when the host is > released.
Will we have to add to mmc_release_host(): > > WARN_ON(host->hold_count); > > That could be a problem. Say you wanted to start bkops and then release the > host so that a different context could issue an HPI. That wouldn't be > allowed anymore. On the other hand, we would need to prevent the host controller runtime suspending in that case anyway. > > Generally it is impossible to see all ends, which begs the question: why > link things if you don't have to? So I correct myself. Any time we would need to hold re-tuning but release the host would anyway require preventing runtime suspend of the host controller. So the requirement: "don't allow the host controller to runtime suspend while re-tuning is held" is covered by the requirement "don't allow the host controller to runtime suspend when it is doing something". > >> >>> >>> Currently we have that the host controller driver is the exclusive owner of >>> runtime pm for the host controller. So the first thing is to decide if we >>> want to keep that or let the core be involved as well. I would argue for >>> sticking with the status quo. Let the host controller driver know what is >>> going on and leave it to do the right thing with its own power management. >> >> The problem I see with the current solution, is that we will be >> scheduling a runtime PM suspend for each and every request towards the >> host. >> >> It's ineffective, due to that we will unnecessarily involve the runtime >> PM core, thus increasing CPU utilization - when we actually don't need >> to. >> >> I will send a patch for this tomorrow, let's discuss it separately. > > Yes please let's keep that separate. > >> >> [...] >> >>>> >>>> I have thought more about this since yesterday and I somewhat agree >>>> with your suggestion. Especially in that sense that I think we should >>>> consider Neil's issue as an "idle operation" for SDIO. >>>> >>>> For "idle operations" for MMC/SD cards, runtime PM is already >>>> deployed.
So, I think it's just a matter of extending that support to >>>> cover more "idle operations" besides the MMC_CAP_AGGRESSIVE_PM thing. >>>> >>>> What we need to figure out is how to make this work for SDIO - and >>>> that's when it becomes more complex. I really would like us to avoid >>>> exporting new SDIO APIs, unless really needed. >>>> >>>> Today runtime PM is deployed by the following SDIO func drivers: >>>> drivers/net/wireless/libertas/if_sdio.c >>>> drivers/net/wireless/ti/wl1251/sdio.c >>>> drivers/net/wireless/ti/wlcore/sdio.c >>>> >>>> We _could_ consider to convert above drivers to use something else >>>> than runtime PM to control the power to the card. I think that would >>>> be the ideal solution, since then we can deploy runtime PM for SDIO >>>> fairly similar to how MMC/SDIO is done. That means doing runtime PM >>>> get/put of the device for the struct mmc_card, inside the mmc core >>>> while handling SDIO requests. >>>> >>>> The above requires some work, both in the mmc core and in the SDIO >>>> func drivers. The nice thing is that we don't need to export new APIs >>>> to the SDIO func drivers and we can keep the complexity of dealing >>>> with "idle operations" inside the mmc core. >>>> >>>> Thoughts? >>> >>> There isn't necessarily any link between "idle operations" and runtime pm. >> >> I think this is exactly what runtime PM is designed for so I don't >> want us to re-invent something that is specific for mmc. > > Need to remember that PM can theoretically be configured out, which makes > using it for bkops seem inappropriate. > >> >>> >>> For example for eMMC background operations I would expect to see: >>> >>> - queue is empty so block driver enables "idle operations". >>> - core waits (delayed work?) a few milliseconds and then starts bkops. >>> - a new I/O request arrives, block driver wakes up and tells the core to >>> stop "idle operations" ASAP, and waits until it does. 
>>> - or, the core notifies (callback perhaps) the block driver that "idle >>> operations" are finished, so the block driver can runtime-put the card >>> >>> Also need to stop "idle operations" for system suspend, maybe other places. >> >> Conceptual wise, I fully agree. Though, I want to make use of runtime PM. > > So long as it is power management. I am not sure bkops falls into that category. > >> >> For the eMMC/SD case, the runtime PM suspend callbacks (specified per >> bus_ops) should be able to act as the "trigger" point to kick off >> "idle operations". >> >> Now, the thing to figure out, is how to execute "idle operations" and >> at the same time being able to interrupt/abort them from another >> context. An option for how to deal with this, could be to schedule a >> "delayed work" from the runtime PM suspend callback. But if we can >> avoid it, I think we should. > > Whatever the context that runs the idle operations, for the case where a > driver wants to stop the idle operations ASAP, it would be nice if it could > simply take over control - particularly if the idle operation is anyway > waiting for something. So the HPI or whatever is done in the driver context > directly and the idle operation context (if there even is one at that stage) > just exits. > >> >>> >>> Now in Neil's case there is a relation to runtime pm in that it would be >>> nice if the switch to 1-bit mode was delayed by the host controllers >>> autosuspend_delay. But potentially the host controller driver could >>> routinely set the delay so that it matches the autosuspend_delay anyway. >>> >>> Currently the SDIO function drivers all use sdio_claim_host(). 
So for Neil's >>> case I would see: >>> >>> - host controller driver sees the switch to 4-bit mode and does a runtime >>> "get" to prevent runtime suspend >>> - sdio_release_host enables "idle operations" >>> - the core sees that someone has requested a switch to 1-bit mode after a >>> certain delay, so it waits that delay (delayed work?) and does the switch >>> - host controller driver sees the switch to 1-bit mode and runtime suspends >>> via a pm_runtime_put >>> - sdio_claim_host tells the core to stop "idle operations" ASAP and waits >>> until it has >>> - host controller driver sees the switch to 4-bit mode and does a runtime >>> "get" to prevent runtime suspend >> >> That seems like a way forward! >> >> Still I rather would like the above mentioned SDIO func drivers to use >> something else than runtime PM to control the power to the cards, as I >> suggested. But that might be too much to fix!? > > Let's keep that a separate issue. > > > -- To unsubscribe from this list: send the line "unsubscribe linux-mmc" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index c9881ca..dd0be76 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -51,7 +51,6 @@ static void sdhci_finish_data(struct sdhci_host *); static void sdhci_finish_command(struct sdhci_host *); static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode); -static void sdhci_tuning_timer(unsigned long data); static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable); static int sdhci_pre_dma_transfer(struct sdhci_host *host, struct mmc_data *data, @@ -252,17 +251,6 @@ static void sdhci_init(struct sdhci_host *host, int soft) static void sdhci_reinit(struct sdhci_host *host) { sdhci_init(host, 0); - /* - * Retuning stuffs are affected by different cards inserted and only - * applicable to UHS-I cards. So reset these fields to their initial - * value when card is removed. - */ - if (host->flags & SDHCI_USING_RETUNING_TIMER) { - host->flags &= ~SDHCI_USING_RETUNING_TIMER; - - del_timer_sync(&host->tuning_timer); - host->flags &= ~SDHCI_NEEDS_RETUNING; - } sdhci_enable_card_detection(host); } @@ -1350,7 +1338,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) struct sdhci_host *host; int present; unsigned long flags; - u32 tuning_opcode; host = mmc_priv(mmc); @@ -1399,39 +1386,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) host->mrq->cmd->error = -ENOMEDIUM; tasklet_schedule(&host->finish_tasklet); } else { - u32 present_state; - - present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); - /* - * Check if the re-tuning timer has already expired and there - * is no on-going data transfer and DAT0 is not busy. If so, - * we need to execute tuning procedure before sending command. 
- */ - if ((host->flags & SDHCI_NEEDS_RETUNING) && - !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ)) && - (present_state & SDHCI_DATA_0_LVL_MASK)) { - if (mmc->card) { - /* eMMC uses cmd21 but sd and sdio use cmd19 */ - tuning_opcode = - mmc->card->type == MMC_TYPE_MMC ? - MMC_SEND_TUNING_BLOCK_HS200 : - MMC_SEND_TUNING_BLOCK; - - /* Here we need to set the host->mrq to NULL, - * in case the pending finish_tasklet - * finishes it incorrectly. - */ - host->mrq = NULL; - - spin_unlock_irqrestore(&host->lock, flags); - sdhci_execute_tuning(mmc, tuning_opcode); - spin_lock_irqsave(&host->lock, flags); - - /* Restore original mmc_request structure */ - host->mrq = mrq; - } - } - if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23)) sdhci_send_command(host, mrq->sbc); else @@ -2077,23 +2031,19 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) } out: - host->flags &= ~SDHCI_NEEDS_RETUNING; - if (tuning_count) { - host->flags |= SDHCI_USING_RETUNING_TIMER; - mod_timer(&host->tuning_timer, jiffies + tuning_count * HZ); + /* + * In case tuning fails, host controllers which support + * re-tuning can try tuning again at a later time, when the + * re-tuning timer expires. So for these controllers, we + * return 0. Since there might be other controllers who do not + * have this capability, we return error for them. + */ + err = 0; } - /* - * In case tuning fails, host controllers which support re-tuning can - * try tuning again at a later time, when the re-tuning timer expires. - * So for these controllers, we return 0. Since there might be other - * controllers who do not have this capability, we return error for - * them. SDHCI_USING_RETUNING_TIMER means the host is currently using - * a retuning timer to do the retuning for the card. 
- */ - if (err && (host->flags & SDHCI_USING_RETUNING_TIMER)) - err = 0; + if (!err) + mmc_retune_enable(host->mmc, tuning_count); sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); @@ -2349,20 +2299,6 @@ static void sdhci_timeout_timer(unsigned long data) spin_unlock_irqrestore(&host->lock, flags); } -static void sdhci_tuning_timer(unsigned long data) -{ - struct sdhci_host *host; - unsigned long flags; - - host = (struct sdhci_host *)data; - - spin_lock_irqsave(&host->lock, flags); - - host->flags |= SDHCI_NEEDS_RETUNING; - - spin_unlock_irqrestore(&host->lock, flags); -} - /*****************************************************************************\ * * * Interrupt handling * @@ -2740,11 +2676,8 @@ int sdhci_suspend_host(struct sdhci_host *host) { sdhci_disable_card_detection(host); - /* Disable tuning since we are suspending */ - if (host->flags & SDHCI_USING_RETUNING_TIMER) { - del_timer_sync(&host->tuning_timer); - host->flags &= ~SDHCI_NEEDS_RETUNING; - } + mmc_retune_timer_stop(host->mmc); + mmc_retune_needed(host->mmc); if (!device_may_wakeup(mmc_dev(host->mmc))) { host->ier = 0; @@ -2794,10 +2727,6 @@ int sdhci_resume_host(struct sdhci_host *host) sdhci_enable_card_detection(host); - /* Set the re-tuning expiration flag */ - if (host->flags & SDHCI_USING_RETUNING_TIMER) - host->flags |= SDHCI_NEEDS_RETUNING; - return ret; } @@ -2834,11 +2763,8 @@ int sdhci_runtime_suspend_host(struct sdhci_host *host) { unsigned long flags; - /* Disable tuning since we are suspending */ - if (host->flags & SDHCI_USING_RETUNING_TIMER) { - del_timer_sync(&host->tuning_timer); - host->flags &= ~SDHCI_NEEDS_RETUNING; - } + mmc_retune_timer_stop(host->mmc); + mmc_retune_needed(host->mmc); spin_lock_irqsave(&host->lock, flags); host->ier &= SDHCI_INT_CARD_INT; @@ -2881,10 +2807,6 @@ int sdhci_runtime_resume_host(struct sdhci_host *host) spin_unlock_irqrestore(&host->lock, flags); } - /* Set the re-tuning expiration flag */ - 
if (host->flags & SDHCI_USING_RETUNING_TIMER) - host->flags |= SDHCI_NEEDS_RETUNING; - spin_lock_irqsave(&host->lock, flags); host->runtime_suspended = false; @@ -3419,13 +3341,6 @@ int sdhci_add_host(struct sdhci_host *host) init_waitqueue_head(&host->buf_ready_int); - if (host->version >= SDHCI_SPEC_300) { - /* Initialize re-tuning timer */ - init_timer(&host->tuning_timer); - host->tuning_timer.data = (unsigned long)host; - host->tuning_timer.function = sdhci_tuning_timer; - } - sdhci_init(host, 0); ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq, diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h index c3e3db1..c5b00be 100644 --- a/include/linux/mmc/sdhci.h +++ b/include/linux/mmc/sdhci.h @@ -138,13 +138,11 @@ struct sdhci_host { #define SDHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req. */ #define SDHCI_DEVICE_DEAD (1<<3) /* Device unresponsive */ #define SDHCI_SDR50_NEEDS_TUNING (1<<4) /* SDR50 needs tuning */ -#define SDHCI_NEEDS_RETUNING (1<<5) /* Host needs retuning */ #define SDHCI_AUTO_CMD12 (1<<6) /* Auto CMD12 support */ #define SDHCI_AUTO_CMD23 (1<<7) /* Auto CMD23 support */ #define SDHCI_PV_ENABLED (1<<8) /* Preset value enabled */ #define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */ #define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */ -#define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */ #define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */ #define SDHCI_HS400_TUNING (1<<13) /* Tuning for HS400 */ @@ -210,7 +208,6 @@ struct sdhci_host { unsigned int tuning_count; /* Timer count for re-tuning */ unsigned int tuning_mode; /* Re-tuning mode supported by host */ #define SDHCI_TUNING_MODE_1 0 - struct timer_list tuning_timer; /* Timer for tuning */ struct sdhci_host_next next_data; unsigned long private[0] ____cacheline_aligned;
Make use of mmc core support for re-tuning instead of doing it all in the sdhci driver. This patch also changes to flag the need for re-tuning always after runtime suspend when tuning has been used at initialization. Previously it was only done if the re-tuning timer was in use. Signed-off-by: Adrian Hunter <adrian.hunter@intel.com> --- drivers/mmc/host/sdhci.c | 113 ++++++---------------------------------------- include/linux/mmc/sdhci.h | 3 -- 2 files changed, 14 insertions(+), 102 deletions(-)