/*
 * linux/drivers/mmc/core/core.c
 *
 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/devfreq.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/jiffies.h>

#define CREATE_TRACE_POINTS
#include <trace/events/mmc.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/slot-gpio.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(mmc_blk_erase_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(mmc_blk_erase_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(mmc_blk_rw_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(mmc_blk_rw_end);
/* If the device is not responding */
#define MMC_CORE_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */
/*
 * Background operations can take a long time, depending on the housekeeping
 * operations the card has to perform.
 */
#define MMC_BKOPS_MAX_TIMEOUT	(4 * 60 * 1000) /* max time to wait in ms */
static struct workqueue_struct *workqueue;
static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);
/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
                                     unsigned long delay)
{
        return queue_delayed_work(workqueue, work, delay);
}
/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
        flush_workqueue(workqueue);
}
#ifdef CONFIG_FAIL_MMC_REQUEST

/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 */
static void mmc_should_fail_request(struct mmc_host *host,
                                    struct mmc_request *mrq)
{
        struct mmc_command *cmd = mrq->cmd;
        struct mmc_data *data = mrq->data;
        static const int data_errors[] = {
                -ETIMEDOUT,
                -EILSEQ,
                -EIO,
        };

        if (!data)
                return;

        if (cmd->error || data->error ||
            !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
                return;

        data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
        data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
        data->fault_injected = true;
}

#else /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_should_fail_request(struct mmc_host *host,
                                           struct mmc_request *mrq)
{
}

#endif /* CONFIG_FAIL_MMC_REQUEST */
static bool mmc_is_data_request(struct mmc_request *mmc_request)
{
        switch (mmc_request->cmd->opcode) {
        case MMC_READ_SINGLE_BLOCK:
        case MMC_READ_MULTIPLE_BLOCK:
        case MMC_WRITE_BLOCK:
        case MMC_WRITE_MULTIPLE_BLOCK:
                return true;
        default:
                return false;
        }
}
static void mmc_clk_scaling_start_busy(struct mmc_host *host, bool lock_needed)
{
        struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;

        if (!clk_scaling->enable)
                return;

        if (lock_needed)
                spin_lock_bh(&clk_scaling->lock);

        clk_scaling->start_busy = ktime_get();
        clk_scaling->is_busy_started = true;

        if (lock_needed)
                spin_unlock_bh(&clk_scaling->lock);
}
static void mmc_clk_scaling_stop_busy(struct mmc_host *host, bool lock_needed)
{
        struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;

        if (!clk_scaling->enable)
                return;

        if (lock_needed)
                spin_lock_bh(&clk_scaling->lock);

        if (!clk_scaling->is_busy_started) {
                WARN_ON(1);
                goto out;
        }

        clk_scaling->total_busy_time_us +=
                ktime_to_us(ktime_sub(ktime_get(),
                        clk_scaling->start_busy));
        pr_debug("%s: accumulated busy time is %lu usec\n",
                 mmc_hostname(host), clk_scaling->total_busy_time_us);
        clk_scaling->is_busy_started = false;

out:
        if (lock_needed)
                spin_unlock_bh(&clk_scaling->lock);
}
/**
 * mmc_cmdq_clk_scaling_start_busy() - start busy timer for data requests
 * @host: pointer to mmc host structure
 * @lock_needed: flag indicating if locking is needed
 *
 * This function starts the busy timer in case it was not already started.
 */
void mmc_cmdq_clk_scaling_start_busy(struct mmc_host *host,
        bool lock_needed)
{
        if (!host->clk_scaling.enable)
                return;

        if (lock_needed)
                spin_lock_bh(&host->clk_scaling.lock);

        if (!host->clk_scaling.is_busy_started &&
                !test_bit(CMDQ_STATE_DCMD_ACTIVE,
                        &host->cmdq_ctx.curr_state)) {
                host->clk_scaling.start_busy = ktime_get();
                host->clk_scaling.is_busy_started = true;
        }

        if (lock_needed)
                spin_unlock_bh(&host->clk_scaling.lock);
}
EXPORT_SYMBOL(mmc_cmdq_clk_scaling_start_busy);
/**
 * mmc_cmdq_clk_scaling_stop_busy() - stop busy timer for last data requests
 * @host: pointer to mmc host structure
 * @lock_needed: flag indicating if locking is needed
 *
 * This function stops the busy timer in case it is the last data request.
 * In case the current request is not the last one, the busy time till
 * now will be accumulated and the counter will be restarted.
 */
void mmc_cmdq_clk_scaling_stop_busy(struct mmc_host *host,
        bool lock_needed, bool is_cmdq_dcmd)
{
        if (!host->clk_scaling.enable)
                return;

        if (lock_needed)
                spin_lock_bh(&host->clk_scaling.lock);

        /*
         * For CQ mode: In completion of DCMD request, start busy time in
         * case of pending data requests
         */
        if (is_cmdq_dcmd) {
                if (host->cmdq_ctx.data_active_reqs) {
                        host->clk_scaling.is_busy_started = true;
                        host->clk_scaling.start_busy = ktime_get();
                }
                goto out;
        }

        host->clk_scaling.total_busy_time_us +=
                ktime_to_us(ktime_sub(ktime_get(),
                        host->clk_scaling.start_busy));

        if (host->cmdq_ctx.data_active_reqs) {
                host->clk_scaling.is_busy_started = true;
                host->clk_scaling.start_busy = ktime_get();
        } else {
                host->clk_scaling.is_busy_started = false;
        }
out:
        if (lock_needed)
                spin_unlock_bh(&host->clk_scaling.lock);
}
EXPORT_SYMBOL(mmc_cmdq_clk_scaling_stop_busy);
/**
 * mmc_can_scale_clk() - Check clock scaling capability
 * @host: pointer to mmc host structure
 */
bool mmc_can_scale_clk(struct mmc_host *host)
{
        if (!host) {
                pr_err("bad host parameter\n");
                WARN_ON(1);
                return false;
        }

        return host->caps2 & MMC_CAP2_CLK_SCALE;
}
EXPORT_SYMBOL(mmc_can_scale_clk);
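/*
 * Usage sketch (illustrative, not part of this driver): callers are
 * expected to gate any scaling setup on this capability bit, e.g. from a
 * hypothetical probe/init path:
 *
 *	if (mmc_can_scale_clk(host))
 *		err = mmc_init_clk_scaling(host);
 */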
static int mmc_devfreq_get_dev_status(struct device *dev,
                struct devfreq_dev_status *status)
{
        struct mmc_host *host = container_of(dev, struct mmc_host, class_dev);
        struct mmc_devfeq_clk_scaling *clk_scaling;
        bool disable = false;

        if (!host) {
                pr_err("bad host parameter\n");
                WARN_ON(1);
                return -EINVAL;
        }

        clk_scaling = &host->clk_scaling;

        if (!clk_scaling->enable)
                return 0;

        spin_lock_bh(&clk_scaling->lock);

        /* accumulate the busy time of ongoing work */
        memset(status, 0, sizeof(*status));
        if (clk_scaling->is_busy_started) {
                if (mmc_card_cmdq(host->card)) {
                        /* the "busy-timer" will be restarted in case there
                         * are pending data requests */
                        mmc_cmdq_clk_scaling_stop_busy(host, false, false);
                } else {
                        mmc_clk_scaling_stop_busy(host, false);
                        mmc_clk_scaling_start_busy(host, false);
                }
        }

        if (host->ops->check_temp &&
            host->card->clk_scaling_highest > UHS_DDR50_MAX_DTR)
                disable = host->ops->check_temp(host);
        /* busy_time = 0 for running at low freq */
        if (disable)
                status->busy_time = 0;
        else
                status->busy_time = clk_scaling->total_busy_time_us;
        status->total_time = ktime_to_us(ktime_sub(ktime_get(),
                clk_scaling->measure_interval_start));
        clk_scaling->total_busy_time_us = 0;
        status->current_frequency = clk_scaling->curr_freq;
        clk_scaling->measure_interval_start = ktime_get();

        pr_debug("%s: status: load = %lu%% - total_time=%lu busy_time = %lu, clk=%lu\n",
                mmc_hostname(host),
                (status->busy_time*100)/status->total_time,
                status->total_time, status->busy_time,
                status->current_frequency);

        spin_unlock_bh(&clk_scaling->lock);

        return 0;
}
static bool mmc_is_valid_state_for_clk_scaling(struct mmc_host *host)
{
        struct mmc_card *card = host->card;
        u32 status;

        /*
         * If the current partition type is RPMB, clock switching may not
         * work properly as sending tuning command (CMD21) is illegal in
         * this mode.
         */
        if (!card || (mmc_card_mmc(card) &&
                (card->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB ||
                mmc_card_doing_bkops(card))))
                return false;

        if (mmc_send_status(card, &status)) {
                pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
                return false;
        }

        return R1_CURRENT_STATE(status) == R1_STATE_TRAN;
}
int mmc_cmdq_halt_on_empty_queue(struct mmc_host *host)
{
        int err = 0;

        err = wait_event_interruptible(host->cmdq_ctx.queue_empty_wq,
                                (!host->cmdq_ctx.active_reqs));
        if (host->cmdq_ctx.active_reqs) {
                pr_err("%s: %s: unexpected active requests (%lu)\n",
                        mmc_hostname(host), __func__,
                        host->cmdq_ctx.active_reqs);
                return -EPERM;
        }

        err = mmc_cmdq_halt(host, true);
        if (err) {
                pr_err("%s: %s: mmc_cmdq_halt failed (%d)\n",
                        mmc_hostname(host), __func__, err);
                goto out;
        }

out:
        return err;
}
EXPORT_SYMBOL(mmc_cmdq_halt_on_empty_queue);
int mmc_clk_update_freq(struct mmc_host *host,
                unsigned long freq, enum mmc_load state)
{
        int err = 0;
        bool cmdq_mode;

        if (!host) {
                pr_err("bad host parameter\n");
                WARN_ON(1);
                return -EINVAL;
        }

        mmc_host_clk_hold(host);
        cmdq_mode = mmc_card_cmdq(host->card);

        /* make sure the card supports the frequency we want */
        if (unlikely(freq > host->card->clk_scaling_highest)) {
                freq = host->card->clk_scaling_highest;
                pr_warn("%s: %s: frequency was overridden to %lu\n",
                        mmc_hostname(host), __func__,
                        host->card->clk_scaling_highest);
        }

        if (unlikely(freq < host->card->clk_scaling_lowest)) {
                freq = host->card->clk_scaling_lowest;
                pr_warn("%s: %s: frequency was overridden to %lu\n",
                        mmc_hostname(host), __func__,
                        host->card->clk_scaling_lowest);
        }

        if (freq == host->clk_scaling.curr_freq)
                goto out;

        if (host->ops->notify_load) {
                err = host->ops->notify_load(host, state);
                if (err) {
                        pr_err("%s: %s: fail on notify_load\n",
                                mmc_hostname(host), __func__);
                        goto out;
                }
        }

        if (cmdq_mode) {
                err = mmc_cmdq_halt_on_empty_queue(host);
                if (err) {
                        pr_err("%s: %s: failed halting queue (%d)\n",
                                mmc_hostname(host), __func__, err);
                        goto error;
                }
        }

        if (!mmc_is_valid_state_for_clk_scaling(host)) {
                pr_debug("%s: invalid state for clock scaling - skipping",
                        mmc_hostname(host));
                goto invalid_state;
        }

        err = host->bus_ops->change_bus_speed(host, &freq);
        if (!err)
                host->clk_scaling.curr_freq = freq;
        else
                pr_err("%s: %s: failed (%d) at freq=%lu\n",
                        mmc_hostname(host), __func__, err, freq);

invalid_state:
        if (cmdq_mode) {
                if (mmc_cmdq_halt(host, false))
                        pr_err("%s: %s: cmdq unhalt failed\n",
                                mmc_hostname(host), __func__);
        }

error:
        if (err) {
                /* restore previous state */
                if (host->ops->notify_load)
                        if (host->ops->notify_load(host,
                                host->clk_scaling.state))
                                pr_err("%s: %s: fail on notify_load restore\n",
                                        mmc_hostname(host), __func__);
        }
out:
        mmc_host_clk_release(host);
        return err;
}
EXPORT_SYMBOL(mmc_clk_update_freq);
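/*
 * Illustrative sketch (assumed caller, not from this file): forcing the
 * card to its highest supported frequency under high load would look like:
 *
 *	err = mmc_clk_update_freq(host, host->card->clk_scaling_highest,
 *				  MMC_LOAD_HIGH);
 *	if (err && err != -EAGAIN)
 *		pr_err("%s: scaling failed\n", mmc_hostname(host));
 */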
int mmc_recovery_fallback_lower_speed(struct mmc_host *host)
{
        int err = 0;

        if (!host->card)
                return -EINVAL;

        if (host->sdr104_wa && mmc_card_sd(host->card) &&
            (host->ios.timing == MMC_TIMING_UHS_SDR104) &&
            !host->card->sdr104_blocked) {
                pr_err("%s: %s: blocked SDR104, lower the bus-speed (SDR50 / DDR50)\n",
                        mmc_hostname(host), __func__);
                mmc_host_clear_sdr104(host);
                err = mmc_hw_reset(host);
                host->card->sdr104_blocked = true;
        } else {
                /* If sdr104_wa is not present, just return status */
                err = host->bus_ops->alive(host);
        }
        if (err)
                pr_err("%s: %s: Fallback to lower speed mode failed with err=%d\n",
                        mmc_hostname(host), __func__, err);

        return err;
}
static int mmc_devfreq_set_target(struct device *dev,
                unsigned long *freq, u32 devfreq_flags)
{
        struct mmc_host *host = container_of(dev, struct mmc_host, class_dev);
        struct mmc_devfeq_clk_scaling *clk_scaling;
        int err = 0;
        int abort;
        unsigned long pflags = current->flags;

        /* Ensure scaling would happen even in memory pressure conditions */
        current->flags |= PF_MEMALLOC;

        if (!(host && freq)) {
                pr_err("%s: unexpected host/freq parameter\n", __func__);
                err = -EINVAL;
                goto out;
        }

        clk_scaling = &host->clk_scaling;

        if (!clk_scaling->enable)
                goto out;

        pr_debug("%s: target freq = %lu (%s)\n", mmc_hostname(host),
                *freq, current->comm);

        if ((clk_scaling->curr_freq == *freq) ||
                clk_scaling->skip_clk_scale_freq_update)
                goto out;

        /* No need to scale the clocks if they are gated */
        if (!host->ios.clock)
                goto out;

        spin_lock_bh(&clk_scaling->lock);
        if (clk_scaling->clk_scaling_in_progress) {
                pr_debug("%s: clocks scaling is already in-progress by mmc thread\n",
                        mmc_hostname(host));
                spin_unlock_bh(&clk_scaling->lock);
                goto out;
        }
        clk_scaling->need_freq_change = true;
        clk_scaling->target_freq = *freq;
        clk_scaling->state = *freq < clk_scaling->curr_freq ?
                MMC_LOAD_LOW : MMC_LOAD_HIGH;
        spin_unlock_bh(&clk_scaling->lock);

        abort = __mmc_claim_host(host, &clk_scaling->devfreq_abort);
        if (abort)
                goto out;

        if (mmc_card_sd(host->card) && host->card->sdr104_blocked)
                goto rel_host;

        /*
         * In case we were able to claim host there is no need to
         * defer the frequency change. It will be done now
         */
        clk_scaling->need_freq_change = false;

        mmc_host_clk_hold(host);
        err = mmc_clk_update_freq(host, *freq, clk_scaling->state);
        if (err && err != -EAGAIN) {
                pr_err("%s: clock scale to %lu failed with error %d\n",
                        mmc_hostname(host), *freq, err);
                err = mmc_recovery_fallback_lower_speed(host);
        } else {
                pr_debug("%s: clock change to %lu finished successfully (%s)\n",
                        mmc_hostname(host), *freq, current->comm);
        }

        mmc_host_clk_release(host);
rel_host:
        mmc_release_host(host);
out:
        tsk_restore_flags(current, pflags, PF_MEMALLOC);
        return err;
}
/**
 * mmc_deferred_scaling() - scale clocks from data path (mmc thread context)
 * @host: pointer to mmc host structure
 *
 * This function does clock scaling in case "need_freq_change" flag was set
 * by the clock scaling logic.
 */
void mmc_deferred_scaling(struct mmc_host *host)
{
        unsigned long target_freq;
        int err;

        if (!host->clk_scaling.enable)
                return;

        if (mmc_card_sd(host->card) && host->card->sdr104_blocked)
                return;

        spin_lock_bh(&host->clk_scaling.lock);

        if (host->clk_scaling.clk_scaling_in_progress ||
                !(host->clk_scaling.need_freq_change)) {
                spin_unlock_bh(&host->clk_scaling.lock);
                return;
        }

        atomic_inc(&host->clk_scaling.devfreq_abort);
        target_freq = host->clk_scaling.target_freq;
        host->clk_scaling.clk_scaling_in_progress = true;
        host->clk_scaling.need_freq_change = false;
        spin_unlock_bh(&host->clk_scaling.lock);
        pr_debug("%s: doing deferred frequency change (%lu) (%s)\n",
                        mmc_hostname(host),
                        target_freq, current->comm);

        err = mmc_clk_update_freq(host, target_freq,
                host->clk_scaling.state);
        if (err && err != -EAGAIN) {
                pr_err("%s: failed on deferred scale clocks (%d)\n",
                        mmc_hostname(host), err);
                mmc_recovery_fallback_lower_speed(host);
        } else {
                pr_debug("%s: clocks were successfully scaled to %lu (%s)\n",
                        mmc_hostname(host),
                        target_freq, current->comm);
        }
        host->clk_scaling.clk_scaling_in_progress = false;
        atomic_dec(&host->clk_scaling.devfreq_abort);
}
EXPORT_SYMBOL(mmc_deferred_scaling);
static int mmc_devfreq_create_freq_table(struct mmc_host *host)
{
        int i;
        struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;

        pr_debug("%s: supported: lowest=%lu, highest=%lu\n",
                mmc_hostname(host),
                host->card->clk_scaling_lowest,
                host->card->clk_scaling_highest);

        /*
         * Create the frequency table and initialize it with default values.
         * Initialize it with platform specific frequencies if the frequency
         * table supplied by platform driver is present, otherwise initialize
         * it with min and max frequencies supported by the card.
         */
        if (!clk_scaling->freq_table) {
                if (clk_scaling->pltfm_freq_table_sz)
                        clk_scaling->freq_table_sz =
                                clk_scaling->pltfm_freq_table_sz;
                else
                        clk_scaling->freq_table_sz = 2;

                clk_scaling->freq_table = kzalloc(
                        (clk_scaling->freq_table_sz *
                        sizeof(*(clk_scaling->freq_table))), GFP_KERNEL);
                if (!clk_scaling->freq_table)
                        return -ENOMEM;

                if (clk_scaling->pltfm_freq_table) {
                        memcpy(clk_scaling->freq_table,
                                clk_scaling->pltfm_freq_table,
                                (clk_scaling->pltfm_freq_table_sz *
                                sizeof(*(clk_scaling->pltfm_freq_table))));
                } else {
                        pr_debug("%s: no frequency table defined - setting default\n",
                                mmc_hostname(host));
                        clk_scaling->freq_table[0] =
                                host->card->clk_scaling_lowest;
                        clk_scaling->freq_table[1] =
                                host->card->clk_scaling_highest;
                        goto out;
                }
        }

        if (host->card->clk_scaling_lowest >
                clk_scaling->freq_table[0])
                pr_debug("%s: frequency table undershot possible freq\n",
                        mmc_hostname(host));

        for (i = 0; i < clk_scaling->freq_table_sz; i++) {
                if (clk_scaling->freq_table[i] <=
                        host->card->clk_scaling_highest)
                        continue;
                clk_scaling->freq_table[i] =
                        host->card->clk_scaling_highest;
                clk_scaling->freq_table_sz = i + 1;
                pr_debug("%s: frequency table overshot possible freq (%d)\n",
                        mmc_hostname(host), clk_scaling->freq_table[i]);
                break;
        }

out:
        clk_scaling->devfreq_profile.freq_table = clk_scaling->freq_table;
        clk_scaling->devfreq_profile.max_state = clk_scaling->freq_table_sz;

        for (i = 0; i < clk_scaling->freq_table_sz; i++)
                pr_debug("%s: freq[%d] = %u\n",
                        mmc_hostname(host), i, clk_scaling->freq_table[i]);

        return 0;
}
/**
 * mmc_init_devfreq_clk_scaling() - Initialize clock scaling
 * @host: pointer to mmc host structure
 *
 * Initialize clock scaling for supported hosts. It is assumed that the caller
 * ensures the clock is running at the maximum possible frequency before
 * calling this function. Shall use struct devfreq_simple_ondemand_data to
 * configure the polling interval and thresholds for the ondemand governor.
 */
int mmc_init_clk_scaling(struct mmc_host *host)
{
        int err;

        if (!host || !host->card) {
                pr_err("%s: unexpected host/card parameters\n",
                        __func__);
                return -EINVAL;
        }

        if (!mmc_can_scale_clk(host) ||
                !host->bus_ops->change_bus_speed) {
                pr_debug("%s: clock scaling is not supported\n",
                        mmc_hostname(host));
                return 0;
        }

        pr_debug("registering %s dev (%p) to devfreq",
                mmc_hostname(host),
                mmc_classdev(host));

        if (host->clk_scaling.devfreq) {
                pr_err("%s: dev is already registered for dev %p\n",
                        mmc_hostname(host),
                        mmc_dev(host));
                return -EPERM;
        }
        spin_lock_init(&host->clk_scaling.lock);
        atomic_set(&host->clk_scaling.devfreq_abort, 0);
        host->clk_scaling.curr_freq = host->ios.clock;
        host->clk_scaling.clk_scaling_in_progress = false;
        host->clk_scaling.need_freq_change = false;
        host->clk_scaling.is_busy_started = false;

        host->clk_scaling.devfreq_profile.polling_ms =
                host->clk_scaling.polling_delay_ms;
        host->clk_scaling.devfreq_profile.get_dev_status =
                mmc_devfreq_get_dev_status;
        host->clk_scaling.devfreq_profile.target = mmc_devfreq_set_target;
        host->clk_scaling.devfreq_profile.initial_freq = host->ios.clock;

        host->clk_scaling.ondemand_gov_data.simple_scaling = true;
        host->clk_scaling.ondemand_gov_data.upthreshold =
                host->clk_scaling.upthreshold;
        host->clk_scaling.ondemand_gov_data.downdifferential =
                host->clk_scaling.upthreshold - host->clk_scaling.downthreshold;

        err = mmc_devfreq_create_freq_table(host);
        if (err) {
                pr_err("%s: fail to create devfreq frequency table\n",
                        mmc_hostname(host));
                return err;
        }

        pr_debug("%s: adding devfreq with: upthreshold=%u downthreshold=%u polling=%u\n",
                mmc_hostname(host),
                host->clk_scaling.ondemand_gov_data.upthreshold,
                host->clk_scaling.ondemand_gov_data.downdifferential,
                host->clk_scaling.devfreq_profile.polling_ms);
        host->clk_scaling.devfreq = devfreq_add_device(
                mmc_classdev(host),
                &host->clk_scaling.devfreq_profile,
                "simple_ondemand",
                &host->clk_scaling.ondemand_gov_data);
        if (!host->clk_scaling.devfreq) {
                pr_err("%s: unable to register with devfreq\n",
                        mmc_hostname(host));
                return -EPERM;
        }

        pr_debug("%s: clk scaling is enabled for device %s (%p) with devfreq %p (clock = %uHz)\n",
                mmc_hostname(host),
                dev_name(mmc_classdev(host)),
                mmc_classdev(host),
                host->clk_scaling.devfreq,
                host->ios.clock);

        host->clk_scaling.enable = true;

        return 0;
}
EXPORT_SYMBOL(mmc_init_clk_scaling);
/**
 * mmc_suspend_clk_scaling() - suspend clock scaling
 * @host: pointer to mmc host structure
 *
 * This API will suspend devfreq feature for the specific host.
 * The statistics collected by mmc will be cleared.
 * This function is intended to be called by the pm callbacks
 * (e.g. runtime_suspend, suspend) of the mmc device
 */
int mmc_suspend_clk_scaling(struct mmc_host *host)
{
        int err;

        if (!host) {
                WARN(1, "bad host parameter\n");
                return -EINVAL;
        }

        if (!mmc_can_scale_clk(host) || !host->clk_scaling.enable)
                return 0;

        if (!host->clk_scaling.devfreq) {
                pr_err("%s: %s: no devfreq is associated with this device\n",
                        mmc_hostname(host), __func__);
                return -EPERM;
        }

        atomic_inc(&host->clk_scaling.devfreq_abort);

        err = devfreq_suspend_device(host->clk_scaling.devfreq);
        if (err) {
                pr_err("%s: %s: failed to suspend devfreq\n",
                        mmc_hostname(host), __func__);
                return err;
        }
        host->clk_scaling.enable = false;

        host->clk_scaling.total_busy_time_us = 0;

        pr_debug("%s: devfreq suspended\n", mmc_hostname(host));

        return 0;
}
EXPORT_SYMBOL(mmc_suspend_clk_scaling);
/**
 * mmc_resume_clk_scaling() - resume clock scaling
 * @host: pointer to mmc host structure
 *
 * This API will resume devfreq feature for the specific host.
 * This API is intended to be called by the pm callbacks
 * (e.g. runtime_suspend, suspend) of the mmc device
 */
int mmc_resume_clk_scaling(struct mmc_host *host)
{
        int err = 0;
        int max_clk_idx = 0;
        u32 devfreq_max_clk = 0;
        u32 devfreq_min_clk = 0;

        if (!host) {
                WARN(1, "bad host parameter\n");
                return -EINVAL;
        }

        if (!mmc_can_scale_clk(host))
                return 0;

        /*
         * If clock scaling is already exited when resume is called, like
         * during mmc shutdown, it is not an error and should not fail the
         * API calling it.
         */
        if (!host->clk_scaling.devfreq) {
                pr_warn("%s: %s: no devfreq is associated with this device\n",
                        mmc_hostname(host), __func__);
                return 0;
        }

        atomic_set(&host->clk_scaling.devfreq_abort, 0);

        max_clk_idx = host->clk_scaling.freq_table_sz - 1;
        devfreq_max_clk = host->clk_scaling.freq_table[max_clk_idx];
        devfreq_min_clk = host->clk_scaling.freq_table[0];

        host->clk_scaling.curr_freq = devfreq_max_clk;
        if (host->ios.clock < host->clk_scaling.freq_table[max_clk_idx])
                host->clk_scaling.curr_freq = devfreq_min_clk;

        host->clk_scaling.clk_scaling_in_progress = false;
        host->clk_scaling.need_freq_change = false;

        err = devfreq_resume_device(host->clk_scaling.devfreq);
        if (err) {
                pr_err("%s: %s: failed to resume devfreq (%d)\n",
                        mmc_hostname(host), __func__, err);
        } else {
                host->clk_scaling.enable = true;
                pr_debug("%s: devfreq resumed\n", mmc_hostname(host));
        }

        return err;
}
EXPORT_SYMBOL(mmc_resume_clk_scaling);
/**
 * mmc_exit_devfreq_clk_scaling() - Disable clock scaling
 * @host: pointer to mmc host structure
 *
 * Disable clock scaling permanently.
 */
int mmc_exit_clk_scaling(struct mmc_host *host)
{
        int err;

        if (!host) {
                pr_err("%s: bad host parameter\n", __func__);
                WARN_ON(1);
                return -EINVAL;
        }

        if (!mmc_can_scale_clk(host))
                return 0;

        if (!host->clk_scaling.devfreq) {
                pr_err("%s: %s: no devfreq is associated with this device\n",
                        mmc_hostname(host), __func__);
                return -EPERM;
        }

        err = mmc_suspend_clk_scaling(host);
        if (err) {
                pr_err("%s: %s: fail to suspend clock scaling (%d)\n",
                        mmc_hostname(host), __func__, err);
                return err;
        }

        err = devfreq_remove_device(host->clk_scaling.devfreq);
        if (err) {
                pr_err("%s: remove devfreq failed (%d)\n",
                        mmc_hostname(host), err);
                return err;
        }

        host->clk_scaling.devfreq = NULL;
        atomic_set(&host->clk_scaling.devfreq_abort, 1);

        kfree(host->clk_scaling.freq_table);
        host->clk_scaling.freq_table = NULL;

        pr_debug("%s: devfreq was removed\n", mmc_hostname(host));

        return 0;
}
EXPORT_SYMBOL(mmc_exit_clk_scaling);
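/*
 * Illustrative lifecycle sketch (hypothetical PM callers, not from this
 * file): the scaling hooks above are intended to be paired as follows:
 *
 *	probe/init:		mmc_init_clk_scaling(host);
 *	runtime_suspend:	mmc_suspend_clk_scaling(host);
 *	runtime_resume:		mmc_resume_clk_scaling(host);
 *	shutdown/remove:	mmc_exit_clk_scaling(host);
 */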
/**
 *	mmc_request_done - finish processing an MMC request
 *	@host: MMC host which completed request
 *	@mrq: MMC request which completed
 *
 *	MMC drivers should call this function when they have completed
 *	their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
        struct mmc_command *cmd = mrq->cmd;
        int err = cmd->error;
#ifdef CONFIG_MMC_PERF_PROFILING
        ktime_t diff;
#endif

        if (host->clk_scaling.is_busy_started)
                mmc_clk_scaling_stop_busy(host, true);

        /* Flag re-tuning needed on CRC errors */
        if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
            cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
            (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
            (mrq->data && mrq->data->error == -EILSEQ) ||
            (mrq->stop && mrq->stop->error == -EILSEQ)))
                mmc_retune_needed(host);

        if (err && cmd->retries && mmc_host_is_spi(host)) {
                if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
                        cmd->retries = 0;
        }

        if (err && cmd->retries && !mmc_card_removed(host->card)) {
                /*
                 * Request starter must handle retries - see
                 * mmc_wait_for_req_done().
                 */
                if (mrq->done)
                        mrq->done(mrq);
        } else {
                mmc_should_fail_request(host, mrq);

                led_trigger_event(host->led, LED_OFF);

                if (mrq->sbc) {
                        pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
                                mmc_hostname(host), mrq->sbc->opcode,
                                mrq->sbc->error,
                                mrq->sbc->resp[0], mrq->sbc->resp[1],
                                mrq->sbc->resp[2], mrq->sbc->resp[3]);
                }

                pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
                        mmc_hostname(host), cmd->opcode, err,
                        cmd->resp[0], cmd->resp[1],
                        cmd->resp[2], cmd->resp[3]);

                if (mrq->data) {
#ifdef CONFIG_MMC_PERF_PROFILING
                        if (host->perf_enable) {
                                diff = ktime_sub(ktime_get(), host->perf.start);
                                if (mrq->data->flags == MMC_DATA_READ) {
                                        host->perf.rbytes_drv +=
                                                mrq->data->bytes_xfered;
                                        host->perf.rtime_drv =
                                                ktime_add(host->perf.rtime_drv,
                                                        diff);
                                } else {
                                        host->perf.wbytes_drv +=
                                                mrq->data->bytes_xfered;
                                        host->perf.wtime_drv =
                                                ktime_add(host->perf.wtime_drv,
                                                        diff);
                                }
                        }
#endif
                        pr_debug("%s: %d bytes transferred: %d\n",
                                mmc_hostname(host),
                                mrq->data->bytes_xfered, mrq->data->error);
                        if (mrq->lat_hist_enabled) {
                                ktime_t completion;
                                u_int64_t delta_us;

                                completion = ktime_get();
                                delta_us = ktime_us_delta(completion,
                                                          mrq->io_start);
                                blk_update_latency_hist(
                                        (mrq->data->flags & MMC_DATA_READ) ?
                                        &host->io_lat_read :
                                        &host->io_lat_write, delta_us);
                        }
                        trace_mmc_blk_rw_end(cmd->opcode, cmd->arg, mrq->data);
                }

                if (mrq->stop) {
                        pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
                                mmc_hostname(host), mrq->stop->opcode,
                                mrq->stop->error,
                                mrq->stop->resp[0], mrq->stop->resp[1],
                                mrq->stop->resp[2], mrq->stop->resp[3]);
                }

                if (mrq->done)
                        mrq->done(mrq);

                mmc_host_clk_release(host);
        }
}
EXPORT_SYMBOL(mmc_request_done);
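/*
 * Illustrative sketch (hypothetical host driver, not from this file): a
 * controller driver usually completes a request from its interrupt handler,
 * setting the command error before handing the request back to the core:
 *
 *	static irqreturn_t my_mmc_irq(int irq, void *dev_id)
 *	{
 *		struct my_mmc_host *my_host = dev_id;
 *
 *		my_host->mrq->cmd->error = my_read_error(my_host);
 *		mmc_request_done(my_host->mmc, my_host->mrq);
 *		return IRQ_HANDLED;
 *	}
 */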
static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
        int err;

        /* Assumes host controller has been runtime resumed by mmc_claim_host */
        err = mmc_retune(host);
        if (err) {
                mrq->cmd->error = err;
                mmc_request_done(host, mrq);
                return;
        }

        /*
         * For sdio rw commands we must wait for card busy otherwise some
         * sdio devices won't work properly.
         */
        if (mmc_is_io_op(mrq->cmd->opcode) && host->ops->card_busy) {
                int tries = 500; /* Wait approx. 500ms at maximum */

                while (host->ops->card_busy(host) && --tries)
                        mmc_delay(1);

                if (tries == 0) {
                        mrq->cmd->error = -EBUSY;
                        mmc_request_done(host, mrq);
                        return;
                }
        }

        host->ops->request(host, mrq);
}
static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
        unsigned int i, sz;
        struct scatterlist *sg;
#endif
        mmc_retune_hold(host);

        if (mmc_card_removed(host->card))
                return -ENOMEDIUM;

        if (mrq->sbc) {
                pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
                         mmc_hostname(host), mrq->sbc->opcode,
                         mrq->sbc->arg, mrq->sbc->flags);
        }

        pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
                 mmc_hostname(host), mrq->cmd->opcode,
                 mrq->cmd->arg, mrq->cmd->flags);

        if (mrq->data) {
                pr_debug("%s:     blksz %d blocks %d flags %08x "
                        "tsac %d ms nsac %d\n",
                        mmc_hostname(host), mrq->data->blksz,
                        mrq->data->blocks, mrq->data->flags,
                        mrq->data->timeout_ns / 1000000,
                        mrq->data->timeout_clks);
        }

        if (mrq->stop) {
                pr_debug("%s:     CMD%u arg %08x flags %08x\n",
                         mmc_hostname(host), mrq->stop->opcode,
                         mrq->stop->arg, mrq->stop->flags);
        }

        WARN_ON(!host->claimed);

        mrq->cmd->error = 0;
        mrq->cmd->mrq = mrq;
        if (mrq->sbc) {
                mrq->sbc->error = 0;
                mrq->sbc->mrq = mrq;
        }
        if (mrq->data) {
                BUG_ON(mrq->data->blksz > host->max_blk_size);
                BUG_ON(mrq->data->blocks > host->max_blk_count);
                BUG_ON(mrq->data->blocks * mrq->data->blksz >
                        host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
                sz = 0;
                for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
                        sz += sg->length;
                BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif
                mrq->cmd->data = mrq->data;
                mrq->data->error = 0;
                mrq->data->mrq = mrq;
                if (mrq->stop) {
                        mrq->data->stop = mrq->stop;
                        mrq->stop->error = 0;
                        mrq->stop->mrq = mrq;
                }
#ifdef CONFIG_MMC_PERF_PROFILING
                if (host->perf_enable)
                        host->perf.start = ktime_get();
#endif
        }
        mmc_host_clk_hold(host);
        led_trigger_event(host->led, LED_FULL);

        if (mmc_is_data_request(mrq)) {
                mmc_deferred_scaling(host);
                mmc_clk_scaling_start_busy(host, true);
        }

        __mmc_start_request(host, mrq);

        return 0;
}
static int mmc_cmdq_check_retune(struct mmc_host *host)
{
        bool cmdq_mode;
        int err = 0;

        if (!host->need_retune || host->doing_retune || !host->card ||
                        mmc_card_hs400es(host->card) ||
                        (host->ios.clock <= MMC_HIGH_DDR_MAX_DTR))
                return 0;

        cmdq_mode = mmc_card_cmdq(host->card);
        if (cmdq_mode) {
                err = mmc_cmdq_halt(host, true);
                if (err) {
                        pr_err("%s: %s: failed halting queue (%d)\n",
                                mmc_hostname(host), __func__, err);
                        host->cmdq_ops->dumpstate(host);
                        goto halt_failed;
                }
        }

        mmc_retune_hold(host);
        err = mmc_retune(host);
        mmc_retune_release(host);

        if (cmdq_mode) {
                if (mmc_cmdq_halt(host, false)) {
                        pr_err("%s: %s: cmdq unhalt failed\n",
                                mmc_hostname(host), __func__);
                        host->cmdq_ops->dumpstate(host);
                }
        }

halt_failed:
        pr_debug("%s: %s: Retuning done err: %d\n",
                        mmc_hostname(host), __func__, err);

        return err;
}
static int mmc_start_cmdq_request(struct mmc_host *host,
                                   struct mmc_request *mrq)
{
        int ret = 0;

        if (mrq->data) {
                pr_debug("%s:     blksz %d blocks %d flags %08x tsac %lu ms nsac %d\n",
                        mmc_hostname(host), mrq->data->blksz,
                        mrq->data->blocks, mrq->data->flags,
                        mrq->data->timeout_ns / NSEC_PER_MSEC,
                        mrq->data->timeout_clks);

                BUG_ON(mrq->data->blksz > host->max_blk_size);
                BUG_ON(mrq->data->blocks > host->max_blk_count);
                BUG_ON(mrq->data->blocks * mrq->data->blksz >
                        host->max_req_size);
                mrq->data->error = 0;
                mrq->data->mrq = mrq;
        }

        if (mrq->cmd) {
                mrq->cmd->error = 0;
                mrq->cmd->mrq = mrq;
        }

        mmc_host_clk_hold(host);
        mmc_cmdq_check_retune(host);
        if (likely(host->cmdq_ops->request)) {
                ret = host->cmdq_ops->request(host, mrq);
        } else {
                ret = -ENOENT;
                pr_err("%s: %s: cmdq request host op is not available\n",
                        mmc_hostname(host), __func__);
        }

        if (ret) {
                mmc_host_clk_release(host);
                pr_err("%s: %s: issue request failed, err=%d\n",
                        mmc_hostname(host), __func__, ret);
        }

        return ret;
}
/**
 * mmc_blk_init_bkops_statistics - initialize bkops statistics
 * @card: MMC card to start BKOPS
 *
 * Initialize and enable the bkops statistics
 */
void mmc_blk_init_bkops_statistics(struct mmc_card *card)
{
        int i;
        struct mmc_bkops_stats *stats;

        if (!card)
                return;

        stats = &card->bkops.stats;
        spin_lock(&stats->lock);

        stats->manual_start = 0;
        stats->hpi = 0;
        stats->auto_start = 0;
        stats->auto_stop = 0;
        for (i = 0 ; i < MMC_BKOPS_NUM_SEVERITY_LEVELS ; i++)
                stats->level[i] = 0;
        stats->enabled = true;

        spin_unlock(&stats->lock);
}
EXPORT_SYMBOL(mmc_blk_init_bkops_statistics);
static void mmc_update_bkops_hpi(struct mmc_bkops_stats *stats)
{
        spin_lock_irq(&stats->lock);
        if (stats->enabled)
                stats->hpi++;
        spin_unlock_irq(&stats->lock);
}

static void mmc_update_bkops_start(struct mmc_bkops_stats *stats)
{
        spin_lock_irq(&stats->lock);
        if (stats->enabled)
                stats->manual_start++;
        spin_unlock_irq(&stats->lock);
}

static void mmc_update_bkops_auto_on(struct mmc_bkops_stats *stats)
{
        spin_lock_irq(&stats->lock);
        if (stats->enabled)
                stats->auto_start++;
        spin_unlock_irq(&stats->lock);
}

static void mmc_update_bkops_auto_off(struct mmc_bkops_stats *stats)
{
        spin_lock_irq(&stats->lock);
        if (stats->enabled)
                stats->auto_stop++;
        spin_unlock_irq(&stats->lock);
}

static void mmc_update_bkops_level(struct mmc_bkops_stats *stats,
                                        unsigned int level)
{
        BUG_ON(level >= MMC_BKOPS_NUM_SEVERITY_LEVELS);
        spin_lock_irq(&stats->lock);
        if (stats->enabled)
                stats->level[level]++;
        spin_unlock_irq(&stats->lock);
}
/**
 * mmc_set_auto_bkops - set auto BKOPS for supported cards
 * @card: MMC card to start BKOPS
 * @enable: enable/disable flag
 *
 * Configure the card to run automatic BKOPS.
 *
 * Should be called when host is claimed.
 */
int mmc_set_auto_bkops(struct mmc_card *card, bool enable)
{
        int ret = 0;
        u8 bkops_en;

        BUG_ON(!card);
        enable = !!enable;

        if (unlikely(!mmc_card_support_auto_bkops(card))) {
                pr_err("%s: %s: card doesn't support auto bkops\n",
                        mmc_hostname(card->host), __func__);
                return -EPERM;
        }

        if (enable) {
                if (mmc_card_doing_auto_bkops(card))
                        goto out;
                bkops_en = card->ext_csd.bkops_en | EXT_CSD_BKOPS_AUTO_EN;
        } else {
                if (!mmc_card_doing_auto_bkops(card))
                        goto out;
                bkops_en = card->ext_csd.bkops_en & ~EXT_CSD_BKOPS_AUTO_EN;
        }

        ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN,
                        bkops_en, 0);
        if (ret) {
                pr_err("%s: %s: error in setting auto bkops to %d (%d)\n",
                        mmc_hostname(card->host), __func__, enable, ret);
        } else {
                if (enable) {
                        mmc_card_set_auto_bkops(card);
                        mmc_update_bkops_auto_on(&card->bkops.stats);
                } else {
                        mmc_card_clr_auto_bkops(card);
                        mmc_update_bkops_auto_off(&card->bkops.stats);
                }
                card->ext_csd.bkops_en = bkops_en;
                pr_debug("%s: %s: bkops state %x\n",
                        mmc_hostname(card->host), __func__, bkops_en);
        }
out:
        return ret;
}
EXPORT_SYMBOL(mmc_set_auto_bkops);
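/*
 * Usage sketch (assumed caller context, not from this file): auto bkops is
 * toggled with the host claimed, for example:
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_set_auto_bkops(card, true);
 *	mmc_release_host(card->host);
 */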
/**
 * mmc_check_bkops - check BKOPS for supported cards
 * @card: MMC card to check BKOPS
 *
 * Read the BKOPS status in order to determine whether the
 * card requires bkops to be started.
 */
void mmc_check_bkops(struct mmc_card *card)
{
        int err;

        BUG_ON(!card);

        if (mmc_card_doing_bkops(card))
                return;

        err = mmc_read_bkops_status(card);
        if (err) {
                pr_err("%s: Failed to read bkops status: %d\n",
                       mmc_hostname(card->host), err);
                return;
        }

        card->bkops.needs_check = false;

        mmc_update_bkops_level(&card->bkops.stats,
                                card->ext_csd.raw_bkops_status);

        card->bkops.needs_bkops = card->ext_csd.raw_bkops_status > 0;
}
EXPORT_SYMBOL(mmc_check_bkops);
/**
 * mmc_start_manual_bkops - start BKOPS for supported cards
 * @card: MMC card to start BKOPS
 *
 * Send START_BKOPS to the card.
 * The function should be called with claimed host.
 */
void mmc_start_manual_bkops(struct mmc_card *card)
{
        int err;

        BUG_ON(!card);

        if (unlikely(!mmc_card_configured_manual_bkops(card)))
                return;

        if (mmc_card_doing_bkops(card))
                return;

        mmc_retune_hold(card->host);

        err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_START,
                                1, 0, false, true, false);
        if (err) {
                pr_err("%s: Error %d starting manual bkops\n",
                        mmc_hostname(card->host), err);
        } else {
                mmc_card_set_doing_bkops(card);
                mmc_update_bkops_start(&card->bkops.stats);
                card->bkops.needs_bkops = false;
        }

        mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_start_manual_bkops);
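/*
 * Illustrative sketch (not from this file): an idle-time caller would chain
 * the two helpers above, checking urgency first and starting manual bkops
 * only when the card reported a non-zero severity level:
 *
 *	mmc_claim_host(card->host);
 *	mmc_check_bkops(card);
 *	if (card->bkops.needs_bkops)
 *		mmc_start_manual_bkops(card);
 *	mmc_release_host(card->host);
 */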
/*
 * mmc_wait_data_done() - done callback for data request
 * @mrq: done data request
 *
 * Wakes up mmc context, passed as a callback to host controller driver
 */
static void mmc_wait_data_done(struct mmc_request *mrq)
{
        unsigned long flags;
        struct mmc_context_info *context_info = &mrq->host->context_info;

        spin_lock_irqsave(&context_info->lock, flags);
        context_info->is_done_rcv = true;
        wake_up_interruptible(&context_info->wait);
        spin_unlock_irqrestore(&context_info->lock, flags);
}
static void mmc_wait_done(struct mmc_request *mrq)
{
        complete(&mrq->completion);
}
/*
 * __mmc_start_data_req() - starts data request
 * @host: MMC host to start the request
 * @mrq: data request to start
 *
 * Sets the done callback to be called when request is completed by the card.
 * Starts data mmc request execution
 */
static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
{
        int err;

        mrq->done = mmc_wait_data_done;
        mrq->host = host;

        err = mmc_start_request(host, mrq);
        if (err) {
                mrq->cmd->error = err;
                mmc_wait_data_done(mrq);
        }

        return err;
}
static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
        int err;

        init_completion(&mrq->completion);
        mrq->done = mmc_wait_done;

        err = mmc_start_request(host, mrq);
        if (err) {
                mrq->cmd->error = err;
                complete(&mrq->completion);
        }

        return err;
}
/*
 * mmc_wait_for_data_req_done() - wait for request completed
 * @host: MMC host to prepare the command.
 * @mrq: MMC request to wait for
 *
 * Blocks MMC context till host controller will ack end of data request
 * execution or new request notification arrives from the block layer.
 * Handles command retries.
 *
 * Returns enum mmc_blk_status after checking errors.
 */
static int mmc_wait_for_data_req_done(struct mmc_host *host,
                                      struct mmc_request *mrq,
                                      struct mmc_async_req *next_req)
{
        struct mmc_command *cmd;
        struct mmc_context_info *context_info = &host->context_info;
        int err;
        bool is_done_rcv = false;
        unsigned long flags;

        while (1) {
                wait_event_interruptible(context_info->wait,
                                (context_info->is_done_rcv ||
                                 context_info->is_new_req));
                spin_lock_irqsave(&context_info->lock, flags);
                is_done_rcv = context_info->is_done_rcv;
                context_info->is_waiting_last_req = false;
                spin_unlock_irqrestore(&context_info->lock, flags);
                if (is_done_rcv) {
                        context_info->is_done_rcv = false;
                        context_info->is_new_req = false;
                        cmd = mrq->cmd;

                        if (!cmd->error || !cmd->retries ||
                            mmc_card_removed(host->card)) {
                                err = host->areq->err_check(host->card,
                                                            host->areq);
                                break; /* return err */
                        } else {
                                mmc_retune_recheck(host);
                                pr_info("%s: req failed (CMD%u): %d, retrying...\n",
                                        mmc_hostname(host),
                                        cmd->opcode, cmd->error);
                                cmd->retries--;
                                cmd->error = 0;
                                __mmc_start_request(host, mrq);
                                continue; /* wait for done/new event again */
                        }
                } else if (context_info->is_new_req) {
                        context_info->is_new_req = false;
                        if (!next_req)
                                return MMC_BLK_NEW_REQUEST;
                }
        }
        mmc_retune_release(host);
        return err;
}
static void mmc_wait_for_req_done(struct mmc_host *host,
                                  struct mmc_request *mrq)
{
        struct mmc_command *cmd;

        while (1) {
                wait_for_completion_io(&mrq->completion);

                cmd = mrq->cmd;

                /*
                 * If host has timed out waiting for the sanitize/bkops
                 * to complete, card might be still in programming state
                 * so let's try to bring the card out of programming
                 * state.
                 */
                if ((cmd->bkops_busy || cmd->sanitize_busy) && cmd->error == -ETIMEDOUT) {
                        if (!mmc_interrupt_hpi(host->card)) {
                                pr_warn("%s: %s: Interrupted sanitize/bkops\n",
                                        mmc_hostname(host), __func__);
                                cmd->error = 0;
                                break;
                        } else {
                                pr_err("%s: %s: Failed to interrupt sanitize\n",
                                       mmc_hostname(host), __func__);
                        }
                }
                if (!cmd->error || !cmd->retries ||
                    mmc_card_removed(host->card)) {
                        if (cmd->error && !cmd->retries &&
                             cmd->opcode != MMC_SEND_STATUS &&
                             cmd->opcode != MMC_SEND_TUNING_BLOCK &&
                             cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
                                mmc_recovery_fallback_lower_speed(host);
                        break;
                }

                mmc_retune_recheck(host);

                pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
                         mmc_hostname(host), cmd->opcode, cmd->error);
                cmd->retries--;
                cmd->error = 0;
                __mmc_start_request(host, mrq);
        }

        mmc_retune_release(host);
}
/**
 *	mmc_pre_req - Prepare for a new request
 *	@host: MMC host to prepare command
 *	@mrq: MMC request to prepare for
 *	@is_first_req: true if there is no previous started request
 *		       that may run in parallel to this call, otherwise false
 *
 *	mmc_pre_req() is called prior to mmc_start_req() to let the
 *	host prepare for the new request. Preparation of a request may be
 *	performed while another request is running on the host.
 */
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
                 bool is_first_req)
{
        if (host->ops->pre_req) {
                mmc_host_clk_hold(host);
                host->ops->pre_req(host, mrq, is_first_req);
                mmc_host_clk_release(host);
        }
}
/**
 *	mmc_post_req - Post process a completed request
 *	@host: MMC host to post process command
 *	@mrq: MMC request to post process for
 *	@err: Error, if non zero, clean up any resources made in pre_req
 *
 *	Let the host post process a completed request. Post processing of
 *	a request may be performed while another request is running.
 */
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
                         int err)
{
        if (host->ops->post_req) {
                mmc_host_clk_hold(host);
                host->ops->post_req(host, mrq, err);
                mmc_host_clk_release(host);
        }
}
/**
 *	mmc_cmdq_discard_card_queue - discard the task[s] in the device
 *	@host: host instance
 *	@tasks: mask of tasks to be knocked off
 *		0: remove all queued tasks
 */
int mmc_cmdq_discard_queue(struct mmc_host *host, u32 tasks)
{
        return mmc_discard_queue(host, tasks);
}
EXPORT_SYMBOL(mmc_cmdq_discard_queue);
/**
 *	mmc_cmdq_post_req - post process of a completed request
 *	@host: host instance
 *	@tag: the request tag.
 *	@err: non-zero is error, success otherwise
 */
void mmc_cmdq_post_req(struct mmc_host *host, int tag, int err)
{
        if (likely(host->cmdq_ops->post_req))
                host->cmdq_ops->post_req(host, tag, err);
}
EXPORT_SYMBOL(mmc_cmdq_post_req);
/**
 *	mmc_cmdq_halt - halt/un-halt the command queue engine
 *	@host: host instance
 *	@halt: true - halt, un-halt otherwise
 *
 *	Host halts the command queue engine. It should complete
 *	the ongoing transfer and release the bus.
 *	All legacy commands can be sent upon successful
 *	completion of this function.
 *	Returns 0 on success, negative otherwise
 */
int mmc_cmdq_halt(struct mmc_host *host, bool halt)
{
        int err = 0;

        if (mmc_host_cq_disable(host)) {
                pr_debug("%s: %s: CQE is already disabled\n",
                                mmc_hostname(host), __func__);
                return 0;
        }

        if ((halt && mmc_host_halt(host)) ||
                        (!halt && !mmc_host_halt(host))) {
                pr_debug("%s: %s: CQE is already %s\n", mmc_hostname(host),
                                __func__, halt ? "halted" : "un-halted");
                return 0;
        }

        mmc_host_clk_hold(host);
        if (host->cmdq_ops->halt) {
                err = host->cmdq_ops->halt(host, halt);
                if (!err && host->ops->notify_halt)
                        host->ops->notify_halt(host, halt);
                if (!err && halt)
                        mmc_host_set_halt(host);
                else if (!err && !halt) {
                        mmc_host_clr_halt(host);
                        wake_up(&host->cmdq_ctx.wait);
                }
        } else {
                err = -ENOSYS;
        }
        mmc_host_clk_release(host);
        return err;
}
EXPORT_SYMBOL(mmc_cmdq_halt);
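/*
 * Illustrative sketch (not from this file): per the comment above, legacy
 * commands sent while CMDQ is enabled must be bracketed by halt/un-halt:
 *
 *	err = mmc_cmdq_halt(host, true);
 *	if (!err) {
 *		err = mmc_wait_for_cmd(host, &cmd, 0);
 *		if (mmc_cmdq_halt(host, false))
 *			pr_err("%s: unhalt failed\n", mmc_hostname(host));
 *	}
 */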
int mmc_cmdq_start_req(struct mmc_host *host, struct mmc_cmdq_req *cmdq_req)
{
        struct mmc_request *mrq = &cmdq_req->mrq;

        mrq->host = host;
        if (mmc_card_removed(host->card)) {
                mrq->cmd->error = -ENOMEDIUM;
                return -ENOMEDIUM;
        }
        return mmc_start_cmdq_request(host, mrq);
}
EXPORT_SYMBOL(mmc_cmdq_start_req);
static void mmc_cmdq_dcmd_req_done(struct mmc_request *mrq)
{
        mmc_host_clk_release(mrq->host);
        complete(&mrq->completion);
}
int mmc_cmdq_wait_for_dcmd(struct mmc_host *host,
                        struct mmc_cmdq_req *cmdq_req)
{
        struct mmc_request *mrq = &cmdq_req->mrq;
        struct mmc_command *cmd = mrq->cmd;
        int err = 0;

        init_completion(&mrq->completion);
        mrq->done = mmc_cmdq_dcmd_req_done;
        err = mmc_cmdq_start_req(host, cmdq_req);
        if (err)
                return err;

        wait_for_completion_io(&mrq->completion);
        if (cmd->error) {
                pr_err("%s: DCMD %d failed with err %d\n",
                                mmc_hostname(host), cmd->opcode,
                                cmd->error);
                err = cmd->error;
                mmc_host_clk_hold(host);
                host->cmdq_ops->dumpstate(host);
                mmc_host_clk_release(host);
        }
        return err;
}
EXPORT_SYMBOL(mmc_cmdq_wait_for_dcmd);
int mmc_cmdq_prepare_flush(struct mmc_command *cmd)
{
        return __mmc_switch_cmdq_mode(cmd, EXT_CSD_CMD_SET_NORMAL,
                                      EXT_CSD_FLUSH_CACHE, 1,
                                      0, true, true);
}
EXPORT_SYMBOL(mmc_cmdq_prepare_flush);
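/*
 * Illustrative sketch (hypothetical block-layer caller, not from this
 * file): a cache flush DCMD is prepared with the helper above and then
 * issued synchronously; "cmdq_req" is assumed to be set up by the caller's
 * queue layer:
 *
 *	mmc_cmdq_prepare_flush(cmdq_req->mrq.cmd);
 *	err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
 */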
/**
 *	mmc_start_req - start a non-blocking request
 *	@host: MMC host to start command
 *	@areq: async request to start
 *	@error: out parameter returns 0 for success, otherwise non zero
 *
 *	Start a new MMC custom command request for a host.
 *	If there is an ongoing async request, wait for completion
 *	of that request and start the new one and return.
 *	Does not wait for the new request to complete.
 *
 *	Returns the completed request, NULL in case of none completed.
 *	Wait for an ongoing request (previously started) to complete and
 *	return the completed request. If there is no ongoing request, NULL
 *	is returned without waiting. NULL is not an error condition.
 */
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
                                    struct mmc_async_req *areq, int *error)
{
        int err = 0;
        struct mmc_async_req *data = host->areq;

        /* Prepare a new request */
        if (areq)
                mmc_pre_req(host, areq->mrq, !host->areq);

        if (host->areq) {
                err = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
                if (err == MMC_BLK_NEW_REQUEST) {
                        if (error)
                                *error = err;
                        /*
                         * The previous request was not completed,
                         * nothing to return
                         */
                        return NULL;
                }
                /*
                 * Check BKOPS urgency for each R1 response
                 */
                if (host->card && mmc_card_mmc(host->card) &&
                    ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
                     (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
                    (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {

                        /* Cancel the prepared request */
                        if (areq)
                                mmc_post_req(host, areq->mrq, -EINVAL);

                        mmc_check_bkops(host->card);

                        /* prepare the request again */
                        if (areq)
                                mmc_pre_req(host, areq->mrq, !host->areq);
                }
        }

        if (!err && areq) {
                if (host->latency_hist_enabled) {
                        areq->mrq->io_start = ktime_get();
                        areq->mrq->lat_hist_enabled = 1;
                } else
                        areq->mrq->lat_hist_enabled = 0;
                trace_mmc_blk_rw_start(areq->mrq->cmd->opcode,
                                       areq->mrq->cmd->arg,
                                       areq->mrq->data);
                __mmc_start_data_req(host, areq->mrq);
        }

        if (host->areq)
                mmc_post_req(host, host->areq->mrq, 0);

        /* Cancel a prepared request if it was not started. */
        if (err && areq)
                mmc_post_req(host, areq->mrq, -EINVAL);

        if (err)
                host->areq = NULL;
        else
                host->areq = areq;

        if (error)
                *error = err;
        return data;
}
EXPORT_SYMBOL(mmc_start_req);
/**
 *	mmc_wait_for_req - start a request and wait for completion
 *	@host: MMC host to start command
 *	@mrq: MMC request to start
 *
 *	Start a new MMC custom command request for a host, and wait
 *	for the command to complete. Does not attempt to parse the
 *	response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
        if (mmc_bus_needs_resume(host))
                mmc_resume_bus(host);

        __mmc_start_req(host, mrq);
        mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);
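/*
 * Illustrative sketch (not from this file): a synchronous single-block read
 * built on mmc_wait_for_req(); "buf", "blk_addr", "card" and "host" are
 * assumed to come from the caller:
 *
 *	struct mmc_request mrq = {NULL};
 *	struct mmc_command cmd = {0};
 *	struct mmc_data data = {0};
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, 512);
 *	cmd.opcode = MMC_READ_SINGLE_BLOCK;
 *	cmd.arg = blk_addr;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 *	data.blksz = 512;
 *	data.blocks = 1;
 *	data.flags = MMC_DATA_READ;
 *	data.sg = &sg;
 *	data.sg_len = 1;
 *	mrq.cmd = &cmd;
 *	mrq.data = &data;
 *	mmc_set_data_timeout(&data, card);
 *	mmc_wait_for_req(host, &mrq);
 */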
/**
 *	mmc_interrupt_hpi - Issue a High Priority Interrupt
 *	@card: the MMC card associated with the HPI transfer
 *
 *	Issue a High Priority Interrupt, and check the card status
 *	until it is out of the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
        int err;
        u32 status;
        unsigned long prg_wait;

        BUG_ON(!card);

        if (!card->ext_csd.hpi_en) {
                pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
                return 1;
        }

        mmc_claim_host(card->host);
        err = mmc_send_status(card, &status);
        if (err) {
                pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
                goto out;
        }

        switch (R1_CURRENT_STATE(status)) {
        case R1_STATE_IDLE:
        case R1_STATE_READY:
        case R1_STATE_STBY:
        case R1_STATE_TRAN:
                /*
                 * In idle and transfer states, HPI is not needed and the caller
                 * can issue the next intended command immediately
                 */
                goto out;
        case R1_STATE_PRG:
                break;
        default:
                /* In all other states, it's illegal to issue HPI */
                pr_debug("%s: HPI cannot be sent. Card state=%d\n",
                        mmc_hostname(card->host), R1_CURRENT_STATE(status));
                err = -EINVAL;
                goto out;
        }

        err = mmc_send_hpi_cmd(card, &status);

        prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
        do {
                err = mmc_send_status(card, &status);

                if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
                        break;
                if (time_after(jiffies, prg_wait)) {
                        err = mmc_send_status(card, &status);
                        if (!err && R1_CURRENT_STATE(status) != R1_STATE_TRAN)
                                err = -ETIMEDOUT;
                        else
                                break;
                }
        } while (!err);

out:
        mmc_release_host(card->host);
        return err;
}
EXPORT_SYMBOL(mmc_interrupt_hpi);
/**
 *	mmc_wait_for_cmd - start a command and wait for completion
 *	@host: MMC host to start command
 *	@cmd: MMC command to start
 *	@retries: maximum number of retries
 *
 *	Start a new MMC command for a host, and wait for the command
 *	to complete. Return any error that occurred while the command
 *	was executing. Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
        struct mmc_request mrq = {NULL};

        WARN_ON(!host->claimed);

        memset(cmd->resp, 0, sizeof(cmd->resp));
        cmd->retries = retries;

        mrq.cmd = cmd;
        cmd->data = NULL;

        mmc_wait_for_req(host, &mrq);

        return cmd->error;
}
EXPORT_SYMBOL(mmc_wait_for_cmd);
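/*
 * Usage sketch (not from this file): querying card status with CMD13 via
 * mmc_wait_for_cmd(), with the host already claimed (flags shown are for
 * the native, non-SPI case):
 *
 *	struct mmc_command cmd = {0};
 *
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *	err = mmc_wait_for_cmd(card->host, &cmd, 3);
 */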
/**
 *	mmc_stop_bkops - stop ongoing BKOPS
 *	@card: MMC card to check BKOPS
 *
 *	Send HPI command to stop ongoing background operations to
 *	allow rapid servicing of foreground operations, e.g. read/
 *	writes. Wait until the card comes out of the programming state
 *	to avoid errors in servicing read/write requests.
 */
int mmc_stop_bkops(struct mmc_card *card)
{
        int err = 0;

        BUG_ON(!card);
        if (unlikely(!mmc_card_configured_manual_bkops(card)))
                goto out;
        if (!mmc_card_doing_bkops(card))
                goto out;

        err = mmc_interrupt_hpi(card);

        /*
         * If err is EINVAL, we can't issue an HPI.
         * It should complete the BKOPS.
         */
        if (!err || (err == -EINVAL)) {
                mmc_card_clr_doing_bkops(card);
                mmc_update_bkops_hpi(&card->bkops.stats);
                mmc_retune_release(card->host);
                err = 0;
        }
out:
        return err;
}
EXPORT_SYMBOL(mmc_stop_bkops);
int mmc_read_bkops_status(struct mmc_card *card)
{
        int err;
        u8 *ext_csd;

        mmc_claim_host(card->host);
        err = mmc_get_ext_csd(card, &ext_csd);
        mmc_release_host(card->host);
        if (err)
                return err;

        card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS] &
                MMC_BKOPS_URGENCY_MASK;
        card->ext_csd.raw_exception_status =
                ext_csd[EXT_CSD_EXP_EVENTS_STATUS] & (EXT_CSD_URGENT_BKOPS |
                                                      EXT_CSD_DYNCAP_NEEDED |
                                                      EXT_CSD_SYSPOOL_EXHAUSTED |
                                                      EXT_CSD_PACKED_FAILURE);

        kfree(ext_csd);
        return 0;
}
EXPORT_SYMBOL(mmc_read_bkops_status);
/**
 *	mmc_set_data_timeout - set the timeout for a data command
 *	@data: data phase for command
 *	@card: the MMC card associated with the data transfer
 *
 *	Computes the data timeout parameters according to the
 *	correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
        unsigned int mult;

        /*
         * SDIO cards only define an upper 1 s limit on access.
         */
        if (mmc_card_sdio(card)) {
                data->timeout_ns = 1000000000;
                data->timeout_clks = 0;
                return;
        }

        /*
         * SD cards use a 100 multiplier rather than 10
         */
        mult = mmc_card_sd(card) ? 100 : 10;

        /*
         * Scale up the multiplier (and therefore the timeout) by
         * the r2w factor for writes.
         */
        if (data->flags & MMC_DATA_WRITE)
                mult <<= card->csd.r2w_factor;

        data->timeout_ns = card->csd.tacc_ns * mult;
        data->timeout_clks = card->csd.tacc_clks * mult;

        /*
         * SD cards also have an upper limit on the timeout.
         */
        if (mmc_card_sd(card)) {
                unsigned int timeout_us, limit_us;

                timeout_us = data->timeout_ns / 1000;
                if (mmc_host_clk_rate(card->host))
                        timeout_us += data->timeout_clks * 1000 /
                                (mmc_host_clk_rate(card->host) / 1000);

                if (data->flags & MMC_DATA_WRITE)
                        /*
                         * The MMC spec notes: "It is strongly recommended
                         * for hosts to implement more than 500ms
                         * timeout value even if the card indicates
                         * the 250ms maximum busy length."  Even the
                         * previous value of 300ms is known to be
                         * insufficient for some cards.
                         */
                        limit_us = 3000000;
                else
                        limit_us = 100000;

                /*
                 * SDHC cards always use these fixed values.
                 */
                if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
                        data->timeout_ns = limit_us * 1000;
                        data->timeout_clks = 0;
                }

                /* assign limit value if invalid */
                if (timeout_us == 0)
                        data->timeout_ns = limit_us * 1000;
        }

        /*
         * Some cards require longer data read timeout than indicated in CSD.
         * Address this by setting the read timeout to a "reasonably high"
         * value. For the cards tested, 600ms has proven enough. If necessary,
         * this value can be increased if other problematic cards require this.
         * Certain Hynix 5.x cards give read timeouts even with 300ms, so
         * increase further to the max value (4s).
         */
        if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
                data->timeout_ns = 4000000000u;
                data->timeout_clks = 0;
        }

        /*
         * Some cards need very high timeouts if driven in SPI mode.
         * The worst observed timeout was 900ms after writing a
         * continuous stream of data until the internal logic
         * overflowed.
         */
        if (mmc_host_is_spi(card->host)) {
                if (data->flags & MMC_DATA_WRITE) {
                        if (data->timeout_ns < 1000000000)
                                data->timeout_ns = 1000000000;	/* 1s */
                } else {
                        if (data->timeout_ns < 100000000)
                                data->timeout_ns = 100000000;	/* 100ms */
                }
        }
        /* Increase the timeout values for some bad INAND MCP devices */
        if (card->quirks & MMC_QUIRK_INAND_DATA_TIMEOUT) {
                data->timeout_ns = 4000000000u; /* 4s */
                data->timeout_clks = 0;
        }
}
EXPORT_SYMBOL(mmc_set_data_timeout);
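/*
 * Usage note (illustrative): since writes scale the timeout by the CSD r2w
 * factor, this helper must run after data->flags has been set, e.g.:
 *
 *	data.flags = MMC_DATA_WRITE;
 *	mmc_set_data_timeout(&data, card);
 */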
/**
 *	mmc_align_data_size - pads a transfer size to a more optimal value
 *	@card: the MMC card associated with the data transfer
 *	@sz: original transfer size
 *
 *	Pads the original data size with a number of extra bytes in
 *	order to avoid controller bugs and/or performance hits
 *	(e.g. some controllers revert to PIO for certain sizes).
 *
 *	Returns the improved size, which might be unmodified.
 *
 *	Note that this function is only relevant when issuing a
 *	single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
        /*
         * FIXME: We don't have a system for the controller to tell
         * the core about its problems yet, so for now we just 32-bit
         * align the size.
         */
        sz = ((sz + 3) / 4) * 4;

        return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);
/**
 *	__mmc_claim_host - exclusively claim a host
 *	@host: mmc host to claim
 *	@abort: whether or not the operation should be aborted
 *
 *	Claim a host for a set of operations.  If @abort is non null and
 *	dereferences to a non-zero value then this will return prematurely
 *	with that non-zero value without acquiring the lock.  Returns zero
 *	with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
        DECLARE_WAITQUEUE(wait, current);
        unsigned long flags;
        int stop;
        bool pm = false;

        might_sleep();

        add_wait_queue(&host->wq, &wait);

        spin_lock_irqsave(&host->lock, flags);
        while (1) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                stop = abort ? atomic_read(abort) : 0;
                if (stop || !host->claimed || host->claimer == current)
                        break;
                spin_unlock_irqrestore(&host->lock, flags);
                schedule();
                spin_lock_irqsave(&host->lock, flags);
        }
        set_current_state(TASK_RUNNING);
        if (!stop) {
                host->claimed = 1;
                host->claimer = current;
                host->claim_cnt += 1;
                if (host->claim_cnt == 1)
                        pm = true;
        } else
                wake_up(&host->wq);
        spin_unlock_irqrestore(&host->lock, flags);
        remove_wait_queue(&host->wq, &wait);

        if (pm)
                pm_runtime_get_sync(mmc_dev(host));

        if (host->ops->enable && !stop && host->claim_cnt == 1)
                host->ops->enable(host);

        return stop;
}
EXPORT_SYMBOL(__mmc_claim_host);
/**
 *	mmc_try_claim_host - try exclusively to claim a host
 *	and keep trying for given time, with a gap of 10ms
 *	@host: mmc host to claim
 *	@delay_ms: delay in ms
 *
 *	Returns %1 if the host is claimed, %0 otherwise.
 */
int mmc_try_claim_host(struct mmc_host *host, unsigned int delay_ms)
{
        int claimed_host = 0;
        unsigned long flags;
        int retry_cnt = delay_ms/10;
        bool pm = false;

        do {
                spin_lock_irqsave(&host->lock, flags);
                if (!host->claimed || host->claimer == current) {
                        host->claimed = 1;
                        host->claimer = current;
                        host->claim_cnt += 1;
                        claimed_host = 1;
                        if (host->claim_cnt == 1)
                                pm = true;
                }
                spin_unlock_irqrestore(&host->lock, flags);
                if (!claimed_host)
                        mmc_delay(10);
        } while (!claimed_host && retry_cnt--);

        if (pm)
                pm_runtime_get_sync(mmc_dev(host));

        if (host->ops->enable && claimed_host && host->claim_cnt == 1)
                host->ops->enable(host);
        return claimed_host;
}
EXPORT_SYMBOL(mmc_try_claim_host);
/**
 *	mmc_release_host - release a host
 *	@host: mmc host to release
 *
 *	Release a MMC host, allowing others to claim the host
 *	for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
        unsigned long flags;

        WARN_ON(!host->claimed);

        if (host->ops->disable && host->claim_cnt == 1)
                host->ops->disable(host);

        spin_lock_irqsave(&host->lock, flags);
        if (--host->claim_cnt) {
                /* Release for nested claim */
                spin_unlock_irqrestore(&host->lock, flags);
        } else {
                host->claimed = 0;
                host->claimer = NULL;
                spin_unlock_irqrestore(&host->lock, flags);
                wake_up(&host->wq);
                pm_runtime_mark_last_busy(mmc_dev(host));
                pm_runtime_put_autosuspend(mmc_dev(host));
        }
}
EXPORT_SYMBOL(mmc_release_host);
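/*
 * Usage sketch (not from this file): the claim/release pair brackets any
 * sequence of operations that must own the bus exclusively:
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_send_status(card, &status);
 *	mmc_release_host(card->host);
 */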
/*
 * This is a helper function, which fetches a runtime pm reference for the
 * card device and also claims the host.
 */
void mmc_get_card(struct mmc_card *card)
{
        pm_runtime_get_sync(&card->dev);
        mmc_claim_host(card->host);

        if (mmc_bus_needs_resume(card->host))
                mmc_resume_bus(card->host);
}
EXPORT_SYMBOL(mmc_get_card);
/*
 * This is a helper function, which releases the host and drops the runtime
 * pm reference for the card device.
 */
void mmc_put_card(struct mmc_card *card)
{
        mmc_release_host(card->host);
        pm_runtime_mark_last_busy(&card->dev);
        pm_runtime_put_autosuspend(&card->dev);
}
EXPORT_SYMBOL(mmc_put_card);
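/*
 * Usage sketch (not from this file): mmc_get_card()/mmc_put_card() wrap the
 * claim/release pair with runtime-pm references and are preferred where a
 * card pointer is at hand:
 *
 *	mmc_get_card(card);
 *	err = mmc_read_bkops_status(card);
 *	mmc_put_card(card);
 */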
/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
void mmc_set_ios(struct mmc_host *host)
{
        struct mmc_ios *ios = &host->ios;

        pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
                "width %u timing %u\n",
                 mmc_hostname(host), ios->clock, ios->bus_mode,
                 ios->power_mode, ios->chip_select, ios->vdd,
                 1 << ios->bus_width, ios->timing);

        if (ios->clock > 0)
                mmc_set_ungated(host);
        host->ops->set_ios(host, ios);
        if (ios->old_rate != ios->clock) {
                if (likely(ios->clk_ts)) {
                        char trace_info[80];

                        snprintf(trace_info, 80,
                                "%s: freq_KHz %d --> %d | t = %d",
                                mmc_hostname(host), ios->old_rate / 1000,
                                ios->clock / 1000, jiffies_to_msecs(
                                        (long)jiffies - (long)ios->clk_ts));
                        trace_mmc_clk(trace_info);
                }
                ios->old_rate = ios->clock;
                ios->clk_ts = jiffies;
        }
}
EXPORT_SYMBOL(mmc_set_ios);
/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
        mmc_host_clk_hold(host);
        host->ios.chip_select = mode;
        mmc_set_ios(host);
        mmc_host_clk_release(host);
}
/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
        WARN_ON(hz && hz < host->f_min);

        if (hz > host->f_max)
                hz = host->f_max;

        host->ios.clock = hz;
        mmc_set_ios(host);
}

void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
        mmc_host_clk_hold(host);
        __mmc_set_clock(host, hz);
        mmc_host_clk_release(host);
}
2417 #ifdef CONFIG_MMC_CLKGATE
2419 * This gates the clock by setting it to 0 Hz.
2421 void mmc_gate_clock(struct mmc_host *host)
2423 unsigned long flags;
2425 WARN_ON(!host->ios.clock);
2427 spin_lock_irqsave(&host->clk_lock, flags);
2428 host->clk_old = host->ios.clock;
2429 host->ios.clock = 0;
2430 host->clk_gated = true;
2431 spin_unlock_irqrestore(&host->clk_lock, flags);
2436 * This restores the clock from gating by using the cached
2437 * clock value.
2439 void mmc_ungate_clock(struct mmc_host *host)
2442 * We should previously have gated the clock, so the clock shall
2443 * be 0 here! The clock may however be 0 during initialization,
2444 * when some request operations are performed before setting
2445 * the frequency. When ungate is requested in that situation
2446 * we just ignore the call.
2448 if (host->clk_old) {
2449 WARN_ON(host->ios.clock);
2450 /* This call will also set host->clk_gated to false */
2451 __mmc_set_clock(host, host->clk_old);
2453 * We have seen that host controller's clock tuning circuit may
2454 * go out of sync if controller clocks are gated.
2455 * To work around this issue, we are triggering retuning of the
2456 * tuning circuit after ungating the controller clocks.
2458 mmc_retune_needed(host);
2462 void mmc_set_ungated(struct mmc_host *host)
2464 unsigned long flags;
2467 * We've been given a new frequency while the clock is gated,
2468 * so make sure we regard this as ungating it.
2470 spin_lock_irqsave(&host->clk_lock, flags);
2471 host->clk_gated = false;
2472 spin_unlock_irqrestore(&host->clk_lock, flags);
2476 void mmc_set_ungated(struct mmc_host *host)
2480 void mmc_gate_clock(struct mmc_host *host)
2485 int mmc_execute_tuning(struct mmc_card *card)
2487 struct mmc_host *host = card->host;
2491 if (!host->ops->execute_tuning)
2494 if (mmc_card_mmc(card))
2495 opcode = MMC_SEND_TUNING_BLOCK_HS200;
2497 opcode = MMC_SEND_TUNING_BLOCK;
2499 mmc_host_clk_hold(host);
2500 err = host->ops->execute_tuning(host, opcode);
2501 mmc_host_clk_release(host);
2504 pr_err("%s: tuning execution failed\n", mmc_hostname(host));
2506 mmc_retune_enable(host);
2512 * Change the bus mode (open drain/push-pull) of a host.
2514 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
2516 mmc_host_clk_hold(host);
2517 host->ios.bus_mode = mode;
2519 mmc_host_clk_release(host);
2523 * Change data bus width of a host.
2525 void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
2527 mmc_host_clk_hold(host);
2528 host->ios.bus_width = width;
2530 mmc_host_clk_release(host);
2534 * Set initial state after a power cycle or a hw_reset.
2536 void mmc_set_initial_state(struct mmc_host *host)
2538 mmc_retune_disable(host);
2540 if (mmc_host_is_spi(host))
2541 host->ios.chip_select = MMC_CS_HIGH;
2543 host->ios.chip_select = MMC_CS_DONTCARE;
2544 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
2546 host->ios.bus_width = MMC_BUS_WIDTH_1;
2547 host->ios.timing = MMC_TIMING_LEGACY;
2548 host->ios.drv_type = 0;
2554 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
2555 * @vdd: voltage (mV)
2556 * @low_bits: prefer low bits in boundary cases
2558 * This function returns the OCR bit number according to the provided @vdd
2559 * value. If conversion is not possible, a negative errno value is returned.
2561 * Depending on the @low_bits flag the function prefers low or high OCR bits
2562 * on boundary voltages. For example,
2563 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
2564 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
2566 * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
2568 static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
2570 const int max_bit = ilog2(MMC_VDD_35_36);
2573 if (vdd < 1650 || vdd > 3600)
2576 if (vdd >= 1650 && vdd <= 1950)
2577 return ilog2(MMC_VDD_165_195);
2582 /* Base 2000 mV, step 100 mV, bit's base 8. */
2583 bit = (vdd - 2000) / 100 + 8;
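/*
 * Worked example of the conversion above: vdd = 3300 mV gives
 * bit = (3300 - 2000) / 100 + 8 = 21, i.e. ilog2(MMC_VDD_33_34).
 * With @low_bits = true the lower boundary bit is preferred instead,
 * so 3300 mV yields bit 20, ilog2(MMC_VDD_32_33), matching the
 * kernel-doc above.
 */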
2590 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
2591 * @vdd_min: minimum voltage value (mV)
2592 * @vdd_max: maximum voltage value (mV)
2594 * This function returns the OCR mask bits according to the provided @vdd_min
2595 * and @vdd_max values. If conversion is not possible the function returns 0.
2597 * Notes wrt boundary cases:
2598 * This function sets the OCR bits for all boundary voltages, for example
2599 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
2600 * MMC_VDD_34_35 mask.
2602 u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
2606 if (vdd_max < vdd_min)
2609 /* Prefer high bits for the boundary vdd_max values. */
2610 vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
2614 /* Prefer low bits for the boundary vdd_min values. */
2615 vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
2619 /* Fill the mask, from max bit to min bit. */
2620 while (vdd_max >= vdd_min)
2621 mask |= 1 << vdd_max--;
2625 EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
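/*
 * Example for the boundary-handling note above (illustrative only):
 *
 *	u32 mask = mmc_vddrange_to_ocrmask(3300, 3400);
 *	// mask == MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35,
 *	// i.e. OCR bits 20, 21 and 22 are set.
 */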
2630 * mmc_of_parse_voltage - return mask of supported voltages
2631 * @np: the device node to be parsed
2632 * @mask: mask of voltages available for MMC/SD/SDIO
2634 * Returns zero on success, or a negative errno if the
2635 * voltage-range property is invalid.
2637 int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
2639 const u32 *voltage_ranges;
2642 voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
2643 num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
2644 if (!voltage_ranges) {
2645 pr_debug("%s: voltage-ranges unspecified\n", np->full_name);
2649 pr_err("%s: voltage-ranges empty\n", np->full_name);
2653 for (i = 0; i < num_ranges; i++) {
2654 const int j = i * 2;
2657 ocr_mask = mmc_vddrange_to_ocrmask(
2658 be32_to_cpu(voltage_ranges[j]),
2659 be32_to_cpu(voltage_ranges[j + 1]));
2661 pr_err("%s: voltage-range #%d is invalid\n",
2670 EXPORT_SYMBOL(mmc_of_parse_voltage);
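/*
 * Sketch of the device-tree input this parser expects and of a caller
 * in a host driver (node name and values are hypothetical):
 *
 *	mmc@f9824000 {
 *		voltage-ranges = <3300 3400>;
 *	};
 *
 *	u32 ocr_mask;
 *	if (!mmc_of_parse_voltage(np, &ocr_mask))
 *		host->ocr_avail = ocr_mask;
 */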
2672 #endif /* CONFIG_OF */
2674 static int mmc_of_get_func_num(struct device_node *node)
2679 ret = of_property_read_u32(node, "reg", &reg);
2686 struct device_node *mmc_of_find_child_device(struct mmc_host *host,
2689 struct device_node *node;
2691 if (!host->parent || !host->parent->of_node)
2694 for_each_child_of_node(host->parent->of_node, node) {
2695 if (mmc_of_get_func_num(node) == func_num)
2702 #ifdef CONFIG_REGULATOR
2705 * mmc_ocrbitnum_to_vdd - Convert an OCR bit number to its voltage
2706 * @vdd_bit: OCR bit number
2707 * @min_uV: minimum voltage value (uV)
2708 * @max_uV: maximum voltage value (uV)
2710 * This function returns the voltage range according to the provided OCR
2711 * bit number. If conversion is not possible, a negative errno value is returned.
2713 static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
2721 * REVISIT mmc_vddrange_to_ocrmask() may have set some
2722 * bits this regulator doesn't quite support ... don't
2723 * be too picky, most cards and regulators are OK with
2724 * a 0.1V range goof (it's a small error percentage).
2726 tmp = vdd_bit - ilog2(MMC_VDD_165_195);
2728 *min_uV = 1650 * 1000;
2729 *max_uV = 1950 * 1000;
2731 *min_uV = 1900 * 1000 + tmp * 100 * 1000;
2732 *max_uV = *min_uV + 100 * 1000;
2739 * mmc_regulator_get_ocrmask - return mask of supported voltages
2740 * @supply: regulator to use
2742 * This returns either a negative errno, or a mask of voltages that
2743 * can be provided to MMC/SD/SDIO devices using the specified voltage
2744 * regulator. This would normally be called before registering the
2747 int mmc_regulator_get_ocrmask(struct regulator *supply)
2755 count = regulator_count_voltages(supply);
2759 for (i = 0; i < count; i++) {
2760 vdd_uV = regulator_list_voltage(supply, i);
2764 vdd_mV = vdd_uV / 1000;
2765 result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
2769 vdd_uV = regulator_get_voltage(supply);
2773 vdd_mV = vdd_uV / 1000;
2774 result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
2779 EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
2782 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
2783 * @mmc: the host to regulate
2784 * @supply: regulator to use
2785 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
2787 * Returns zero on success, else negative errno.
2789 * MMC host drivers may use this to enable or disable a regulator using
2790 * a particular supply voltage. This would normally be called from the
2793 int mmc_regulator_set_ocr(struct mmc_host *mmc,
2794 struct regulator *supply,
2795 unsigned short vdd_bit)
2801 mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);
2803 result = regulator_set_voltage(supply, min_uV, max_uV);
2804 if (result == 0 && !mmc->regulator_enabled) {
2805 result = regulator_enable(supply);
2807 mmc->regulator_enabled = true;
2809 } else if (mmc->regulator_enabled) {
2810 result = regulator_disable(supply);
2812 mmc->regulator_enabled = false;
2816 dev_err(mmc_dev(mmc),
2817 "could not set regulator OCR (%d)\n", result);
2820 EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
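/*
 * Illustrative use from a host driver's ->set_ios() callback (a
 * sketch; "my_set_ios" is hypothetical and assumes the driver filled
 * mmc->supply via mmc_regulator_get_supply() below):
 *
 *	static void my_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 *	{
 *		if (!IS_ERR(mmc->supply.vmmc))
 *			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
 *					      ios->vdd);
 *	}
 */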
2822 static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
2823 int min_uV, int target_uV,
2827 * Check if supported first to avoid errors since we may try several
2828 * signal levels during power up and don't want to show errors.
2830 if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
2833 return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
2838 * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
2840 * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
2841 * That will match the behavior of old boards where VQMMC and VMMC were supplied
2842 * by the same supply. The Bus Operating conditions for 3.3V signaling in the
2843 * SD card spec also define VQMMC in terms of VMMC.
2844 * If this is not possible we'll try the full 2.7-3.6V of the spec.
2846 * For 1.2V and 1.8V signaling we'll try to get as close as possible to the
2847 * requested voltage. This is definitely a good idea for UHS where there's a
2848 * separate regulator on the card that's trying to make 1.8V and it's best if
2849 * we match.
2851 * This function is expected to be used by a controller's
2852 * start_signal_voltage_switch() function.
2854 int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
2856 struct device *dev = mmc_dev(mmc);
2857 int ret, volt, min_uV, max_uV;
2859 /* If no vqmmc supply then we can't change the voltage */
2860 if (IS_ERR(mmc->supply.vqmmc))
2863 switch (ios->signal_voltage) {
2864 case MMC_SIGNAL_VOLTAGE_120:
2865 return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
2866 1100000, 1200000, 1300000);
2867 case MMC_SIGNAL_VOLTAGE_180:
2868 return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
2869 1700000, 1800000, 1950000);
2870 case MMC_SIGNAL_VOLTAGE_330:
2871 ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
2875 dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
2876 __func__, volt, max_uV);
2878 min_uV = max(volt - 300000, 2700000);
2879 max_uV = min(max_uV + 200000, 3600000);
2882 * Due to a limitation in the current implementation of
2883 * regulator_set_voltage_triplet(), which takes the lowest
2884 * voltage possible if below the target, search for a suitable
2885 * voltage in two steps and try to stay close to vmmc
2886 * with a 0.3V tolerance at first.
2888 if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
2889 min_uV, volt, max_uV))
2892 return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
2893 2700000, volt, 3600000);
2898 EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
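/*
 * Illustrative controller hook (hypothetical function name): a
 * driver's ->start_signal_voltage_switch() can often just forward to
 * the helper above.
 *
 *	static int my_signal_voltage_switch(struct mmc_host *mmc,
 *					    struct mmc_ios *ios)
 *	{
 *		return mmc_regulator_set_vqmmc(mmc, ios);
 *	}
 */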
2900 #endif /* CONFIG_REGULATOR */
2902 int mmc_regulator_get_supply(struct mmc_host *mmc)
2904 struct device *dev = mmc_dev(mmc);
2907 mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
2908 mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
2910 if (IS_ERR(mmc->supply.vmmc)) {
2911 if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
2912 return -EPROBE_DEFER;
2913 dev_info(dev, "No vmmc regulator found\n");
2915 ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
2917 mmc->ocr_avail = ret;
2919 dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
2922 if (IS_ERR(mmc->supply.vqmmc)) {
2923 if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
2924 return -EPROBE_DEFER;
2925 dev_info(dev, "No vqmmc regulator found\n");
2930 EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
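/*
 * Typical probe-time usage (a sketch; surrounding probe code omitted):
 *
 *	ret = mmc_regulator_get_supply(mmc);
 *	if (ret == -EPROBE_DEFER)
 *		return ret;	// regulators not ready yet, retry later
 *	// on success, mmc->ocr_avail reflects the vmmc regulator if found
 */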
2933 * Mask off any voltages we don't support and select
2934 * the lowest voltage
2936 u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
2941 * Sanity check the voltages that the card claims to
2945 dev_warn(mmc_dev(host),
2946 "card claims to support voltages below defined range\n");
2950 ocr &= host->ocr_avail;
2952 dev_warn(mmc_dev(host), "no support for card's volts\n");
2956 if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
2959 mmc_power_cycle(host, ocr);
2963 if (bit != host->ios.vdd)
2964 dev_warn(mmc_dev(host), "exceeding card's volts\n");
2970 int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
2973 int old_signal_voltage = host->ios.signal_voltage;
2975 host->ios.signal_voltage = signal_voltage;
2976 if (host->ops->start_signal_voltage_switch) {
2977 mmc_host_clk_hold(host);
2978 err = host->ops->start_signal_voltage_switch(host, &host->ios);
2979 mmc_host_clk_release(host);
2983 host->ios.signal_voltage = old_signal_voltage;
2989 int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
2991 struct mmc_command cmd = {0};
2998 * Send CMD11 only if the request is to switch the card to
3001 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
3002 return __mmc_set_signal_voltage(host, signal_voltage);
3005 * If we cannot switch voltages, return failure so the caller
3006 * can continue without UHS mode
3008 if (!host->ops->start_signal_voltage_switch)
3010 if (!host->ops->card_busy)
3011 pr_warn("%s: cannot verify signal voltage switch\n",
3012 mmc_hostname(host));
3014 cmd.opcode = SD_SWITCH_VOLTAGE;
3016 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
3019 * Hold the clock reference so clock doesn't get auto gated during this
3020 * voltage switch sequence.
3022 mmc_host_clk_hold(host);
3023 err = mmc_wait_for_cmd(host, &cmd, 0);
3025 if (err == -ETIMEDOUT) {
3026 pr_debug("%s: voltage switching failed with err %d\n",
3027 mmc_hostname(host), err);
3035 if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) {
3040 * The card should drive cmd and dat[0:3] low immediately
3041 * after the response of cmd11, but wait 1 ms to be sure
3044 if (host->ops->card_busy && !host->ops->card_busy(host)) {
3049 * During a signal voltage level switch, the clock must be gated
3050 * for 5 ms according to the SD spec
3052 host->card_clock_off = true;
3053 clock = host->ios.clock;
3054 host->ios.clock = 0;
3057 if (__mmc_set_signal_voltage(host, signal_voltage)) {
3059 * Voltages may not have been switched, but we've already
3060 * sent CMD11, so a power cycle is required anyway
3063 host->ios.clock = clock;
3065 host->card_clock_off = false;
3069 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
3071 host->ios.clock = clock;
3074 host->card_clock_off = false;
3075 /* Wait for at least 1 ms according to spec */
3079 * Failure to switch is indicated by the card holding
3082 if (host->ops->card_busy && host->ops->card_busy(host))
3087 pr_debug("%s: Signal voltage switch failed, power cycling card\n",
3088 mmc_hostname(host));
3089 mmc_power_cycle(host, ocr);
3093 mmc_host_clk_release(host);
3099 * Select timing parameters for host.
3101 void mmc_set_timing(struct mmc_host *host, unsigned int timing)
3103 mmc_host_clk_hold(host);
3104 host->ios.timing = timing;
3106 mmc_host_clk_release(host);
3110 * Select appropriate driver type for host.
3112 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
3114 mmc_host_clk_hold(host);
3115 host->ios.drv_type = drv_type;
3117 mmc_host_clk_release(host);
3120 int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
3121 int card_drv_type, int *drv_type)
3123 struct mmc_host *host = card->host;
3124 int host_drv_type = SD_DRIVER_TYPE_B;
3129 if (!host->ops->select_drive_strength)
3132 /* Use SD definition of driver strength for hosts */
3133 if (host->caps & MMC_CAP_DRIVER_TYPE_A)
3134 host_drv_type |= SD_DRIVER_TYPE_A;
3136 if (host->caps & MMC_CAP_DRIVER_TYPE_C)
3137 host_drv_type |= SD_DRIVER_TYPE_C;
3139 if (host->caps & MMC_CAP_DRIVER_TYPE_D)
3140 host_drv_type |= SD_DRIVER_TYPE_D;
3143 * The drive strength that the hardware can support
3144 * depends on the board design. Pass the appropriate
3145 * information and let the hardware specific code
3146 * return what is possible given the options
3148 mmc_host_clk_hold(host);
3149 drive_strength = host->ops->select_drive_strength(card, max_dtr,
3153 mmc_host_clk_release(host);
3155 return drive_strength;
3159 * Apply power to the MMC stack. This is a two-stage process.
3160 * First, we enable power to the card without the clock running.
3161 * We then wait a bit for the power to stabilise. Finally,
3162 * enable the bus drivers and clock to the card.
3164 * We must _NOT_ enable the clock prior to power stabilising.
3166 * If a host does all the power sequencing itself, ignore the
3167 * initial MMC_POWER_UP stage.
3169 void mmc_power_up(struct mmc_host *host, u32 ocr)
3171 if (host->ios.power_mode == MMC_POWER_ON)
3174 mmc_host_clk_hold(host);
3176 mmc_pwrseq_pre_power_on(host);
3178 host->ios.vdd = fls(ocr) - 1;
3179 host->ios.power_mode = MMC_POWER_UP;
3180 /* Set initial state and call mmc_set_ios */
3181 mmc_set_initial_state(host);
3183 /* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
3184 if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0)
3185 dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
3186 else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0)
3187 dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
3188 else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0)
3189 dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
3192 * This delay should be sufficient to allow the power supply
3193 * to reach the minimum voltage.
3197 mmc_pwrseq_post_power_on(host);
3199 host->ios.clock = host->f_init;
3201 host->ios.power_mode = MMC_POWER_ON;
3205 * This delay must be at least 74 clock cycles, or 1 ms, or the
3206 * time required to reach a stable voltage.
3210 mmc_host_clk_release(host);
3213 void mmc_power_off(struct mmc_host *host)
3215 if (host->ios.power_mode == MMC_POWER_OFF)
3218 mmc_host_clk_hold(host);
3220 mmc_pwrseq_power_off(host);
3222 host->ios.clock = 0;
3225 host->ios.power_mode = MMC_POWER_OFF;
3226 /* Set initial state and call mmc_set_ios */
3227 mmc_set_initial_state(host);
3230 * Some configurations, such as the 802.11 SDIO card in the OLPC
3231 * XO-1.5, require a short delay after poweroff before the card
3232 * can be successfully turned on again.
3236 mmc_host_clk_release(host);
3239 void mmc_power_cycle(struct mmc_host *host, u32 ocr)
3241 mmc_power_off(host);
3242 /* Wait at least 1 ms according to SD spec */
3244 mmc_power_up(host, ocr);
3248 * Cleanup when the last reference to the bus operator is dropped.
3250 static void __mmc_release_bus(struct mmc_host *host)
3253 BUG_ON(host->bus_refs);
3254 BUG_ON(!host->bus_dead);
3256 host->bus_ops = NULL;
3260 * Increase reference count of bus operator
3262 static inline void mmc_bus_get(struct mmc_host *host)
3264 unsigned long flags;
3266 spin_lock_irqsave(&host->lock, flags);
3268 spin_unlock_irqrestore(&host->lock, flags);
3272 * Decrease reference count of bus operator and free it if
3273 * it is the last reference.
3275 static inline void mmc_bus_put(struct mmc_host *host)
3277 unsigned long flags;
3279 spin_lock_irqsave(&host->lock, flags);
3281 if ((host->bus_refs == 0) && host->bus_ops)
3282 __mmc_release_bus(host);
3283 spin_unlock_irqrestore(&host->lock, flags);
3286 int mmc_resume_bus(struct mmc_host *host)
3288 unsigned long flags;
3290 int card_present = true;
3292 if (!mmc_bus_needs_resume(host))
3295 pr_debug("%s: Starting deferred resume\n", mmc_hostname(host));
3296 spin_lock_irqsave(&host->lock, flags);
3297 host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
3298 spin_unlock_irqrestore(&host->lock, flags);
3301 if (host->ops->get_cd)
3302 card_present = host->ops->get_cd(host);
3304 if (host->bus_ops && !host->bus_dead && host->card && card_present) {
3305 mmc_power_up(host, host->card->ocr);
3306 BUG_ON(!host->bus_ops->resume);
3307 err = host->bus_ops->resume(host);
3309 pr_err("%s: bus resume: failed: %d\n",
3310 mmc_hostname(host), err);
3311 err = mmc_hw_reset(host);
3313 pr_err("%s: reset: failed: %d\n",
3314 mmc_hostname(host), err);
3317 mmc_card_clr_suspended(host->card);
3320 if (mmc_card_cmdq(host->card)) {
3321 err = mmc_cmdq_halt(host, false);
3323 pr_err("%s: %s: unhalt failed: %d\n",
3324 mmc_hostname(host), __func__, err);
3330 pr_debug("%s: Deferred resume completed\n", mmc_hostname(host));
3333 EXPORT_SYMBOL(mmc_resume_bus);
3336 * Assign a mmc bus handler to a host. Only one bus handler may control a
3337 * host at any given time.
3339 void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
3341 unsigned long flags;
3346 WARN_ON(!host->claimed);
3348 spin_lock_irqsave(&host->lock, flags);
3350 BUG_ON(host->bus_ops);
3351 BUG_ON(host->bus_refs);
3353 host->bus_ops = ops;
3357 spin_unlock_irqrestore(&host->lock, flags);
3361 * Remove the current bus handler from a host.
3363 void mmc_detach_bus(struct mmc_host *host)
3365 unsigned long flags;
3369 WARN_ON(!host->claimed);
3370 WARN_ON(!host->bus_ops);
3372 spin_lock_irqsave(&host->lock, flags);
3376 spin_unlock_irqrestore(&host->lock, flags);
3381 static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
3384 #ifdef CONFIG_MMC_DEBUG
3385 unsigned long flags;
3386 spin_lock_irqsave(&host->lock, flags);
3387 WARN_ON(host->removed);
3388 spin_unlock_irqrestore(&host->lock, flags);
3392 * If the device is configured as wakeup, we prevent a new sleep for
3393 * 5 s to give user space time to consume the event.
3395 if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
3396 device_can_wakeup(mmc_dev(host)))
3397 pm_wakeup_event(mmc_dev(host), 5000);
3399 host->detect_change = 1;
3401 * Change in cd_gpio state, so make sure detection part is
3402 * not overridden because of manual resume.
3404 if (cd_irq && mmc_bus_manual_resume(host))
3405 host->ignore_bus_resume_flags = true;
3407 mmc_schedule_delayed_work(&host->detect, delay);
3411 * mmc_detect_change - process change of state on a MMC socket
3412 * @host: host which changed state.
3413 * @delay: optional delay to wait before detection (jiffies)
3415 * MMC drivers should call this when they detect a card has been
3416 * inserted or removed. The MMC layer will confirm that any
3417 * present card is still functional, and initialize any newly
3418 * inserted.
3420 void mmc_detect_change(struct mmc_host *host, unsigned long delay)
3422 _mmc_detect_change(host, delay, true);
3424 EXPORT_SYMBOL(mmc_detect_change);
3426 void mmc_init_erase(struct mmc_card *card)
3430 if (is_power_of_2(card->erase_size))
3431 card->erase_shift = ffs(card->erase_size) - 1;
3433 card->erase_shift = 0;
3436 * It is possible to erase an arbitrarily large area of an SD or MMC
3437 * card. That is not desirable because it can take a long time
3438 * (minutes) potentially delaying more important I/O, and also the
3439 * timeout calculations become increasingly over-estimated.
3440 * Consequently, 'pref_erase' is defined as a guide to limit erases
3441 * to that size and alignment.
3443 * For SD cards that define Allocation Unit size, limit erases to one
3444 * Allocation Unit at a time. For MMC cards that define High Capacity
3445 * Erase Size, whether it is switched on or not, limit to that size.
3446 * Otherwise just have a stab at a good value. For modern cards it
3447 * will end up being 4MiB. Note that if the value is too small, it
3448 * can end up taking longer to erase.
3450 if (mmc_card_sd(card) && card->ssr.au) {
3451 card->pref_erase = card->ssr.au;
3452 card->erase_shift = ffs(card->ssr.au) - 1;
3453 } else if (card->ext_csd.hc_erase_size) {
3454 card->pref_erase = card->ext_csd.hc_erase_size;
3455 } else if (card->erase_size) {
3456 sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
3458 card->pref_erase = 512 * 1024 / 512;
3460 card->pref_erase = 1024 * 1024 / 512;
3462 card->pref_erase = 2 * 1024 * 1024 / 512;
3464 card->pref_erase = 4 * 1024 * 1024 / 512;
3465 if (card->pref_erase < card->erase_size)
3466 card->pref_erase = card->erase_size;
3468 sz = card->pref_erase % card->erase_size;
3470 card->pref_erase += card->erase_size - sz;
3473 card->pref_erase = 0;
3476 static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
3477 unsigned int arg, unsigned int qty)
3479 unsigned int erase_timeout;
3481 if (arg == MMC_DISCARD_ARG ||
3482 (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
3483 erase_timeout = card->ext_csd.trim_timeout;
3484 } else if (card->ext_csd.erase_group_def & 1) {
3485 /* High Capacity Erase Group Size uses HC timeouts */
3486 if (arg == MMC_TRIM_ARG)
3487 erase_timeout = card->ext_csd.trim_timeout;
3489 erase_timeout = card->ext_csd.hc_erase_timeout;
3491 /* CSD Erase Group Size uses write timeout */
3492 unsigned int mult = (10 << card->csd.r2w_factor);
3493 unsigned int timeout_clks = card->csd.tacc_clks * mult;
3494 unsigned int timeout_us;
3496 /* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
3497 if (card->csd.tacc_ns < 1000000)
3498 timeout_us = (card->csd.tacc_ns * mult) / 1000;
3500 timeout_us = (card->csd.tacc_ns / 1000) * mult;
3503 * ios.clock is only a target. The real clock rate might be
3504 * less but not that much less, so fudge it by multiplying by 2.
3507 timeout_us += (timeout_clks * 1000) /
3508 (mmc_host_clk_rate(card->host) / 1000);
3510 erase_timeout = timeout_us / 1000;
3513 * Theoretically, the calculation could underflow so round up
3514 * to 1ms in that case.
3520 /* Multiplier for secure operations */
3521 if (arg & MMC_SECURE_ARGS) {
3522 if (arg == MMC_SECURE_ERASE_ARG)
3523 erase_timeout *= card->ext_csd.sec_erase_mult;
3525 erase_timeout *= card->ext_csd.sec_trim_mult;
3528 erase_timeout *= qty;
3531 * Ensure at least a 1 second timeout for SPI as per
3532 * 'mmc_set_data_timeout()'
3534 if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
3535 erase_timeout = 1000;
3537 return erase_timeout;
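/*
 * Worked example for the CSD-based branch above, using the overflow
 * comment's numbers (illustrative, not measured from a real card):
 * tacc_ns = 80000000 with r2w_factor = 7 gives mult = 10 << 7 = 1280,
 * so timeout_us = (80000000 / 1000) * 1280 = 102400000, i.e. an
 * erase_timeout of 102400 ms per erase group before the clock-derived
 * term and the qty/secure multipliers are applied.
 */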
3540 static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
3544 unsigned int erase_timeout;
3546 if (card->ssr.erase_timeout) {
3547 /* Erase timeout specified in SD Status Register (SSR) */
3548 erase_timeout = card->ssr.erase_timeout * qty +
3549 card->ssr.erase_offset;
3552 * Erase timeout not specified in SD Status Register (SSR) so
3553 * use 250ms per write block.
3555 erase_timeout = 250 * qty;
3558 /* Must not be less than 1 second */
3559 if (erase_timeout < 1000)
3560 erase_timeout = 1000;
3562 return erase_timeout;
3565 static unsigned int mmc_erase_timeout(struct mmc_card *card,
3569 if (mmc_card_sd(card))
3570 return mmc_sd_erase_timeout(card, arg, qty);
3572 return mmc_mmc_erase_timeout(card, arg, qty);
3575 static u32 mmc_get_erase_qty(struct mmc_card *card, u32 from, u32 to)
3580 * qty is used to calculate the erase timeout which depends on how many
3581 * erase groups (or allocation units in SD terminology) are affected.
3582 * We count erasing part of an erase group as one erase group.
3583 * For SD, the allocation units are always a power of 2. For MMC, the
3584 * erase group size is almost certainly also a power of 2, but the
3585 * JEDEC standard does not seem to insist on that, so we fall back to
3586 * division in that case. SD may not specify an allocation unit size,
3587 * in which case the timeout is based on the number of write blocks.
3589 * Note that the timeout for secure trim 2 will only be correct if the
3590 * number of erase groups specified is the same as the total of all
3591 * preceding secure trim 1 commands. Since the power may have been
3592 * lost since the secure trim 1 commands occurred, it is generally
3593 * impossible to calculate the secure trim 2 timeout correctly.
3595 if (card->erase_shift)
3596 qty += ((to >> card->erase_shift) -
3597 (from >> card->erase_shift)) + 1;
3598 else if (mmc_card_sd(card))
3599 qty += to - from + 1;
3601 qty += ((to / card->erase_size) -
3602 (from / card->erase_size)) + 1;
3606 static int mmc_cmdq_send_erase_cmd(struct mmc_cmdq_req *cmdq_req,
3607 struct mmc_card *card, u32 opcode, u32 arg, u32 qty)
3609 struct mmc_command *cmd = cmdq_req->mrq.cmd;
3612 memset(cmd, 0, sizeof(struct mmc_command));
3614 cmd->opcode = opcode;
3616 if (cmd->opcode == MMC_ERASE) {
3617 cmd->flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
3618 cmd->busy_timeout = mmc_erase_timeout(card, arg, qty);
3620 cmd->flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
3623 err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
3625 pr_err("mmc_erase: group start error %d, status %#x\n",
3632 static int mmc_cmdq_do_erase(struct mmc_cmdq_req *cmdq_req,
3633 struct mmc_card *card, unsigned int from,
3634 unsigned int to, unsigned int arg)
3636 struct mmc_command *cmd = cmdq_req->mrq.cmd;
3637 unsigned int qty = 0;
3638 unsigned long timeout;
3639 unsigned int fr, nr;
3644 trace_mmc_blk_erase_start(arg, fr, nr);
3646 qty = mmc_get_erase_qty(card, from, to);
3648 if (!mmc_card_blockaddr(card)) {
3653 err = mmc_cmdq_send_erase_cmd(cmdq_req, card, MMC_ERASE_GROUP_START,
3658 err = mmc_cmdq_send_erase_cmd(cmdq_req, card, MMC_ERASE_GROUP_END,
3663 err = mmc_cmdq_send_erase_cmd(cmdq_req, card, MMC_ERASE,
3668 timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
3670 memset(cmd, 0, sizeof(struct mmc_command));
3671 cmd->opcode = MMC_SEND_STATUS;
3672 cmd->arg = card->rca << 16;
3673 cmd->flags = MMC_RSP_R1 | MMC_CMD_AC;
3674 /* Do not retry else we can't see errors */
3675 err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
3676 if (err || (cmd->resp[0] & 0xFDF92000)) {
3677 pr_err("error %d requesting status %#x\n",
3682 /* Timeout if the device never becomes ready for data and
3683 * never leaves the program state.
3685 if (time_after(jiffies, timeout)) {
3686 pr_err("%s: Card stuck in programming state! %s\n",
3687 mmc_hostname(card->host), __func__);
3691 } while (!(cmd->resp[0] & R1_READY_FOR_DATA) ||
3692 (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG));
3694 trace_mmc_blk_erase_end(arg, fr, nr);
3698 static int mmc_do_erase(struct mmc_card *card, unsigned int from,
3699 unsigned int to, unsigned int arg)
3701 struct mmc_command cmd = {0};
3702 unsigned int qty = 0;
3703 unsigned long timeout;
3704 unsigned int fr, nr;
3709 trace_mmc_blk_erase_start(arg, fr, nr);
3711 qty = mmc_get_erase_qty(card, from, to);
3713 if (!mmc_card_blockaddr(card)) {
3718 mmc_retune_hold(card->host);
3719 if (mmc_card_sd(card))
3720 cmd.opcode = SD_ERASE_WR_BLK_START;
3722 cmd.opcode = MMC_ERASE_GROUP_START;
3724 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
3725 err = mmc_wait_for_cmd(card->host, &cmd, 0);
3727 pr_err("mmc_erase: group start error %d, status %#x\n",
3728 err, cmd.resp[0]);
3733 memset(&cmd, 0, sizeof(struct mmc_command));
3734 if (mmc_card_sd(card))
3735 cmd.opcode = SD_ERASE_WR_BLK_END;
3737 cmd.opcode = MMC_ERASE_GROUP_END;
3739 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
3740 err = mmc_wait_for_cmd(card->host, &cmd, 0);
3742 pr_err("mmc_erase: group end error %d, status %#x\n",
3748 memset(&cmd, 0, sizeof(struct mmc_command));
3749 cmd.opcode = MMC_ERASE;
3751 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
3752 cmd.busy_timeout = mmc_erase_timeout(card, arg, qty);
3753 err = mmc_wait_for_cmd(card->host, &cmd, 0);
3755 pr_err("mmc_erase: erase error %d, status %#x\n",
3761 if (mmc_host_is_spi(card->host))
3764 timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
3766 memset(&cmd, 0, sizeof(struct mmc_command));
3767 cmd.opcode = MMC_SEND_STATUS;
3768 cmd.arg = card->rca << 16;
3769 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
3770 /* Do not retry else we can't see errors */
3771 err = mmc_wait_for_cmd(card->host, &cmd, 0);
3772 if (err || (cmd.resp[0] & 0xFDF92000)) {
3773 pr_err("error %d requesting status %#x\n",
3779 /* Timeout if the device never becomes ready for data and
3780 * never leaves the program state.
3782 if (time_after(jiffies, timeout)) {
3783 pr_err("%s: Card stuck in programming state! %s\n",
3784 mmc_hostname(card->host), __func__);
3789 } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
3790 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
3792 mmc_retune_release(card->host);
3793 trace_mmc_blk_erase_end(arg, fr, nr);
3797 int mmc_erase_sanity_check(struct mmc_card *card, unsigned int from,
3798 unsigned int nr, unsigned int arg)
3800 if (!(card->host->caps & MMC_CAP_ERASE) ||
3801 !(card->csd.cmdclass & CCC_ERASE))
3804 if (!card->erase_size)
3807 if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
3810 if ((arg & MMC_SECURE_ARGS) &&
3811 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
3814 if ((arg & MMC_TRIM_ARGS) &&
3815 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
3818 if (arg == MMC_SECURE_ERASE_ARG) {
3819 if (from % card->erase_size || nr % card->erase_size)
3825 int mmc_cmdq_erase(struct mmc_cmdq_req *cmdq_req,
3826 struct mmc_card *card, unsigned int from, unsigned int nr,
3829 unsigned int rem, to = from + nr;
3832 ret = mmc_erase_sanity_check(card, from, nr, arg);
3836 if (arg == MMC_ERASE_ARG) {
3837 rem = from % card->erase_size;
3839 rem = card->erase_size - rem;
3846 rem = nr % card->erase_size;
3859 /* 'from' and 'to' are inclusive */
3862 return mmc_cmdq_do_erase(cmdq_req, card, from, to, arg);
3864 EXPORT_SYMBOL(mmc_cmdq_erase);
3867 * mmc_erase - erase sectors.
3868 * @card: card to erase
3869 * @from: first sector to erase
3870 * @nr: number of sectors to erase
3871 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
3873 * Caller must claim host before calling this function.
3875 int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
3878 unsigned int rem, to = from + nr;
3881 ret = mmc_erase_sanity_check(card, from, nr, arg);
3885 if (arg == MMC_ERASE_ARG) {
3886 rem = from % card->erase_size;
3888 rem = card->erase_size - rem;
3895 rem = nr % card->erase_size;
3908 /* 'from' and 'to' are inclusive */
3912 * Special case where only one erase-group fits in the timeout budget:
3913 * If the region crosses an erase-group boundary on this particular
3914 * case, we will be trimming more than one erase-group which does not
3915 * fit in the timeout budget of the controller, so we need to split it
3916 * and call mmc_do_erase() twice if necessary. This special case is
3917 * identified by the card->eg_boundary flag.
3919 rem = card->erase_size - (from % card->erase_size);
3920 if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
3921 ret = mmc_do_erase(card, from, from + rem - 1, arg);
3923 if ((ret) || (to <= from))
3927 return mmc_do_erase(card, from, to, arg);
3929 EXPORT_SYMBOL(mmc_erase);
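/*
 * Illustrative erase call (a sketch, not from this file): erase one
 * preferred-erase-size chunk starting at sector "from". The caller
 * must already have claimed the host.
 *
 *	if (mmc_can_erase(card) &&
 *	    mmc_erase_group_aligned(card, from, card->pref_erase))
 *		err = mmc_erase(card, from, card->pref_erase,
 *				MMC_ERASE_ARG);
 */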
3931 int mmc_can_erase(struct mmc_card *card)
3933 if ((card->host->caps & MMC_CAP_ERASE) &&
3934 (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
3938 EXPORT_SYMBOL(mmc_can_erase);
3940 int mmc_can_trim(struct mmc_card *card)
3942 if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
3943 (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
3947 EXPORT_SYMBOL(mmc_can_trim);
3949 int mmc_can_discard(struct mmc_card *card)
3952 * As there's no way to detect the discard support bit at v4.5,
3953 * use the s/w feature support field.
3955 if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
3959 EXPORT_SYMBOL(mmc_can_discard);
3961 int mmc_can_sanitize(struct mmc_card *card)
3963 if (!mmc_can_trim(card) && !mmc_can_erase(card))
3965 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
3969 EXPORT_SYMBOL(mmc_can_sanitize);
3971 int mmc_can_secure_erase_trim(struct mmc_card *card)
3973 if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
3974 !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
3978 EXPORT_SYMBOL(mmc_can_secure_erase_trim);
3980 int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
3983 if (!card->erase_size)
3985 if (from % card->erase_size || nr % card->erase_size)
3989 EXPORT_SYMBOL(mmc_erase_group_aligned);
3991 static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
3994 struct mmc_host *host = card->host;
3995 unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
3996 unsigned int last_timeout = 0;
3998 if (card->erase_shift)
3999 max_qty = UINT_MAX >> card->erase_shift;
4000 else if (mmc_card_sd(card))
4003 max_qty = UINT_MAX / card->erase_size;
4005 /* Find the largest qty with an OK timeout */
4008 for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
4009 timeout = mmc_erase_timeout(card, arg, qty + x);
4010 if (timeout > host->max_busy_timeout)
4012 if (timeout < last_timeout)
4014 last_timeout = timeout;
4024 * When specifying a sector range to trim, chances are we might cross
4025 * an erase-group boundary even if the amount of sectors is less than
4027 * If we can only fit one erase-group in the controller timeout budget,
4028 * we have to care that erase-group boundaries are not crossed by a
4029 * single trim operation. We flag that special case with "eg_boundary".
4030 * In all other cases we can just decrement qty and pretend that we
4031 * always touch (qty + 1) erase-groups as a simple optimization.
4034 card->eg_boundary = 1;
4038 /* Convert qty to sectors */
4039 if (card->erase_shift)
4040 max_discard = qty << card->erase_shift;
4041 else if (mmc_card_sd(card))
4042 max_discard = qty + 1;
4044 max_discard = qty * card->erase_size;
4049 unsigned int mmc_calc_max_discard(struct mmc_card *card)
4051 struct mmc_host *host = card->host;
4052 unsigned int max_discard, max_trim;
4054 if (!host->max_busy_timeout ||
4055 (host->caps2 & MMC_CAP2_MAX_DISCARD_SIZE))
4059 * Without erase_group_def set, MMC erase timeout depends on clock
4060 * frequency, which can change. In that case, the best choice is
4061 * just the preferred erase size.
4063 if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
4064 return card->pref_erase;
4066 max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
4067 if (mmc_can_trim(card)) {
4068 max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
4069 if (max_trim < max_discard)
4070 max_discard = max_trim;
4071 } else if (max_discard < card->erase_size) {
4074 pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
4075 mmc_hostname(host), max_discard, host->max_busy_timeout);
4078 EXPORT_SYMBOL(mmc_calc_max_discard);
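/*
 * Example consumer (a sketch): a block driver would typically feed the
 * result into its request queue's discard limit.
 *
 *	unsigned int max_discard = mmc_calc_max_discard(card);
 *	if (max_discard)
 *		blk_queue_max_discard_sectors(q, max_discard);
 */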
4080 int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
4082 struct mmc_command cmd = {0};
4084 if (mmc_card_blockaddr(card) || mmc_card_ddr52(card))
4087 cmd.opcode = MMC_SET_BLOCKLEN;
4089 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
4090 return mmc_wait_for_cmd(card->host, &cmd, 5);
4092 EXPORT_SYMBOL(mmc_set_blocklen);
4094 int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
4097 struct mmc_command cmd = {0};
4099 cmd.opcode = MMC_SET_BLOCK_COUNT;
4100 cmd.arg = blockcount & 0x0000FFFF;
4103 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
4104 return mmc_wait_for_cmd(card->host, &cmd, 5);
4106 EXPORT_SYMBOL(mmc_set_blockcount);
4108 static void mmc_hw_reset_for_init(struct mmc_host *host)
4110 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
4112 mmc_host_clk_hold(host);
4113 host->ops->hw_reset(host);
4114 mmc_host_clk_release(host);
4118 * mmc_cmdq_hw_reset: Helper API for resetting the host
4119 * and reinitializing the card.
4120 * This must be called with mmc_claim_host
4121 * acquired by the caller.
4123 int mmc_cmdq_hw_reset(struct mmc_host *host)
4125 if (!host->bus_ops->reset)
4128 return host->bus_ops->reset(host);
4130 EXPORT_SYMBOL(mmc_cmdq_hw_reset);
4132 int mmc_hw_reset(struct mmc_host *host)
4140 if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) {
4145 ret = host->bus_ops->reset(host);
4149 pr_warn("%s: tried to reset card, got error %d\n",
4150 mmc_hostname(host), ret);
4154 EXPORT_SYMBOL(mmc_hw_reset);
4156 static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
4158 host->f_init = freq;
4160 #ifdef CONFIG_MMC_DEBUG
4161 pr_info("%s: %s: trying to init card at %u Hz\n",
4162 mmc_hostname(host), __func__, host->f_init);
4164 mmc_power_up(host, host->ocr_avail);
4167 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
4168 * do a hardware reset if possible.
4170 mmc_hw_reset_for_init(host);
4173 * sdio_reset sends CMD52 to reset card. Since we do not know
4174 * if the card is being re-initialized, just send it. CMD52
4175 * should be ignored by SD/eMMC cards.
4180 mmc_send_if_cond(host, host->ocr_avail);
4182 /* Order's important: probe SDIO, then SD, then MMC */
4183 if (!mmc_attach_sdio(host))
4185 if (!mmc_attach_sd(host))
4187 if (!mmc_attach_mmc(host))
4190 mmc_power_off(host);
4194 int _mmc_detect_card_removed(struct mmc_host *host)
4198 if (host->caps & MMC_CAP_NONREMOVABLE)
4201 if (!host->card || mmc_card_removed(host->card))
4204 ret = host->bus_ops->alive(host);
4207 * Card detect status and alive check may be out of sync if card is
4208 * removed slowly, when card detect switch changes while card/slot
4209 * pads are still contacted in hardware (refer to "SD Card Mechanical
4210 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
4211 * detect work 200ms later for this case.
4213 if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
4214 mmc_detect_change(host, msecs_to_jiffies(200));
4215 pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
4219 if (host->ops->get_cd && host->ops->get_cd(host)) {
4220 ret = mmc_recovery_fallback_lower_speed(host);
4222 mmc_card_set_removed(host->card);
4223 if (host->card->sdr104_blocked) {
4224 mmc_host_set_sdr104(host);
4225 host->card->sdr104_blocked = false;
4227 pr_debug("%s: card remove detected\n",
4228 mmc_hostname(host));
4235 int mmc_detect_card_removed(struct mmc_host *host)
4237 struct mmc_card *card = host->card;
4240 WARN_ON(!host->claimed);
4245 ret = mmc_card_removed(card);
4247 * The card will be considered unchanged unless we have been asked to
4248 * detect a change or host requires polling to provide card detection.
4250 if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
4253 host->detect_change = 0;
4255 ret = _mmc_detect_card_removed(host);
4256 if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
4258 * Schedule a detect work as soon as possible to let a
4259 * rescan handle the card removal.
4261 cancel_delayed_work(&host->detect);
4262 _mmc_detect_change(host, 0, false);
4268 EXPORT_SYMBOL(mmc_detect_card_removed);
4271 * This should be called to make sure that the detect work (mmc_rescan)
4272 * is completed. Drivers may use this function from async schedule/probe
4273 * contexts to make sure that the boot device detection is completed on
4274 * completion of async_schedule.
4276 void mmc_flush_detect_work(struct mmc_host *host)
4278 flush_delayed_work(&host->detect);
4280 EXPORT_SYMBOL(mmc_flush_detect_work);
4282 void mmc_rescan(struct work_struct *work)
4284 unsigned long flags;
4285 struct mmc_host *host =
4286 container_of(work, struct mmc_host, detect.work);
4288 if (host->trigger_card_event && host->ops->card_event) {
4289 host->ops->card_event(host);
4290 host->trigger_card_event = false;
4293 spin_lock_irqsave(&host->lock, flags);
4294 if (host->rescan_disable) {
4295 spin_unlock_irqrestore(&host->lock, flags);
4298 spin_unlock_irqrestore(&host->lock, flags);
4300 /* If there is a non-removable card registered, only scan once */
4301 if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
4303 host->rescan_entered = 1;
4308 * if there is a _removable_ card registered, check whether it is
4311 if (host->bus_ops && !host->bus_dead
4312 && !(host->caps & MMC_CAP_NONREMOVABLE))
4313 host->bus_ops->detect(host);
4315 host->detect_change = 0;
4316 if (host->ignore_bus_resume_flags)
4317 host->ignore_bus_resume_flags = false;
4320 * Let mmc_bus_put() free the bus/bus_ops if we've found that
4321 * the card is no longer present.
4326 /* if there still is a card present, stop here */
4327 if (host->bus_ops != NULL) {
4333 * Only we can add a new handler, so it's safe to
4334 * release the lock here.
4338 if (!(host->caps & MMC_CAP_NONREMOVABLE) && host->ops->get_cd &&
4339 host->ops->get_cd(host) == 0) {
4340 mmc_claim_host(host);
4341 mmc_power_off(host);
4342 mmc_release_host(host);
4346 mmc_claim_host(host);
4347 mmc_rescan_try_freq(host, host->f_min);
4348 mmc_release_host(host);
4351 if (host->caps & MMC_CAP_NEEDS_POLL)
4352 mmc_schedule_delayed_work(&host->detect, HZ);
4355 void mmc_start_host(struct mmc_host *host)
4357 mmc_claim_host(host);
4358 host->f_init = max(freqs[0], host->f_min);
4359 host->rescan_disable = 0;
4360 host->ios.power_mode = MMC_POWER_UNDEFINED;
4362 if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
4363 mmc_power_off(host);
4365 mmc_power_up(host, host->ocr_avail);
4367 mmc_gpiod_request_cd_irq(host);
4368 mmc_release_host(host);
4369 _mmc_detect_change(host, 0, false);
4372 void mmc_stop_host(struct mmc_host *host)
4374 #ifdef CONFIG_MMC_DEBUG
4375 unsigned long flags;
4376 spin_lock_irqsave(&host->lock, flags);
4378 spin_unlock_irqrestore(&host->lock, flags);
4380 if (host->slot.cd_irq >= 0)
4381 disable_irq(host->slot.cd_irq);
4383 host->rescan_disable = 1;
4384 cancel_delayed_work_sync(&host->detect);
4385 mmc_flush_scheduled_work();
4387 /* clear pm flags now and let card drivers set them as needed */
4391 if (host->bus_ops && !host->bus_dead) {
4392 /* Calling bus_ops->remove() with a claimed host can deadlock */
4393 host->bus_ops->remove(host);
4394 mmc_claim_host(host);
4395 mmc_detach_bus(host);
4396 mmc_power_off(host);
4397 mmc_release_host(host);
4405 mmc_claim_host(host);
4406 mmc_power_off(host);
4407 mmc_release_host(host);
4410 int mmc_power_save_host(struct mmc_host *host)
4414 #ifdef CONFIG_MMC_DEBUG
4415 pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
4420 if (!host->bus_ops || host->bus_dead) {
4425 if (host->bus_ops->power_save)
4426 ret = host->bus_ops->power_save(host);
4430 mmc_power_off(host);
4434 EXPORT_SYMBOL(mmc_power_save_host);
4436 int mmc_power_restore_host(struct mmc_host *host)
4440 #ifdef CONFIG_MMC_DEBUG
4441 pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
4446 if (!host->bus_ops || host->bus_dead) {
4451 mmc_power_up(host, host->card->ocr);
4452 mmc_claim_host(host);
4453 ret = host->bus_ops->power_restore(host);
4454 mmc_release_host(host);
4460 EXPORT_SYMBOL(mmc_power_restore_host);
4463 * Add barrier request to the requests in cache
4465 int mmc_cache_barrier(struct mmc_card *card)
4467 struct mmc_host *host = card->host;
4470 if (!card->ext_csd.cache_ctrl ||
4471 (card->quirks & MMC_QUIRK_CACHE_DISABLE))
4474 if (!mmc_card_mmc(card))
4477 if (!card->ext_csd.barrier_en)
4481 * If a device receives maximum supported barrier
4482 * requests, a barrier command is treated as a
4483 * flush command. Hence, it is better to use the
4484 * flush timeout instead of a generic CMD6 timeout
4486 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
4487 EXT_CSD_FLUSH_CACHE, 0x2, 0);
4489 pr_err("%s: cache barrier error %d\n",
4490 mmc_hostname(host), err);
4494 EXPORT_SYMBOL(mmc_cache_barrier);
4497 * Flush the cache to the non-volatile storage.
4499 int mmc_flush_cache(struct mmc_card *card)
4503 if (mmc_card_mmc(card) &&
4504 (card->ext_csd.cache_size > 0) &&
4505 (card->ext_csd.cache_ctrl & 1) &&
4506 (!(card->quirks & MMC_QUIRK_CACHE_DISABLE))) {
4507 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
4508 EXT_CSD_FLUSH_CACHE, 1, 0);
4509 if (err == -ETIMEDOUT) {
4510 pr_err("%s: cache flush timeout\n",
4511 mmc_hostname(card->host));
4512 err = mmc_interrupt_hpi(card);
4514 pr_err("%s: mmc_interrupt_hpi() failed (%d)\n",
4515 mmc_hostname(card->host), err);
4519 pr_err("%s: cache flush error %d\n",
4520 mmc_hostname(card->host), err);
4526 EXPORT_SYMBOL(mmc_flush_cache);
4530 /* Do the card removal on suspend if card is assumed removable.
4531 * Do that in pm notifier while userspace isn't yet frozen, so we will be able
4532 * to sync the card.
4534 int mmc_pm_notify(struct notifier_block *notify_block,
4535 unsigned long mode, void *unused)
4537 struct mmc_host *host = container_of(
4538 notify_block, struct mmc_host, pm_notify);
4539 unsigned long flags;
4540 int err = 0, present = 0;
4543 case PM_RESTORE_PREPARE:
4544 case PM_HIBERNATION_PREPARE:
4545 if (host->bus_ops && host->bus_ops->pre_hibernate)
4546 host->bus_ops->pre_hibernate(host);
4547 case PM_SUSPEND_PREPARE:
4548 spin_lock_irqsave(&host->lock, flags);
4549 host->rescan_disable = 1;
4550 spin_unlock_irqrestore(&host->lock, flags);
4551 cancel_delayed_work_sync(&host->detect);
4556 /* Validate prerequisites for suspend */
4557 if (host->bus_ops->pre_suspend)
4558 err = host->bus_ops->pre_suspend(host);
4562 if (!mmc_card_is_removable(host)) {
4563 dev_warn(mmc_dev(host),
4564 "pre_suspend failed for non-removable host: "
4566 /* Avoid removing non-removable hosts */
4570 /* Calling bus_ops->remove() with a claimed host can deadlock */
4571 host->bus_ops->remove(host);
4572 mmc_claim_host(host);
4573 mmc_detach_bus(host);
4574 mmc_power_off(host);
4575 mmc_release_host(host);
4579 case PM_POST_RESTORE:
4580 case PM_POST_HIBERNATION:
4581 if (host->bus_ops && host->bus_ops->post_hibernate)
4582 host->bus_ops->post_hibernate(host);
4583 case PM_POST_SUSPEND:
4585 spin_lock_irqsave(&host->lock, flags);
4586 host->rescan_disable = 0;
4587 if (mmc_card_is_removable(host))
4588 present = !!mmc_gpio_get_cd(host);
4590 if (mmc_bus_manual_resume(host) &&
4591 !host->ignore_bus_resume_flags &&
4593 spin_unlock_irqrestore(&host->lock, flags);
4596 spin_unlock_irqrestore(&host->lock, flags);
4597 _mmc_detect_change(host, 0, false);
4606 * mmc_init_context_info() - init synchronization context
4609 * Init struct context_info needed to implement asynchronous
4610 * request mechanism, used by mmc core, host driver and mmc requests
4613 void mmc_init_context_info(struct mmc_host *host)
4615 spin_lock_init(&host->context_info.lock);
4616 host->context_info.is_new_req = false;
4617 host->context_info.is_done_rcv = false;
4618 host->context_info.is_waiting_last_req = false;
4619 init_waitqueue_head(&host->context_info.wait);
4622 #ifdef CONFIG_MMC_EMBEDDED_SDIO
4623 void mmc_set_embedded_sdio_data(struct mmc_host *host,
4624 struct sdio_cis *cis,
4625 struct sdio_cccr *cccr,
4626 struct sdio_embedded_func *funcs,
4629 host->embedded_sdio_data.cis = cis;
4630 host->embedded_sdio_data.cccr = cccr;
4631 host->embedded_sdio_data.funcs = funcs;
4632 host->embedded_sdio_data.num_funcs = num_funcs;
4635 EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
4638 static int __init mmc_init(void)
4642 workqueue = alloc_ordered_workqueue("kmmcd", 0);
4646 ret = mmc_register_bus();
4648 goto destroy_workqueue;
4650 ret = mmc_register_host_class();
4652 goto unregister_bus;
4654 ret = sdio_register_bus();
4656 goto unregister_host_class;
4660 unregister_host_class:
4661 mmc_unregister_host_class();
4663 mmc_unregister_bus();
4665 destroy_workqueue(workqueue);
4670 static void __exit mmc_exit(void)
4672 sdio_unregister_bus();
4673 mmc_unregister_host_class();
4674 mmc_unregister_bus();
4675 destroy_workqueue(workqueue);
4680 latency_hist_show(struct device *dev, struct device_attribute *attr, char *buf)
4682 struct mmc_host *host = cls_dev_to_mmc_host(dev);
4683 size_t written_bytes;
4685 written_bytes = blk_latency_hist_show("Read", &host->io_lat_read,
4687 written_bytes += blk_latency_hist_show("Write", &host->io_lat_write,
4688 buf + written_bytes, PAGE_SIZE - written_bytes);
4690 return written_bytes;
4694 * Permitted values: 0, 1, 2.
4695 * 0 -> Disable IO latency histograms (default)
4696 * 1 -> Enable IO latency histograms
4697 * 2 -> Zero out IO latency histograms
4700 latency_hist_store(struct device *dev, struct device_attribute *attr,
4701 const char *buf, size_t count)
4703 struct mmc_host *host = cls_dev_to_mmc_host(dev);
4706 if (kstrtol(buf, 0, &value))
4708 if (value == BLK_IO_LAT_HIST_ZERO) {
4709 memset(&host->io_lat_read, 0, sizeof(host->io_lat_read));
4710 memset(&host->io_lat_write, 0, sizeof(host->io_lat_write));
4711 } else if (value == BLK_IO_LAT_HIST_ENABLE ||
4712 value == BLK_IO_LAT_HIST_DISABLE)
4713 host->latency_hist_enabled = value;
4717 static DEVICE_ATTR(latency_hist, S_IRUGO | S_IWUSR,
4718 latency_hist_show, latency_hist_store);
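/*
 * Usage sketch for the attribute above, from userspace (the device
 * name "mmc0" is an example):
 *
 *	# echo 1 > /sys/class/mmc_host/mmc0/latency_hist	(enable)
 *	# cat /sys/class/mmc_host/mmc0/latency_hist		(show histograms)
 *	# echo 2 > /sys/class/mmc_host/mmc0/latency_hist	(zero histograms)
 */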
4721 mmc_latency_hist_sysfs_init(struct mmc_host *host)
4723 if (device_create_file(&host->class_dev, &dev_attr_latency_hist))
4724 dev_err(&host->class_dev,
4725 "Failed to create latency_hist sysfs entry\n");
4729 mmc_latency_hist_sysfs_exit(struct mmc_host *host)
4731 device_remove_file(&host->class_dev, &dev_attr_latency_hist);
4735 subsys_initcall(mmc_init);
4736 module_exit(mmc_exit);
4738 MODULE_LICENSE("GPL");