/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2
 * license terms, and distributes only under these terms.
 */
#include <linux/async.h>
#include <scsi/ufs/ioctl.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/blkdev.h>
#include <asm/unaligned.h>

#include "ufshcd.h"
#include "ufs_quirks.h"
#include "ufs-debugfs.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>

#ifdef CONFIG_DEBUG_FS
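/*
 * Classify a block layer request for the statistics below: flush,
 * (urgent) read, (urgent) write, or TS_NOT_SUPPORTED for anything
 * that is not a REQ_TYPE_FS request.
 */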
static int ufshcd_tag_req_type(struct request *rq)
{
	int rq_type = TS_WRITE;

	if (!rq || !(rq->cmd_type & REQ_TYPE_FS))
		rq_type = TS_NOT_SUPPORTED;
	else if (rq->cmd_flags & REQ_FLUSH)
		rq_type = TS_FLUSH;
	else if (rq_data_dir(rq) == READ)
		rq_type = (rq->cmd_flags & REQ_URGENT) ?
			TS_URGENT_READ : TS_READ;
	else if (rq->cmd_flags & REQ_URGENT)
		rq_type = TS_URGENT_WRITE;

	return rq_type;
}
static void ufshcd_update_error_stats(struct ufs_hba *hba, int type)
{
	ufsdbg_set_err_state(hba);
	if (type < UFS_ERR_MAX)
		hba->ufs_stats.err_stats[type]++;
}
static void ufshcd_update_tag_stats(struct ufs_hba *hba, int tag)
{
	struct request *rq =
		hba->lrb[tag].cmd ? hba->lrb[tag].cmd->request : NULL;
	u64 **tag_stats = hba->ufs_stats.tag_stats;
	int rq_type;

	if (!hba->ufs_stats.enabled)
		return;

	tag_stats[tag][TS_TAG]++;
	if (!rq || !(rq->cmd_type & REQ_TYPE_FS))
		return;

	WARN_ON(hba->ufs_stats.q_depth > hba->nutrs);
	rq_type = ufshcd_tag_req_type(rq);
	if (!(rq_type < 0 || rq_type > TS_NUM_STATS))
		tag_stats[hba->ufs_stats.q_depth++][rq_type]++;
}
static void ufshcd_update_tag_stats_completion(struct ufs_hba *hba,
		struct scsi_cmnd *cmd)
{
	struct request *rq = cmd ? cmd->request : NULL;

	if (rq && rq->cmd_type & REQ_TYPE_FS)
		hba->ufs_stats.q_depth--;
}
static void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	int rq_type;
	struct request *rq = lrbp->cmd ? lrbp->cmd->request : NULL;
	s64 delta = ktime_us_delta(lrbp->complete_time_stamp,
		lrbp->issue_time_stamp);

	/* update general request statistics */
	if (hba->ufs_stats.req_stats[TS_TAG].count == 0)
		hba->ufs_stats.req_stats[TS_TAG].min = delta;
	hba->ufs_stats.req_stats[TS_TAG].count++;
	hba->ufs_stats.req_stats[TS_TAG].sum += delta;
	if (delta > hba->ufs_stats.req_stats[TS_TAG].max)
		hba->ufs_stats.req_stats[TS_TAG].max = delta;
	if (delta < hba->ufs_stats.req_stats[TS_TAG].min)
		hba->ufs_stats.req_stats[TS_TAG].min = delta;

	rq_type = ufshcd_tag_req_type(rq);
	if (rq_type == TS_NOT_SUPPORTED)
		return;

	/* update request type specific statistics */
	if (hba->ufs_stats.req_stats[rq_type].count == 0)
		hba->ufs_stats.req_stats[rq_type].min = delta;
	hba->ufs_stats.req_stats[rq_type].count++;
	hba->ufs_stats.req_stats[rq_type].sum += delta;
	if (delta > hba->ufs_stats.req_stats[rq_type].max)
		hba->ufs_stats.req_stats[rq_type].max = delta;
	if (delta < hba->ufs_stats.req_stats[rq_type].min)
		hba->ufs_stats.req_stats[rq_type].min = delta;
}
static void
ufshcd_update_query_stats(struct ufs_hba *hba, enum query_opcode opcode, u8 idn)
{
	if (opcode < UPIU_QUERY_OPCODE_MAX && idn < MAX_QUERY_IDN)
		hba->ufs_stats.query_stats_arr[opcode][idn]++;
}
#else

static inline void ufshcd_update_tag_stats(struct ufs_hba *hba, int tag)
{
}

static inline void ufshcd_update_tag_stats_completion(struct ufs_hba *hba,
		struct scsi_cmnd *cmd)
{
}

static inline void ufshcd_update_error_stats(struct ufs_hba *hba, int type)
{
}

static inline
void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
}

static inline
void ufshcd_update_query_stats(struct ufs_hba *hba,
			       enum query_opcode opcode, u8 idn)
{
}
#endif
#define PWR_INFO_MASK	0xF
#define PWR_RX_OFFSET	4

#define UFSHCD_REQ_SENSE_SIZE	18

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* Maximum retries for Hibern8 enter */
#define UIC_HIBERN8_ENTER_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

/* default value of auto suspend is 3 seconds */
#define UFSHCD_AUTO_SUSPEND_DELAY_MS 3000 /* millisecs */

#define UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE	10
#define UFSHCD_CLK_GATING_DELAY_MS_PERF		50

/* IOCTL opcode for command - ufs set device read only */
#define UFS_IOCTL_BLKROSET	BLKROSET

#define UFSHCD_DEFAULT_LANES_PER_DIRECTION	2
#define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
	({								\
		int _ret;						\
		if (_on)						\
			_ret = ufshcd_enable_vreg(_dev, _vreg);		\
		else							\
			_ret = ufshcd_disable_vreg(_dev, _vreg);	\
		_ret;							\
	})
#define ufshcd_hex_dump(prefix_str, buf, len) \
print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
static u32 ufs_query_desc_max_size[] = {
	QUERY_DESC_DEVICE_MAX_SIZE,
	QUERY_DESC_CONFIGURAION_MAX_SIZE,
	QUERY_DESC_UNIT_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
	QUERY_DESC_INTERCONNECT_MAX_SIZE,
	QUERY_DESC_STRING_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
	QUERY_DESC_GEOMETRY_MAZ_SIZE,
	QUERY_DESC_POWER_MAX_SIZE,
	QUERY_DESC_HEALTH_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
};

enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
	UFSHCD_STATE_OPERATIONAL,
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
};

/* Interrupt configuration options */
enum {
	UFSHCD_INT_DISABLE,
	UFSHCD_INT_ENABLE,
	UFSHCD_INT_CLEAR,
};

#define DEFAULT_UFSHCD_DBG_PRINT_EN	UFSHCD_DBG_PRINT_ALL

#define ufshcd_set_eh_in_progress(h) \
	(h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	(h->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	(h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
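
/*
 * Each UFS power management level maps to a pair of (UFS device power
 * mode, UIC link state); the helpers below translate between a level
 * and its two components.
 */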
static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};

static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
					  enum uic_link_state link_state)
{
	enum ufs_pm_level lvl;

	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
		if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
		    (ufs_pm_lvl_states[lvl].link_state == link_state))
			return lvl;
	}

	/* if no match found, return the level 0 */
	return UFS_PM_LVL_0;
}

static inline bool ufshcd_is_valid_pm_lvl(int lvl)
{
	if (lvl >= 0 && lvl < ARRAY_SIZE(ufs_pm_lvl_states))
		return true;
	else
		return false;
}

static irqreturn_t ufshcd_intr(int irq, void *__hba);
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba);
static int ufshcd_enable_clocks(struct ufs_hba *hba);
static int ufshcd_disable_clocks(struct ufs_hba *hba,
				 bool is_gating_context);
static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
					      bool is_gating_context);
static void ufshcd_hold_all(struct ufs_hba *hba);
static void ufshcd_release_all(struct ufs_hba *hba);
static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static inline void ufshcd_save_tstamp_of_last_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
static int ufshcd_devfreq_target(struct device *dev,
				 unsigned long *freq, u32 flags);
static int ufshcd_devfreq_get_dev_status(struct device *dev,
					 struct devfreq_dev_status *stat);

#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
static struct devfreq_simple_ondemand_data ufshcd_ondemand_data = {
	.upthreshold = 70,
	.downdifferential = 65,
	.simple_scaling = 1,
};

static void *gov_data = &ufshcd_ondemand_data;
#else
static void *gov_data;
#endif

static struct devfreq_dev_profile ufs_devfreq_profile = {
	.polling_ms	= 60,
	.target		= ufshcd_devfreq_target,
	.get_dev_status	= ufshcd_devfreq_get_dev_status,
};

static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
	return tag >= 0 && tag < hba->nutrs;
}

static inline void ufshcd_enable_irq(struct ufs_hba *hba)
{
	if (!hba->is_irq_enabled) {
		enable_irq(hba->irq);
		hba->is_irq_enabled = true;
	}
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		disable_irq(hba->irq);
		hba->is_irq_enabled = false;
	}
}

void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
	unsigned long flags;
	bool unblock = false;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->scsi_block_reqs_cnt--;
	unblock = !hba->scsi_block_reqs_cnt;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	if (unblock)
		scsi_unblock_requests(hba->host);
}
EXPORT_SYMBOL(ufshcd_scsi_unblock_requests);

static inline void __ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
	if (!hba->scsi_block_reqs_cnt++)
		scsi_block_requests(hba->host);
}

void ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_scsi_block_requests(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL(ufshcd_scsi_block_requests);
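
/*
 * Toggle the device RST_n line via the optional "dev-reset-assert" /
 * "dev-reset-deassert" pinctrl states provided by the platform.
 */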
static int ufshcd_device_reset_ctrl(struct ufs_hba *hba, bool ctrl)
{
	int ret = 0;

	if (!hba->pctrl)
		goto out;

	/* Assert reset if ctrl == true */
	if (ctrl)
		ret = pinctrl_select_state(hba->pctrl,
			pinctrl_lookup_state(hba->pctrl, "dev-reset-assert"));
	else
		ret = pinctrl_select_state(hba->pctrl,
			pinctrl_lookup_state(hba->pctrl, "dev-reset-deassert"));

	if (ret < 0)
		dev_err(hba->dev, "%s: %s failed with err %d\n",
			__func__, ctrl ? "Assert" : "Deassert", ret);
out:
	return ret;
}

static inline int ufshcd_assert_device_reset(struct ufs_hba *hba)
{
	return ufshcd_device_reset_ctrl(hba, true);
}

static inline int ufshcd_deassert_device_reset(struct ufs_hba *hba)
{
	return ufshcd_device_reset_ctrl(hba, false);
}

static int ufshcd_reset_device(struct ufs_hba *hba)
{
	int ret;

	/* reset the connected UFS device */
	ret = ufshcd_assert_device_reset(hba);
	if (ret)
		goto out;

	/*
	 * The reset signal is active low.
	 * The UFS device shall detect more than or equal to 1us of positive
	 * or negative RST_n pulse width.
	 * To be on safe side, keep the reset low for at least 10us.
	 */
	usleep_range(10, 15);

	ret = ufshcd_deassert_device_reset(hba);
	if (ret)
		goto out;
	/* same as assert, wait for at least 10us after deassert */
	usleep_range(10, 15);
out:
	return ret;
}

/* replace non-printable or non-ASCII characters with spaces */
static inline void ufshcd_remove_non_printable(char *val)
{
	if (!val)
		return;

	if (*val < 0x20 || *val > 0x7e)
		*val = ' ';
}

#define UFSHCD_MAX_CMD_LOGGING	200

#ifdef CONFIG_TRACEPOINTS
static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
			struct ufshcd_cmd_log_entry *entry, u8 opcode)
{
	if (trace_ufshcd_command_enabled()) {
		u32 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);

		trace_ufshcd_command(dev_name(hba->dev), entry->str, entry->tag,
				     entry->doorbell, entry->transfer_len, intr,
				     entry->lba, opcode);
	}
}
#else
static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
			struct ufshcd_cmd_log_entry *entry, u8 opcode)
{
}
#endif

#ifdef CONFIG_SCSI_UFSHCD_CMD_LOGGING
static void ufshcd_cmd_log_init(struct ufs_hba *hba)
{
	/* Allocate log entries */
	if (!hba->cmd_log.entries) {
		hba->cmd_log.entries = kzalloc(UFSHCD_MAX_CMD_LOGGING *
			sizeof(struct ufshcd_cmd_log_entry), GFP_KERNEL);
		if (!hba->cmd_log.entries)
			return;
		dev_dbg(hba->dev, "%s: cmd_log.entries initialized\n",
			__func__);
	}
}

#ifdef CONFIG_TRACEPOINTS
static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
			     unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
			     sector_t lba, int transfer_len, u8 opcode)
{
	struct ufshcd_cmd_log_entry *entry;

	if (!hba->cmd_log.entries)
		return;

	entry = &hba->cmd_log.entries[hba->cmd_log.pos];
	entry->lun = lun;
	entry->str = str;
	entry->cmd_type = cmd_type;
	entry->cmd_id = cmd_id;
	entry->lba = lba;
	entry->transfer_len = transfer_len;
	entry->idn = idn;
	entry->doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	entry->tag = tag;
	entry->tstamp = ktime_get();
	entry->outstanding_reqs = hba->outstanding_reqs;
	entry->seq_num = hba->cmd_log.seq_num;
	hba->cmd_log.seq_num++;
	hba->cmd_log.pos =
			(hba->cmd_log.pos + 1) % UFSHCD_MAX_CMD_LOGGING;

	ufshcd_add_command_trace(hba, entry, opcode);
}
#endif

static void ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
	unsigned int tag, u8 cmd_id, u8 idn)
{
	__ufshcd_cmd_log(hba, str, cmd_type, tag, cmd_id, idn,
			 0xff, (sector_t)-1, -1, -1);
}

static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
{
	ufshcd_cmd_log(hba, str, "dme", 0xff, cmd_id, 0xff);
}

static void ufshcd_print_cmd_log(struct ufs_hba *hba)
{
	int i;
	int pos;
	struct ufshcd_cmd_log_entry *p;

	if (!hba->cmd_log.entries)
		return;

	pos = hba->cmd_log.pos;
	for (i = 0; i < UFSHCD_MAX_CMD_LOGGING; i++) {
		p = &hba->cmd_log.entries[pos];
		pos = (pos + 1) % UFSHCD_MAX_CMD_LOGGING;

		if (ktime_to_us(p->tstamp)) {
			pr_err("%s: %s: seq_no=%u lun=0x%x cmd_id=0x%02x lba=0x%llx txfer_len=%d tag=%u, doorbell=0x%x outstanding=0x%x idn=%d time=%lld us\n",
				p->cmd_type, p->str, p->seq_num,
				p->lun, p->cmd_id, (unsigned long long)p->lba,
				p->transfer_len, p->tag, p->doorbell,
				p->outstanding_reqs, p->idn,
				ktime_to_us(p->tstamp));
			usleep_range(1000, 1100);
		}
	}
}
#else
static void ufshcd_cmd_log_init(struct ufs_hba *hba)
{
}

#ifdef CONFIG_TRACEPOINTS
static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
			     unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
			     sector_t lba, int transfer_len, u8 opcode)
{
	struct ufshcd_cmd_log_entry entry;

	entry.str = str;
	entry.lba = lba;
	entry.cmd_id = cmd_id;
	entry.transfer_len = transfer_len;
	entry.doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	entry.tag = tag;

	ufshcd_add_command_trace(hba, &entry, opcode);
}
#endif

static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
{
}

static void ufshcd_print_cmd_log(struct ufs_hba *hba)
{
}
#endif

#ifdef CONFIG_TRACEPOINTS
static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
					unsigned int tag, const char *str)
{
	struct ufshcd_lrb *lrbp;
	char *cmd_type = NULL;
	u8 opcode = 0;
	u8 cmd_id = 0, idn = 0;
	sector_t lba = -1;
	int transfer_len = -1;

	lrbp = &hba->lrb[tag];

	if (lrbp->cmd) { /* data phase exists */
		opcode = (u8)(*lrbp->cmd->cmnd);
		if ((opcode == READ_10) || (opcode == WRITE_10)) {
			/*
			 * Currently we only fully trace read(10) and write(10)
			 * commands
			 */
			if (lrbp->cmd->request && lrbp->cmd->request->bio)
				lba =
				lrbp->cmd->request->bio->bi_iter.bi_sector;
			transfer_len = be32_to_cpu(
				lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
		}
	}

	if (lrbp->cmd && (lrbp->command_type == UTP_CMD_TYPE_SCSI)) {
		cmd_type = "scsi";
		cmd_id = (u8)(*lrbp->cmd->cmnd);
	} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
		if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) {
			cmd_type = "nop";
			cmd_id = 0;
		} else if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) {
			cmd_type = "query";
			cmd_id = hba->dev_cmd.query.request.upiu_req.opcode;
			idn = hba->dev_cmd.query.request.upiu_req.idn;
		}
	}

	__ufshcd_cmd_log(hba, (char *) str, cmd_type, tag, cmd_id, idn,
			 lrbp->lun, lba, transfer_len, opcode);
}
#else
static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
					unsigned int tag, const char *str)
{
}
#endif

static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_CLK_FREQ_EN))
		return;

	if (!head || list_empty(head))
		return;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
		    clki->max_freq)
			dev_err(hba->dev, "clk: %s, rate: %u\n",
				clki->name, clki->curr_freq);
	}
}

static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
		struct ufs_uic_err_reg_hist *err_hist, char *err_name)
{
	int i;

	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_UIC_ERR_HIST_EN))
		return;

	for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
		int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;

		if (err_hist->reg[p] == 0)
			continue;
		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us", err_name, i,
			err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
	}
}

static inline void __ufshcd_print_host_regs(struct ufs_hba *hba, bool no_sleep)
{
	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_REGS_EN))
		return;

	/*
	 * hex_dump reads its data without the readl macro. This might
	 * cause inconsistency issues on some platform, as the printed
	 * values may be from cache and not the most recent value.
	 * To know whether you are looking at an un-cached version verify
	 * that IORESOURCE_MEM flag is on when xxx_get_resource() is invoked
	 * during platform/pci probe function.
	 */
	ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
	dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x",
		hba->ufs_version, hba->capabilities);
	dev_err(hba->dev,
		"hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x",
		(u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
	dev_err(hba->dev,
		"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d",
		ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
		hba->ufs_stats.hibern8_exit_cnt);

	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");

	ufshcd_print_clk_freqs(hba);

	ufshcd_vops_dbg_register_dump(hba, no_sleep);
}

static void ufshcd_print_host_regs(struct ufs_hba *hba)
{
	__ufshcd_print_host_regs(hba, false);
}

static
void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
{
	struct ufshcd_lrb *lrbp;
	int prdt_length;
	int tag;

	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_TRS_EN))
		return;

	for_each_set_bit(tag, &bitmap, hba->nutrs) {
		lrbp = &hba->lrb[tag];

		dev_err(hba->dev, "UPIU[%d] - issue time %lld us",
			tag, ktime_to_us(lrbp->issue_time_stamp));
		dev_err(hba->dev,
			"UPIU[%d] - Transfer Request Descriptor phys@0x%llx",
			tag, (u64)lrbp->utrd_dma_addr);
		ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
				sizeof(struct utp_transfer_req_desc));
		dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx", tag,
			(u64)lrbp->ucd_req_dma_addr);
		ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
				sizeof(struct utp_upiu_req));
		dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx", tag,
			(u64)lrbp->ucd_rsp_dma_addr);
		ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
				sizeof(struct utp_upiu_rsp));

		prdt_length =
			le16_to_cpu(lrbp->utr_descriptor_ptr->prd_table_length);
		dev_err(hba->dev, "UPIU[%d] - PRDT - %d entries phys@0x%llx",
			tag, prdt_length, (u64)lrbp->ucd_prdt_dma_addr);

		if (pr_prdt)
			ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
				sizeof(struct ufshcd_sg_entry) * prdt_length);
	}
}

static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
	struct utp_task_req_desc *tmrdp;
	int tag;

	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_TMRS_EN))
		return;

	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
		tmrdp = &hba->utmrdl_base_addr[tag];
		dev_err(hba->dev, "TM[%d] - Task Management Header", tag);
		ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
				sizeof(struct request_desc_header));
		dev_err(hba->dev, "TM[%d] - Task Management Request UPIU",
			tag);
		ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
				sizeof(struct utp_upiu_req));
		dev_err(hba->dev, "TM[%d] - Task Management Response UPIU",
			tag);
		ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
				sizeof(struct utp_task_req_desc));
	}
}

static void ufshcd_print_fsm_state(struct ufs_hba *hba)
{
	int err = 0, tx_fsm_val = 0, rx_fsm_val = 0;

	err = ufshcd_dme_get(hba,
			UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
			UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
			&tx_fsm_val);
	dev_err(hba->dev, "%s: TX_FSM_STATE = %u, err = %d\n", __func__,
		tx_fsm_val, err);
	err = ufshcd_dme_get(hba,
			UIC_ARG_MIB_SEL(MPHY_RX_FSM_STATE,
			UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
			&rx_fsm_val);
	dev_err(hba->dev, "%s: RX_FSM_STATE = %u, err = %d\n", __func__,
		rx_fsm_val, err);
}

static void ufshcd_print_host_state(struct ufs_hba *hba)
{
	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_STATE_EN))
		return;

	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
	dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
		hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x, saved_ce_err=0x%x\n",
		hba->saved_err, hba->saved_uic_err, hba->saved_ce_err);
	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
		hba->pm_op_in_progress, hba->is_sys_suspended);
	dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
		hba->auto_bkops_enabled, hba->host->host_self_blocked);
	dev_err(hba->dev, "Clk gate=%d, hibern8 on idle=%d\n",
		hba->clk_gating.state, hba->hibern8_on_idle.state);
	dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
		hba->eh_flags, hba->req_abort_count);
	dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
		hba->capabilities, hba->caps);
	dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
		hba->dev_quirks);
}

/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 * @hba: per-adapter instance
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	static const char * const names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_PWR_EN))
		return;

	dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		 __func__,
		 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		 names[hba->pwr_info.pwr_rx],
		 names[hba->pwr_info.pwr_tx],
		 hba->pwr_info.hs_rate);
}

/**
 * ufshcd_wait_for_register - wait for register value to change
 * @hba - per-adapter interface
 * @reg - mmio register offset
 * @mask - mask to apply to read register value
 * @val - wait condition
 * @interval_us - polling interval in microsecs
 * @timeout_ms - timeout in millisecs
 * @can_sleep - perform sleep or just spin
 *
 * Returns -ETIMEDOUT on error, zero on success
 */
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms, bool can_sleep)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		if (can_sleep)
			usleep_range(interval_us, interval_us + 50);
		else
			udelay(interval_us);
		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}
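
/*
 * Example (hypothetical values): wait for up to 50 ms, polling every
 * 20 us and allowing sleep, until all doorbell bits are cleared:
 *
 *	err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *				       ~0U, 0, 20, 50, true);
 */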

/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba - Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	u32 intr_mask = 0;

	switch (hba->ufs_version) {
	case UFSHCI_VERSION_10:
		intr_mask = INTERRUPT_MASK_ALL_VER_10;
		break;
	/* allow fall through */
	case UFSHCI_VERSION_11:
	case UFSHCI_VERSION_20:
		intr_mask = INTERRUPT_MASK_ALL_VER_11;
		break;
	/* allow fall through */
	case UFSHCI_VERSION_21:
	default:
		intr_mask = INTERRUPT_MASK_ALL_VER_21;
		break;
	}

	if (!ufshcd_is_crypto_supported(hba))
		intr_mask &= ~CRYPTO_ENGINE_FATAL_ERROR;

	return intr_mask;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba - Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		return ufshcd_vops_get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_is_device_present - Check if any device connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Returns 1 if device present, 0 if no device detected
 */
static inline int ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
						DEVICE_PRESENT) ? 1 : 0;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 * @task_req_descp: pointer to utp_task_req_desc structure
 *
 * This function is used to get the OCS field from UTMRD
 * Returns the OCS field in the UTMRD
 */
static inline int
ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
{
	return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tm_free_slot - get a free slot for task management request
 * @hba: per adapter instance
 * @free_slot: pointer to variable with available slot value
 *
 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 * Returns 0 if free slot is not available, else return 1 with tag value
 * in @free_slot.
 */
static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
{
	int tag;
	bool ret = false;

	if (!free_slot)
		goto out;

	do {
		tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
		if (tag >= hba->nutmrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));

	*free_slot = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
{
	clear_bit_unlock(slot, &hba->tm_slots_in_use);
}

/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
	ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
 * @hba: per adapter instance
 * @tag: position of the bit to be cleared
 */
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
	__clear_bit(tag, &hba->outstanding_reqs);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	/*
	 * The mask 0xFF is for the following HCS register bits
	 * Bit		Description
	 *  0		Device Present
	 *  1		UTRLRDY
	 *  2		UTMRLRDY
	 *  3		UCRDY
	 * 4-7		reserved
	 */
	return ((reg & 0xFF) >> 1) ^ 0x07;
}

/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function gets UIC command argument3
 * Returns 0 on success, non zero value on error
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/**
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT ? true : false;
}

/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 *			When run-stop registers are set to 1, it indicates the
 *			host controller that it can process the requests
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	u32 val = CONTROLLER_ENABLE;

	if (ufshcd_is_crypto_supported(hba))
		val |= CRYPTO_GENERAL_ENABLE;
	ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns zero if controller is active, 1 otherwise
 */
static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
}

static const char *ufschd_uic_link_state_to_string(
			enum uic_link_state state)
{
	switch (state) {
	case UIC_LINK_OFF_STATE:	return "OFF";
	case UIC_LINK_ACTIVE_STATE:	return "ACTIVE";
	case UIC_LINK_HIBERN8_STATE:	return "HIBERN8";
	default:			return "UNKNOWN";
	}
}

static const char *ufschd_ufs_dev_pwr_mode_to_string(
			enum ufs_dev_pwr_mode state)
{
	switch (state) {
	case UFS_ACTIVE_PWR_MODE:	return "ACTIVE";
	case UFS_SLEEP_PWR_MODE:	return "SLEEP";
	case UFS_POWERDOWN_PWR_MODE:	return "POWERDOWN";
	default:			return "UNKNOWN";
	}
}

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	/* HCI version 1.0 and 1.1 supports UniPro 1.41 */
	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
	    (hba->ufs_version == UFSHCI_VERSION_11))
		return UFS_UNIPRO_VER_1_41;
	else
		return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);

static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
	/*
	 * If both host and device support UniPro ver1.6 or later, PA layer
	 * parameters tuning happens during link startup itself.
	 *
	 * We can manually tune PA layer parameters if either host or device
	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
	 * logic simple, we will only do manual tuning if local unipro version
	 * doesn't support ver1.6 or later.
	 */
	if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
		return true;
	else
		return false;
}

/**
 * ufshcd_set_clk_freq - set UFS controller clock frequencies
 * @hba: per adapter instance
 * @scale_up: If True, set max possible frequency otherwise set low frequency
 *
 * Returns 0 if successful
 * Returns < 0 for any other errors
 */
static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (!head || list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;

				ret = clk_set_rate(clki->clk, clki->max_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->max_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled up", clki->name,
						clki->curr_freq,
						clki->max_freq);
				clki->curr_freq = clki->max_freq;

			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;

				ret = clk_set_rate(clki->clk, clki->min_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->min_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled down", clki->name,
						clki->curr_freq,
						clki->min_freq);
				clki->curr_freq = clki->min_freq;
			}
		}
		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
			clki->name, clk_get_rate(clki->clk));
	}

out:
	return ret;
}

/**
 * ufshcd_scale_clks - scale up or scale down UFS controller clocks
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Returns 0 if successful
 * Returns < 0 for any other errors
 */
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
	if (ret)
		goto out;

	ret = ufshcd_set_clk_freq(hba, scale_up);
	if (ret)
		goto out;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
	if (ret)
		ufshcd_set_clk_freq(hba, !scale_up);

out:
	return ret;
}

static inline void ufshcd_cancel_gate_work(struct ufs_hba *hba)
{
	hrtimer_cancel(&hba->clk_gating.gate_hrtimer);
	cancel_work_sync(&hba->clk_gating.gate_work);
}

static void ufshcd_ungate_work(struct work_struct *work)
{
	int ret;
	unsigned long flags;
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.ungate_work);

	ufshcd_cancel_gate_work(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == CLKS_ON) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		goto unblock_reqs;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_hba_vreg_set_hpm(hba);
	ufshcd_enable_clocks(hba);

	/* Exit from hibern8 */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		/* Prevent gating in this path */
		hba->clk_gating.is_suspended = true;
		if (ufshcd_is_link_hibern8(hba)) {
			ret = ufshcd_uic_hibern8_exit(hba);
			if (ret)
				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			else
				ufshcd_set_link_active(hba);
		}
		hba->clk_gating.is_suspended = false;
	}
unblock_reqs:
	ufshcd_scsi_unblock_requests(hba);
}

/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 * @async: This indicates whether caller should ungate clocks asynchronously.
 */
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
	int rc = 0;
	unsigned long flags;

	if (!ufshcd_is_clkgating_allowed(hba))
		goto out;
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.active_reqs++;

	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return 0;
	}

start:
	switch (hba->clk_gating.state) {
	case CLKS_ON:
		/*
		 * Wait for the ungate work to complete if in progress.
		 * Though the clocks may be in ON state, the link could
		 * still be in hibern8 state if hibern8 is allowed
		 * during clock gating.
		 * Make sure we exit hibern8 state also in addition to
		 * clocks being ON.
		 */
		if (ufshcd_can_hibern8_during_gating(hba) &&
		    ufshcd_is_link_hibern8(hba)) {
			spin_unlock_irqrestore(hba->host->host_lock, flags);
			flush_work(&hba->clk_gating.ungate_work);
			spin_lock_irqsave(hba->host->host_lock, flags);
			goto start;
		}
		break;
	case REQ_CLKS_OFF:
		/*
		 * If the timer was active but the callback was not running
		 * we have nothing to do, just change state and return.
		 */
		if (hrtimer_try_to_cancel(&hba->clk_gating.gate_hrtimer) == 1) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
				hba->clk_gating.state);
			break;
		}
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
	case CLKS_OFF:
		__ufshcd_scsi_block_requests(hba);
		hba->clk_gating.state = REQ_CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
			hba->clk_gating.state);
		queue_work(hba->clk_gating.clk_gating_workq,
			   &hba->clk_gating.ungate_work);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
		 */
	case REQ_CLKS_ON:
		if (async) {
			rc = -EAGAIN;
			hba->clk_gating.active_reqs--;
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		flush_work(&hba->clk_gating.ungate_work);
		/* Make sure state is CLKS_ON before returning */
		spin_lock_irqsave(hba->host->host_lock, flags);
		goto start;
	default:
		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
			__func__, hba->clk_gating.state);
		break;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	hba->ufs_stats.clk_hold.ts = ktime_get();
	return rc;
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
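
/*
 * Work handler kicked by the gating hrtimer: idles the link (hibern8,
 * when allowed), turns the controller clocks off and completes the
 * REQ_CLKS_OFF -> CLKS_OFF transition, unless new requests arrived.
 */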
static void ufshcd_gate_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.gate_work);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case save time by
	 * skipping the gating work and exit after changing the clock
	 * state to CLKS_ON.
	 */
	if (hba->clk_gating.is_suspended ||
	    (hba->clk_gating.state != REQ_CLKS_OFF)) {
		hba->clk_gating.state = CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
			hba->clk_gating.state);
		goto rel_lock;
	}

	if (hba->clk_gating.active_reqs
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done)
		goto rel_lock;

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (ufshcd_is_hibern8_on_idle_allowed(hba) &&
	    hba->hibern8_on_idle.is_enabled)
		/*
		 * Hibern8 enter work (on Idle) needs clocks to be ON hence
		 * make sure that it is flushed before turning off the clocks.
		 */
		flush_delayed_work(&hba->hibern8_on_idle.enter_work);

	/* put the link into hibern8 mode before turning off clocks */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		if (ufshcd_uic_hibern8_enter(hba)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
				hba->clk_gating.state);
			goto out;
		}
		ufshcd_set_link_hibern8(hba);
	}

	/*
	 * If auto hibern8 is supported then the link will already
	 * be in hibern8 state and the ref clock can be gated.
	 */
	if ((ufshcd_is_auto_hibern8_supported(hba) ||
	     !ufshcd_is_link_active(hba)) && !hba->no_ref_clk_gating)
		ufshcd_disable_clocks(hba, true);
	else
		/* If link is active, device ref_clk can't be switched off */
		ufshcd_disable_clocks_skip_ref_clk(hba, true);

	/* Put the host controller in low power mode if possible */
	ufshcd_hba_vreg_set_lpm(hba);

	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case keep the state
	 * as REQ_CLKS_ON which would anyway imply that clocks are off
	 * and a request to turn them on is pending. By doing this way,
	 * we keep the state machine intact and this would ultimately
	 * prevent from doing cancel work multiple times when there are
	 * new requests arriving before the current cancel work is done.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == REQ_CLKS_OFF) {
		hba->clk_gating.state = CLKS_OFF;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
			hba->clk_gating.state);
	}
rel_lock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return;
}

/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba, bool no_sched)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.active_reqs--;

	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done
		|| ufshcd_eh_in_progress(hba) || no_sched)
		return;

	hba->clk_gating.state = REQ_CLKS_OFF;
	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
	hba->ufs_stats.clk_rel.ts = ktime_get();

	hrtimer_start(&hba->clk_gating.gate_hrtimer,
		      ms_to_ktime(hba->clk_gating.delay_ms),
		      HRTIMER_MODE_REL);
}

void ufshcd_release(struct ufs_hba *hba, bool no_sched)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_release(hba, no_sched);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);
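
/* sysfs accessors for the clock gating delay and enable attributes */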
static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
}

static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.delay_ms = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}

static ssize_t ufshcd_clkgate_delay_pwr_save_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n",
			hba->clk_gating.delay_ms_pwr_save);
}

static ssize_t ufshcd_clkgate_delay_pwr_save_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);

	hba->clk_gating.delay_ms_pwr_save = value;
	if (ufshcd_is_clkscaling_supported(hba) &&
	    !hba->clk_scaling.is_scaled_up)
		hba->clk_gating.delay_ms = hba->clk_gating.delay_ms_pwr_save;

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}

static ssize_t ufshcd_clkgate_delay_perf_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms_perf);
}

static ssize_t ufshcd_clkgate_delay_perf_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);

	hba->clk_gating.delay_ms_perf = value;
	if (ufshcd_is_clkscaling_supported(hba) &&
	    hba->clk_scaling.is_scaled_up)
		hba->clk_gating.delay_ms = hba->clk_gating.delay_ms_perf;

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}

static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
}

static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags;
	u32 value;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	value = !!value;
	if (value == hba->clk_gating.is_enabled)
		goto out;

	if (value) {
		ufshcd_release(hba, false);
	} else {
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->clk_gating.active_reqs++;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}

	hba->clk_gating.is_enabled = value;
out:
	return count;
}

static enum hrtimer_restart ufshcd_clkgate_hrtimer_handler(
					struct hrtimer *timer)
{
	struct ufs_hba *hba = container_of(timer, struct ufs_hba,
					   clk_gating.gate_hrtimer);

	queue_work(hba->clk_gating.clk_gating_workq,
		   &hba->clk_gating.gate_work);

	return HRTIMER_NORESTART;
}

static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
	struct ufs_clk_gating *gating = &hba->clk_gating;
	char wq_name[sizeof("ufs_clk_gating_00")];

	hba->clk_gating.state = CLKS_ON;

	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	/*
	 * Disable hibern8 during clk gating if
	 * auto hibern8 is supported
	 */
	if (ufshcd_is_auto_hibern8_supported(hba))
		hba->caps &= ~UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;

	INIT_WORK(&gating->gate_work, ufshcd_gate_work);
	INIT_WORK(&gating->ungate_work, ufshcd_ungate_work);
	/*
	 * Clock gating work must be executed only after auto hibern8
	 * timeout has expired in the hardware or after aggressive
	 * hibern8 on idle software timeout. Using jiffy based low
	 * resolution delayed work is not reliable to guarantee this,
	 * hence use a high resolution timer to make sure we schedule
	 * the gate work precisely more than hibern8 timeout.
	 *
	 * Always make sure gating->delay_ms > hibern8_on_idle->delay_ms
	 */
	hrtimer_init(&gating->gate_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	gating->gate_hrtimer.function = ufshcd_clkgate_hrtimer_handler;

	snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
		 hba->host->host_no);
	hba->clk_gating.clk_gating_workq =
		create_singlethread_workqueue(wq_name);

	gating->is_enabled = true;

	gating->delay_ms_pwr_save = UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE;
	gating->delay_ms_perf = UFSHCD_CLK_GATING_DELAY_MS_PERF;

	/* start with performance mode */
	gating->delay_ms = gating->delay_ms_perf;

	if (!ufshcd_is_clkscaling_supported(hba))
		goto scaling_not_supported;

	gating->delay_pwr_save_attr.show = ufshcd_clkgate_delay_pwr_save_show;
	gating->delay_pwr_save_attr.store = ufshcd_clkgate_delay_pwr_save_store;
	sysfs_attr_init(&gating->delay_pwr_save_attr.attr);
	gating->delay_pwr_save_attr.attr.name = "clkgate_delay_ms_pwr_save";
	gating->delay_pwr_save_attr.attr.mode = S_IRUGO | S_IWUSR;
	if (device_create_file(hba->dev, &gating->delay_pwr_save_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay_ms_pwr_save\n");

	gating->delay_perf_attr.show = ufshcd_clkgate_delay_perf_show;
	gating->delay_perf_attr.store = ufshcd_clkgate_delay_perf_store;
	sysfs_attr_init(&gating->delay_perf_attr.attr);
	gating->delay_perf_attr.attr.name = "clkgate_delay_ms_perf";
	gating->delay_perf_attr.attr.mode = S_IRUGO | S_IWUSR;
	if (device_create_file(hba->dev, &gating->delay_perf_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay_ms_perf\n");

	goto add_clkgate_enable;

scaling_not_supported:
	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
	hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");

add_clkgate_enable:
	gating->enable_attr.show = ufshcd_clkgate_enable_show;
	gating->enable_attr.store = ufshcd_clkgate_enable_store;
	sysfs_attr_init(&gating->enable_attr.attr);
	gating->enable_attr.attr.name = "clkgate_enable";
	gating->enable_attr.attr.mode = S_IRUGO | S_IWUSR;
	if (device_create_file(hba->dev, &gating->enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
}

static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;
	if (ufshcd_is_clkscaling_supported(hba)) {
		device_remove_file(hba->dev,
				   &hba->clk_gating.delay_pwr_save_attr);
		device_remove_file(hba->dev, &hba->clk_gating.delay_perf_attr);
	} else {
		device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
	}
	device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
	ufshcd_cancel_gate_work(hba);
	cancel_work_sync(&hba->clk_gating.ungate_work);
	destroy_workqueue(hba->clk_gating.clk_gating_workq);
}

static void ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba, u32 delay)
{
	ufshcd_rmwl(hba, AUTO_HIBERN8_TIMER_SCALE_MASK |
			 AUTO_HIBERN8_IDLE_TIMER_MASK,
			 AUTO_HIBERN8_TIMER_SCALE_1_MS | delay,
			 REG_AUTO_HIBERN8_IDLE_TIMER);
	/* Make sure the timer gets applied before further operations */
	mb();
}

/**
 * ufshcd_hibern8_hold - Make sure that link is not in hibern8.
 *
 * @hba: per adapter instance
 * @async: This indicates whether caller wants to exit hibern8 asynchronously.
 *
 * Exit from hibern8 mode and set the link as active.
 *
 * Return 0 on success, non-zero on failure.
 */
static int ufshcd_hibern8_hold(struct ufs_hba *hba, bool async)
{
	int rc = 0;
	unsigned long flags;

	if (!ufshcd_is_hibern8_on_idle_allowed(hba))
		goto out;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->hibern8_on_idle.active_reqs++;

	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return 0;
	}

start:
	switch (hba->hibern8_on_idle.state) {
	case HIBERN8_EXITED:
		break;
	case REQ_HIBERN8_ENTER:
		if (cancel_delayed_work(&hba->hibern8_on_idle.enter_work)) {
			hba->hibern8_on_idle.state = HIBERN8_EXITED;
			trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
				hba->hibern8_on_idle.state);
			break;
		}
		/*
		 * If we are here, it means Hibern8 enter work is either done
		 * or currently running. Hence, fall through to cancel hibern8
		 * work and exit hibern8.
		 */
	case HIBERN8_ENTERED:
		__ufshcd_scsi_block_requests(hba);
		hba->hibern8_on_idle.state = REQ_HIBERN8_EXIT;
		trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
			hba->hibern8_on_idle.state);
		schedule_work(&hba->hibern8_on_idle.exit_work);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
		 */
	case REQ_HIBERN8_EXIT:
		if (async) {
			rc = -EAGAIN;
			hba->hibern8_on_idle.active_reqs--;
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		flush_work(&hba->hibern8_on_idle.exit_work);
		/* Make sure state is HIBERN8_EXITED before returning */
		spin_lock_irqsave(hba->host->host_lock, flags);
		goto start;
	default:
		dev_err(hba->dev, "%s: H8 is in invalid state %d\n",
			__func__, hba->hibern8_on_idle.state);
		break;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return rc;
}

/* host lock must be held before calling this variant */
static void __ufshcd_hibern8_release(struct ufs_hba *hba, bool no_sched)
{
	unsigned long delay_in_jiffies;

	if (!ufshcd_is_hibern8_on_idle_allowed(hba))
		return;

	hba->hibern8_on_idle.active_reqs--;
	BUG_ON(hba->hibern8_on_idle.active_reqs < 0);

	if (hba->hibern8_on_idle.active_reqs
		|| hba->hibern8_on_idle.is_suspended
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done
		|| ufshcd_eh_in_progress(hba) || no_sched)
		return;

	hba->hibern8_on_idle.state = REQ_HIBERN8_ENTER;
	trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
		hba->hibern8_on_idle.state);
	/*
	 * Scheduling the delayed work after 1 jiffy can cause it to run
	 * any time from 0ms to 1000/HZ ms, which is not desirable for
	 * hibern8 enter work as it may impact the performance if it gets
	 * scheduled almost immediately. Hence make sure that hibern8 enter
	 * work gets scheduled at least after 2 jiffies (any time between
	 * 1000/HZ ms to 2000/HZ ms).
	 */
	delay_in_jiffies = msecs_to_jiffies(hba->hibern8_on_idle.delay_ms);
	if (delay_in_jiffies == 1)
		delay_in_jiffies++;

	schedule_delayed_work(&hba->hibern8_on_idle.enter_work,
			      delay_in_jiffies);
}

static void ufshcd_hibern8_release(struct ufs_hba *hba, bool no_sched)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_hibern8_release(hba, no_sched);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
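
/*
 * Delayed work scheduled by __ufshcd_hibern8_release(): puts the link
 * into hibern8 once the controller has stayed idle for the configured
 * hibern8_on_idle.delay_ms period.
 */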
2021 static void ufshcd_hibern8_enter_work(struct work_struct *work)
2023 struct ufs_hba *hba = container_of(work, struct ufs_hba,
2024 hibern8_on_idle.enter_work.work);
2025 unsigned long flags;
2027 spin_lock_irqsave(hba->host->host_lock, flags);
2028 if (hba->hibern8_on_idle.is_suspended) {
2029 hba->hibern8_on_idle.state = HIBERN8_EXITED;
2030 trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
2031 hba->hibern8_on_idle.state);
2035 if (hba->hibern8_on_idle.active_reqs
2036 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
2037 || hba->lrb_in_use || hba->outstanding_tasks
2038 || hba->active_uic_cmd || hba->uic_async_done)
2041 spin_unlock_irqrestore(hba->host->host_lock, flags);
2043 if (ufshcd_is_link_active(hba) && ufshcd_uic_hibern8_enter(hba)) {
2045 hba->hibern8_on_idle.state = HIBERN8_EXITED;
2046 trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
2047 hba->hibern8_on_idle.state);
2050 ufshcd_set_link_hibern8(hba);
2053 * In case you are here to cancel this work, hibern8_on_idle.state
2054 * would be marked as REQ_HIBERN8_EXIT. In that case keep the state
2055 * as REQ_HIBERN8_EXIT, which anyway implies that we are in hibern8
2056 * and a request to exit from it is pending. This way we keep the
2057 * state machine intact and ultimately avoid running the cancel
2058 * work multiple times when new requests arrive before the current
2059 * cancel work is done.
2061 spin_lock_irqsave(hba->host->host_lock, flags);
2062 if (hba->hibern8_on_idle.state == REQ_HIBERN8_ENTER) {
2063 hba->hibern8_on_idle.state = HIBERN8_ENTERED;
2064 trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
2065 hba->hibern8_on_idle.state);
2068 spin_unlock_irqrestore(hba->host->host_lock, flags);
2073 static void __ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba,
2074 unsigned long delay_ms)
2076 pm_runtime_get_sync(hba->dev);
2077 ufshcd_hold_all(hba);
2078 ufshcd_scsi_block_requests(hba);
2079 down_write(&hba->lock);
2080 /* wait for all the outstanding requests to finish */
2081 ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
2082 ufshcd_set_auto_hibern8_timer(hba, delay_ms);
2083 up_write(&hba->lock);
2084 ufshcd_scsi_unblock_requests(hba);
2085 ufshcd_release_all(hba);
2086 pm_runtime_put_sync(hba->dev);
2089 static void ufshcd_hibern8_exit_work(struct work_struct *work)
2092 unsigned long flags;
2093 struct ufs_hba *hba = container_of(work, struct ufs_hba,
2094 hibern8_on_idle.exit_work);
2096 cancel_delayed_work_sync(&hba->hibern8_on_idle.enter_work);
2098 spin_lock_irqsave(hba->host->host_lock, flags);
2099 if ((hba->hibern8_on_idle.state == HIBERN8_EXITED)
2100 || ufshcd_is_link_active(hba)) {
2101 hba->hibern8_on_idle.state = HIBERN8_EXITED;
2102 spin_unlock_irqrestore(hba->host->host_lock, flags);
2105 spin_unlock_irqrestore(hba->host->host_lock, flags);
2107 /* Exit from hibern8 */
2108 if (ufshcd_is_link_hibern8(hba)) {
2109 hba->ufs_stats.clk_hold.ctx = H8_EXIT_WORK;
2110 ufshcd_hold(hba, false);
2111 ret = ufshcd_uic_hibern8_exit(hba);
2112 hba->ufs_stats.clk_rel.ctx = H8_EXIT_WORK;
2113 ufshcd_release(hba, false);
2115 spin_lock_irqsave(hba->host->host_lock, flags);
2116 ufshcd_set_link_active(hba);
2117 hba->hibern8_on_idle.state = HIBERN8_EXITED;
2118 trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
2119 hba->hibern8_on_idle.state);
2120 spin_unlock_irqrestore(hba->host->host_lock, flags);
2124 ufshcd_scsi_unblock_requests(hba);
2127 static ssize_t ufshcd_hibern8_on_idle_delay_show(struct device *dev,
2128 struct device_attribute *attr, char *buf)
2130 struct ufs_hba *hba = dev_get_drvdata(dev);
2132 return snprintf(buf, PAGE_SIZE, "%lu\n", hba->hibern8_on_idle.delay_ms);
2135 static ssize_t ufshcd_hibern8_on_idle_delay_store(struct device *dev,
2136 struct device_attribute *attr, const char *buf, size_t count)
2138 struct ufs_hba *hba = dev_get_drvdata(dev);
2139 unsigned long flags, value;
2142 if (kstrtoul(buf, 0, &value))
2145 spin_lock_irqsave(hba->host->host_lock, flags);
2146 if (hba->hibern8_on_idle.delay_ms == value)
2149 if (value >= hba->clk_gating.delay_ms_pwr_save ||
2150 value >= hba->clk_gating.delay_ms_perf) {
2151 dev_err(hba->dev, "hibern8_on_idle_delay (%lu) must be less than clkgate_delay_ms_pwr_save (%lu) and clkgate_delay_ms_perf (%lu)\n",
2152 value, hba->clk_gating.delay_ms_pwr_save,
2153 hba->clk_gating.delay_ms_perf);
2154 spin_unlock_irqrestore(hba->host->host_lock, flags);
2158 hba->hibern8_on_idle.delay_ms = value;
2159 spin_unlock_irqrestore(hba->host->host_lock, flags);
2161 /* Update auto hibern8 timer value if supported */
2162 if (change && ufshcd_is_auto_hibern8_supported(hba) &&
2163 hba->hibern8_on_idle.is_enabled)
2164 __ufshcd_set_auto_hibern8_timer(hba,
2165 hba->hibern8_on_idle.delay_ms);
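/*
 * Example (sysfs path is illustrative; the actual location depends on
 * the platform device name):
 *	echo 20 > /sys/devices/.../hibern8_on_idle_delay_ms
 * The store above rejects any value that is not strictly smaller than
 * both clock gating delays, presumably so that hibern8 on idle always
 * triggers before the clocks are gated.
 */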
2170 static ssize_t ufshcd_hibern8_on_idle_enable_show(struct device *dev,
2171 struct device_attribute *attr, char *buf)
2173 struct ufs_hba *hba = dev_get_drvdata(dev);
2175 return snprintf(buf, PAGE_SIZE, "%d\n",
2176 hba->hibern8_on_idle.is_enabled);
2179 static ssize_t ufshcd_hibern8_on_idle_enable_store(struct device *dev,
2180 struct device_attribute *attr, const char *buf, size_t count)
2182 struct ufs_hba *hba = dev_get_drvdata(dev);
2183 unsigned long flags;
2186 if (kstrtou32(buf, 0, &value))
2190 if (value == hba->hibern8_on_idle.is_enabled)
2193 /* Update auto hibern8 timer value if supported */
2194 if (ufshcd_is_auto_hibern8_supported(hba)) {
2195 __ufshcd_set_auto_hibern8_timer(hba,
2196 value ? hba->hibern8_on_idle.delay_ms : value);
2202 * As clock gating work would wait for the hibern8 enter work
2203 * to finish, clocks would remain on during hibern8 enter work.
2205 ufshcd_hold(hba, false);
2206 ufshcd_release_all(hba);
2208 spin_lock_irqsave(hba->host->host_lock, flags);
2209 hba->hibern8_on_idle.active_reqs++;
2210 spin_unlock_irqrestore(hba->host->host_lock, flags);
2214 hba->hibern8_on_idle.is_enabled = value;
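/*
 * Example (illustrative): writing 0 to hibern8_on_idle_enable takes a
 * permanent active_reqs vote (see above) so idle hibern8 never
 * triggers; writing 1 is expected to drop that vote again.
 */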
2219 static void ufshcd_init_hibern8_on_idle(struct ufs_hba *hba)
2221 /* initialize the state variable here */
2222 hba->hibern8_on_idle.state = HIBERN8_EXITED;
2224 if (!ufshcd_is_hibern8_on_idle_allowed(hba) &&
2225 !ufshcd_is_auto_hibern8_supported(hba))
2228 if (ufshcd_is_auto_hibern8_supported(hba)) {
2229 hba->hibern8_on_idle.delay_ms = 1;
2230 hba->hibern8_on_idle.state = AUTO_HIBERN8;
2232 * Disable SW hibern8 enter on idle in case
2233 * auto hibern8 is supported
2235 hba->caps &= ~UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE;
2237 hba->hibern8_on_idle.delay_ms = 10;
2238 INIT_DELAYED_WORK(&hba->hibern8_on_idle.enter_work,
2239 ufshcd_hibern8_enter_work);
2240 INIT_WORK(&hba->hibern8_on_idle.exit_work,
2241 ufshcd_hibern8_exit_work);
2244 hba->hibern8_on_idle.is_enabled = true;
2246 hba->hibern8_on_idle.delay_attr.show =
2247 ufshcd_hibern8_on_idle_delay_show;
2248 hba->hibern8_on_idle.delay_attr.store =
2249 ufshcd_hibern8_on_idle_delay_store;
2250 sysfs_attr_init(&hba->hibern8_on_idle.delay_attr.attr);
2251 hba->hibern8_on_idle.delay_attr.attr.name = "hibern8_on_idle_delay_ms";
2252 hba->hibern8_on_idle.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
2253 if (device_create_file(hba->dev, &hba->hibern8_on_idle.delay_attr))
2254 dev_err(hba->dev, "Failed to create sysfs for hibern8_on_idle_delay\n");
2256 hba->hibern8_on_idle.enable_attr.show =
2257 ufshcd_hibern8_on_idle_enable_show;
2258 hba->hibern8_on_idle.enable_attr.store =
2259 ufshcd_hibern8_on_idle_enable_store;
2260 sysfs_attr_init(&hba->hibern8_on_idle.enable_attr.attr);
2261 hba->hibern8_on_idle.enable_attr.attr.name = "hibern8_on_idle_enable";
2262 hba->hibern8_on_idle.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
2263 if (device_create_file(hba->dev, &hba->hibern8_on_idle.enable_attr))
2264 dev_err(hba->dev, "Failed to create sysfs for hibern8_on_idle_enable\n");
2267 static void ufshcd_exit_hibern8_on_idle(struct ufs_hba *hba)
2269 if (!ufshcd_is_hibern8_on_idle_allowed(hba) &&
2270 !ufshcd_is_auto_hibern8_supported(hba))
2272 device_remove_file(hba->dev, &hba->hibern8_on_idle.delay_attr);
2273 device_remove_file(hba->dev, &hba->hibern8_on_idle.enable_attr);
2276 static void ufshcd_hold_all(struct ufs_hba *hba)
2278 ufshcd_hold(hba, false);
2279 ufshcd_hibern8_hold(hba, false);
2282 static void ufshcd_release_all(struct ufs_hba *hba)
2284 ufshcd_hibern8_release(hba, false);
2285 ufshcd_release(hba, false);
2288 /* Must be called with host lock acquired */
2289 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
2291 bool queue_resume_work = false;
2293 if (!ufshcd_is_clkscaling_supported(hba))
2296 if (!hba->clk_scaling.active_reqs++)
2297 queue_resume_work = true;
2299 if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
2302 if (queue_resume_work)
2303 queue_work(hba->clk_scaling.workq,
2304 &hba->clk_scaling.resume_work);
2306 if (!hba->clk_scaling.window_start_t) {
2307 hba->clk_scaling.window_start_t = jiffies;
2308 hba->clk_scaling.tot_busy_t = 0;
2309 hba->clk_scaling.is_busy_started = false;
2312 if (!hba->clk_scaling.is_busy_started) {
2313 hba->clk_scaling.busy_start_t = ktime_get();
2314 hba->clk_scaling.is_busy_started = true;
2318 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
2320 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
2322 if (!ufshcd_is_clkscaling_supported(hba))
2325 if (!hba->outstanding_reqs && scaling->is_busy_started) {
2326 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
2327 scaling->busy_start_t));
2328 scaling->busy_start_t = ktime_set(0, 0);
2329 scaling->is_busy_started = false;
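/*
 * Illustrative accounting (numbers assumed): if a scaling window opens
 * at t=0, the first request arrives at t=2ms and the queue drains at
 * t=9ms, tot_busy_t accumulates 7ms; a devfreq sample at t=10ms then
 * sees a busy/total ratio of 70%.
 */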
2334 * ufshcd_send_command - Send SCSI or device management commands
2335 * @hba: per adapter instance
2336 * @task_tag: Task tag of the command
2339 int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
2343 hba->lrb[task_tag].issue_time_stamp = ktime_get();
2344 hba->lrb[task_tag].complete_time_stamp = ktime_set(0, 0);
2345 ufshcd_clk_scaling_start_busy(hba);
2346 __set_bit(task_tag, &hba->outstanding_reqs);
2347 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
2348 /* Make sure that doorbell is committed immediately */
2350 ufshcd_cond_add_cmd_trace(hba, task_tag, "send");
2351 ufshcd_update_tag_stats(hba, task_tag);
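/*
 * Worked example of the doorbell math above: task_tag 5 turns into
 * mask 1 << 5 = 0x20, so bit 5 of REG_UTP_TRANSFER_REQ_DOOR_BELL is
 * set; the controller clears that bit once the request completes.
 */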
2356 * ufshcd_copy_sense_data - Copy sense data in case of check condition
2357 * @lrbp: pointer to local reference block
2359 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
2362 if (lrbp->sense_buffer &&
2363 ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
2366 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
2367 len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
2369 memcpy(lrbp->sense_buffer,
2370 lrbp->ucd_rsp_ptr->sr.sense_data,
2371 min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
2376 * ufshcd_copy_query_response() - Copy the Query Response and the data
2378 * @hba: per adapter instance
2379 * @lrbp: pointer to local reference block
2382 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2384 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2386 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
2388 /* Get the descriptor */
2389 if (hba->dev_cmd.query.descriptor &&
2390 lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
2391 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
2392 GENERAL_UPIU_REQUEST_SIZE;
2396 /* data segment length */
2397 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
2398 MASK_QUERY_DATA_SEG_LEN;
2399 buf_len = be16_to_cpu(
2400 hba->dev_cmd.query.request.upiu_req.length);
2401 if (likely(buf_len >= resp_len)) {
2402 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
2405 "%s: Response size is bigger than buffer",
2415 * ufshcd_hba_capabilities - Read controller capabilities
2416 * @hba: per adapter instance
2418 static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
2420 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
2422 /* nutrs and nutmrs are 0 based values */
2423 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
2425 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
2427 /* disable auto hibern8 */
2428 hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
2432 * ufshcd_ready_for_uic_cmd - Check if controller is ready
2433 * to accept UIC commands
2434 * @hba: per adapter instance
2435 * Return true on success, else false
2437 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2439 if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
2446 * ufshcd_get_upmcrs - Get the power mode change request status
2447 * @hba: Pointer to adapter instance
2449 * This function gets the UPMCRS field of the HCS register.
2450 * Returns the value of the UPMCRS field.
2452 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
2454 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
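/*
 * UPMCRS lives in HCS bits [10:8]; e.g. an HCS value of 0x100 decodes
 * to UPMCRS = 1 (PWR_LOCAL), the success value that
 * ufshcd_uic_pwr_ctrl() checks for.
 */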
2458 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
2459 * @hba: per adapter instance
2460 * @uic_cmd: UIC command
2462 * Mutex must be held.
2465 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2467 WARN_ON(hba->active_uic_cmd);
2469 hba->active_uic_cmd = uic_cmd;
2471 ufshcd_dme_cmd_log(hba, "send", hba->active_uic_cmd->command);
2473 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2474 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2475 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
2478 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
2483 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
2484 * @hba: per adapter instance
2485 * @uic_cmd: UIC command
2487 * Must be called with mutex held.
2488 * Returns 0 only if success.
2491 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2494 unsigned long flags;
2496 if (wait_for_completion_timeout(&uic_cmd->done,
2497 msecs_to_jiffies(UIC_CMD_TIMEOUT)))
2498 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2503 ufsdbg_set_err_state(hba);
2505 ufshcd_dme_cmd_log(hba, "cmp1", hba->active_uic_cmd->command);
2507 spin_lock_irqsave(hba->host->host_lock, flags);
2508 hba->active_uic_cmd = NULL;
2509 spin_unlock_irqrestore(hba->host->host_lock, flags);
2515 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2516 * @hba: per adapter instance
2517 * @uic_cmd: UIC command
2518 * @completion: initialize the completion only if this is set to true
2520 * Identical to ufshcd_send_uic_cmd() except that it does not acquire the
2521 * mutex itself. Must be called with the mutex held and host_lock locked.
2522 * Returns 0 only if success.
2525 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2528 if (!ufshcd_ready_for_uic_cmd(hba)) {
2530 "Controller not ready to accept UIC commands\n");
2535 init_completion(&uic_cmd->done);
2537 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2543 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2544 * @hba: per adapter instance
2545 * @uic_cmd: UIC command
2547 * Returns 0 only if success.
2550 ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2553 unsigned long flags;
2555 hba->ufs_stats.clk_hold.ctx = UIC_CMD_SEND;
2556 ufshcd_hold_all(hba);
2557 mutex_lock(&hba->uic_cmd_mutex);
2558 ufshcd_add_delay_before_dme_cmd(hba);
2560 spin_lock_irqsave(hba->host->host_lock, flags);
2561 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2562 spin_unlock_irqrestore(hba->host->host_lock, flags);
2564 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2566 ufshcd_save_tstamp_of_last_dme_cmd(hba);
2567 mutex_unlock(&hba->uic_cmd_mutex);
2568 ufshcd_release_all(hba);
2569 hba->ufs_stats.clk_rel.ctx = UIC_CMD_SEND;
2571 ufsdbg_error_inject_dispatcher(hba,
2572 ERR_INJECT_UIC, 0, &ret);
2578 * ufshcd_map_sg - Map scatter-gather list to prdt
2579 * @lrbp - pointer to local reference block
2581 * Returns 0 in case of success, non-zero value in case of failure
2583 static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
2585 struct ufshcd_sg_entry *prd_table;
2586 struct scatterlist *sg;
2587 struct scsi_cmnd *cmd;
2592 sg_segments = scsi_dma_map(cmd);
2593 if (sg_segments < 0)
2597 lrbp->utr_descriptor_ptr->prd_table_length =
2598 cpu_to_le16((u16) (sg_segments));
2600 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2602 scsi_for_each_sg(cmd, sg, sg_segments, i) {
2604 cpu_to_le32(((u32) sg_dma_len(sg))-1);
2605 prd_table[i].base_addr =
2606 cpu_to_le32(lower_32_bits(sg->dma_address));
2607 prd_table[i].upper_addr =
2608 cpu_to_le32(upper_32_bits(sg->dma_address));
2609 prd_table[i].reserved = 0;
2612 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2619 * ufshcd_enable_intr - enable interrupts
2620 * @hba: per adapter instance
2621 * @intrs: interrupt bits
2623 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2625 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2627 if (hba->ufs_version == UFSHCI_VERSION_10) {
2629 rw = set & INTERRUPT_MASK_RW_VER_10;
2630 set = rw | ((set ^ intrs) & intrs);
2635 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
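/*
 * Example (illustrative): ufshcd_enable_intr(hba, UIC_COMMAND_COMPL)
 * ORs the UIC command completion bit into the enable set; the UFSHCI
 * 1.0 branch above additionally preserves that version's read/write
 * interrupt mask bits.
 */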
2639 * ufshcd_disable_intr - disable interrupts
2640 * @hba: per adapter instance
2641 * @intrs: interrupt bits
2643 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2645 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2647 if (hba->ufs_version == UFSHCI_VERSION_10) {
2649 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2650 ~(intrs & INTERRUPT_MASK_RW_VER_10);
2651 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2657 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2660 static int ufshcd_prepare_crypto_utrd(struct ufs_hba *hba,
2661 struct ufshcd_lrb *lrbp)
2663 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2665 bool enable = false;
2670 * Call vendor specific code to get crypto info for this request:
2671 * enable, crypto config. index, DUN.
2672 * If bypass is set, don't bother setting the other fields.
2674 ret = ufshcd_vops_crypto_req_setup(hba, lrbp, &cc_index, &enable, &dun);
2676 if (ret != -EAGAIN) {
2678 "%s: failed to setup crypto request (%d)\n",
2688 req_desc->header.dword_0 |= cc_index | UTRD_CRYPTO_ENABLE;
2689 req_desc->header.dword_1 = (u32)(dun & 0xFFFFFFFF);
2690 req_desc->header.dword_3 = (u32)((dun >> 32) & 0xFFFFFFFF);
2696 * ufshcd_prepare_req_desc_hdr() - Fill the request descriptor header
2697 * according to the request
2698 * @hba: per adapter instance
2699 * @lrbp: pointer to local reference block
2700 * @upiu_flags: flags required in the header
2701 * @cmd_dir: request's data direction
2703 static int ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba,
2704 struct ufshcd_lrb *lrbp, u32 *upiu_flags,
2705 enum dma_data_direction cmd_dir)
2707 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2711 if (cmd_dir == DMA_FROM_DEVICE) {
2712 data_direction = UTP_DEVICE_TO_HOST;
2713 *upiu_flags = UPIU_CMD_FLAGS_READ;
2714 } else if (cmd_dir == DMA_TO_DEVICE) {
2715 data_direction = UTP_HOST_TO_DEVICE;
2716 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2718 data_direction = UTP_NO_DATA_TRANSFER;
2719 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2722 dword_0 = data_direction | (lrbp->command_type
2723 << UPIU_COMMAND_TYPE_OFFSET);
2725 dword_0 |= UTP_REQ_DESC_INT_CMD;
2727 /* Transfer request descriptor header fields */
2728 req_desc->header.dword_0 = cpu_to_le32(dword_0);
2729 /* dword_1 is reserved, hence it is set to 0 */
2730 req_desc->header.dword_1 = 0;
2732 * assigning invalid value for command status. Controller
2733 * updates OCS on command completion, with the command
2736 req_desc->header.dword_2 =
2737 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2738 /* dword_3 is reserved, hence it is set to 0 */
2739 req_desc->header.dword_3 = 0;
2741 req_desc->prd_table_length = 0;
2743 if (ufshcd_is_crypto_supported(hba))
2744 return ufshcd_prepare_crypto_utrd(hba, lrbp);
2750 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
2752 * @lrbp: local reference block pointer
2753 * @upiu_flags: flags
2756 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
2758 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2759 unsigned short cdb_len;
2761 /* command descriptor fields */
2762 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2763 UPIU_TRANSACTION_COMMAND, upiu_flags,
2764 lrbp->lun, lrbp->task_tag);
2765 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2766 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2768 /* Total EHS length and Data segment length will be zero */
2769 ucd_req_ptr->header.dword_2 = 0;
2771 ucd_req_ptr->sc.exp_data_transfer_len =
2772 cpu_to_be32(lrbp->cmd->sdb.length);
2774 cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
2775 memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
2776 if (cdb_len < MAX_CDB_SIZE)
2777 memset(ucd_req_ptr->sc.cdb + cdb_len, 0,
2778 (MAX_CDB_SIZE - cdb_len));
2779 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2783 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
2786 * @lrbp: local reference block pointer
2787 * @upiu_flags: flags
2789 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2790 struct ufshcd_lrb *lrbp, u32 upiu_flags)
2792 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2793 struct ufs_query *query = &hba->dev_cmd.query;
2794 u16 len = be16_to_cpu(query->request.upiu_req.length);
2795 u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
2797 /* Query request header */
2798 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2799 UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2800 lrbp->lun, lrbp->task_tag);
2801 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2802 0, query->request.query_func, 0, 0);
2804 /* Data segment length */
2805 ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD(
2806 0, 0, len >> 8, (u8)len);
2808 /* Copy the Query Request buffer as is */
2809 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2812 /* Copy the Descriptor */
2813 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2814 memcpy(descp, query->descriptor, len);
2816 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2819 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2821 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2823 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2825 /* command descriptor fields */
2826 ucd_req_ptr->header.dword_0 =
2828 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
2829 /* clear rest of the fields of basic header */
2830 ucd_req_ptr->header.dword_1 = 0;
2831 ucd_req_ptr->header.dword_2 = 0;
2833 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2837 * ufshcd_compose_upiu - form UFS Protocol Information Unit (UPIU)
2838 * @hba: per adapter instance
2839 * @lrbp: pointer to local reference block
2841 static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2846 switch (lrbp->command_type) {
2847 case UTP_CMD_TYPE_SCSI:
2848 if (likely(lrbp->cmd)) {
2849 ret = ufshcd_prepare_req_desc_hdr(hba, lrbp,
2850 &upiu_flags, lrbp->cmd->sc_data_direction);
2851 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2856 case UTP_CMD_TYPE_DEV_MANAGE:
2857 ret = ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags,
2859 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2860 ufshcd_prepare_utp_query_req_upiu(
2861 hba, lrbp, upiu_flags);
2862 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2863 ufshcd_prepare_utp_nop_upiu(lrbp);
2867 case UTP_CMD_TYPE_UFS:
2868 /* For UFS native command implementation */
2870 dev_err(hba->dev, "%s: UFS native commands are not supported\n",
2875 dev_err(hba->dev, "%s: unknown command type: 0x%x\n",
2876 __func__, lrbp->command_type);
2878 } /* end of switch */
2884 * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
2885 * @scsi_lun: scsi LUN id
2887 * Returns UPIU LUN id
2889 static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
2891 if (scsi_is_wlun(scsi_lun))
2892 return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
2895 return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
2899 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2900 * @upiu_wlun_id: UPIU W-LUN id
2902 * Returns SCSI W-LUN id
2904 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2906 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
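/*
 * Worked example (assuming UFS_UPIU_WLUN_ID = 0x80 and SCSI_W_LUN_BASE
 * = 0xc100): the UPIU REPORT LUNS W-LUN 0x81 maps to SCSI W-LUN
 * (0x81 & ~0x80) | 0xc100 = 0xc101, and ufshcd_scsi_to_upiu_lun()
 * performs the reverse mapping.
 */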
2910 * ufshcd_get_write_lock - synchronize between shutdown, scaling &
2911 * arrival of requests
2914 * The lock is predominantly held by the shutdown context, ensuring
2915 * that no requests from any other context may sneak through.
2917 static inline void ufshcd_get_write_lock(struct ufs_hba *hba)
2919 down_write(&hba->lock);
2923 * ufshcd_get_read_lock - synchronize between shutdown, scaling &
2924 * arrival of requests
2927 * Returns 1 if acquired, < 0 on contention
2929 * After shutdown is initiated, allow only requests directed to the
2930 * well-known device LUN. The sync between scaling & issue is maintained
2931 * as is, and this restructuring syncs shutdown with these too.
2933 static int ufshcd_get_read_lock(struct ufs_hba *hba, u64 lun)
2937 err = down_read_trylock(&hba->lock);
2940 /* let requests for well known device lun to go through */
2941 if (ufshcd_scsi_to_upiu_lun(lun) == UFS_UPIU_UFS_DEVICE_WLUN)
2943 else if (!ufshcd_is_shutdown_ongoing(hba))
2953 * ufshcd_put_read_lock - synchronize between shutdown, scaling &
2954 * arrival of requests
2959 static inline void ufshcd_put_read_lock(struct ufs_hba *hba)
2961 up_read(&hba->lock);
2965 * ufshcd_queuecommand - main entry point for SCSI requests
2966 * @host: SCSI host pointer
2967 * @cmd: command from SCSI Midlayer
2969 * Returns 0 for success, non-zero in case of failure
2971 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2973 struct ufshcd_lrb *lrbp;
2974 struct ufs_hba *hba;
2975 unsigned long flags;
2978 bool has_read_lock = false;
2980 hba = shost_priv(host);
2982 if (!cmd || !cmd->request || !hba)
2985 tag = cmd->request->tag;
2986 if (!ufshcd_valid_tag(hba, tag)) {
2988 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
2989 __func__, tag, cmd, cmd->request);
2993 err = ufshcd_get_read_lock(hba, cmd->device->lun);
2994 if (unlikely(err < 0)) {
2995 if (err == -EPERM) {
2996 set_host_byte(cmd, DID_ERROR);
2997 cmd->scsi_done(cmd);
3001 return SCSI_MLQUEUE_HOST_BUSY;
3002 } else if (err == 1) {
3003 has_read_lock = true;
3006 spin_lock_irqsave(hba->host->host_lock, flags);
3008 /* if error handling is in progress, return host busy */
3009 if (ufshcd_eh_in_progress(hba)) {
3010 err = SCSI_MLQUEUE_HOST_BUSY;
3014 switch (hba->ufshcd_state) {
3015 case UFSHCD_STATE_OPERATIONAL:
3017 case UFSHCD_STATE_RESET:
3018 err = SCSI_MLQUEUE_HOST_BUSY;
3020 case UFSHCD_STATE_ERROR:
3021 set_host_byte(cmd, DID_ERROR);
3022 cmd->scsi_done(cmd);
3025 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
3026 __func__, hba->ufshcd_state);
3027 set_host_byte(cmd, DID_BAD_TARGET);
3028 cmd->scsi_done(cmd);
3031 spin_unlock_irqrestore(hba->host->host_lock, flags);
3033 hba->req_abort_count = 0;
3035 /* acquire the tag to make sure device cmds don't use it */
3036 if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
3038 * Dev manage command in progress, requeue the command.
3039 * Requeuing the command helps in cases where the request *may*
3040 * find different tag instead of waiting for dev manage command
3043 err = SCSI_MLQUEUE_HOST_BUSY;
3047 hba->ufs_stats.clk_hold.ctx = QUEUE_CMD;
3048 err = ufshcd_hold(hba, true);
3050 err = SCSI_MLQUEUE_HOST_BUSY;
3051 clear_bit_unlock(tag, &hba->lrb_in_use);
3054 if (ufshcd_is_clkgating_allowed(hba))
3055 WARN_ON(hba->clk_gating.state != CLKS_ON);
3057 err = ufshcd_hibern8_hold(hba, true);
3059 clear_bit_unlock(tag, &hba->lrb_in_use);
3060 err = SCSI_MLQUEUE_HOST_BUSY;
3061 hba->ufs_stats.clk_rel.ctx = QUEUE_CMD;
3062 ufshcd_release(hba, true);
3065 if (ufshcd_is_hibern8_on_idle_allowed(hba))
3066 WARN_ON(hba->hibern8_on_idle.state != HIBERN8_EXITED);
3068 /* Vote PM QoS for the request */
3069 ufshcd_vops_pm_qos_req_start(hba, cmd->request);
3071 /* IO svc time latency histogram */
3072 if (hba->latency_hist_enabled &&
3073 (cmd->request->cmd_type == REQ_TYPE_FS)) {
3074 cmd->request->lat_hist_io_start = ktime_get();
3075 cmd->request->lat_hist_enabled = 1;
3077 cmd->request->lat_hist_enabled = 0;
3080 WARN_ON(hba->clk_gating.state != CLKS_ON);
3082 lrbp = &hba->lrb[tag];
3086 lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
3087 lrbp->sense_buffer = cmd->sense_buffer;
3088 lrbp->task_tag = tag;
3089 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
3090 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
3091 lrbp->command_type = UTP_CMD_TYPE_SCSI;
3092 lrbp->req_abort_skip = false;
3094 /* form UPIU before issuing the command */
3095 err = ufshcd_compose_upiu(hba, lrbp);
3099 "%s: failed to compose upiu %d\n",
3103 clear_bit_unlock(tag, &hba->lrb_in_use);
3104 ufshcd_release_all(hba);
3105 ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
3109 err = ufshcd_map_sg(lrbp);
3112 clear_bit_unlock(tag, &hba->lrb_in_use);
3113 ufshcd_release_all(hba);
3114 ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
3118 err = ufshcd_vops_crypto_engine_cfg_start(hba, tag);
3122 "%s: failed to configure crypto engine %d\n",
3125 scsi_dma_unmap(lrbp->cmd);
3127 clear_bit_unlock(tag, &hba->lrb_in_use);
3128 ufshcd_release_all(hba);
3129 ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
3134 /* Make sure descriptors are ready before ringing the doorbell */
3136 /* issue command to the controller */
3137 spin_lock_irqsave(hba->host->host_lock, flags);
3139 err = ufshcd_send_command(hba, tag);
3141 spin_unlock_irqrestore(hba->host->host_lock, flags);
3142 scsi_dma_unmap(lrbp->cmd);
3144 clear_bit_unlock(tag, &hba->lrb_in_use);
3145 ufshcd_release_all(hba);
3146 ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
3147 ufshcd_vops_crypto_engine_cfg_end(hba, lrbp, cmd->request);
3148 dev_err(hba->dev, "%s: failed sending command, %d\n",
3155 spin_unlock_irqrestore(hba->host->host_lock, flags);
3158 ufshcd_put_read_lock(hba);
3162 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
3163 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
3166 lrbp->sense_bufflen = 0;
3167 lrbp->sense_buffer = NULL;
3168 lrbp->task_tag = tag;
3169 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
3170 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
3171 lrbp->intr_cmd = true; /* No interrupt aggregation */
3172 hba->dev_cmd.type = cmd_type;
3174 return ufshcd_compose_upiu(hba, lrbp);
3178 ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
3181 unsigned long flags;
3182 u32 mask = 1 << tag;
3184 /* clear outstanding transaction before retry */
3185 spin_lock_irqsave(hba->host->host_lock, flags);
3186 ufshcd_utrl_clear(hba, tag);
3187 spin_unlock_irqrestore(hba->host->host_lock, flags);
3190 * wait for the h/w to clear the corresponding bit in the door-bell.
3191 * max. wait is 1 sec.
3193 err = ufshcd_wait_for_register(hba,
3194 REG_UTP_TRANSFER_REQ_DOOR_BELL,
3195 mask, ~mask, 1000, 1000, true);
3201 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
3203 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
3205 /* Get the UPIU response */
3206 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
3207 UPIU_RSP_CODE_OFFSET;
3208 return query_res->response;
3212 * ufshcd_dev_cmd_completion() - handles device management command responses
3213 * @hba: per adapter instance
3214 * @lrbp: pointer to local reference block
3217 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
3222 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
3223 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
3226 case UPIU_TRANSACTION_NOP_IN:
3227 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
3229 dev_err(hba->dev, "%s: unexpected response %x\n",
3233 case UPIU_TRANSACTION_QUERY_RSP:
3234 err = ufshcd_check_query_response(hba, lrbp);
3236 err = ufshcd_copy_query_response(hba, lrbp);
3238 case UPIU_TRANSACTION_REJECT_UPIU:
3239 /* TODO: handle Reject UPIU Response */
3241 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
3246 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
3254 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
3255 struct ufshcd_lrb *lrbp, int max_timeout)
3258 unsigned long time_left;
3259 unsigned long flags;
3261 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
3262 msecs_to_jiffies(max_timeout));
3264 spin_lock_irqsave(hba->host->host_lock, flags);
3265 hba->dev_cmd.complete = NULL;
3266 if (likely(time_left)) {
3267 err = ufshcd_get_tr_ocs(lrbp);
3269 err = ufshcd_dev_cmd_completion(hba, lrbp);
3271 spin_unlock_irqrestore(hba->host->host_lock, flags);
3275 dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
3276 __func__, lrbp->task_tag);
3277 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
3278 /* successfully cleared the command, retry if needed */
3281 * in case of an error, after clearing the doorbell,
3282 * we also need to clear the outstanding_request
3285 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
3289 ufsdbg_set_err_state(hba);
3295 * ufshcd_get_dev_cmd_tag - Get device management command tag
3296 * @hba: per-adapter instance
3297 * @tag_out: pointer to variable with available slot value
3299 * Get a free slot and lock it until device management command
3302 * Returns false if free slot is unavailable for locking, else
3303 * return true with tag value in @tag_out.
3305 static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
3315 tmp = ~hba->lrb_in_use;
3316 tag = find_last_bit(&tmp, hba->nutrs);
3317 if (tag >= hba->nutrs)
3319 } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
3327 static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
3329 clear_bit_unlock(tag, &hba->lrb_in_use);
3333 * ufshcd_exec_dev_cmd - API for sending device management requests
3335 * @cmd_type: specifies the type (NOP, Query...)
3336 * @timeout: timeout in milliseconds
3338 * NOTE: Since there is only one available tag for device management commands,
3339 * it is expected that the caller holds the hba->dev_cmd.lock mutex.
3341 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
3342 enum dev_cmd_type cmd_type, int timeout)
3344 struct ufshcd_lrb *lrbp;
3347 struct completion wait;
3348 unsigned long flags;
3351 * May get invoked from shutdown and IOCTL contexts.
3352 * In shutdown context, it comes in with lock acquired.
3353 * In error recovery context, it may come with lock acquired.
3356 if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
3357 down_read(&hba->lock);
3360 * Get free slot, sleep if slots are unavailable.
3361 * Even though we use wait_event() which sleeps indefinitely,
3362 * the maximum wait time is bounded by SCSI request timeout.
3364 wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
3366 init_completion(&wait);
3367 lrbp = &hba->lrb[tag];
3369 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
3373 hba->dev_cmd.complete = &wait;
3375 /* Make sure descriptors are ready before ringing the doorbell */
3377 spin_lock_irqsave(hba->host->host_lock, flags);
3378 err = ufshcd_send_command(hba, tag);
3379 spin_unlock_irqrestore(hba->host->host_lock, flags);
3381 dev_err(hba->dev, "%s: failed sending command, %d\n",
3385 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
3388 ufshcd_put_dev_cmd_tag(hba, tag);
3389 wake_up(&hba->dev_cmd.tag_wq);
3390 if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
3391 up_read(&hba->lock);
3396 * ufshcd_init_query() - init the query response and request parameters
3397 * @hba: per-adapter instance
3398 * @request: address of the request pointer to be initialized
3399 * @response: address of the response pointer to be initialized
3400 * @opcode: operation to perform
3401 * @idn: flag idn to access
3402 * @index: LU number to access
3403 * @selector: query/flag/descriptor further identification
3405 static inline void ufshcd_init_query(struct ufs_hba *hba,
3406 struct ufs_query_req **request, struct ufs_query_res **response,
3407 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
3409 int idn_t = (int)idn;
3411 ufsdbg_error_inject_dispatcher(hba,
3412 ERR_INJECT_QUERY, idn_t, (int *)&idn_t);
3415 *request = &hba->dev_cmd.query.request;
3416 *response = &hba->dev_cmd.query.response;
3417 memset(*request, 0, sizeof(struct ufs_query_req));
3418 memset(*response, 0, sizeof(struct ufs_query_res));
3419 (*request)->upiu_req.opcode = opcode;
3420 (*request)->upiu_req.idn = idn;
3421 (*request)->upiu_req.index = index;
3422 (*request)->upiu_req.selector = selector;
3424 ufshcd_update_query_stats(hba, opcode, idn);
3427 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
3428 enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
3433 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
3434 ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
3437 "%s: failed with error %d, retries %d\n",
3438 __func__, ret, retries);
3445 "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
3446 __func__, opcode, idn, ret, retries);
3451 * ufshcd_query_flag() - API function for sending flag query requests
3452 * @hba: per-adapter instance
3453 * @opcode: flag query to perform
3454 * @idn: flag idn to access
3455 * @flag_res: the flag value after the query request completes
3457 * Returns 0 for success, non-zero in case of failure
3459 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
3460 enum flag_idn idn, bool *flag_res)
3462 struct ufs_query_req *request = NULL;
3463 struct ufs_query_res *response = NULL;
3464 int err, index = 0, selector = 0;
3465 int timeout = QUERY_REQ_TIMEOUT;
3469 ufshcd_hold_all(hba);
3470 mutex_lock(&hba->dev_cmd.lock);
3471 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3475 case UPIU_QUERY_OPCODE_SET_FLAG:
3476 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
3477 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
3478 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3480 case UPIU_QUERY_OPCODE_READ_FLAG:
3481 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3483 /* No dummy reads */
3484 dev_err(hba->dev, "%s: Invalid argument for read request\n",
3492 "%s: Expected query flag opcode but got = %d\n",
3498 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
3502 "%s: Sending flag query for idn %d failed, err = %d\n",
3503 __func__, request->upiu_req.idn, err);
3508 *flag_res = (be32_to_cpu(response->upiu_res.value) &
3509 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
3512 mutex_unlock(&hba->dev_cmd.lock);
3513 ufshcd_release_all(hba);
3516 EXPORT_SYMBOL(ufshcd_query_flag);
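/*
 * Usage sketch (illustrative; error handling trimmed): polling
 * fDeviceInit after device initialization could look like
 *	bool flag_res = true;
 *	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 */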
3519 * ufshcd_query_attr - API function for sending attribute requests
3520 * @hba: per-adapter instance
3521 * @opcode: attribute opcode
3522 * @idn: attribute idn to access
3523 * @index: index field
3524 * @selector: selector field
3525 * @attr_val: the attribute value after the query request completes
3527 * Returns 0 for success, non-zero in case of failure
3529 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
3530 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
3532 struct ufs_query_req *request = NULL;
3533 struct ufs_query_res *response = NULL;
3538 ufshcd_hold_all(hba);
3540 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
3546 mutex_lock(&hba->dev_cmd.lock);
3547 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3551 case UPIU_QUERY_OPCODE_WRITE_ATTR:
3552 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3553 request->upiu_req.value = cpu_to_be32(*attr_val);
3555 case UPIU_QUERY_OPCODE_READ_ATTR:
3556 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3559 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
3565 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3568 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3570 request->upiu_req.idn, index, err);
3574 *attr_val = be32_to_cpu(response->upiu_res.value);
3577 mutex_unlock(&hba->dev_cmd.lock);
3579 ufshcd_release_all(hba);
3582 EXPORT_SYMBOL(ufshcd_query_attr);
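/*
 * Usage sketch (illustrative; error handling trimmed): reading the
 * active ICC level could look like
 *	u32 icc_level;
 *	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *			QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
 */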
3585 * ufshcd_query_attr_retry() - API function for sending query
3586 * attribute with retries
3587 * @hba: per-adapter instance
3588 * @opcode: attribute opcode
3589 * @idn: attribute idn to access
3590 * @index: index field
3591 * @selector: selector field
3592 * @attr_val: the attribute value after the query request
3595 * Returns 0 for success, non-zero in case of failure
3597 static int ufshcd_query_attr_retry(struct ufs_hba *hba,
3598 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
3604 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3605 ret = ufshcd_query_attr(hba, opcode, idn, index,
3606 selector, attr_val);
3608 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
3609 __func__, ret, retries);
3616 "%s: query attribute, idn %d, failed with error %d after %d retires\n",
3617 __func__, idn, ret, retries);
3621 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
3622 enum query_opcode opcode, enum desc_idn idn, u8 index,
3623 u8 selector, u8 *desc_buf, int *buf_len)
3625 struct ufs_query_req *request = NULL;
3626 struct ufs_query_res *response = NULL;
3631 ufshcd_hold_all(hba);
3633 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
3639 if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
3640 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
3641 __func__, *buf_len);
3646 mutex_lock(&hba->dev_cmd.lock);
3647 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3649 hba->dev_cmd.query.descriptor = desc_buf;
3650 request->upiu_req.length = cpu_to_be16(*buf_len);
3653 case UPIU_QUERY_OPCODE_WRITE_DESC:
3654 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3656 case UPIU_QUERY_OPCODE_READ_DESC:
3657 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3661 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3667 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3670 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3672 request->upiu_req.idn, index, err);
3676 *buf_len = be16_to_cpu(response->upiu_res.length);
3679 hba->dev_cmd.query.descriptor = NULL;
3680 mutex_unlock(&hba->dev_cmd.lock);
3682 ufshcd_release_all(hba);
3687 * ufshcd_query_descriptor - API function for sending descriptor requests
3688 * @hba: per-adapter instance
3689 * @opcode: attribute opcode
3690 * @idn: attribute idn to access
3691 * @index: index field
3692 * @selector: selector field
3693 * @desc_buf: the buffer that contains the descriptor
3694 * @buf_len: length parameter passed to the device
3696 * Returns 0 for success, non-zero in case of failure.
3697 * The buf_len parameter will contain, on return, the length parameter
3698 * received on the response.
3700 int ufshcd_query_descriptor(struct ufs_hba *hba,
3701 enum query_opcode opcode, enum desc_idn idn, u8 index,
3702 u8 selector, u8 *desc_buf, int *buf_len)
3707 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3708 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3709 selector, desc_buf, buf_len);
3710 if (!err || err == -EINVAL)
3716 EXPORT_SYMBOL(ufshcd_query_descriptor);
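/*
 * Usage sketch (illustrative; the buffer-size constant may differ
 * between kernel versions): reading the device descriptor could look
 * like
 *	u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
 *	int buf_len = sizeof(desc_buf);
 *	err = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
 *			QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf, &buf_len);
 */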
3719 * ufshcd_read_desc_param - read the specified descriptor parameter
3720 * @hba: Pointer to adapter instance
3721 * @desc_id: descriptor idn value
3722 * @desc_index: descriptor index
3723 * @param_offset: offset of the parameter to read
3724 * @param_read_buf: pointer to buffer where parameter would be read
3725 * @param_size: sizeof(param_read_buf)
3727 * Return 0 in case of success, non-zero otherwise
3729 static int ufshcd_read_desc_param(struct ufs_hba *hba,
3730 enum desc_idn desc_id,
3739 bool is_kmalloc = true;
3742 if (desc_id >= QUERY_DESC_IDN_MAX)
3745 buff_len = ufs_query_desc_max_size[desc_id];
3746 if ((param_offset + param_size) > buff_len)
3749 if (!param_offset && (param_size == buff_len)) {
3750 /* memory space already available to hold full descriptor */
3751 desc_buf = param_read_buf;
3754 /* allocate memory to hold full descriptor */
3755 desc_buf = kmalloc(buff_len, GFP_KERNEL);
3760 ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
3761 desc_id, desc_index, 0, desc_buf,
3765 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
3766 __func__, desc_id, desc_index, param_offset, ret);
3772 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3773 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
3774 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3780 * While reading variable size descriptors (like string descriptor),
3781 * some UFS devices may report the "LENGTH" (field in "Transaction
3782 * Specific fields" of Query Response UPIU) same as what was requested
3783 * in Query Request UPIU instead of reporting the actual size of the
3784 * variable size descriptor.
3785 * It is safe to ignore the "LENGTH" field for variable size
3786 * descriptors as we can always derive the length of the descriptor from
3787 * the descriptor header fields. Hence we impose the length
3788 * match check only for fixed size descriptors (for which we always
3789 * request the correct size as part of Query Request UPIU).
3791 if ((desc_id != QUERY_DESC_IDN_STRING) &&
3792 (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
3793 dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
3794 __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
3800 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3807 static inline int ufshcd_read_desc(struct ufs_hba *hba,
3808 enum desc_idn desc_id,
3813 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
3816 static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
3820 return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
3823 int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
3825 return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
3829 * ufshcd_read_string_desc - read string descriptor
3830 * @hba: pointer to adapter instance
3831 * @desc_index: descriptor index
3832 * @buf: pointer to buffer where descriptor would be read
3833 * @size: size of buf
3834 * @ascii: if true convert from unicode to ascii characters
3836 * Return 0 in case of success, non-zero otherwise
3838 int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
3839 u32 size, bool ascii)
3843 err = ufshcd_read_desc(hba,
3844 QUERY_DESC_IDN_STRING, desc_index, buf, size);
3847 dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
3848 __func__, QUERY_REQ_RETRIES, err);
3859 /* remove header and divide by 2 to move from UTF16 to UTF8 */
3860 ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3861 if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
3862 dev_err(hba->dev, "%s: buffer allocated size is too small\n",
3868 buff_ascii = kzalloc(ascii_len, GFP_KERNEL);
3870 dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
3871 __func__, ascii_len);
3877 * the descriptor contains a string in UTF-16 format;
3878 * we need to convert it to UTF-8 so it can be displayed
3880 utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
3881 desc_len - QUERY_DESC_HDR_SIZE,
3882 UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
3884 /* replace non-printable or non-ASCII characters with spaces */
3885 for (i = 0; i < ascii_len; i++)
3886 ufshcd_remove_non_printable(&buff_ascii[i]);
3888 memset(buf + QUERY_DESC_HDR_SIZE, 0,
3889 size - QUERY_DESC_HDR_SIZE);
3890 memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
3891 buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
3900 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3901 * @hba: Pointer to adapter instance
3903 * @param_offset: offset of the parameter to read
3904 * @param_read_buf: pointer to buffer where parameter would be read
3905 * @param_size: sizeof(param_read_buf)
3907 * Return 0 in case of success, non-zero otherwise
3909 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3911 enum unit_desc_param param_offset,
3916 * Unit descriptors are only available for general purpose LUs (LUN id
3917 * from 0 to 7) and RPMB Well known LU.
3919 if (!ufs_is_valid_unit_desc_lun(lun))
3922 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3923 param_offset, param_read_buf, param_size);
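/*
 * Usage sketch (illustrative): reading bLUEnable of LUN 0 could look
 * like
 *	u8 lu_enable;
 *	err = ufshcd_read_unit_desc_param(hba, 0, UNIT_DESC_PARAM_LU_ENABLE,
 *					  &lu_enable, sizeof(lu_enable));
 */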
3927 * ufshcd_memory_alloc - allocate memory for host memory space data structures
3928 * @hba: per adapter instance
3930 * 1. Allocate DMA memory for Command Descriptor array
3931 * Each command descriptor consist of Command UPIU, Response UPIU and PRDT
3932 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3933 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3935 * 4. Allocate memory for local reference block(lrb).
3937 * Returns 0 for success, non-zero in case of failure
3939 static int ufshcd_memory_alloc(struct ufs_hba *hba)
3941 size_t utmrdl_size, utrdl_size, ucdl_size;
3943 /* Allocate memory for UTP command descriptors */
3944 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
3945 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3947 &hba->ucdl_dma_addr,
3951 * UFSHCI requires UTP command descriptors to be 128 byte aligned.
3952 * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE;
3953 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
3954 * be aligned to 128 bytes as well.
3956 if (!hba->ucdl_base_addr ||
3957 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3959 "Command Descriptor Memory allocation failed\n");
3964 * Allocate memory for UTP Transfer descriptors
3965 * UFSHCI requires 1024 byte alignment of UTRD
3967 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3968 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3970 &hba->utrdl_dma_addr,
3972 if (!hba->utrdl_base_addr ||
3973 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3975 "Transfer Descriptor Memory allocation failed\n");
3980 * Allocate memory for UTP Task Management descriptors
3981 * UFSHCI requires 1024 byte alignment of UTMRD
3983 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3984 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3986 &hba->utmrdl_dma_addr,
3988 if (!hba->utmrdl_base_addr ||
3989 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3991 "Task Management Descriptor Memory allocation failed\n");
3995 /* Allocate memory for local reference block */
3996 hba->lrb = devm_kzalloc(hba->dev,
3997 hba->nutrs * sizeof(struct ufshcd_lrb),
4000 dev_err(hba->dev, "LRB Memory allocation failed\n");
4009 * ufshcd_host_memory_configure - configure local reference block with
4011 * @hba: per adapter instance
4013 * Configure Host memory space
4014 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
4016 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
4018 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
4019 * into local reference block.
4021 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
4023 struct utp_transfer_cmd_desc *cmd_descp;
4024 struct utp_transfer_req_desc *utrdlp;
4025 dma_addr_t cmd_desc_dma_addr;
4026 dma_addr_t cmd_desc_element_addr;
4027 u16 response_offset;
4032 utrdlp = hba->utrdl_base_addr;
4033 cmd_descp = hba->ucdl_base_addr;
4036 offsetof(struct utp_transfer_cmd_desc, response_upiu);
4038 offsetof(struct utp_transfer_cmd_desc, prd_table);
4040 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
4041 cmd_desc_dma_addr = hba->ucdl_dma_addr;
4043 for (i = 0; i < hba->nutrs; i++) {
4044 /* Configure UTRD with command descriptor base address */
4045 cmd_desc_element_addr =
4046 (cmd_desc_dma_addr + (cmd_desc_size * i));
4047 utrdlp[i].command_desc_base_addr_lo =
4048 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
4049 utrdlp[i].command_desc_base_addr_hi =
4050 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
4052 /* Response upiu and prdt offset should be in double words */
4053 utrdlp[i].response_upiu_offset =
4054 cpu_to_le16((response_offset >> 2));
4055 utrdlp[i].prd_table_offset =
4056 cpu_to_le16((prdt_offset >> 2));
4057 utrdlp[i].response_upiu_length =
4058 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
4060 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
4061 hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
4062 (i * sizeof(struct utp_transfer_req_desc));
4063 hba->lrb[i].ucd_req_ptr =
4064 (struct utp_upiu_req *)(cmd_descp + i);
4065 hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
4066 hba->lrb[i].ucd_rsp_ptr =
4067 (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
4068 hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
4070 hba->lrb[i].ucd_prdt_ptr =
4071 (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
4072 hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
4078 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
4079 * @hba: per adapter instance
4081 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
4082 * in order to initialize the Unipro link startup procedure.
4083 * Once the Unipro links are up, the device connected to the controller is also discovered.
4086 * Returns 0 on success, non-zero value on failure
4088 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
4090 struct uic_command uic_cmd = {0};
4093 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
4095 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4098 "dme-link-startup: error code %d\n", ret);
4102 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
4104 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
4105 unsigned long min_sleep_time_us;
4107 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
4111 * last_dme_cmd_tstamp will be 0 only for 1st call to
4114 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
4115 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
4117 unsigned long delta =
4118 (unsigned long) ktime_to_us(
4119 ktime_sub(ktime_get(),
4120 hba->last_dme_cmd_tstamp));
4122 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
4124 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
4126 return; /* no more delay required */
4129 /* allow sleep for extra 50us if needed */
4130 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
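/*
 * Worked example (numbers assumed): with the quirk set and the last
 * DME command issued 400us ago, delta = 400 < 1000, so the code above
 * sleeps for the remaining ~600us (usleep_range(600, 650)).
 */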
4133 static inline void ufshcd_save_tstamp_of_last_dme_cmd(
4134 struct ufs_hba *hba)
4136 if (hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)
4137 hba->last_dme_cmd_tstamp = ktime_get();
4141 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
4142 * @hba: per adapter instance
4143 * @attr_sel: uic command argument1
4144 * @attr_set: attribute set type as uic command argument2
4145 * @mib_val: setting value as uic command argument3
4146 * @peer: indicate whether peer or local
4148 * Returns 0 on success, non-zero value on failure
4150 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
4151 u8 attr_set, u32 mib_val, u8 peer)
4153 struct uic_command uic_cmd = {0};
4154 static const char *const action[] = {
4158 const char *set = action[!!peer];
4160 int retries = UFS_UIC_COMMAND_RETRIES;
4162 ufsdbg_error_inject_dispatcher(hba,
4163 ERR_INJECT_DME_ATTR, attr_sel, &attr_sel);
4165 uic_cmd.command = peer ?
4166 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
4167 uic_cmd.argument1 = attr_sel;
4168 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
4169 uic_cmd.argument3 = mib_val;
4172 /* for peer attributes we retry upon failure */
4173 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4175 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
4176 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
4177 } while (ret && peer && --retries);
4180 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
4181 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
4182 UFS_UIC_COMMAND_RETRIES - retries);
4186 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
4189 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
4190 * @hba: per adapter instance
4191 * @attr_sel: uic command argument1
4192 * @mib_val: the value of the attribute as returned by the UIC command
4193 * @peer: indicate whether peer or local
4195 * Returns 0 on success, non-zero value on failure
4197 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
4198 u32 *mib_val, u8 peer)
4200 struct uic_command uic_cmd = {0};
4201 static const char *const action[] = {
4205 const char *get = action[!!peer];
4207 int retries = UFS_UIC_COMMAND_RETRIES;
4208 struct ufs_pa_layer_attr orig_pwr_info;
4209 struct ufs_pa_layer_attr temp_pwr_info;
4210 bool pwr_mode_change = false;
4212 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
4213 orig_pwr_info = hba->pwr_info;
4214 temp_pwr_info = orig_pwr_info;
4216 if (orig_pwr_info.pwr_tx == FAST_MODE ||
4217 orig_pwr_info.pwr_rx == FAST_MODE) {
4218 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
4219 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
4220 pwr_mode_change = true;
4221 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
4222 orig_pwr_info.pwr_rx == SLOW_MODE) {
4223 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
4224 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
4225 pwr_mode_change = true;
4227 if (pwr_mode_change) {
4228 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
4234 uic_cmd.command = peer ?
4235 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
4237 ufsdbg_error_inject_dispatcher(hba,
4238 ERR_INJECT_DME_ATTR, attr_sel, &attr_sel);
4240 uic_cmd.argument1 = attr_sel;
4242 do {
4243 /* for peer attributes we retry upon failure */
4244 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4245 if (ret)
4246 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
4247 get, UIC_GET_ATTR_ID(attr_sel), ret);
4248 } while (ret && peer && --retries);
4250 if (ret)
4251 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
4252 get, UIC_GET_ATTR_ID(attr_sel),
4253 UFS_UIC_COMMAND_RETRIES - retries);
4255 if (mib_val && !ret)
4256 *mib_val = uic_cmd.argument3;
4258 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
4259 && pwr_mode_change)
4260 ufshcd_change_power_mode(hba, &orig_pwr_info);
4261 out:
4262 return ret;
4263 }
4264 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
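/*
 * Note on UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE above: some hosts cannot
 * reliably read peer attributes while the link runs in FAST/SLOW mode, so
 * the code temporarily drops to the matching *AUTO mode for the
 * DME_PEER_GET and restores the original power mode afterwards. A hedged
 * sketch of the wrapper most callers use (declared in ufshcd.h):
 *
 *	u32 granularity;
 *	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
 *				  &granularity);
 */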
4267 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
4268 * state) and waits for it to take effect.
4270 * @hba: per adapter instance
4271 * @cmd: UIC command to execute
4273 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
4274 * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
4275 * and the device UniPro link, hence their final completion is indicated by
4276 * dedicated status bits in the Interrupt Status register (UPMS, UHES, UHXS) in
4277 * addition to the normal UIC command completion status (UCCS). This function
4278 * only returns after the relevant status bits indicate completion.
4280 * Returns 0 on success, non-zero value on failure
4282 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
4283 {
4284 struct completion uic_async_done;
4285 unsigned long flags;
4286 u8 status;
4287 int ret;
4288 bool reenable_intr = false;
4290 mutex_lock(&hba->uic_cmd_mutex);
4291 init_completion(&uic_async_done);
4292 ufshcd_add_delay_before_dme_cmd(hba);
4294 spin_lock_irqsave(hba->host->host_lock, flags);
4295 hba->uic_async_done = &uic_async_done;
4296 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
4297 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
4298 /*
4299 * Make sure UIC command completion interrupt is disabled before
4300 * issuing UIC command.
4301 */
4302 wmb();
4303 reenable_intr = true;
4304 }
4305 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
4306 spin_unlock_irqrestore(hba->host->host_lock, flags);
4309 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
4310 cmd->command, cmd->argument3, ret);
4314 if (!wait_for_completion_timeout(hba->uic_async_done,
4315 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
4317 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
4318 cmd->command, cmd->argument3);
4323 status = ufshcd_get_upmcrs(hba);
4324 if (status != PWR_LOCAL) {
4326 "pwr ctrl cmd 0x%0x failed, host umpcrs:0x%x\n",
4327 cmd->command, status);
4328 ret = (status != PWR_OK) ? status : -1;
4330 ufshcd_dme_cmd_log(hba, "cmp2", hba->active_uic_cmd->command);
4332 out:
4333 if (ret) {
4334 ufsdbg_set_err_state(hba);
4335 ufshcd_print_host_state(hba);
4336 ufshcd_print_pwr_info(hba);
4337 ufshcd_print_host_regs(hba);
4338 ufshcd_print_cmd_log(hba);
4339 }
4341 ufshcd_save_tstamp_of_last_dme_cmd(hba);
4342 spin_lock_irqsave(hba->host->host_lock, flags);
4343 hba->active_uic_cmd = NULL;
4344 hba->uic_async_done = NULL;
4345 if (reenable_intr)
4346 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
4347 spin_unlock_irqrestore(hba->host->host_lock, flags);
4348 mutex_unlock(&hba->uic_cmd_mutex);
4350 return ret;
4351 }
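/*
 * Flow summary for ufshcd_uic_pwr_ctrl(): the UIC command completion
 * interrupt is masked so completion of a power-mode-affecting command is
 * observed only via uic_async_done, which the ISR completes on the
 * UPMS/UHES/UHXS status bits. A minimal caller sketch:
 *
 *	struct uic_command uic_cmd = {0};
 *
 *	uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
 *	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
 */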
4352 int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us)
4353 {
4354 unsigned long flags;
4355 int ret = 0;
4356 u32 tm_doorbell;
4357 u32 tr_doorbell;
4358 bool timeout = false, do_last_check = false;
4359 ktime_t start;
4361 ufshcd_hold_all(hba);
4362 spin_lock_irqsave(hba->host->host_lock, flags);
4363 /*
4364 * Wait for all the outstanding tasks/transfer requests.
4365 * Verify by checking the doorbell registers are clear.
4366 */
4367 start = ktime_get();
4368 do {
4369 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
4370 ret = -EBUSY;
4371 goto out;
4372 }
4374 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
4375 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
4376 if (!tm_doorbell && !tr_doorbell) {
4377 timeout = false;
4378 break;
4379 } else if (do_last_check) {
4380 break;
4381 }
4383 spin_unlock_irqrestore(hba->host->host_lock, flags);
4384 schedule();
4385 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
4386 wait_timeout_us) {
4387 timeout = true;
4388 /*
4389 * We might have scheduled out for long time so make
4390 * sure to check if doorbells are cleared by this time
4391 * or not.
4392 */
4393 do_last_check = true;
4394 }
4395 spin_lock_irqsave(hba->host->host_lock, flags);
4396 } while (tm_doorbell || tr_doorbell);
4400 "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
4401 __func__, tm_doorbell, tr_doorbell);
4405 spin_unlock_irqrestore(hba->host->host_lock, flags);
4406 ufshcd_release_all(hba);
4407 return ret;
4408 }
4410 /**
4411 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
4412 * using DME_SET primitives.
4413 * @hba: per adapter instance
4414 * @mode: power mode value
4416 * Returns 0 on success, non-zero value on failure
4417 */
4418 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
4420 struct uic_command uic_cmd = {0};
4423 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
4424 ret = ufshcd_dme_set(hba,
4425 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
4426 if (ret) {
4427 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
4428 __func__, ret);
4429 goto out;
4430 }
4431 }
4433 uic_cmd.command = UIC_CMD_DME_SET;
4434 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
4435 uic_cmd.argument3 = mode;
4436 hba->ufs_stats.clk_hold.ctx = PWRCTL_CMD_SEND;
4437 ufshcd_hold_all(hba);
4438 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4439 hba->ufs_stats.clk_rel.ctx = PWRCTL_CMD_SEND;
4440 ufshcd_release_all(hba);
4441 out:
4442 return ret;
4443 }
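/*
 * Note: the value written to PA_PWRMODE packs both directions into one
 * byte - bits [7:4] carry the RX mode and bits [3:0] the TX mode - which
 * is why ufshcd_change_power_mode() passes (pwr_rx << 4) | pwr_tx. For
 * example, FAST_MODE (0x1) in both directions yields mode == 0x11.
 */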
4445 static int ufshcd_link_recovery(struct ufs_hba *hba)
4446 {
4447 int ret = 0;
4448 unsigned long flags;
4450 /*
4451 * Check if there is any race with fatal error handling.
4452 * If so, wait for it to complete. Even though fatal error
4453 * handling does reset and restore in some cases, don't assume
4454 * anything out of it. We are just avoiding the race here.
4455 */
4456 do {
4457 spin_lock_irqsave(hba->host->host_lock, flags);
4458 if (!(work_pending(&hba->eh_work) ||
4459 hba->ufshcd_state == UFSHCD_STATE_RESET))
4460 break;
4461 spin_unlock_irqrestore(hba->host->host_lock, flags);
4462 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
4463 flush_work(&hba->eh_work);
4464 } while (1);
4467 /*
4468 * we don't know if previous reset had really reset the host controller
4469 * or not. So let's force a reset here to be sure.
4470 */
4471 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4472 hba->force_host_reset = true;
4473 schedule_work(&hba->eh_work);
4475 /* wait for the reset work to finish */
4476 do {
4477 if (!(work_pending(&hba->eh_work) ||
4478 hba->ufshcd_state == UFSHCD_STATE_RESET))
4479 break;
4480 spin_unlock_irqrestore(hba->host->host_lock, flags);
4481 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
4482 flush_work(&hba->eh_work);
4483 spin_lock_irqsave(hba->host->host_lock, flags);
4484 } while (1);
4486 if (!((hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) &&
4487 ufshcd_is_link_active(hba)))
4488 ret = -ENOLINK;
4489 spin_unlock_irqrestore(hba->host->host_lock, flags);
4491 return ret;
4492 }
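/*
 * Recovery note: ufshcd_link_recovery() never resets the controller
 * itself; it forces the state to ERROR, schedules eh_work and waits for
 * that handler, keeping all reset/restore logic in one place. Callers see
 * -ENOLINK only when the link is still down afterwards.
 */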
4494 static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
4495 {
4496 int ret;
4497 struct uic_command uic_cmd = {0};
4498 ktime_t start = ktime_get();
4500 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
4501 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4502 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
4503 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4505 /*
4506 * Do full reinit if enter failed or if LINERESET was detected during
4507 * the Hibern8 operation. After LINERESET, the link moves to default
4508 * PWM-G1 mode, hence a full reinit is required to move it back to HS.
4509 */
4510 if (ret || hba->full_init_linereset) {
4511 int err;
4513 hba->full_init_linereset = false;
4514 ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_ENTER);
4515 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d",
4516 __func__, ret);
4517 /*
4518 * If link recovery fails then return the error code (-ENOLINK)
4519 * returned by ufshcd_link_recovery().
4520 * If link recovery succeeds then return -EAGAIN to attempt
4521 * hibern8 enter retry again.
4522 */
4523 err = ufshcd_link_recovery(hba);
4524 if (err) {
4525 dev_err(hba->dev, "%s: link recovery failed", __func__);
4526 ret = err;
4527 } else {
4528 ret = -EAGAIN;
4529 }
4530 } else {
4531 dev_dbg(hba->dev, "%s: Hibern8 Enter at %lld us", __func__,
4532 ktime_to_us(ktime_get()));
4533 }
4535 return ret;
4536 }
4538 int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
4539 {
4540 int ret = 0, retries;
4542 for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
4543 ret = __ufshcd_uic_hibern8_enter(hba);
4544 if (!ret)
4545 goto out;
4546 else if (ret != -EAGAIN)
4547 /* Unable to recover the link, so no point proceeding */
4548 BUG();
4549 }
4550 out:
4551 return ret;
4552 }
4554 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
4555 {
4556 struct uic_command uic_cmd = {0};
4557 int ret;
4558 ktime_t start = ktime_get();
4560 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
4561 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4562 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
4563 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4565 /* Do full reinit if exit failed */
4566 if (ret) {
4567 ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_EXIT);
4568 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d",
4569 __func__, ret);
4570 ret = ufshcd_link_recovery(hba);
4571 /* Unable to recover the link, so no point proceeding */
4572 if (ret)
4573 BUG();
4574 } else {
4575 dev_dbg(hba->dev, "%s: Hibern8 Exit at %lld us", __func__,
4576 ktime_to_us(ktime_get()));
4577 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
4578 hba->ufs_stats.hibern8_exit_cnt++;
4579 }
4581 return ret;
4582 }
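/*
 * Annotation: hibern8 enter is retried up to UIC_HIBERN8_ENTER_RETRIES
 * times because __ufshcd_uic_hibern8_enter() maps a successful link
 * recovery to -EAGAIN, while exit is attempted once and falls back to
 * link recovery directly. last_hibern8_exit_tstamp lets later completions
 * be correlated with a recent hibern8 exit.
 */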
4585 * ufshcd_init_pwr_info - setting the POR (power on reset)
4586 * values in hba power info
4587 * @hba: per-adapter instance
4589 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
4590 {
4591 hba->pwr_info.gear_rx = UFS_PWM_G1;
4592 hba->pwr_info.gear_tx = UFS_PWM_G1;
4593 hba->pwr_info.lane_rx = 1;
4594 hba->pwr_info.lane_tx = 1;
4595 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
4596 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
4597 hba->pwr_info.hs_rate = 0;
4598 }
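/*
 * Annotation: these are the UniPro power-on-reset defaults - one lane per
 * direction in PWM gear 1, SLOWAUTO mode, hs_rate = 0 meaning no HS rate
 * series negotiated yet. Link startup relies on this as the known-good
 * state right after link bring-up.
 */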
4601 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
4602 * @hba: per-adapter instance
4604 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
4605 {
4606 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4608 if (hba->max_pwr_info.is_valid)
4609 return 0;
4611 pwr_info->pwr_tx = FAST_MODE;
4612 pwr_info->pwr_rx = FAST_MODE;
4613 pwr_info->hs_rate = PA_HS_MODE_B;
4615 /* Get the connected lane count */
4616 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4617 &pwr_info->lane_rx);
4618 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4619 &pwr_info->lane_tx);
4621 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4622 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4630 * First, get the maximum gears of HS speed.
4631 * If a zero value, it means there is no HSGEAR capability.
4632 * Then, get the maximum gears of PWM speed.
4634 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4635 if (!pwr_info->gear_rx) {
4636 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4637 &pwr_info->gear_rx);
4638 if (!pwr_info->gear_rx) {
4639 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4640 __func__, pwr_info->gear_rx);
4643 pwr_info->pwr_rx = SLOW_MODE;
4646 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4647 &pwr_info->gear_tx);
4648 if (!pwr_info->gear_tx) {
4649 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4650 &pwr_info->gear_tx);
4651 if (!pwr_info->gear_tx) {
4652 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4653 __func__, pwr_info->gear_tx);
4656 pwr_info->pwr_tx = SLOW_MODE;
4659 hba->max_pwr_info.is_valid = true;
4660 return 0;
4661 }
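/*
 * Annotation: the TX capability is read from the peer's RX attributes
 * (DME_PEER_GET of PA_MAXRXHSGEAR/PA_MAXRXPWMGEAR) because what the host
 * may transmit is bounded by what the device can receive. A zero HS gear
 * means no HS capability, so the code falls back to the PWM gear and
 * SLOW_MODE for that direction.
 */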
4663 int ufshcd_change_power_mode(struct ufs_hba *hba,
4664 struct ufs_pa_layer_attr *pwr_mode)
4665 {
4666 int ret = 0;
4668 /* if already configured to the requested pwr_mode */
4669 if (!hba->restore_needed &&
4670 pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4671 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4672 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4673 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4674 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4675 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4676 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4677 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4681 ufsdbg_error_inject_dispatcher(hba, ERR_INJECT_PWR_CHANGE, 0, &ret);
4682 if (ret)
4683 return ret;
4685 /*
4686 * Configure the attributes required for the power mode change:
4687 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4688 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4689 * - PA_HSSERIES
4690 */
4691 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4692 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4693 pwr_mode->lane_rx);
4694 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4695 pwr_mode->pwr_rx == FAST_MODE)
4696 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
4697 else
4698 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
4700 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4701 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4702 pwr_mode->lane_tx);
4703 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4704 pwr_mode->pwr_tx == FAST_MODE)
4705 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
4706 else
4707 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
4709 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4710 pwr_mode->pwr_tx == FASTAUTO_MODE ||
4711 pwr_mode->pwr_rx == FAST_MODE ||
4712 pwr_mode->pwr_tx == FAST_MODE)
4713 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4714 pwr_mode->hs_rate);
4716 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4717 DL_FC0ProtectionTimeOutVal_Default);
4718 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4719 DL_TC0ReplayTimeOutVal_Default);
4720 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4721 DL_AFC0ReqTimeOutVal_Default);
4723 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4724 DL_FC0ProtectionTimeOutVal_Default);
4725 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4726 DL_TC0ReplayTimeOutVal_Default);
4727 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4728 DL_AFC0ReqTimeOutVal_Default);
4730 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4731 | pwr_mode->pwr_tx);
4733 if (ret) {
4734 ufshcd_update_error_stats(hba, UFS_ERR_POWER_MODE_CHANGE);
4735 dev_err(hba->dev,
4736 "%s: power mode change failed %d\n", __func__, ret);
4737 } else {
4738 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4739 pwr_mode);
4741 memcpy(&hba->pwr_info, pwr_mode,
4742 sizeof(struct ufs_pa_layer_attr));
4743 hba->ufs_stats.power_mode_change_cnt++;
4744 }
4746 return ret;
4747 }
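/*
 * Annotation: the PA_PWRMODEUSERDATA0..2 and DME_Local* writes above
 * program the DL layer FC0/TC0/AFC0 timeout values before the switch;
 * the transition itself is only triggered by the final PA_PWRMODE write
 * in ufshcd_uic_change_pwr_mode(), so the ordering here matters.
 */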
4750 * ufshcd_config_pwr_mode - configure a new power mode
4751 * @hba: per-adapter instance
4752 * @desired_pwr_mode: desired power configuration
4754 static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4755 struct ufs_pa_layer_attr *desired_pwr_mode)
4756 {
4757 struct ufs_pa_layer_attr final_params = { 0 };
4758 int ret;
4760 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4761 desired_pwr_mode, &final_params);
4763 if (ret)
4764 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4766 ret = ufshcd_change_power_mode(hba, &final_params);
4767 if (!ret)
4768 ufshcd_print_pwr_info(hba);
4770 return ret;
4771 }
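/*
 * Usage sketch (illustrative, target gear assumed): scaling paths build
 * the desired ufs_pa_layer_attr and hand it to ufshcd_config_pwr_mode(),
 * letting the variant ops adjust it via the PRE_CHANGE notify:
 *
 *	struct ufs_pa_layer_attr new_pwr_info = hba->pwr_info;
 *
 *	new_pwr_info.gear_tx = UFS_HS_G2;
 *	new_pwr_info.gear_rx = UFS_HS_G2;
 *	ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
 */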
4774 * ufshcd_complete_dev_init() - checks device readiness
4775 * @hba: per-adapter instance
4777 * Set fDeviceInit flag and poll until device toggles it.
4778 */
4779 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4780 {
4781 int i;
4782 int err;
4783 bool flag_res = 1;
4785 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4786 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
4789 "%s setting fDeviceInit flag failed with error %d\n",
4794 /* poll for max. 1000 iterations for fDeviceInit flag to clear */
4795 for (i = 0; i < 1000 && !err && flag_res; i++)
4796 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4797 QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
4801 "%s reading fDeviceInit flag failed with error %d\n",
4805 "%s fDeviceInit was not cleared by the device\n",
4813 * ufshcd_make_hba_operational - Make UFS controller operational
4814 * @hba: per adapter instance
4816 * To bring UFS host controller to operational state,
4817 * 1. Enable required interrupts
4818 * 2. Configure interrupt aggregation
4819 * 3. Program UTRL and UTMRL base address
4820 * 4. Configure run-stop-registers
4822 * Returns 0 on success, non-zero value on failure
4824 static int ufshcd_make_hba_operational(struct ufs_hba *hba)
4825 {
4826 int err = 0;
4827 u32 reg;
4829 /* Enable required interrupts */
4830 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4832 /* Configure interrupt aggregation */
4833 if (ufshcd_is_intr_aggr_allowed(hba))
4834 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4836 ufshcd_disable_intr_aggr(hba);
4838 /* Configure UTRL and UTMRL base address registers */
4839 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4840 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4841 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4842 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4843 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4844 REG_UTP_TASK_REQ_LIST_BASE_L);
4845 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4846 REG_UTP_TASK_REQ_LIST_BASE_H);
4848 /*
4849 * Make sure base address and interrupt setup are updated before
4850 * enabling the run/stop registers below.
4851 */
4852 wmb();
4854 /*
4855 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
4856 */
4857 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4858 if (!(ufshcd_get_lists_status(reg))) {
4859 ufshcd_enable_run_stop_reg(hba);
4862 "Host controller not ready to process requests");
4872 * ufshcd_hba_stop - Send controller to reset state
4873 * @hba: per adapter instance
4874 * @can_sleep: perform sleep or just spin
4876 static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
4877 {
4878 int err;
4880 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
4881 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4882 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4883 10, 1, can_sleep);
4884 if (err)
4885 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4886 }
4889 * ufshcd_hba_enable - initialize the controller
4890 * @hba: per adapter instance
4892 * The controller resets itself and controller firmware initialization
4893 * sequence kicks off. When controller is ready it will set
4894 * the Host Controller Enable bit to 1.
4896 * Returns 0 on success, non-zero value on failure
4898 static int ufshcd_hba_enable(struct ufs_hba *hba)
4899 {
4900 int retry;
4902 /*
4903 * msleep of 1 and 5 used in this function might result in msleep(20),
4904 * but it was necessary to send the UFS FPGA to reset mode during
4905 * development and testing of this driver. msleep can be changed to
4906 * mdelay and retry count can be reduced based on the controller.
4908 if (!ufshcd_is_hba_active(hba))
4909 /* change controller state to "reset state" */
4910 ufshcd_hba_stop(hba, true);
4912 /* UniPro link is disabled at this point */
4913 ufshcd_set_link_off(hba);
4915 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4917 /* start controller initialization sequence */
4918 ufshcd_hba_start(hba);
4921 * To initialize a UFS host controller HCE bit must be set to 1.
4922 * During initialization the HCE bit value changes from 1->0->1.
4923 * When the host controller completes initialization sequence
4924 * it sets the value of HCE bit to 1. The same HCE bit is read back
4925 * to check if the controller has completed initialization sequence.
4926 * So without this delay the value HCE = 1, set in the previous
4927 * instruction might be read back.
4928 * This delay can be changed based on the controller.
4932 /* wait for the host controller to complete initialization */
4933 retry = 10;
4934 while (ufshcd_is_hba_active(hba)) {
4935 if (retry) {
4936 retry--;
4937 } else {
4938 dev_err(hba->dev,
4939 "Controller enable failed\n");
4940 return -EIO;
4941 }
4942 msleep(5);
4943 }
4945 /* enable UIC related interrupts */
4946 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4948 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4950 return 0;
4951 }
4953 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4954 {
4955 int tx_lanes, i, err = 0;
4957 if (!peer)
4958 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4959 &tx_lanes);
4960 else
4961 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4962 &tx_lanes);
4963 for (i = 0; i < tx_lanes; i++) {
4964 if (!peer)
4965 err = ufshcd_dme_set(hba,
4966 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4967 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4968 0);
4969 else
4970 err = ufshcd_dme_peer_set(hba,
4971 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4972 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4973 0);
4974 if (err) {
4975 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4976 __func__, peer, i, err);
4977 break;
4978 }
4979 }
4981 return err;
4982 }
4984 static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
4985 {
4986 return ufshcd_disable_tx_lcc(hba, false);
4987 }
4989 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4990 {
4991 return ufshcd_disable_tx_lcc(hba, true);
4992 }
4995 * ufshcd_link_startup - Initialize unipro link startup
4996 * @hba: per adapter instance
4998 * Returns 0 for success, non-zero in case of failure
5000 static int ufshcd_link_startup(struct ufs_hba *hba)
5001 {
5002 int ret;
5003 int retries = DME_LINKSTARTUP_RETRIES;
5004 bool link_startup_again = false;
5006 /*
5007 * If UFS device isn't active then we will have to issue link startup
5008 * 2 times to make sure the device state moves to active.
5009 */
5010 if (!ufshcd_is_ufs_dev_active(hba))
5011 link_startup_again = true;
5013 link_startup:
5014 do {
5015 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
5017 ret = ufshcd_dme_link_startup(hba);
5018 if (ret)
5019 ufshcd_update_error_stats(hba, UFS_ERR_LINKSTARTUP);
5021 /* check if device is detected by inter-connect layer */
5022 if (!ret && !ufshcd_is_device_present(hba)) {
5023 ufshcd_update_error_stats(hba, UFS_ERR_LINKSTARTUP);
5024 dev_err(hba->dev, "%s: Device not present\n", __func__);
5030 * DME link lost indication is only received when link is up,
5031 * but we can't be sure if the link is up until link startup
5032 * succeeds. So reset the local Uni-Pro and try again.
5033 */
5034 if (ret && ufshcd_hba_enable(hba))
5035 goto out;
5036 } while (ret && retries--);
5038 if (ret) {
5039 /* failed to get the link up... retire */
5040 goto out;
5041 }
5042 if (link_startup_again) {
5043 link_startup_again = false;
5044 retries = DME_LINKSTARTUP_RETRIES;
5045 goto link_startup;
5046 }
5048 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
5049 ufshcd_init_pwr_info(hba);
5050 ufshcd_print_pwr_info(hba);
5052 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
5053 ret = ufshcd_disable_device_tx_lcc(hba);
5054 if (ret)
5055 goto out;
5056 }
5058 if (hba->dev_quirks & UFS_DEVICE_QUIRK_BROKEN_LCC) {
5059 ret = ufshcd_disable_host_tx_lcc(hba);
5060 if (ret)
5061 goto out;
5062 }
5064 /* Include any host controller configuration via UIC commands */
5065 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
5066 if (ret)
5067 goto out;
5069 ret = ufshcd_make_hba_operational(hba);
5070 out:
5071 if (ret) {
5072 dev_err(hba->dev, "link startup failed %d\n", ret);
5073 ufshcd_print_host_state(hba);
5074 ufshcd_print_pwr_info(hba);
5075 ufshcd_print_host_regs(hba);
5076 }
5078 return ret;
5079 }
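/*
 * Annotation: "Device not present" at link startup is fatal (retrying
 * cannot help if nothing is attached), while other failures re-run
 * ufshcd_hba_enable() first because DME link-lost indications are only
 * meaningful once the local UniPro stack has been reset.
 */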
5081 * ufshcd_verify_dev_init() - Verify device initialization
5082 * @hba: per-adapter instance
5084 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
5085 * device Transport Protocol (UTP) layer is ready after a reset.
5086 * If the UTP layer at the device side is not initialized, it may
5087 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
5088 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
5090 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
5091 {
5092 int err = 0;
5093 int retries;
5095 ufshcd_hold_all(hba);
5096 mutex_lock(&hba->dev_cmd.lock);
5097 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
5098 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
5099 NOP_OUT_TIMEOUT);
5101 if (!err || err == -ETIMEDOUT)
5102 break;
5104 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
5105 }
5106 mutex_unlock(&hba->dev_cmd.lock);
5107 ufshcd_release_all(hba);
5109 if (err)
5110 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
5111 return err;
5112 }
5115 * ufshcd_set_queue_depth - set lun queue depth
5116 * @sdev: pointer to SCSI device
5118 * Read bLUQueueDepth value and activate scsi tagged command
5119 * queueing. For WLUN, queue depth is set to 1. For best-effort
5120 * cases (bLUQueueDepth = 0) the queue depth is set to a maximum
5121 * value that host can queue.
5123 static void ufshcd_set_queue_depth(struct scsi_device *sdev)
5124 {
5125 int ret = 0;
5126 u8 lun_qdepth;
5127 struct ufs_hba *hba;
5129 hba = shost_priv(sdev->host);
5131 lun_qdepth = hba->nutrs;
5132 ret = ufshcd_read_unit_desc_param(hba,
5133 ufshcd_scsi_to_upiu_lun(sdev->lun),
5134 UNIT_DESC_PARAM_LU_Q_DEPTH,
5135 &lun_qdepth,
5136 sizeof(lun_qdepth));
5138 /* Some WLUNs don't support the unit descriptor */
5139 if (ret == -EOPNOTSUPP)
5140 lun_qdepth = 1;
5141 else if (!lun_qdepth)
5142 /* eventually, we can figure out the real queue depth */
5143 lun_qdepth = hba->nutrs;
5144 else
5145 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
5147 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
5148 __func__, lun_qdepth);
5149 scsi_change_queue_depth(sdev, lun_qdepth);
5150 }
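/*
 * Annotation: bLUQueueDepth == 0 means the LU has no individual limit and
 * shares the device-wide queue, so the host-wide nutrs is the right cap;
 * a non-zero value is still clamped to nutrs because the host can never
 * have more outstanding transfer requests than UTRL slots.
 */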
5153 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
5154 * @hba: per-adapter instance
5155 * @lun: UFS device lun id
5156 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
5158 * Returns 0 in case of success, and the b_lu_write_protect status is
5159 * returned in the @b_lu_write_protect parameter.
5160 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
5161 * Returns -EINVAL in case of invalid parameters passed to this function.
5162 */
5163 static int ufshcd_get_lu_wp(struct ufs_hba *hba,
5164 u8 lun,
5165 u8 *b_lu_write_protect)
5166 {
5167 int ret;
5169 if (!b_lu_write_protect)
5170 ret = -EINVAL;
5171 /*
5172 * According to UFS device spec, RPMB LU can't be write
5173 * protected so skip reading bLUWriteProtect parameter for
5174 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
5175 */
5176 else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
5177 ret = -ENOTSUPP;
5178 else
5179 ret = ufshcd_read_unit_desc_param(hba,
5180 lun,
5181 UNIT_DESC_PARAM_LU_WR_PROTECT,
5182 b_lu_write_protect,
5183 sizeof(*b_lu_write_protect));
5184 return ret;
5185 }
5188 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
5190 * @hba: per-adapter instance
5191 * @sdev: pointer to SCSI device
5194 static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
5195 struct scsi_device *sdev)
5197 if (hba->dev_info.f_power_on_wp_en &&
5198 !hba->dev_info.is_lu_power_on_wp) {
5199 u8 b_lu_write_protect;
5201 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
5202 &b_lu_write_protect) &&
5203 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
5204 hba->dev_info.is_lu_power_on_wp = true;
5205 }
5206 }
5209 * ufshcd_slave_alloc - handle initial SCSI device configurations
5210 * @sdev: pointer to SCSI device
5214 static int ufshcd_slave_alloc(struct scsi_device *sdev)
5215 {
5216 struct ufs_hba *hba;
5218 hba = shost_priv(sdev->host);
5220 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
5221 sdev->use_10_for_ms = 1;
5223 /* allow SCSI layer to restart the device in case of errors */
5224 sdev->allow_restart = 1;
5226 /* REPORT SUPPORTED OPERATION CODES is not supported */
5227 sdev->no_report_opcodes = 1;
5229 /* WRITE_SAME command is not supported */
5230 sdev->no_write_same = 1;
5232 ufshcd_set_queue_depth(sdev);
5234 ufshcd_get_lu_power_on_wp_status(hba, sdev);
5236 return 0;
5237 }
5240 * ufshcd_change_queue_depth - change queue depth
5241 * @sdev: pointer to SCSI device
5242 * @depth: required depth to set
5244 * Change queue depth and make sure the max. limits are not crossed.
5246 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
5248 struct ufs_hba *hba = shost_priv(sdev->host);
5250 if (depth > hba->nutrs)
5251 depth = hba->nutrs;
5252 return scsi_change_queue_depth(sdev, depth);
5253 }
5256 * ufshcd_slave_configure - adjust SCSI device configurations
5257 * @sdev: pointer to SCSI device
5259 static int ufshcd_slave_configure(struct scsi_device *sdev)
5260 {
5261 struct request_queue *q = sdev->request_queue;
5263 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
5264 blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
5266 sdev->autosuspend_delay = UFSHCD_AUTO_SUSPEND_DELAY_MS;
5267 sdev->use_rpm_auto = 1;
5269 return 0;
5270 }
5273 * ufshcd_slave_destroy - remove SCSI device configurations
5274 * @sdev: pointer to SCSI device
5276 static void ufshcd_slave_destroy(struct scsi_device *sdev)
5277 {
5278 struct ufs_hba *hba;
5280 hba = shost_priv(sdev->host);
5281 /* Drop the reference as it won't be needed anymore */
5282 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
5283 unsigned long flags;
5285 spin_lock_irqsave(hba->host->host_lock, flags);
5286 hba->sdev_ufs_device = NULL;
5287 spin_unlock_irqrestore(hba->host->host_lock, flags);
5288 }
5289 }
5292 * ufshcd_task_req_compl - handle task management request completion
5293 * @hba: per adapter instance
5294 * @index: index of the completed request
5295 * @resp: task management service response
5297 * Returns non-zero value on error, zero on success
5299 static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
5300 {
5301 struct utp_task_req_desc *task_req_descp;
5302 struct utp_upiu_task_rsp *task_rsp_upiup;
5303 unsigned long flags;
5304 int ocs_value;
5305 int task_result;
5307 spin_lock_irqsave(hba->host->host_lock, flags);
5309 /* Clear completed tasks from outstanding_tasks */
5310 __clear_bit(index, &hba->outstanding_tasks);
5312 task_req_descp = hba->utmrdl_base_addr;
5313 ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
5315 if (ocs_value == OCS_SUCCESS) {
5316 task_rsp_upiup = (struct utp_upiu_task_rsp *)
5317 task_req_descp[index].task_rsp_upiu;
5318 task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
5319 task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
5320 if (resp)
5321 *resp = (u8)task_result;
5322 } else {
5323 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
5324 __func__, ocs_value);
5325 }
5326 spin_unlock_irqrestore(hba->host->host_lock, flags);
5328 return ocs_value;
5329 }
5332 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
5333 * @lrb: pointer to local reference block of completed command
5334 * @scsi_status: SCSI command status
5336 * Returns value based on SCSI command status
5337 */
5338 static inline int
5339 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
5340 {
5341 int result = 0;
5343 switch (scsi_status) {
5344 case SAM_STAT_CHECK_CONDITION:
5345 ufshcd_copy_sense_data(lrbp);
5346 case SAM_STAT_GOOD:
5347 result |= DID_OK << 16 |
5348 COMMAND_COMPLETE << 8 |
5349 scsi_status;
5350 break;
5351 case SAM_STAT_TASK_SET_FULL:
5352 case SAM_STAT_BUSY:
5353 case SAM_STAT_TASK_ABORTED:
5354 ufshcd_copy_sense_data(lrbp);
5355 result |= scsi_status;
5356 break;
5357 default:
5358 result |= DID_ERROR << 16;
5359 break;
5360 } /* end of switch */
5362 return result;
5363 }
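/*
 * Annotation: SAM_STAT_CHECK_CONDITION deliberately falls through to the
 * GOOD case after copying sense data - the SCSI midlayer sees DID_OK in
 * the host byte and derives the outcome from the status byte plus the
 * sense buffer.
 */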
5366 * ufshcd_transfer_rsp_status - Get overall status of the response
5367 * @hba: per adapter instance
5368 * @lrb: pointer to local reference block of completed command
5370 * Returns result of the command to notify SCSI midlayer
5373 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
5374 {
5375 int result = 0;
5376 int scsi_status;
5377 int ocs;
5378 bool print_prdt;
5380 /* overall command status of utrd */
5381 ocs = ufshcd_get_tr_ocs(lrbp);
5383 switch (ocs) {
5384 case OCS_SUCCESS:
5385 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
5386 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
5387 switch (result) {
5388 case UPIU_TRANSACTION_RESPONSE:
5389 /*
5390 * get the response UPIU result to extract
5391 * the SCSI command status
5392 */
5393 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
5395 /*
5396 * get the result based on SCSI status response
5397 * to notify the SCSI midlayer of the command status
5398 */
5399 scsi_status = result & MASK_SCSI_STATUS;
5400 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
5402 /*
5403 * Currently we are only supporting BKOPs exception
5404 * events hence we can ignore BKOPs exception event
5405 * during power management callbacks. BKOPs exception
5406 * event is not expected to be raised in runtime suspend
5407 * callback as it allows the urgent bkops.
5408 * During system suspend, we are anyway forcefully
5409 * disabling the bkops and if urgent bkops is needed
5410 * it will be enabled on system resume. Long term
5411 * solution could be to abort the system suspend if
5412 * UFS device needs urgent BKOPs.
5413 */
5414 if (!hba->pm_op_in_progress &&
5415 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr)) {
5417 * Prevent suspend once eeh_work is scheduled
5418 * to avoid deadlock between ufshcd_suspend
5419 * and exception event handler.
5421 if (schedule_work(&hba->eeh_work))
5422 pm_runtime_get_noresume(hba->dev);
5423 }
5424 break;
5425 case UPIU_TRANSACTION_REJECT_UPIU:
5426 /* TODO: handle Reject UPIU Response */
5427 result = DID_ERROR << 16;
5428 dev_err(hba->dev,
5429 "Reject UPIU not fully implemented\n");
5430 break;
5431 default:
5432 result = DID_ERROR << 16;
5433 dev_err(hba->dev,
5434 "Unexpected request response code = %x\n",
5435 result);
5436 break;
5437 }
5438 break;
5439 case OCS_ABORTED:
5440 result |= DID_ABORT << 16;
5441 break;
5442 case OCS_INVALID_COMMAND_STATUS:
5443 result |= DID_REQUEUE << 16;
5444 break;
5445 case OCS_INVALID_CMD_TABLE_ATTR:
5446 case OCS_INVALID_PRDT_ATTR:
5447 case OCS_MISMATCH_DATA_BUF_SIZE:
5448 case OCS_MISMATCH_RESP_UPIU_SIZE:
5449 case OCS_PEER_COMM_FAILURE:
5450 case OCS_FATAL_ERROR:
5451 case OCS_DEVICE_FATAL_ERROR:
5452 case OCS_INVALID_CRYPTO_CONFIG:
5453 case OCS_GENERAL_CRYPTO_ERROR:
5454 default:
5455 result |= DID_ERROR << 16;
5456 dev_err(hba->dev,
5457 "OCS error from controller = %x for tag %d\n",
5458 ocs, lrbp->task_tag);
5459 /*
5460 * This is called in interrupt context, hence avoid sleep
5461 * while printing debug registers. Also print only the minimum
5462 * debug registers needed to debug OCS failure.
5463 */
5464 __ufshcd_print_host_regs(hba, true);
5465 ufshcd_print_host_state(hba);
5466 break;
5467 } /* end of switch */
5469 if ((host_byte(result) != DID_OK) && !hba->silence_err_logs) {
5470 print_prdt = (ocs == OCS_INVALID_PRDT_ATTR ||
5471 ocs == OCS_MISMATCH_DATA_BUF_SIZE);
5472 ufshcd_print_trs(hba, 1 << lrbp->task_tag, print_prdt);
5473 }
5475 if ((host_byte(result) == DID_ERROR) ||
5476 (host_byte(result) == DID_ABORT))
5477 ufsdbg_set_err_state(hba);
5479 return result;
5480 }
5483 * ufshcd_uic_cmd_compl - handle completion of uic command
5484 * @hba: per adapter instance
5485 * @intr_status: interrupt status generated by the controller
5488 * IRQ_HANDLED - If interrupt is valid
5489 * IRQ_NONE - If invalid interrupt
5491 static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
5492 {
5493 irqreturn_t retval = IRQ_NONE;
5495 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
5496 hba->active_uic_cmd->argument2 |=
5497 ufshcd_get_uic_cmd_result(hba);
5498 hba->active_uic_cmd->argument3 =
5499 ufshcd_get_dme_attr_val(hba);
5500 complete(&hba->active_uic_cmd->done);
5501 retval = IRQ_HANDLED;
5502 }
5504 if (intr_status & UFSHCD_UIC_PWR_MASK) {
5505 if (hba->uic_async_done) {
5506 complete(hba->uic_async_done);
5507 retval = IRQ_HANDLED;
5508 } else if (ufshcd_is_auto_hibern8_supported(hba)) {
5510 * If the uic_async_done flag is not set then this
5511 * is an Auto hibern8 error interrupt.
5512 * Perform a host reset followed by a full
5513 * link recovery.
5514 */
5515 hba->ufshcd_state = UFSHCD_STATE_ERROR;
5516 hba->force_host_reset = true;
5517 dev_err(hba->dev, "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
5518 __func__, (intr_status & UIC_HIBERNATE_ENTER) ?
5520 intr_status, ufshcd_get_upmcrs(hba));
5521 __ufshcd_print_host_regs(hba, true);
5522 ufshcd_print_host_state(hba);
5523 schedule_work(&hba->eh_work);
5524 retval = IRQ_HANDLED;
5525 }
5526 }
5527 return retval;
5528 }
5531 * ufshcd_abort_outstanding_transfer_requests - abort all outstanding transfer requests.
5532 * @hba: per adapter instance
5533 * @result: error result to inform scsi layer about
5534 */
5535 void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
5538 struct ufshcd_lrb *lrbp;
5539 struct scsi_cmnd *cmd;
5541 if (!hba->outstanding_reqs)
5544 for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
5545 lrbp = &hba->lrb[index];
5546 cmd = lrbp->cmd;
5547 if (cmd) {
5548 ufshcd_cond_add_cmd_trace(hba, index, "failed");
5549 ufshcd_update_error_stats(hba,
5550 UFS_ERR_INT_FATAL_ERRORS);
5551 scsi_dma_unmap(cmd);
5552 cmd->result = result;
5553 /* Clear pending transfer requests */
5554 ufshcd_clear_cmd(hba, index);
5555 ufshcd_outstanding_req_clear(hba, index);
5556 clear_bit_unlock(index, &hba->lrb_in_use);
5557 lrbp->complete_time_stamp = ktime_get();
5558 update_req_stats(hba, lrbp);
5559 /* Mark completed command as NULL in LRB */
5560 lrbp->cmd = NULL;
5561 ufshcd_release_all(hba);
5562 if (cmd->request) {
5563 /*
5564 * As we are accessing the "request" structure,
5565 * this must be called before calling
5566 * ->scsi_done() callback.
5567 */
5568 ufshcd_vops_pm_qos_req_end(hba, cmd->request,
5569 true);
5570 ufshcd_vops_crypto_engine_cfg_end(hba,
5571 lrbp, cmd->request);
5572 }
5573 /* Do not touch lrbp after scsi done */
5574 cmd->scsi_done(cmd);
5575 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
5576 if (hba->dev_cmd.complete) {
5577 ufshcd_cond_add_cmd_trace(hba, index,
5579 ufshcd_outstanding_req_clear(hba, index);
5580 complete(hba->dev_cmd.complete);
5583 if (ufshcd_is_clkscaling_supported(hba))
5584 hba->clk_scaling.active_reqs--;
5585 }
5586 }
5589 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
5590 * @hba: per adapter instance
5591 * @completed_reqs: requests to complete
5593 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
5594 unsigned long completed_reqs)
5595 {
5596 struct ufshcd_lrb *lrbp;
5597 struct scsi_cmnd *cmd;
5598 int result;
5599 int index;
5600 struct request *req;
5602 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
5603 lrbp = &hba->lrb[index];
5604 cmd = lrbp->cmd;
5605 if (cmd) {
5606 ufshcd_cond_add_cmd_trace(hba, index, "complete");
5607 ufshcd_update_tag_stats_completion(hba, cmd);
5608 result = ufshcd_transfer_rsp_status(hba, lrbp);
5609 scsi_dma_unmap(cmd);
5610 cmd->result = result;
5611 clear_bit_unlock(index, &hba->lrb_in_use);
5612 lrbp->complete_time_stamp = ktime_get();
5613 update_req_stats(hba, lrbp);
5614 /* Mark completed command as NULL in LRB */
5615 lrbp->cmd = NULL;
5616 hba->ufs_stats.clk_rel.ctx = XFR_REQ_COMPL;
5617 __ufshcd_release(hba, false);
5618 __ufshcd_hibern8_release(hba, false);
5619 if (cmd->request) {
5620 /*
5621 * As we are accessing the "request" structure,
5622 * this must be called before calling
5623 * ->scsi_done() callback.
5624 */
5625 ufshcd_vops_pm_qos_req_end(hba, cmd->request,
5626 false);
5627 ufshcd_vops_crypto_engine_cfg_end(hba,
5628 lrbp, cmd->request);
5629 }
5631 req = cmd->request;
5633 /* Update IO svc time latency histogram */
5634 if (req && req->lat_hist_enabled) {
5635 ktime_t completion;
5636 u_int64_t delta_us;
5638 completion = ktime_get();
5639 delta_us = ktime_us_delta(completion,
5640 req->lat_hist_io_start);
5641 blk_update_latency_hist(
5642 (rq_data_dir(req) == READ) ?
5643 &hba->io_lat_read :
5644 &hba->io_lat_write, delta_us);
5645 }
5647 /* Do not touch lrbp after scsi done */
5648 cmd->scsi_done(cmd);
5649 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
5650 if (hba->dev_cmd.complete) {
5651 ufshcd_cond_add_cmd_trace(hba, index,
5653 complete(hba->dev_cmd.complete);
5654 }
5655 }
5656 if (ufshcd_is_clkscaling_supported(hba))
5657 hba->clk_scaling.active_reqs--;
5658 }
5660 /* clear corresponding bits of completed commands */
5661 hba->outstanding_reqs ^= completed_reqs;
5663 ufshcd_clk_scaling_update_busy(hba);
5665 /* we might have free'd some tags above */
5666 wake_up(&hba->dev_cmd.tag_wq);
5667 }
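/*
 * Annotation: the caller below derives completed_reqs as
 * tr_doorbell ^ hba->outstanding_reqs - a bit set in outstanding but
 * clear in the doorbell means the controller finished that slot. Toy
 * example: outstanding 0b1011, doorbell 0b0001 -> completed 0b1010,
 * i.e. tags 1 and 3 get reaped here.
 */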
5670 * ufshcd_transfer_req_compl - handle SCSI and query command completion
5671 * @hba: per adapter instance
5674 * IRQ_HANDLED - If interrupt is valid
5675 * IRQ_NONE - If invalid interrupt
5677 static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
5678 {
5679 unsigned long completed_reqs;
5680 u32 tr_doorbell;
5682 /* Resetting interrupt aggregation counters first and reading the
5683 * DOOR_BELL afterward allows us to handle all the completed requests.
5684 * In order to prevent starvation of other interrupts, the DB is read once
5685 * after reset. The downside of this solution is the possibility of a
5686 * false interrupt if the device completes another request after resetting
5687 * aggregation and before reading the DB.
5688 */
5689 if (ufshcd_is_intr_aggr_allowed(hba))
5690 ufshcd_reset_intr_aggr(hba);
5692 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5693 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
5695 if (completed_reqs) {
5696 __ufshcd_transfer_req_compl(hba, completed_reqs);
5697 return IRQ_HANDLED;
5698 } else {
5699 return IRQ_NONE;
5700 }
5701 }
5704 * ufshcd_disable_ee - disable exception event
5705 * @hba: per-adapter instance
5706 * @mask: exception event to disable
5708 * Disables exception event in the device so that the EVENT_ALERT
5709 * bit is not set.
5711 * Returns zero on success, non-zero error value on failure.
5713 static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
5714 {
5715 int err = 0;
5716 u32 val;
5718 if (!(hba->ee_ctrl_mask & mask))
5719 goto out;
5721 val = hba->ee_ctrl_mask & ~mask;
5722 val &= 0xFFFF; /* 2 bytes */
5723 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5724 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
5725 if (!err)
5726 hba->ee_ctrl_mask &= ~mask;
5727 out:
5728 return err;
5729 }
5732 * ufshcd_enable_ee - enable exception event
5733 * @hba: per-adapter instance
5734 * @mask: exception event to enable
5736 * Enable corresponding exception event in the device to allow
5737 * device to alert host in critical scenarios.
5739 * Returns zero on success, non-zero error value on failure.
5741 static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
5742 {
5743 int err = 0;
5744 u32 val;
5746 if (hba->ee_ctrl_mask & mask)
5747 goto out;
5749 val = hba->ee_ctrl_mask | mask;
5750 val &= 0xFFFF; /* 2 bytes */
5751 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5752 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
5753 if (!err)
5754 hba->ee_ctrl_mask |= mask;
5755 out:
5756 return err;
5757 }
5760 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
5761 * @hba: per-adapter instance
5763 * Allow device to manage background operations on its own. Enabling
5764 * this might lead to inconsistent latencies during normal data transfers
5765 * as the device is allowed to manage its own way of handling background
5768 * Returns zero on success, non-zero on failure.
5770 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5771 {
5772 int err = 0;
5774 if (hba->auto_bkops_enabled)
5775 goto out;
5777 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5778 QUERY_FLAG_IDN_BKOPS_EN, NULL);
5780 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5785 hba->auto_bkops_enabled = true;
5786 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), 1);
5788 /* No need of URGENT_BKOPS exception from the device */
5789 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5791 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5798 * ufshcd_disable_auto_bkops - block device in doing background operations
5799 * @hba: per-adapter instance
5801 * Disabling background operations improves command response latency but
5802 * has the drawback of the device moving into a critical state where it is
5803 * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
5804 * host is idle so that BKOPS are managed effectively without any negative
5807 * Returns zero on success, non-zero on failure.
5809 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5810 {
5811 int err = 0;
5813 if (!hba->auto_bkops_enabled)
5814 goto out;
5816 /*
5817 * If host assisted BKOPs is to be enabled, make sure
5818 * urgent bkops exception is allowed.
5819 */
5820 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5822 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5827 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5828 QUERY_FLAG_IDN_BKOPS_EN, NULL);
5830 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5832 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5836 hba->auto_bkops_enabled = false;
5837 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), 0);
5838 out:
5839 return err;
5840 }
5843 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
5844 * @hba: per adapter instance
5846 * After a device reset the device may toggle the BKOPS_EN flag
5847 * to default value. The s/w tracking variables should be updated
5848 * as well. This function would change the auto-bkops state based on
5849 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
5851 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5852 {
5853 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5854 hba->auto_bkops_enabled = false;
5855 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5856 ufshcd_enable_auto_bkops(hba);
5858 hba->auto_bkops_enabled = true;
5859 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5860 ufshcd_disable_auto_bkops(hba);
5861 }
5862 }
5864 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5865 {
5866 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5867 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5868 }
5871 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5872 * @hba: per-adapter instance
5873 * @status: bkops_status value
5875 * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
5876 * flag in the device to permit background operations if the device
5877 * bkops_status is greater than or equal to "status" argument passed to
5878 * this function, disable otherwise.
5880 * Returns 0 for success, non-zero in case of failure.
5882 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5883 * to know whether auto bkops is enabled or disabled after this function
5884 * returns control to it.
5886 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5887 enum bkops_status status)
5888 {
5889 int err;
5890 u32 curr_status = 0;
5892 err = ufshcd_get_bkops_status(hba, &curr_status);
5894 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5897 } else if (curr_status > BKOPS_STATUS_MAX) {
5898 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5899 __func__, curr_status);
5904 if (curr_status >= status)
5905 err = ufshcd_enable_auto_bkops(hba);
5907 err = ufshcd_disable_auto_bkops(hba);
5908 out:
5909 return err;
5910 }
5913 * ufshcd_urgent_bkops - handle urgent bkops exception event
5914 * @hba: per-adapter instance
5916 * Enable fBackgroundOpsEn flag in the device to permit background
5919 * If BKOPs is enabled, this function returns 0; it returns 1 if bkops is
5920 * not enabled, and a negative error value for any other failure.
5921 */
5922 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5923 {
5924 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
5925 }
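/*
 * Annotation: bkops_status is an ordered severity scale (0 = not needed,
 * up to BKOPS_STATUS_CRITICAL). ufshcd_bkops_ctrl() enables auto-bkops
 * when the device reports at or above the threshold and disables it
 * otherwise, so hba->urgent_bkops_lvl tunes how readily the device may
 * spend bandwidth on housekeeping.
 */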
5927 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5928 {
5929 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5930 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5931 }
5933 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5934 {
5935 int err;
5936 u32 curr_status = 0;
5938 if (hba->is_urgent_bkops_lvl_checked)
5939 goto enable_auto_bkops;
5941 err = ufshcd_get_bkops_status(hba, &curr_status);
5943 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5949 * We are seeing that some devices are raising the urgent bkops
5950 * exception events even when BKOPS status doesn't indicate performance
5951 * impacted or critical. Handle these devices by determining their urgent
5952 * bkops status at runtime.
5953 */
5954 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5955 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5956 __func__, curr_status);
5957 /* update the current status as the urgent bkops level */
5958 hba->urgent_bkops_lvl = curr_status;
5959 hba->is_urgent_bkops_lvl_checked = true;
5960 }
5962 enable_auto_bkops:
5963 err = ufshcd_enable_auto_bkops(hba);
5964 out:
5965 if (err < 0)
5966 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5967 __func__, err);
5968 }
5971 * ufshcd_exception_event_handler - handle exceptions raised by device
5972 * @work: pointer to work data
5974 * Read bExceptionEventStatus attribute from the device and handle the
5975 * exception event accordingly.
5977 static void ufshcd_exception_event_handler(struct work_struct *work)
5978 {
5979 struct ufs_hba *hba;
5980 int err;
5981 u32 status = 0;
5982 hba = container_of(work, struct ufs_hba, eeh_work);
5984 pm_runtime_get_sync(hba->dev);
5985 ufshcd_scsi_block_requests(hba);
5986 err = ufshcd_get_ee_status(hba, &status);
5988 dev_err(hba->dev, "%s: failed to get exception status %d\n",
5993 status &= hba->ee_ctrl_mask;
5995 if (status & MASK_EE_URGENT_BKOPS)
5996 ufshcd_bkops_exception_event_handler(hba);
5998 out:
5999 ufshcd_scsi_unblock_requests(hba);
6000 /*
6001 * pm_runtime_get_noresume is called while scheduling
6002 * eeh_work to avoid suspend racing with exception work.
6003 * Hence decrement usage counter using pm_runtime_put_noidle
6004 * to allow suspend on completion of exception event handler.
6006 pm_runtime_put_noidle(hba->dev);
6007 pm_runtime_put(hba->dev);
6008 }
6011 /* Complete requests that have door-bell cleared */
6012 static void ufshcd_complete_requests(struct ufs_hba *hba)
6013 {
6014 ufshcd_transfer_req_compl(hba);
6015 ufshcd_tmc_handler(hba);
6016 }
6019 * ufshcd_quirk_dl_nac_errors - This function checks whether error handling
6020 * is required to recover from the DL NAC errors or not.
6021 * @hba: per-adapter instance
6023 * Returns true if error handling is required, false otherwise
6025 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
6026 {
6027 unsigned long flags;
6028 bool err_handling = true;
6030 spin_lock_irqsave(hba->host->host_lock, flags);
6031 /*
6032 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the
6033 * device fatal error and/or DL NAC & REPLAY timeout errors.
6034 */
6035 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
6036 goto out;
6038 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
6039 ((hba->saved_err & UIC_ERROR) &&
6040 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) {
6042 * we have to do error recovery but at least silence the error
6043 * logs.
6044 */
6045 hba->silence_err_logs = true;
6046 goto out;
6047 }
6049 if ((hba->saved_err & UIC_ERROR) &&
6050 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
6052 /*
6053 * wait for 50ms to see if we can get any other errors or not.
6054 */
6055 spin_unlock_irqrestore(hba->host->host_lock, flags);
6056 msleep(50);
6057 spin_lock_irqsave(hba->host->host_lock, flags);
6059 /*
6060 * now check if we have got any other severe errors other than
6061 * NAC error?
6062 */
6063 if ((hba->saved_err & INT_FATAL_ERRORS) ||
6064 ((hba->saved_err & UIC_ERROR) &&
6065 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) {
6066 if (((hba->saved_err & INT_FATAL_ERRORS) ==
6067 DEVICE_FATAL_ERROR) || (hba->saved_uic_err &
6068 ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))
6069 hba->silence_err_logs = true;
6070 goto out;
6071 }
6073 /*
6074 * As DL NAC is the only error received so far, send out NOP
6075 * command to confirm if link is still active or not.
6076 * - If we don't get any response then do error recovery.
6077 * - If we get response then clear the DL NAC error bit.
6078 */
6080 /* silence the error logs from NOP command */
6081 hba->silence_err_logs = true;
6082 spin_unlock_irqrestore(hba->host->host_lock, flags);
6083 err = ufshcd_verify_dev_init(hba);
6084 spin_lock_irqsave(hba->host->host_lock, flags);
6085 hba->silence_err_logs = false;
6087 if (err) {
6088 hba->silence_err_logs = true;
6089 goto out;
6090 }
6092 /* Link seems to be alive hence ignore the DL NAC errors */
6093 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
6094 hba->saved_err &= ~UIC_ERROR;
6095 /* clear NAC error */
6096 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6097 if (!hba->saved_uic_err) {
6098 err_handling = false;
6099 goto out;
6100 }
6102 * there seem to be some errors other than NAC, so do error
6103 * recovery
6104 */
6105 hba->silence_err_logs = true;
6106 }
6107 out:
6108 spin_unlock_irqrestore(hba->host->host_lock, flags);
6109 return err_handling;
6110 }
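/*
 * Annotation: this quirk path exists because some devices issue a DL NAC
 * once and then recover on their own; a NOP OUT round-trip is used as a
 * cheap liveness probe so that a full host reset is taken only when the
 * link is actually dead or other errors accompany the NAC.
 */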
6113 * ufshcd_err_handler - handle UFS errors that require s/w attention
6114 * @work: pointer to work structure
6116 static void ufshcd_err_handler(struct work_struct *work)
6117 {
6118 struct ufs_hba *hba;
6119 unsigned long flags;
6120 bool err_xfer = false, err_tm = false;
6121 int err = 0;
6122 int tag;
6123 bool needs_reset = false;
6124 bool clks_enabled = false;
6126 hba = container_of(work, struct ufs_hba, eh_work);
6128 spin_lock_irqsave(hba->host->host_lock, flags);
6129 ufsdbg_set_err_state(hba);
6131 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
6132 goto out;
6134 /*
6135 * Make sure the clocks are ON before we proceed with err
6136 * handling. For the majority of cases err handler would be
6137 * run with clocks ON. There is a possibility that the err
6138 * handler was scheduled due to auto hibern8 error interrupt,
6139 * in which case the clocks could be gated or be in the
6140 * process of gating when the err handler runs.
6141 */
6142 if (unlikely((hba->clk_gating.state != CLKS_ON) &&
6143 ufshcd_is_auto_hibern8_supported(hba))) {
6144 spin_unlock_irqrestore(hba->host->host_lock, flags);
6145 hba->ufs_stats.clk_hold.ctx = ERR_HNDLR_WORK;
6146 ufshcd_hold(hba, false);
6147 spin_lock_irqsave(hba->host->host_lock, flags);
6148 clks_enabled = true;
6149 }
6151 hba->ufshcd_state = UFSHCD_STATE_RESET;
6152 ufshcd_set_eh_in_progress(hba);
6154 /* Complete requests that have door-bell cleared by h/w */
6155 ufshcd_complete_requests(hba);
6157 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6158 bool ret;
6160 spin_unlock_irqrestore(hba->host->host_lock, flags);
6161 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
6162 ret = ufshcd_quirk_dl_nac_errors(hba);
6163 spin_lock_irqsave(hba->host->host_lock, flags);
6164 if (!ret)
6165 goto skip_err_handling;
6166 }
6169 * Dump controller state before resetting. Transfer request state
6170 * will be dumped as part of request completion.
6171 */
6172 if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
6173 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x",
6174 __func__, hba->saved_err, hba->saved_uic_err);
6175 if (!hba->silence_err_logs) {
6176 /* release lock as print host regs sleeps */
6177 spin_unlock_irqrestore(hba->host->host_lock, flags);
6178 ufshcd_print_host_regs(hba);
6179 ufshcd_print_host_state(hba);
6180 ufshcd_print_pwr_info(hba);
6181 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
6182 ufshcd_print_cmd_log(hba);
6183 spin_lock_irqsave(hba->host->host_lock, flags);
6184 }
6185 }
6187 if ((hba->saved_err & INT_FATAL_ERRORS)
6188 || hba->saved_ce_err || hba->force_host_reset ||
6189 ((hba->saved_err & UIC_ERROR) &&
6190 (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
6191 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
6192 UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
6193 needs_reset = true;
6195 /*
6196 * if host reset is required then skip clearing the pending
6197 * transfers forcefully because they will automatically get
6198 * cleared after link startup.
6199 */
6200 if (needs_reset)
6201 goto skip_pending_xfer_clear;
6203 /* release lock as clear command might sleep */
6204 spin_unlock_irqrestore(hba->host->host_lock, flags);
6205 /* Clear pending transfer requests */
6206 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
6207 if (ufshcd_clear_cmd(hba, tag)) {
6208 err_xfer = true;
6209 goto lock_skip_pending_xfer_clear;
6210 }
6211 }
6213 /* Clear pending task management requests */
6214 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
6215 if (ufshcd_clear_tm_cmd(hba, tag)) {
6216 err_tm = true;
6217 goto lock_skip_pending_xfer_clear;
6218 }
6219 }
6221 lock_skip_pending_xfer_clear:
6222 spin_lock_irqsave(hba->host->host_lock, flags);
6224 /* Complete the requests that are cleared by s/w */
6225 ufshcd_complete_requests(hba);
6227 if (err_xfer || err_tm)
6228 needs_reset = true;
6230 skip_pending_xfer_clear:
6231 /* Fatal errors need reset */
6232 if (needs_reset) {
6233 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
6235 if (hba->saved_err & INT_FATAL_ERRORS)
6236 ufshcd_update_error_stats(hba,
6237 UFS_ERR_INT_FATAL_ERRORS);
6238 if (hba->saved_ce_err)
6239 ufshcd_update_error_stats(hba, UFS_ERR_CRYPTO_ENGINE);
6241 if (hba->saved_err & UIC_ERROR)
6242 ufshcd_update_error_stats(hba,
6243 UFS_ERR_INT_UIC_ERROR);
6245 if (err_xfer || err_tm)
6246 ufshcd_update_error_stats(hba,
6247 UFS_ERR_CLEAR_PEND_XFER_TM);
6250 * ufshcd_reset_and_restore() does the link reinitialization
6251 * which will need at least one empty doorbell slot to send the
6252 * device management commands (NOP and query commands).
6253 * If there is no slot empty at this moment then free up the last
6254 * slot forcefully.
6255 */
6256 if (hba->outstanding_reqs == max_doorbells)
6257 __ufshcd_transfer_req_compl(hba,
6258 (1UL << (hba->nutrs - 1)));
6260 spin_unlock_irqrestore(hba->host->host_lock, flags);
6261 err = ufshcd_reset_and_restore(hba);
6262 spin_lock_irqsave(hba->host->host_lock, flags);
6264 dev_err(hba->dev, "%s: reset and restore failed\n",
6266 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6269 * Inform scsi mid-layer that we did reset and allow to handle
6270 * Unit Attention properly.
6272 scsi_report_bus_reset(hba->host, 0);
6273 hba->saved_err = 0;
6274 hba->saved_uic_err = 0;
6275 hba->saved_ce_err = 0;
6276 hba->force_host_reset = false;
6278 skip_err_handling:
6279 if (!needs_reset) {
6281 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6282 if (hba->saved_err || hba->saved_uic_err)
6283 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
6284 __func__, hba->saved_err, hba->saved_uic_err);
6285 }
6287 hba->silence_err_logs = false;
6289 if (clks_enabled) {
6290 __ufshcd_release(hba, false);
6291 hba->ufs_stats.clk_rel.ctx = ERR_HNDLR_WORK;
6292 }
6293 out:
6294 ufshcd_clear_eh_in_progress(hba);
6295 spin_unlock_irqrestore(hba->host->host_lock, flags);
6296 }
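/*
 * Annotation: the error handler performs a full reset only when
 * needs_reset is set (fatal interrupt status, crypto engine error, a
 * forced host reset, or one of the fatal UIC error classes); otherwise
 * pending transfers are drained via doorbell clearing so in-flight
 * commands fail fast without a link bounce.
 */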
6298 static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
6299 u32 reg)
6300 {
6301 reg_hist->reg[reg_hist->pos] = reg;
6302 reg_hist->tstamp[reg_hist->pos] = ktime_get();
6303 reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
6304 }
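/*
 * Annotation: the UIC error history is a small ring buffer -
 * UIC_ERR_REG_HIST_LENGTH (register value, timestamp) pairs with pos
 * wrapping via the modulo above - so debugfs can show the last few
 * errors per layer (PA/DL/NL/TL/DME) in bounded memory.
 */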
6306 static void ufshcd_rls_handler(struct work_struct *work)
6307 {
6308 struct ufs_hba *hba;
6309 int ret = 0;
6310 u32 mode;
6312 hba = container_of(work, struct ufs_hba, rls_work);
6313 pm_runtime_get_sync(hba->dev);
6314 ufshcd_scsi_block_requests(hba);
6315 down_write(&hba->lock);
6316 ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
6317 if (ret) {
6318 dev_err(hba->dev,
6319 "Timed out (%d) waiting for DB to clear\n",
6320 ret);
6321 goto out;
6322 }
6324 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
6325 if (hba->pwr_info.pwr_rx != ((mode >> PWR_RX_OFFSET) & PWR_INFO_MASK))
6326 hba->restore_needed = true;
6328 if (hba->pwr_info.pwr_tx != (mode & PWR_INFO_MASK))
6329 hba->restore_needed = true;
6331 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_RXGEAR), &mode);
6332 if (hba->pwr_info.gear_rx != mode)
6333 hba->restore_needed = true;
6335 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TXGEAR), &mode);
6336 if (hba->pwr_info.gear_tx != mode)
6337 hba->restore_needed = true;
6339 if (hba->restore_needed)
6340 ret = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
6343 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
6346 hba->restore_needed = false;
6349 up_write(&hba->lock);
6350 ufshcd_scsi_unblock_requests(hba);
6351 pm_runtime_put_sync(hba->dev);
6355 * ufshcd_update_uic_error - check and set fatal UIC error flags.
6356 * @hba: per-adapter instance
6359 * IRQ_HANDLED - If interrupt is valid
6360 * IRQ_NONE - If invalid interrupt
6362 static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
6365 irqreturn_t retval = IRQ_NONE;
6367 /* PHY layer lane error */
6368 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
6369 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
6370 (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
6371 /*
6372 * To know whether this error is fatal or not, DB timeout
6373 * must be checked but this error is handled separately.
6374 */
6375 dev_dbg(hba->dev, "%s: UIC Lane error reported, reg 0x%x\n",
6376 __func__, reg);
6377 ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
6379 /*
6380 * Don't ignore LINERESET indication during hibern8
6381 * enter operation.
6382 */
6383 if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
6384 struct uic_command *cmd = hba->active_uic_cmd;
6386 if (cmd) {
6387 if (cmd->command == UIC_CMD_DME_HIBER_ENTER) {
6388 dev_err(hba->dev, "%s: LINERESET during hibern8 enter, reg 0x%x\n",
6389 __func__, reg);
6390 hba->full_init_linereset = true;
6391 }
6392 }
6393 if (!hba->full_init_linereset)
6394 schedule_work(&hba->rls_work);
6396 retval |= IRQ_HANDLED;
6397 }
6399 /* PA_INIT_ERROR is fatal and needs UIC reset */
6400 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
6401 if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
6402 (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
6403 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
6405 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) {
6406 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
6407 } else if (hba->dev_quirks &
6408 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
			if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
				hba->uic_error |=
					UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
			else if (reg &
				 UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
				hba->uic_error |=
					UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
		}
		retval |= IRQ_HANDLED;
	}
	/* UIC NL/TL/DME errors need software retry */
6421 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
6422 if ((reg & UIC_NETWORK_LAYER_ERROR) &&
6423 (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
6424 ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
6425 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
6426 retval |= IRQ_HANDLED;
6429 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
6430 if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
6431 (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
6432 ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
6433 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
6434 retval |= IRQ_HANDLED;
6437 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
6438 if ((reg & UIC_DME_ERROR) &&
6439 (reg & UIC_DME_ERROR_CODE_MASK)) {
6440 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
6441 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
6442 retval |= IRQ_HANDLED;
	dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
		__func__, hba->uic_error);
	return retval;
}
/**
 * ufshcd_check_errors - Check for errors that need s/w attention
 * @hba: per-adapter instance
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
{
	bool queue_eh_work = false;
	irqreturn_t retval = IRQ_NONE;
6463 if (hba->errors & INT_FATAL_ERRORS || hba->ce_error)
6464 queue_eh_work = true;
	if (hba->errors & UIC_ERROR) {
		hba->uic_error = 0;
		retval = ufshcd_update_uic_error(hba);
		if (hba->uic_error)
			queue_eh_work = true;
	}

	if (queue_eh_work) {
		/*
		 * update the transfer error masks to sticky bits, let's do this
		 * irrespective of current ufshcd_state.
		 */
6478 hba->saved_err |= hba->errors;
6479 hba->saved_uic_err |= hba->uic_error;
6480 hba->saved_ce_err |= hba->ce_error;
6482 /* handle fatal errors only when link is functional */
6483 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
			/*
			 * Set error handling in progress flag early so that we
			 * don't issue new requests any more.
			 */
			ufshcd_set_eh_in_progress(hba);

			hba->ufshcd_state = UFSHCD_STATE_ERROR;
			schedule_work(&hba->eh_work);
		}
		retval |= IRQ_HANDLED;
	}
	/*
	 * if (!queue_eh_work) -
	 * Other errors are either non-fatal where host recovers
	 * itself without s/w intervention or errors that will be
	 * handled by the SCSI core layer.
	 */
	return retval;
}
/**
 * ufshcd_tmc_handler - handle task management function completion
 * @hba: per adapter instance
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{
	u32 tm_doorbell;
	irqreturn_t retval = IRQ_NONE;

	tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
	hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
	if (hba->tm_condition) {
		wake_up(&hba->tm_wq);
		retval |= IRQ_HANDLED;
	}

	return retval;
}
/**
 * ufshcd_sl_intr - Interrupt service routine
 * @hba: per adapter instance
 * @intr_status: contains interrupts generated by the controller
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
	irqreturn_t retval = IRQ_NONE;
6539 ufsdbg_error_inject_dispatcher(hba,
6540 ERR_INJECT_INTR, intr_status, &intr_status);
6542 ufshcd_vops_crypto_engine_get_status(hba, &hba->ce_error);
6544 hba->errors = UFSHCD_ERROR_MASK & intr_status;
6545 if (hba->errors || hba->ce_error)
6546 retval |= ufshcd_check_errors(hba);
6548 if (intr_status & UFSHCD_UIC_MASK)
6549 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
6551 if (intr_status & UTP_TASK_REQ_COMPL)
6552 retval |= ufshcd_tmc_handler(hba);
	if (intr_status & UTP_TRANSFER_REQ_COMPL)
		retval |= ufshcd_transfer_req_compl(hba);

	return retval;
}
/**
 * ufshcd_intr - Main interrupt service routine
 * @irq: irq number
 * @__hba: pointer to adapter instance
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
6569 static irqreturn_t ufshcd_intr(int irq, void *__hba)
6571 u32 intr_status, enabled_intr_status;
6572 irqreturn_t retval = IRQ_NONE;
6573 struct ufs_hba *hba = __hba;
6574 int retries = hba->nutrs;
6576 spin_lock(hba->host->host_lock);
6577 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6578 hba->ufs_stats.last_intr_status = intr_status;
6579 hba->ufs_stats.last_intr_ts = ktime_get();
	/*
	 * There could be max of hba->nutrs reqs in flight and in worst case
	 * if the reqs get finished 1 by 1 after the interrupt status is
	 * read, make sure we handle them by checking the interrupt status
	 * again in a loop until we process all of the reqs before returning.
	 */
	do {
		enabled_intr_status =
			intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
		if (enabled_intr_status)
			retval |= ufshcd_sl_intr(hba, enabled_intr_status);

		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	} while (intr_status && --retries);
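
	/*
	 * Note that the status register is cleared (written back) before the
	 * handlers run, so an event firing between the read and the write
	 * raises a fresh status bit that the next loop iteration observes
	 * instead of being lost.
	 */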
6597 if (retval == IRQ_NONE) {
6598 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
6599 __func__, intr_status);
6600 ufshcd_hex_dump("host regs: ", hba->mmio_base,
6601 UFSHCI_REG_SPACE_SIZE);
	}

	spin_unlock(hba->host->host_lock);

	return retval;
}
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
{
	int err = 0;
	u32 mask = 1 << tag;
	unsigned long flags;

	if (!test_bit(tag, &hba->outstanding_tasks))
		goto out;

	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* poll for max. 1 sec to clear door bell register by h/w */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TASK_REQ_DOOR_BELL,
			mask, 0, 1000, 1000, true);
out:
	return err;
}
/**
 * ufshcd_issue_tm_cmd - issues task management commands to controller
 * @hba: per adapter instance
 * @lun_id: LUN ID to which TM command is sent
 * @task_id: task ID to which the TM command is applicable
 * @tm_function: task management function opcode
 * @tm_response: task management service response return value
 *
 * Returns non-zero value on error, zero on success.
 */
static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
		u8 tm_function, u8 *tm_response)
{
	struct utp_task_req_desc *task_req_descp;
	struct utp_upiu_task_req *task_req_upiup;
	struct Scsi_Host *host;
	unsigned long flags;
	int free_slot;
	int err;
	int task_tag;

	host = hba->host;

	/*
	 * Get free slot, sleep if slots are unavailable.
	 * Even though we use wait_event() which sleeps indefinitely,
	 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
	 */
	wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
6658 hba->ufs_stats.clk_hold.ctx = TM_CMD_SEND;
6659 ufshcd_hold_all(hba);
6661 spin_lock_irqsave(host->host_lock, flags);
6662 task_req_descp = hba->utmrdl_base_addr;
6663 task_req_descp += free_slot;
6665 /* Configure task request descriptor */
6666 task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6667 task_req_descp->header.dword_2 =
6668 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
	/* Configure task request UPIU */
	task_req_upiup =
		(struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
	task_tag = hba->nutrs + free_slot;
	task_req_upiup->header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
				  lun_id, task_tag);
	task_req_upiup->header.dword_1 =
		UPIU_HEADER_DWORD(0, tm_function, 0, 0);
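	/*
	 * Note: TM requests are tagged past the transfer request slots
	 * (hba->nutrs + free_slot), so their task tags can never collide
	 * with SCSI command tags.
	 */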
	/*
	 * The host shall provide the same value for LUN field in the basic
	 * header and for Input Parameter.
	 */
6683 task_req_upiup->input_param1 = cpu_to_be32(lun_id);
6684 task_req_upiup->input_param2 = cpu_to_be32(task_id);
	/* send command to the controller */
	__set_bit(free_slot, &hba->outstanding_tasks);

	/* Make sure descriptors are ready before ringing the task doorbell */
	wmb();

	ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
	/* Make sure that doorbell is committed immediately */
	wmb();

	spin_unlock_irqrestore(host->host_lock, flags);
	/* wait until the task management command is completed */
	err = wait_event_timeout(hba->tm_wq,
			test_bit(free_slot, &hba->tm_condition),
			msecs_to_jiffies(TM_CMD_TIMEOUT));
	if (!err) {
		dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
			__func__, tm_function);
		if (ufshcd_clear_tm_cmd(hba, free_slot))
			dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
				 __func__, free_slot);
		err = -ETIMEDOUT;
	} else {
		err = ufshcd_task_req_compl(hba, free_slot, tm_response);
	}

	clear_bit(free_slot, &hba->tm_condition);
	ufshcd_put_tm_slot(hba, free_slot);
	wake_up(&hba->tm_tag_wq);
	hba->ufs_stats.clk_rel.ctx = TM_CMD_SEND;

	ufshcd_release_all(hba);
	return err;
}
/**
 * ufshcd_eh_device_reset_handler - device reset handler registered to
 *                                  scsi layer.
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	unsigned int tag;
	u32 pos;
	int err;
	u8 resp = 0xF;
	struct ufshcd_lrb *lrbp;
	unsigned long flags;

	host = cmd->device->host;
	hba = shost_priv(host);
	tag = cmd->request->tag;

	ufshcd_print_cmd_log(hba);
	lrbp = &hba->lrb[tag];
	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err)
			err = resp;
		goto out;
	}

	/* clear the commands that were pending for corresponding LUN */
	for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
		if (hba->lrb[pos].lun == lrbp->lun) {
			err = ufshcd_clear_cmd(hba, pos);
			if (err)
				break;
		}
	}
	spin_lock_irqsave(host->host_lock, flags);
	ufshcd_transfer_req_compl(hba);
	spin_unlock_irqrestore(host->host_lock, flags);

	hba->req_abort_count = 0;
out:
	if (!err) {
		err = SUCCESS;
	} else {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		err = FAILED;
	}
	return err;
}
static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
{
	struct ufshcd_lrb *lrbp;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutrs) {
		lrbp = &hba->lrb[tag];
		lrbp->req_abort_skip = true;
	}
}
/**
 * ufshcd_abort - abort a specific command
 * @cmd: SCSI command pointer
 *
 * Abort the pending command in device by sending UFS_ABORT_TASK task management
 * command, and in host controller by clearing the door-bell register. There can
 * be race between controller sending the command to the device while abort is
 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
 * really issued and then try to abort it.
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	unsigned long flags;
	unsigned int tag;
	int err = 0;
	int poll_cnt;
	u8 resp = 0xF;
	struct ufshcd_lrb *lrbp;
	u32 reg;

	host = cmd->device->host;
	hba = shost_priv(host);
	tag = cmd->request->tag;
	if (!ufshcd_valid_tag(hba, tag)) {
		dev_err(hba->dev,
			"%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
			__func__, tag, cmd, cmd->request);
		BUG();
	}

	lrbp = &hba->lrb[tag];

	ufshcd_update_error_stats(hba, UFS_ERR_TASK_ABORT);
	/*
	 * Task abort to the device W-LUN is illegal. Should this command
	 * fail, due to spec violation, the scsi error handling next step
	 * will be to send LU reset which, again, is a spec violation.
	 * To avoid these unnecessary/illegal steps we skip to the last error
	 * handling stage: reset and restore.
	 */
	if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
		return ufshcd_eh_host_reset_handler(cmd);
6835 ufshcd_hold_all(hba);
6836 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	/* If command is already aborted/completed, return SUCCESS */
	if (!(test_bit(tag, &hba->outstanding_reqs))) {
		dev_err(hba->dev,
			"%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
			__func__, tag, hba->outstanding_reqs, reg);
		goto out;
	}

	if (!(reg & (1 << tag))) {
		dev_err(hba->dev,
			"%s: cmd was completed, but without a notifying intr, tag = %d",
			__func__, tag);
	}
6851 /* Print Transfer Request of aborted task */
6852 dev_err(hba->dev, "%s: Device abort task at tag %d", __func__, tag);
	/*
	 * Print detailed info about aborted request.
	 * As more than one request might get aborted at the same time,
	 * print full information only for the first aborted request in order
	 * to reduce repeated printouts. For other aborted requests only print
	 * basic details.
	 */
	scsi_print_command(cmd);
	if (!hba->req_abort_count) {
		ufshcd_print_fsm_state(hba);
		ufshcd_print_host_regs(hba);
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_trs(hba, 1 << tag, true);
	} else {
		ufshcd_print_trs(hba, 1 << tag, false);
	}
	hba->req_abort_count++;
	/* Skip task abort in case previous aborts failed and report failure */
	if (lrbp->req_abort_skip) {
		err = -EIO;
		goto out;
	}

	for (poll_cnt = 100; poll_cnt; poll_cnt--) {
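		/*
		 * Poll up to 100 times: UFS_QUERY_TASK tells us whether the
		 * command actually reached the device. If it is still only
		 * pending in the doorbell, give it a short window to either
		 * complete or settle before attempting UFS_ABORT_TASK.
		 */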
6881 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6882 UFS_QUERY_TASK, &resp);
		if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
			/* cmd pending in the device */
			dev_err(hba->dev, "%s: cmd pending in the device. tag = %d",
				__func__, tag);
			break;
		} else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
			/*
			 * cmd not pending in the device, check if it is
			 * in transition.
			 */
			dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.",
				__func__, tag);
			reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
			if (reg & (1 << tag)) {
				/* sleep for max. 200us to stabilize */
				usleep_range(100, 200);
				continue;
			}
			/* command completed already */
			dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.",
				__func__, tag);
			goto out;
		} else {
			dev_err(hba->dev,
				"%s: no response from device. tag = %d, err %d",
				__func__, tag, err);
			if (!err)
				err = resp; /* service response error */
			goto out;
		}
	}

	if (!poll_cnt) {
		err = -EBUSY;
		goto out;
	}

	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
			UFS_ABORT_TASK, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err) {
			err = resp; /* service response error */
			dev_err(hba->dev, "%s: issued. tag = %d, err %d",
				__func__, tag, err);
		}
		goto out;
	}
	err = ufshcd_clear_cmd(hba, tag);
	if (err) {
		dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d",
			__func__, tag, err);
		goto out;
	}

	scsi_dma_unmap(cmd);

	spin_lock_irqsave(host->host_lock, flags);
	ufshcd_outstanding_req_clear(hba, tag);
	hba->lrb[tag].cmd = NULL;
	spin_unlock_irqrestore(host->host_lock, flags);

	clear_bit_unlock(tag, &hba->lrb_in_use);
	wake_up(&hba->dev_cmd.tag_wq);

out:
	if (!err) {
		err = SUCCESS;
	} else {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
		err = FAILED;
	}

	/*
	 * This ufshcd_release_all() corresponds to the original scsi cmd that
	 * got aborted here (as we won't get any IRQ for it).
	 */
	ufshcd_release_all(hba);
	return err;
}
/**
 * ufshcd_host_reset_and_restore - reset and restore host controller
 * @hba: per-adapter instance
 *
 * Note that host controller reset may issue DME_RESET to
 * local and remote (device) Uni-Pro stack and the attributes
 * are reset to default state.
 *
 * Returns zero on success, non-zero on failure
 */
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
{
	int err;
	unsigned long flags;

	/* Reset the host controller */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_hba_stop(hba, false);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* scale up clocks to max frequency before full reinitialization */
	ufshcd_set_clk_freq(hba, true);

	err = ufshcd_hba_enable(hba);
	if (err)
		goto out;

	/* Establish the link again and restore the device */
	err = ufshcd_probe_hba(hba);

	if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) {
		err = -EIO;
		goto out;
	}

	err = ufshcd_vops_crypto_engine_reset(hba);
	if (err)
		dev_err(hba->dev,
			"%s: failed to reset crypto engine %d\n",
			__func__, err);

out:
	if (err)
		dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
	return err;
}
/**
 * ufshcd_reset_and_restore - reset and re-initialize host/device
 * @hba: per-adapter instance
 *
 * Reset and recover device, host and re-establish link. This
 * is helpful to recover the communication in fatal error conditions.
 *
 * Returns zero on success, non-zero on failure
 */
static int ufshcd_reset_and_restore(struct ufs_hba *hba)
{
	int err = 0;
	unsigned long flags;
	int retries = MAX_HOST_RESET_RETRIES;

	do {
		err = ufshcd_vops_full_reset(hba);
		if (err)
			dev_warn(hba->dev, "%s: full reset returned %d\n",
				 __func__, err);

		err = ufshcd_reset_device(hba);
		if (err)
			dev_warn(hba->dev, "%s: device reset failed. err %d\n",
				 __func__, err);

		err = ufshcd_host_reset_and_restore(hba);
	} while (err && --retries);

	/*
	 * There is no point proceeding even after failing
	 * to recover after multiple retries.
	 */
	if (err)
		BUG();

	/*
	 * After reset the door-bell might be cleared, complete
	 * outstanding requests in s/w here.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_transfer_req_compl(hba);
	ufshcd_tmc_handler(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return err;
}
/**
 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	int err = SUCCESS;
	unsigned long flags;
	struct ufs_hba *hba;

	hba = shost_priv(cmd->device->host);

	/*
	 * Check if there is any race with fatal error handling.
	 * If so, wait for it to complete. Even though fatal error
	 * handling does reset and restore in some cases, don't assume
	 * anything out of it. We are just avoiding race here.
	 */
	do {
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (!(work_pending(&hba->eh_work) ||
		      hba->ufshcd_state == UFSHCD_STATE_RESET))
			break;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
		flush_work(&hba->eh_work);
	} while (1);

	/*
	 * we don't know if previous reset had really reset the host controller
	 * or not. So let's force reset here to be sure.
	 */
	hba->ufshcd_state = UFSHCD_STATE_ERROR;
	hba->force_host_reset = true;
	schedule_work(&hba->eh_work);

	/* wait for the reset work to finish */
	do {
		if (!(work_pending(&hba->eh_work) ||
		      hba->ufshcd_state == UFSHCD_STATE_RESET))
			break;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		dev_err(hba->dev, "%s: reset in progress - 2\n", __func__);
		flush_work(&hba->eh_work);
		spin_lock_irqsave(hba->host->host_lock, flags);
	} while (1);

	if (!((hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) &&
	      ufshcd_is_link_active(hba))) {
		err = FAILED;
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return err;
}
/**
 * ufshcd_get_max_icc_level - calculate the ICC level
 * @sup_curr_uA: max. current supported by the regulator
 * @start_scan: row at the desc table to start scan from
 * @buff: power descriptor buffer
 *
 * Returns calculated max ICC level for specific regulator
 */
static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
{
	int i;
	int curr_uA;
	u16 data;
	u16 unit;

	for (i = start_scan; i >= 0; i--) {
		data = be16_to_cpu(*((u16 *)(buff + 2*i)));
		unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
			ATTR_ICC_LVL_UNIT_OFFSET;
		curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
		switch (unit) {
		case UFSHCD_NANO_AMP:
			curr_uA = curr_uA / 1000;
			break;
		case UFSHCD_MILI_AMP:
			curr_uA = curr_uA * 1000;
			break;
		case UFSHCD_AMP:
			curr_uA = curr_uA * 1000 * 1000;
			break;
		case UFSHCD_MICRO_AMP:
		default:
			break;
		}
		if (sup_curr_uA >= curr_uA)
			break;
	}
	if (i < 0) {
		i = 0;
		pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
	}

	return (u32)i;
}
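
/*
 * Worked example for the scan above (layout per the ATTR_ICC_LVL_* masks):
 * an entry whose unit field decodes to UFSHCD_MILI_AMP with a value field of
 * 100 yields curr_uA = 100 * 1000 = 100000 uA; the loop returns the first
 * (i.e. highest) level whose draw fits within sup_curr_uA.
 */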
/**
 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
 * In case regulators are not initialized we'll return 0
 * @hba: per-adapter instance
 * @desc_buf: power descriptor buffer to extract ICC levels from.
 * @len: length of desc_buf
 *
 * Returns calculated ICC level
 */
static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
							u8 *desc_buf, int len)
{
	u32 icc_level = 0;

	if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
	    !hba->vreg_info.vccq2) {
		dev_err(hba->dev,
			"%s: Regulator capability was not set, actvIccLevel=%d",
			__func__, icc_level);
		goto out;
	}
7191 if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
7192 icc_level = ufshcd_get_max_icc_level(
7193 hba->vreg_info.vcc->max_uA,
7194 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
7195 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
	if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vccq->max_uA,
				icc_level,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);

	if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vccq2->max_uA,
				icc_level,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
out:
	return icc_level;
}
static void ufshcd_init_icc_levels(struct ufs_hba *hba)
{
	int ret;
	int buff_len = QUERY_DESC_POWER_MAX_SIZE;
	u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];

	ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
	if (ret) {
		dev_err(hba->dev,
			"%s: Failed reading power descriptor.len = %d ret = %d",
			__func__, buff_len, ret);
		return;
	}

	hba->init_prefetch_data.icc_level =
			ufshcd_find_max_sup_active_icc_level(hba,
			desc_buf, buff_len);
	dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
		__func__, hba->init_prefetch_data.icc_level);

	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
		QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
		&hba->init_prefetch_data.icc_level);

	if (ret)
		dev_err(hba->dev,
			"%s: Failed configuring bActiveICCLevel = %d ret = %d",
			__func__, hba->init_prefetch_data.icc_level, ret);
}
/**
 * ufshcd_scsi_add_wlus - Adds required W-LUs
 * @hba: per-adapter instance
 *
 * UFS device specification requires the UFS devices to support 4 well known
 * logical units:
 *	"REPORT_LUNS" (address: 01h)
 *	"UFS Device" (address: 50h)
 *	"RPMB" (address: 44h)
 *	"BOOT" (address: 30h)
 * UFS device's power management needs to be controlled by "POWER CONDITION"
 * field of SSU (START STOP UNIT) command. But this "power condition" field
 * will take effect only when it is sent to the "UFS device" well known logical
 * unit, hence we require the scsi_device instance to represent this logical
 * unit in order for the UFS host driver to send the SSU command for power
 * management.
 *
 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
 * Block) LU so user space process can control this LU. User space may also
 * want to have access to BOOT LU.
 *
 * This function adds scsi device instances for all of the well known LUs
 * (except "REPORT LUNS" LU).
 *
 * Returns zero on success (all required W-LUs are added successfully),
 * non-zero error value on failure (if failed to add any of the required W-LU).
 */
static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
{
	int ret = 0;
	struct scsi_device *sdev_rpmb;
	struct scsi_device *sdev_boot;

	hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
	if (IS_ERR(hba->sdev_ufs_device)) {
		ret = PTR_ERR(hba->sdev_ufs_device);
		hba->sdev_ufs_device = NULL;
		goto out;
	}
	scsi_device_put(hba->sdev_ufs_device);

	sdev_boot = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
	if (IS_ERR(sdev_boot)) {
		ret = PTR_ERR(sdev_boot);
		goto remove_sdev_ufs_device;
	}
	scsi_device_put(sdev_boot);

	sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
	if (IS_ERR(sdev_rpmb)) {
		ret = PTR_ERR(sdev_rpmb);
		goto remove_sdev_boot;
	}
	scsi_device_put(sdev_rpmb);
	goto out;

remove_sdev_boot:
	scsi_remove_device(sdev_boot);
remove_sdev_ufs_device:
	scsi_remove_device(hba->sdev_ufs_device);
out:
	return ret;
}
/**
 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
 * @hba: per-adapter instance
 *
 * PA_TActivate parameter can be tuned manually if UniPro version is less than
 * 1.61. PA_TActivate needs to be greater than or equal to peer M-PHY's
 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
 * the hibern8 exit latency.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
{
	int ret = 0;
	u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;

	if (!ufshcd_is_unipro_pa_params_tuning_req(hba))
		goto out;

	ret = ufshcd_dme_peer_get(hba,
				  UIC_ARG_MIB_SEL(
					RX_MIN_ACTIVATETIME_CAPABILITY,
					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
				  &peer_rx_min_activatetime);
	if (ret)
		goto out;

	/* make sure proper unit conversion is applied */
	tuned_pa_tactivate =
		((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
		 / PA_TACTIVATE_TIME_UNIT_US);
	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
			     tuned_pa_tactivate);

out:
	return ret;
}
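
/*
 * On the conversion above: RX_MIN_ACTIVATETIME_CAPABILITY and PA_TACTIVATE
 * are expressed in different time units (RX_MIN_ACTIVATETIME_UNIT_US vs
 * PA_TACTIVATE_TIME_UNIT_US), so the peer's capability is rescaled before
 * being programmed. For example, assuming the capability unit is 10x the
 * PA_TACTIVATE unit, a peer value of 1 is programmed as PA_TACTIVATE = 10,
 * so the local timer is never shorter than the peer requires.
 */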
/**
 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
 * @hba: per-adapter instance
 *
 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less
 * than 1.61. PA_Hibern8Time needs to be the maximum of local M-PHY's
 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
 * This optimal value can help reduce the hibern8 exit latency.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
{
	int ret = 0;
	u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
	u32 max_hibern8_time, tuned_pa_hibern8time;

	ret = ufshcd_dme_get(hba,
			     UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
			     &local_tx_hibern8_time_cap);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba,
				  UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
				  &peer_rx_hibern8_time_cap);
	if (ret)
		goto out;

	max_hibern8_time = max(local_tx_hibern8_time_cap,
			       peer_rx_hibern8_time_cap);
	/* make sure proper unit conversion is applied */
	tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
				/ PA_HIBERN8_TIME_UNIT_US);
	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
			     tuned_pa_hibern8time);
out:
	return ret;
}
/**
 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
 *			     less than device PA_TACTIVATE time.
 * @hba: per-adapter instance
 *
 * Some UFS devices require host PA_TACTIVATE to be lower than device
 * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
 * for such devices.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
{
	int ret = 0;
	u32 granularity, peer_granularity;
	u32 pa_tactivate, peer_pa_tactivate;
	u32 pa_tactivate_us, peer_pa_tactivate_us;
	u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
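
	/*
	 * PA_GRANULARITY selects the time unit used by PA_TACTIVATE: values
	 * 1..6 correspond to 1us, 4us, 8us, 16us, 32us and 100us, so
	 * gran_to_us_table above is indexed with (granularity - 1) to convert
	 * attribute ticks into microseconds.
	 */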
	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
			     &granularity);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
				  &peer_granularity);
	if (ret)
		goto out;

	if ((granularity < PA_GRANULARITY_MIN_VAL) ||
	    (granularity > PA_GRANULARITY_MAX_VAL)) {
		dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
			__func__, granularity);
		return -EINVAL;
	}

	if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
	    (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
		dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
			__func__, peer_granularity);
		return -EINVAL;
	}

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
				  &peer_pa_tactivate);
	if (ret)
		goto out;
7441 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
7442 peer_pa_tactivate_us = peer_pa_tactivate *
7443 gran_to_us_table[peer_granularity - 1];
	if (pa_tactivate_us > peer_pa_tactivate_us) {
		u32 new_peer_pa_tactivate;

		new_peer_pa_tactivate = pa_tactivate_us /
			gran_to_us_table[peer_granularity - 1];
		new_peer_pa_tactivate++;
		ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
					  new_peer_pa_tactivate);
	}

out:
	return ret;
}
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
	if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
		ufshcd_tune_pa_tactivate(hba);
		ufshcd_tune_pa_hibern8time(hba);
	}

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
		/* set 1ms timeout for PA_TACTIVATE */
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
		ufshcd_quirk_tune_host_pa_tactivate(hba);

	ufshcd_vops_apply_dev_quirks(hba);
}
static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
{
	int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);

	memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
	memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
	memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
	memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
	memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);

	hba->req_abort_count = 0;
}
static void ufshcd_apply_pm_quirks(struct ufs_hba *hba)
{
	if (hba->dev_quirks & UFS_DEVICE_QUIRK_NO_LINK_OFF) {
		if (ufs_get_pm_lvl_to_link_pwr_state(hba->rpm_lvl) ==
		    UIC_LINK_OFF_STATE) {
			hba->rpm_lvl =
				ufs_get_desired_pm_lvl_for_dev_link_state(
						UFS_SLEEP_PWR_MODE,
						UIC_LINK_HIBERN8_STATE);
			dev_info(hba->dev, "UFS_DEVICE_QUIRK_NO_LINK_OFF enabled, changed rpm_lvl to %d\n",
				hba->rpm_lvl);
		}
		if (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
		    UIC_LINK_OFF_STATE) {
			hba->spm_lvl =
				ufs_get_desired_pm_lvl_for_dev_link_state(
						UFS_SLEEP_PWR_MODE,
						UIC_LINK_HIBERN8_STATE);
			dev_info(hba->dev, "UFS_DEVICE_QUIRK_NO_LINK_OFF enabled, changed spm_lvl to %d\n",
				hba->spm_lvl);
		}
	}
}
/**
 * ufshcd_probe_hba - probe hba to detect device and initialize
 * @hba: per-adapter instance
 *
 * Execute link-startup and verify device initialization
 */
static int ufshcd_probe_hba(struct ufs_hba *hba)
{
	int ret;
	ktime_t start = ktime_get();

	ret = ufshcd_link_startup(hba);
	if (ret)
		goto out;
7528 /* Debug counters initialization */
7529 ufshcd_clear_dbg_ufs_stats(hba);
7530 /* set the default level for urgent bkops */
7531 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
7532 hba->is_urgent_bkops_lvl_checked = false;
7534 /* UniPro link is active now */
7535 ufshcd_set_link_active(hba);
	ret = ufshcd_verify_dev_init(hba);
	if (ret)
		goto out;

	ret = ufshcd_complete_dev_init(hba);
	if (ret)
		goto out;
7545 ufs_advertise_fixup_device(hba);
7546 ufshcd_tune_unipro_params(hba);
7548 ufshcd_apply_pm_quirks(hba);
	ret = ufshcd_set_vccq_rail_unused(hba,
		(hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
	if (ret)
		goto out;
7554 /* UFS device is also active now */
7555 ufshcd_set_ufs_dev_active(hba);
7556 ufshcd_force_reset_auto_bkops(hba);
7557 hba->wlun_dev_clr_ua = true;
	if (ufshcd_get_max_pwr_mode(hba)) {
		dev_err(hba->dev,
			"%s: Failed getting max supported power mode\n",
			__func__);
	} else {
		ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
		if (ret) {
			dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
				__func__, ret);
			goto out;
		}
	}
7572 /* set the state as operational after switching to desired gear */
7573 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
	/*
	 * If we are in error handling context or in power management callbacks
	 * context, no need to scan the host
	 */
	if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
		bool flag;

		/* clear any previous UFS device information */
		memset(&hba->dev_info, 0, sizeof(hba->dev_info));
7583 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
7584 QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
7585 hba->dev_info.f_power_on_wp_en = flag;
7587 if (!hba->is_init_prefetch)
7588 ufshcd_init_icc_levels(hba);
		/* Add required well known logical units to scsi mid layer */
		if (ufshcd_scsi_add_wlus(hba))
			goto out;
7594 /* Initialize devfreq after UFS device is detected */
7595 if (ufshcd_is_clkscaling_supported(hba)) {
7596 memcpy(&hba->clk_scaling.saved_pwr_info.info,
7597 &hba->pwr_info, sizeof(struct ufs_pa_layer_attr));
7598 hba->clk_scaling.saved_pwr_info.is_valid = true;
7599 hba->clk_scaling.is_scaled_up = true;
			if (!hba->devfreq) {
				hba->devfreq = devfreq_add_device(hba->dev,
							&ufs_devfreq_profile,
							"simple_ondemand",
							NULL);
				if (IS_ERR(hba->devfreq)) {
					ret = PTR_ERR(hba->devfreq);
					dev_err(hba->dev, "Unable to register with devfreq %d\n",
						ret);
					goto out;
				}
			}
			hba->clk_scaling.is_allowed = true;
		}

		scsi_scan_host(hba->host);
		pm_runtime_put_sync(hba->dev);
	}

	if (!hba->is_init_prefetch)
		hba->is_init_prefetch = true;
	/*
	 * Enable auto hibern8 if supported, after full host and
	 * device initialization.
	 */
	if (ufshcd_is_auto_hibern8_supported(hba))
		ufshcd_set_auto_hibern8_timer(hba,
				hba->hibern8_on_idle.delay_ms);

out:
	/*
	 * If we failed to initialize the device or the device is not
	 * present, turn off the power/clocks etc.
	 */
	if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
		pm_runtime_put_sync(hba->dev);
		ufshcd_hba_exit(hba);
	}

	trace_ufshcd_init(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
/**
 * ufshcd_async_scan - asynchronous execution for probing hba
 * @data: data pointer to pass to this function
 * @cookie: cookie data
 */
static void ufshcd_async_scan(void *data, async_cookie_t cookie)
{
	struct ufs_hba *hba = (struct ufs_hba *)data;

	/*
	 * Don't allow clock gating and hibern8 enter for faster device
	 * detection.
	 */
	ufshcd_hold_all(hba);
	ufshcd_probe_hba(hba);
	ufshcd_release_all(hba);
}
/**
 * ufshcd_query_ioctl - perform user read queries
 * @hba: per-adapter instance
 * @lun: used for lun specific queries
 * @buffer: user space buffer for reading and submitting query data and params
 * @return: 0 for success negative error code otherwise
 *
 * Expected/Submitted buffer structure is struct ufs_ioctl_query_data.
 * It will read the opcode, idn and buf_length parameters, and put the
 * response in the buffer field while updating the used size in buf_length.
 */
static int ufshcd_query_ioctl(struct ufs_hba *hba, u8 lun, void __user *buffer)
{
	struct ufs_ioctl_query_data *ioctl_data;
	int err = 0;
	int length = 0;
	void *data_ptr;
	bool flag;
	u32 att;
	u8 index = 0;
	u8 *desc = NULL;

	ioctl_data = kzalloc(sizeof(struct ufs_ioctl_query_data), GFP_KERNEL);
	if (!ioctl_data) {
		dev_err(hba->dev, "%s: Failed allocating %zu bytes\n", __func__,
			sizeof(struct ufs_ioctl_query_data));
		err = -ENOMEM;
		goto out;
	}

	/* extract params from user buffer */
	err = copy_from_user(ioctl_data, buffer,
			     sizeof(struct ufs_ioctl_query_data));
	if (err) {
		dev_err(hba->dev,
			"%s: Failed copying buffer from user, err %d\n",
			__func__, err);
		goto out_release_mem;
	}
7703 /* verify legal parameters & send query */
7704 switch (ioctl_data->opcode) {
7705 case UPIU_QUERY_OPCODE_READ_DESC:
7706 switch (ioctl_data->idn) {
7707 case QUERY_DESC_IDN_DEVICE:
7708 case QUERY_DESC_IDN_CONFIGURAION:
7709 case QUERY_DESC_IDN_INTERCONNECT:
7710 case QUERY_DESC_IDN_GEOMETRY:
7711 case QUERY_DESC_IDN_POWER:
			index = 0;
			break;
		case QUERY_DESC_IDN_UNIT:
			if (!ufs_is_valid_unit_desc_lun(lun)) {
				dev_err(hba->dev,
					"%s: No unit descriptor for lun 0x%x\n",
					__func__, lun);
				err = -EINVAL;
				goto out_release_mem;
			}
			index = lun;
			break;
		default:
			goto out_einval;
		}
		length = min_t(int, QUERY_DESC_MAX_SIZE,
			       ioctl_data->buf_size);
		desc = kzalloc(length, GFP_KERNEL);
		if (!desc) {
			dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
				__func__, length);
			err = -ENOMEM;
			goto out_release_mem;
		}
		err = ufshcd_query_descriptor(hba, ioctl_data->opcode,
				ioctl_data->idn, index, 0, desc, &length);
		break;
7739 case UPIU_QUERY_OPCODE_READ_ATTR:
7740 switch (ioctl_data->idn) {
7741 case QUERY_ATTR_IDN_BOOT_LU_EN:
7742 case QUERY_ATTR_IDN_POWER_MODE:
7743 case QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
7744 case QUERY_ATTR_IDN_OOO_DATA_EN:
7745 case QUERY_ATTR_IDN_BKOPS_STATUS:
7746 case QUERY_ATTR_IDN_PURGE_STATUS:
7747 case QUERY_ATTR_IDN_MAX_DATA_IN:
7748 case QUERY_ATTR_IDN_MAX_DATA_OUT:
7749 case QUERY_ATTR_IDN_REF_CLK_FREQ:
7750 case QUERY_ATTR_IDN_CONF_DESC_LOCK:
7751 case QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
7752 case QUERY_ATTR_IDN_EE_CONTROL:
7753 case QUERY_ATTR_IDN_EE_STATUS:
		case QUERY_ATTR_IDN_SECONDS_PASSED:
			index = 0;
			break;
		case QUERY_ATTR_IDN_DYN_CAP_NEEDED:
		case QUERY_ATTR_IDN_CORR_PRG_BLK_NUM:
			index = lun;
			break;
		default:
			goto out_einval;
		}
		err = ufshcd_query_attr(hba, ioctl_data->opcode, ioctl_data->idn,
					index, 0, &att);
		break;
	case UPIU_QUERY_OPCODE_WRITE_ATTR:
		err = copy_from_user(&att,
				buffer + sizeof(struct ufs_ioctl_query_data),
				sizeof(u32));
		if (err) {
			dev_err(hba->dev,
				"%s: Failed copying buffer from user, err %d\n",
				__func__, err);
			goto out_release_mem;
		}

		switch (ioctl_data->idn) {
		case QUERY_ATTR_IDN_BOOT_LU_EN:
			index = 0;
			if (att > QUERY_ATTR_IDN_BOOT_LU_EN_MAX) {
				dev_err(hba->dev,
					"%s: Illegal ufs query ioctl data, opcode 0x%x, idn 0x%x, att 0x%x\n",
					__func__, ioctl_data->opcode,
					(unsigned int)ioctl_data->idn, att);
				err = -EINVAL;
				goto out_release_mem;
			}
			break;
		default:
			goto out_einval;
		}
		err = ufshcd_query_attr(hba, ioctl_data->opcode,
					ioctl_data->idn, index, 0, &att);
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		switch (ioctl_data->idn) {
		case QUERY_FLAG_IDN_FDEVICEINIT:
		case QUERY_FLAG_IDN_PERMANENT_WPE:
		case QUERY_FLAG_IDN_PWR_ON_WPE:
		case QUERY_FLAG_IDN_BKOPS_EN:
		case QUERY_FLAG_IDN_PURGE_ENABLE:
		case QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL:
		case QUERY_FLAG_IDN_BUSY_RTC:
			break;
		default:
			goto out_einval;
		}
		err = ufshcd_query_flag_retry(hba, ioctl_data->opcode,
					      ioctl_data->idn, &flag);
		break;
	default:
		goto out_einval;
	}

	if (err) {
		dev_err(hba->dev, "%s: Query for idn %d failed\n", __func__,
			ioctl_data->idn);
		goto out_release_mem;
	}
	/*
	 * copy response data
	 * As we might end up reading less data than what is specified in
	 * "ioctl_data->buf_size". So we are updating "ioctl_data->
	 * buf_size" to what exactly we have read.
	 */
	switch (ioctl_data->opcode) {
	case UPIU_QUERY_OPCODE_READ_DESC:
		ioctl_data->buf_size = min_t(int, ioctl_data->buf_size, length);
		data_ptr = desc;
		break;
	case UPIU_QUERY_OPCODE_READ_ATTR:
		ioctl_data->buf_size = sizeof(u32);
		data_ptr = &att;
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		ioctl_data->buf_size = 1;
		data_ptr = &flag;
		break;
	case UPIU_QUERY_OPCODE_WRITE_ATTR:
		goto out_release_mem;
	default:
		goto out_einval;
	}

	/* copy to user */
	err = copy_to_user(buffer, ioctl_data,
			   sizeof(struct ufs_ioctl_query_data));
	if (err)
		dev_err(hba->dev, "%s: Failed copying back to user.\n",
			__func__);
	err = copy_to_user(buffer + sizeof(struct ufs_ioctl_query_data),
			   data_ptr, ioctl_data->buf_size);
	if (err)
		dev_err(hba->dev, "%s: err %d copying back to user.\n",
			__func__, err);
	goto out_release_mem;

out_einval:
	dev_err(hba->dev,
		"%s: illegal ufs query ioctl data, opcode 0x%x, idn 0x%x\n",
		__func__, ioctl_data->opcode, (unsigned int)ioctl_data->idn);
	err = -EINVAL;
out_release_mem:
	kfree(ioctl_data);
	kfree(desc);
out:
	return err;
}
/**
 * ufshcd_ioctl - ufs ioctl callback registered in scsi_host
 * @dev: scsi device required for per LUN queries
 * @cmd: command opcode
 * @buffer: user space buffer for transferring data
 *
 * Supported commands:
 * UFS_IOCTL_QUERY
 */
static int ufshcd_ioctl(struct scsi_device *dev, int cmd, void __user *buffer)
{
	struct ufs_hba *hba = shost_priv(dev->host);
	int err = 0;

	BUG_ON(!hba);
	if (!buffer) {
		dev_err(hba->dev, "%s: User buffer is NULL!\n", __func__);
		return -EINVAL;
	}

	switch (cmd) {
	case UFS_IOCTL_QUERY:
		pm_runtime_get_sync(hba->dev);
		err = ufshcd_query_ioctl(hba,
				ufshcd_scsi_to_upiu_lun(dev->lun), buffer);
		pm_runtime_put_sync(hba->dev);
		break;
	default:
		err = -ENOIOCTLCMD;
		dev_dbg(hba->dev, "%s: Unsupported ioctl cmd %d\n", __func__,
			cmd);
		break;
	}

	return err;
}
static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
{
	unsigned long flags;
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	int index;
	bool found = false;

	if (!scmd || !scmd->device || !scmd->device->host)
		return BLK_EH_NOT_HANDLED;

	host = scmd->device->host;
	hba = shost_priv(host);
	if (!hba)
		return BLK_EH_NOT_HANDLED;

	spin_lock_irqsave(host->host_lock, flags);

	for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
		if (hba->lrb[index].cmd == scmd) {
			found = true;
			break;
		}
	}

	spin_unlock_irqrestore(host->host_lock, flags);

	/*
	 * Bypass SCSI error handling and reset the block layer timer if this
	 * SCSI command was not actually dispatched to UFS driver, otherwise
	 * let SCSI layer handle the error as usual.
	 */
	return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
}
static struct scsi_host_template ufshcd_driver_template = {
	.module			= THIS_MODULE,
	.name			= UFSHCD,
	.proc_name		= UFSHCD,
	.queuecommand		= ufshcd_queuecommand,
	.slave_alloc		= ufshcd_slave_alloc,
	.slave_configure	= ufshcd_slave_configure,
	.slave_destroy		= ufshcd_slave_destroy,
	.change_queue_depth	= ufshcd_change_queue_depth,
	.eh_abort_handler	= ufshcd_abort,
	.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
	.eh_host_reset_handler	= ufshcd_eh_host_reset_handler,
	.eh_timed_out		= ufshcd_eh_timed_out,
	.ioctl			= ufshcd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= ufshcd_ioctl,
#endif
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
	.can_queue		= UFSHCD_CAN_QUEUE,
	.max_host_blocked	= 1,
	.track_queue_depth	= 1,
};
static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
		int ua)
{
	int ret = 0;

	/*
	 * The "set_load" operation is only required on regulators for which
	 * a current limit (max_uA) has been specified; a zero max_uA may
	 * otherwise cause unexpected behavior when the regulator is enabled
	 * or set to high power mode.
	 */
	if (!vreg->max_uA)
		return 0;

	ret = regulator_set_load(vreg->reg, ua);
	if (ret < 0) {
		dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
			__func__, vreg->name, ua, ret);
	}

	return ret;
}
static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg)
{
	if (!vreg)
		return 0;
	else if (vreg->unused)
		return 0;
	else
		return ufshcd_config_vreg_load(hba->dev, vreg,
					       UFS_VREG_LPM_LOAD_UA);
}

static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg)
{
	if (!vreg)
		return 0;
	else if (vreg->unused)
		return 0;
	else
		return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
}
static int ufshcd_config_vreg(struct device *dev,
		struct ufs_vreg *vreg, bool on)
{
	int ret = 0;
	struct regulator *reg;
	const char *name;
	int min_uV, uA_load;

	BUG_ON(!vreg);

	reg = vreg->reg;
	name = vreg->name;

	if (regulator_count_voltages(reg) > 0) {
		uA_load = on ? vreg->max_uA : 0;
		ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
		if (ret)
			goto out;

		if (vreg->min_uV && vreg->max_uV) {
			min_uV = on ? vreg->min_uV : 0;
			ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
			if (ret) {
				dev_err(dev,
					"%s: %s set voltage failed, err=%d\n",
					__func__, name, ret);
				goto out;
			}
		}
	}
out:
	return ret;
}
static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg)
		goto out;
	else if (vreg->enabled || vreg->unused)
		goto out;

	ret = ufshcd_config_vreg(dev, vreg, true);
	if (!ret)
		ret = regulator_enable(vreg->reg);

	if (!ret)
		vreg->enabled = true;
	else
		dev_err(dev, "%s: %s enable failed, err=%d\n",
			__func__, vreg->name, ret);
out:
	return ret;
}

static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg)
		goto out;
	else if (!vreg->enabled || vreg->unused)
		goto out;

	ret = regulator_disable(vreg->reg);

	if (!ret) {
		/* ignore errors on applying disable config */
		ufshcd_config_vreg(dev, vreg, false);
		vreg->enabled = false;
	} else {
		dev_err(dev, "%s: %s disable failed, err=%d\n",
			__func__, vreg->name, ret);
	}
out:
	return ret;
}
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
{
	int ret = 0;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (!info)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vcc, on);
	if (ret)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vccq, on);
	if (ret)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
	if (ret)
		goto out;

out:
	if (ret) {
		ufshcd_toggle_vreg(dev, info->vccq2, false);
		ufshcd_toggle_vreg(dev, info->vccq, false);
		ufshcd_toggle_vreg(dev, info->vcc, false);
	}
	return ret;
}
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
{
	struct ufs_vreg_info *info = &hba->vreg_info;
	int ret = 0;

	if (info->vdd_hba) {
		ret = ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);

		if (!ret)
			ufshcd_vops_update_sec_cfg(hba, on);
	}

	return ret;
}
static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg)
		goto out;

	vreg->reg = devm_regulator_get(dev, vreg->name);
	if (IS_ERR(vreg->reg)) {
		ret = PTR_ERR(vreg->reg);
		dev_err(dev, "%s: %s get failed, err=%d\n",
			__func__, vreg->name, ret);
	}
out:
	return ret;
}
static int ufshcd_init_vreg(struct ufs_hba *hba)
{
	int ret = 0;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (!info)
		goto out;

	ret = ufshcd_get_vreg(dev, info->vcc);
	if (ret)
		goto out;

	ret = ufshcd_get_vreg(dev, info->vccq);
	if (ret)
		goto out;

	ret = ufshcd_get_vreg(dev, info->vccq2);
out:
	return ret;
}

static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (info)
		return ufshcd_get_vreg(hba->dev, info->vdd_hba);

	return 0;
}
static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
{
	int ret = 0;
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (!info)
		goto out;
	else if (!info->vccq)
		goto out;

	if (unused) {
		/* shut off the rail here */
		ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
		/*
		 * Mark this rail as no longer used, so it doesn't get enabled
		 * later by mistake.
		 */
		if (!ret)
			info->vccq->unused = true;
	} else {
		/*
		 * rail should have been already enabled hence just make sure
		 * that unused flag is cleared.
		 */
		info->vccq->unused = false;
	}
out:
	return ret;
}
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
			       bool skip_ref_clk, bool is_gating_context)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;
	unsigned long flags;
	ktime_t start = ktime_get();
	bool clk_state_changed = false;

	if (!head || list_empty(head))
		goto out;
	/* call vendor specific bus vote before enabling the clocks */
	if (on) {
		ret = ufshcd_vops_set_bus_vote(hba, on);
		if (ret)
			return ret;
	}

	/*
	 * vendor specific setup_clocks ops may depend on clocks managed by
	 * this standard driver hence call the vendor specific setup_clocks
	 * before disabling the clocks managed here.
	 */
	if (!on) {
		ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context);
		if (ret)
			return ret;
	}
8254 list_for_each_entry(clki, head, list) {
8255 if (!IS_ERR_OR_NULL(clki->clk)) {
8256 if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
8259 clk_state_changed = on ^ clki->enabled;
			if (on && !clki->enabled) {
				ret = clk_prepare_enable(clki->clk);
				if (ret) {
					dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
						__func__, clki->name, ret);
					goto out;
				}
			} else if (!on && clki->enabled) {
				clk_disable_unprepare(clki->clk);
			}
			clki->enabled = on;
			dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
				clki->name, on ? "en" : "dis");
		}
	}
	/*
	 * vendor specific setup_clocks ops may depend on clocks managed by
	 * this standard driver hence call the vendor specific setup_clocks
	 * after enabling the clocks managed here.
	 */
	if (on) {
		ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context);
		if (ret)
			goto out;
	}

	/*
	 * call vendor specific bus vote to remove the vote after
	 * disabling the clocks.
	 */
	if (!on)
		ret = ufshcd_vops_set_bus_vote(hba, on);

out:
	if (ret) {
		/* Can't do much if this fails */
		(void) ufshcd_vops_set_bus_vote(hba, false);
		list_for_each_entry(clki, head, list) {
			if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
				clk_disable_unprepare(clki->clk);
		}
	} else if (!ret && on) {
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->clk_gating.state = CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		/* restore the secure configuration as clocks are enabled */
		ufshcd_vops_update_sec_cfg(hba, true);
	}

	if (clk_state_changed)
		trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
			(on ? "on" : "off"),
			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
	return ret;
}
static int ufshcd_enable_clocks(struct ufs_hba *hba)
{
	return ufshcd_setup_clocks(hba, true, false, false);
}

static int ufshcd_disable_clocks(struct ufs_hba *hba,
				 bool is_gating_context)
{
	return ufshcd_setup_clocks(hba, false, false, is_gating_context);
}

static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
					      bool is_gating_context)
{
	return ufshcd_setup_clocks(hba, false, true, is_gating_context);
}
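
/*
 * The skip_ref_clk variant above exists because the device reference clock
 * must be kept running for as long as the UniPro link is active; only the
 * remaining bus/core clocks may be gated in that state.
 */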
static int ufshcd_init_clocks(struct ufs_hba *hba)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct device *dev = hba->dev;
	struct list_head *head = &hba->clk_list_head;

	if (!head || list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!clki->name)
			continue;

		clki->clk = devm_clk_get(dev, clki->name);
		if (IS_ERR(clki->clk)) {
			ret = PTR_ERR(clki->clk);
			dev_err(dev, "%s: %s clk get failed, %d\n",
				__func__, clki->name, ret);
			goto out;
		}

		if (clki->max_freq) {
			ret = clk_set_rate(clki->clk, clki->max_freq);
			if (ret) {
				dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
					__func__, clki->name,
					clki->max_freq, ret);
				goto out;
			}
			clki->curr_freq = clki->max_freq;
		}
		dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
			clki->name, clk_get_rate(clki->clk));
	}
out:
	return ret;
}
static int ufshcd_variant_hba_init(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->var || !hba->var->vops)
		goto out;

	err = ufshcd_vops_init(hba);
	if (err)
		goto out;

	err = ufshcd_vops_setup_regulators(hba, true);
	if (err)
		goto out_exit;

	goto out;

out_exit:
	ufshcd_vops_exit(hba);
out:
	if (err)
		dev_err(hba->dev, "%s: variant %s init failed err %d\n",
			__func__, ufshcd_get_var_name(hba), err);
	return err;
}

static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
{
	if (!hba->var || !hba->var->vops)
		return;

	ufshcd_vops_setup_regulators(hba, false);

	ufshcd_vops_exit(hba);
}
static int ufshcd_hba_init(struct ufs_hba *hba)
{
	int err;

	/*
	 * Handle host controller power separately from the UFS device power
	 * rails as it will help controlling the UFS host controller power
	 * collapse easily which is different than UFS device power collapse.
	 * Also, enable the host controller power before we go ahead with rest
	 * of the initialization here.
	 */
	err = ufshcd_init_hba_vreg(hba);
	if (err)
		goto out;
	err = ufshcd_setup_hba_vreg(hba, true);
	if (err)
		goto out;
8431 err = ufshcd_init_clocks(hba);
8433 goto out_disable_hba_vreg;
8435 err = ufshcd_enable_clocks(hba);
8437 goto out_disable_hba_vreg;
8439 err = ufshcd_init_vreg(hba);
8441 goto out_disable_clks;
8443 err = ufshcd_setup_vreg(hba, true);
8445 goto out_disable_clks;
8447 err = ufshcd_variant_hba_init(hba);
8449 goto out_disable_vreg;
	hba->is_powered = true;
	goto out;

out_disable_vreg:
	ufshcd_setup_vreg(hba, false);
out_disable_clks:
	ufshcd_disable_clocks(hba, false);
out_disable_hba_vreg:
	ufshcd_setup_hba_vreg(hba, false);
out:
	return err;
}
static void ufshcd_hba_exit(struct ufs_hba *hba)
{
	if (hba->is_powered) {
		ufshcd_variant_hba_exit(hba);
		ufshcd_setup_vreg(hba, false);
		if (ufshcd_is_clkscaling_supported(hba)) {
			if (hba->devfreq)
				ufshcd_suspend_clkscaling(hba);
			if (hba->clk_scaling.workq)
				destroy_workqueue(hba->clk_scaling.workq);
		}
		ufshcd_disable_clocks(hba, false);
		ufshcd_setup_hba_vreg(hba, false);
		hba->is_powered = false;
	}
}
static int
ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
{
	unsigned char cmd[6] = {REQUEST_SENSE,
				0,
				0,
				0,
				UFSHCD_REQ_SENSE_SIZE,
				0};
	char *buffer;
	int ret;

	buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto out;
	}

	ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
				     UFSHCD_REQ_SENSE_SIZE, NULL,
				     msecs_to_jiffies(1000), 3, NULL, REQ_PM);
	if (ret)
		pr_err("%s: failed with err %d\n", __func__, ret);

	kfree(buffer);
out:
	return ret;
}
/**
 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
 *			     power mode
 * @hba: per adapter instance
 * @pwr_mode: device power mode to set
 *
 * Returns 0 if requested power mode is set successfully
 * Returns non-zero if failed to set the requested power mode
 */
static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
				   enum ufs_dev_pwr_mode pwr_mode)
{
	unsigned char cmd[6] = { START_STOP };
	struct scsi_sense_hdr sshdr;
	struct scsi_device *sdp;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(hba->host->host_lock, flags);
	sdp = hba->sdev_ufs_device;
	if (sdp) {
		ret = scsi_device_get(sdp);
		if (!ret && !scsi_device_online(sdp)) {
			ret = -ENODEV;
			scsi_device_put(sdp);
		}
	} else {
		ret = -ENODEV;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (ret)
		return ret;
	/*
	 * If scsi commands fail, the scsi mid-layer schedules scsi error-
	 * handling, which would wait for host to be resumed. Since we know
	 * we are functional while we are here, skip host resume in error
	 * handling context.
	 */
	hba->host->eh_noresume = 1;
	if (hba->wlun_dev_clr_ua) {
		ret = ufshcd_send_request_sense(hba, sdp);
		if (ret)
			goto out;
		/* Unit attention condition is cleared now */
		hba->wlun_dev_clr_ua = false;
	}

	cmd[4] = pwr_mode << 4;
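	/*
	 * The POWER CONDITION field of the START STOP UNIT CDB lives in
	 * bits 7:4 of byte 4, hence the shift by 4 above.
	 */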
	/*
	 * Current function would be generally called from the power management
	 * callbacks hence set the REQ_PM flag so that it doesn't resume the
	 * already suspended children.
	 */
	ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
				     START_STOP_TIMEOUT, 0, NULL, REQ_PM);
	if (ret) {
		sdev_printk(KERN_WARNING, sdp,
			    "START_STOP failed for power mode: %d, result %x\n",
			    pwr_mode, ret);
		if (driver_byte(ret) & DRIVER_SENSE)
			scsi_print_sense_hdr(sdp, NULL, &sshdr);
	}

	if (!ret)
		hba->curr_dev_pwr_mode = pwr_mode;
out:
	scsi_device_put(sdp);
	hba->host->eh_noresume = 0;
	return ret;
}
static int ufshcd_link_state_transition(struct ufs_hba *hba,
					enum uic_link_state req_link_state,
					int check_for_bkops)
{
	int ret = 0;

	if (req_link_state == hba->uic_link_state)
		goto out;

	if (req_link_state == UIC_LINK_HIBERN8_STATE) {
		ret = ufshcd_uic_hibern8_enter(hba);
		if (!ret)
			ufshcd_set_link_hibern8(hba);
		else
			goto out;
	}
	/*
	 * If autobkops is enabled, link can't be turned off because
	 * turning off the link would also turn off the device.
	 */
	else if ((req_link_state == UIC_LINK_OFF_STATE) &&
		 (!check_for_bkops || (check_for_bkops &&
		  !hba->auto_bkops_enabled))) {
		/*
		 * Let's make sure that link is in low power mode, we are doing
		 * this currently by putting the link in Hibern8. The other way
		 * to put the link in low power mode is to send the DME end
		 * point to device and then send the DME reset command to local
		 * unipro. But putting the link in hibern8 is much faster.
		 */
		ret = ufshcd_uic_hibern8_enter(hba);
		if (ret)
			goto out;
		/*
		 * Change controller state to "reset state" which
		 * should also put the link in off/reset state
		 */
		ufshcd_hba_stop(hba, true);
		/*
		 * TODO: Check if we need any delay to make sure that
		 * controller is reset
		 */
		ufshcd_set_link_off(hba);
	}

out:
	return ret;
}
static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
{
	/*
	 * It seems some UFS devices may keep drawing more than sleep current
	 * (at least for 500us) from UFS rails (especially from VCCQ rail).
	 * To avoid this situation, add 2ms delay before putting these UFS
	 * rails in LPM mode.
	 */
	if (!ufshcd_is_link_active(hba))
		usleep_range(2000, 2100);

	/*
	 * If UFS device is in UFS_Sleep state, turn off VCC rail to save some
	 * power.
	 *
	 * If UFS device and link is in OFF state, all power supplies (VCC,
	 * VCCQ, VCCQ2) can be turned off if power on write protect is not
	 * required. If UFS link is inactive (Hibern8 or OFF state) and device
	 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
	 *
	 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
	 * in low power state which would save some power.
	 */
	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ufshcd_setup_vreg(hba, false);
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
		if (!ufshcd_is_link_active(hba)) {
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
		}
	}
}
static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
{
	int ret = 0;

	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ret = ufshcd_setup_vreg(hba, true);
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		if (!ret && !ufshcd_is_link_active(hba)) {
			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
			if (ret)
				goto vcc_disable;
			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
			if (ret)
				goto vccq_lpm;
		}
		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
	}
	goto out;

vccq_lpm:
	ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
vcc_disable:
	ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
out:
	return ret;
}
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba) ||
	    (ufshcd_is_link_hibern8(hba)
	     && ufshcd_is_power_collapse_during_hibern8_allowed(hba)))
		ufshcd_setup_hba_vreg(hba, false);
}

static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba) ||
	    (ufshcd_is_link_hibern8(hba)
	     && ufshcd_is_power_collapse_during_hibern8_allowed(hba)))
		ufshcd_setup_hba_vreg(hba, true);
}
/**
 * ufshcd_suspend - helper function for suspend operations
 * @hba: per adapter instance
 * @pm_op: desired low power operation type
 *
 * This function will try to put the UFS device and link into low power
 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
 * (System PM level).
 *
 * If this function is called during shutdown, it will make sure that
 * both UFS device and UFS link is powered off.
 *
 * NOTE: UFS device & link must be active before we enter in this function.
 *
 * Returns 0 for success and non-zero for failure
 */
static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret = 0;
	enum ufs_pm_level pm_lvl;
	enum ufs_dev_pwr_mode req_dev_pwr_mode;
	enum uic_link_state req_link_state;

	hba->pm_op_in_progress = 1;
	if (!ufshcd_is_shutdown_pm(pm_op)) {
		pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
			 hba->rpm_lvl : hba->spm_lvl;
		req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
		req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
	} else {
		req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
		req_link_state = UIC_LINK_OFF_STATE;
	}
	/*
	 * If we can't transition into any of the low power modes
	 * just gate the clocks.
	 */
	WARN_ON(hba->hibern8_on_idle.is_enabled &&
		hba->hibern8_on_idle.active_reqs);
	ufshcd_hold_all(hba);
	hba->clk_gating.is_suspended = true;
	hba->hibern8_on_idle.is_suspended = true;
8756 if (hba->clk_scaling.is_allowed) {
8757 cancel_work_sync(&hba->clk_scaling.suspend_work);
8758 cancel_work_sync(&hba->clk_scaling.resume_work);
8759 ufshcd_suspend_clkscaling(hba);
8762	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
8763	    req_link_state == UIC_LINK_ACTIVE_STATE) {
8764		goto disable_clks;
8765	}
8767	if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
8768	    (req_link_state == hba->uic_link_state))
8769		goto enable_gating;
8771 /* UFS device & link must be active before we enter in this function */
8772	if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
8773		ret = -EINVAL;
8774		goto enable_gating;
8775	}
8777 if (ufshcd_is_runtime_pm(pm_op)) {
8778 if (ufshcd_can_autobkops_during_suspend(hba)) {
8780 * The device is idle with no requests in the queue,
8781 * allow background operations if bkops status shows
8782 * that performance might be impacted.
8784			ret = ufshcd_urgent_bkops(hba);
8785			if (ret)
8786				goto enable_gating;
8787		} else {
8788			/* make sure that auto bkops is disabled */
8789			ufshcd_disable_auto_bkops(hba);
8790		}
8791	}
8793 if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
8794 ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
8795 !ufshcd_is_runtime_pm(pm_op))) {
8796 /* ensure that bkops is disabled */
8797 ufshcd_disable_auto_bkops(hba);
8798		ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
8799		if (ret)
8800			goto enable_gating;
8801	}
8803	ret = ufshcd_link_state_transition(hba, req_link_state, 1);
8804	if (ret)
8805		goto set_dev_active;
8807 if (ufshcd_is_link_hibern8(hba) &&
8808 ufshcd_is_hibern8_on_idle_allowed(hba))
8809 hba->hibern8_on_idle.state = HIBERN8_ENTERED;
8811	ufshcd_vreg_set_lpm(hba);
8813 disable_clks:
8814	/*
8815	 * Call vendor specific suspend callback. As these callbacks may access
8816	 * vendor specific host controller register space, call them while the
8817	 * host clocks are still ON.
8818	 */
8819	ret = ufshcd_vops_suspend(hba, pm_op);
8820	if (ret)
8821		goto set_link_active;
8823	if (!ufshcd_is_link_active(hba))
8824		ret = ufshcd_disable_clocks(hba, false);
8825	else
8826		/* If link is active, device ref_clk can't be switched off */
8827		ret = ufshcd_disable_clocks_skip_ref_clk(hba, false);
8828	if (ret)
8829		goto set_link_active;
8831 if (ufshcd_is_clkgating_allowed(hba)) {
8832 hba->clk_gating.state = CLKS_OFF;
8833 trace_ufshcd_clk_gating(dev_name(hba->dev),
8834 hba->clk_gating.state);
8837	 * Disable the host irq, as there won't be any host controller
8838	 * transactions expected till resume.
8840 ufshcd_disable_irq(hba);
8841 /* Put the host controller in low power mode if possible */
8842	ufshcd_hba_vreg_set_lpm(hba);
8843	goto out;
8845 set_link_active:
8846	if (hba->clk_scaling.is_allowed)
8847		ufshcd_resume_clkscaling(hba);
8848	ufshcd_vreg_set_hpm(hba);
8849	if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) {
8850		ufshcd_set_link_active(hba);
8851	} else if (ufshcd_is_link_off(hba)) {
8852		ufshcd_update_error_stats(hba, UFS_ERR_VOPS_SUSPEND);
8853		ufshcd_host_reset_and_restore(hba);
8854	}
8855 set_dev_active:
8856	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
8857		ufshcd_disable_auto_bkops(hba);
8858 enable_gating:
8859	if (hba->clk_scaling.is_allowed)
8860		ufshcd_resume_clkscaling(hba);
8861	hba->hibern8_on_idle.is_suspended = false;
8862	hba->clk_gating.is_suspended = false;
8863	ufshcd_release_all(hba);
8864 out:
8865	hba->pm_op_in_progress = 0;
8867	if (ret)
8868		ufshcd_update_error_stats(hba, UFS_ERR_SUSPEND);
8869	return ret;
8870 }
8874 * ufshcd_resume - helper function for resume operations
8875 * @hba: per adapter instance
8876 * @pm_op: runtime PM or system PM
8878 * This function basically brings the UFS device, UniPro link and controller
8879 * to active state.
8881 * Returns 0 for success and non-zero for failure
8883 static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8884 {
8885	int ret;
8886	enum uic_link_state old_link_state;
8888 hba->pm_op_in_progress = 1;
8889 old_link_state = hba->uic_link_state;
8891 ufshcd_hba_vreg_set_hpm(hba);
8892 /* Make sure clocks are enabled before accessing controller */
8893	ret = ufshcd_enable_clocks(hba);
8894	if (ret)
8895		goto out;
8897 /* enable the host irq as host controller would be active soon */
8898 ufshcd_enable_irq(hba);
8900	ret = ufshcd_vreg_set_hpm(hba);
8901	if (ret)
8902		goto disable_irq_and_vops_clks;
8905 * Call vendor specific resume callback. As these callbacks may access
8906 * vendor specific host controller register space call them when the
8907 * host clocks are ON.
8909	ret = ufshcd_vops_resume(hba, pm_op);
8910	if (ret)
8911		goto disable_vreg;
8913 if (ufshcd_is_link_hibern8(hba)) {
8914		ret = ufshcd_uic_hibern8_exit(hba);
8915		if (!ret) {
8916			ufshcd_set_link_active(hba);
8917			if (ufshcd_is_hibern8_on_idle_allowed(hba))
8918				hba->hibern8_on_idle.state = HIBERN8_EXITED;
8919		} else {
8920			goto vendor_suspend;
8921		}
8922 } else if (ufshcd_is_link_off(hba)) {
8924 * A full initialization of the host and the device is required
8925 * since the link was put to off during suspend.
8927 ret = ufshcd_reset_and_restore(hba);
8929 * ufshcd_reset_and_restore() should have already
8930 * set the link state as active
8932 if (ret || !ufshcd_is_link_active(hba))
8933 goto vendor_suspend;
8934 /* mark link state as hibern8 exited */
8935 if (ufshcd_is_hibern8_on_idle_allowed(hba))
8936 hba->hibern8_on_idle.state = HIBERN8_EXITED;
8939 if (!ufshcd_is_ufs_dev_active(hba)) {
8940		ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
8941		if (ret)
8942			goto set_old_link_state;
8943	}
8945	if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
8946		ufshcd_enable_auto_bkops(hba);
8947	else
8948		/*
8949 * If BKOPs operations are urgently needed at this moment then
8950 * keep auto-bkops enabled or else disable it.
8952 ufshcd_urgent_bkops(hba);
8954 hba->clk_gating.is_suspended = false;
8955 hba->hibern8_on_idle.is_suspended = false;
8957 if (hba->clk_scaling.is_allowed)
8958 ufshcd_resume_clkscaling(hba);
8960 /* Schedule clock gating in case of no access to UFS device yet */
8961	ufshcd_release_all(hba);
8962	goto out;
8964 set_old_link_state:
8965 ufshcd_link_state_transition(hba, old_link_state, 0);
8966 if (ufshcd_is_link_hibern8(hba) &&
8967 ufshcd_is_hibern8_on_idle_allowed(hba))
8968 hba->hibern8_on_idle.state = HIBERN8_ENTERED;
8969 vendor_suspend:
8970	ufshcd_vops_suspend(hba, pm_op);
8971 disable_vreg:
8972	ufshcd_vreg_set_lpm(hba);
8973 disable_irq_and_vops_clks:
8974 ufshcd_disable_irq(hba);
8975 if (hba->clk_scaling.is_allowed)
8976 ufshcd_suspend_clkscaling(hba);
8977 ufshcd_disable_clocks(hba, false);
8978 if (ufshcd_is_clkgating_allowed(hba))
8979 hba->clk_gating.state = CLKS_OFF;
8980 out:
8981	hba->pm_op_in_progress = 0;
8983	if (ret)
8984		ufshcd_update_error_stats(hba, UFS_ERR_RESUME);
8985	return ret;
8986 }
8990 * ufshcd_system_suspend - system suspend routine
8991 * @hba: per adapter instance
8994 * Check the description of ufshcd_suspend() function for more details.
8996 * Returns 0 for success and non-zero for failure
8998 int ufshcd_system_suspend(struct ufs_hba *hba)
8999 {
9000	int ret = 0;
9001 ktime_t start = ktime_get();
9003	if (!hba || !hba->is_powered)
9004		return 0;
9006 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
9007 hba->curr_dev_pwr_mode) &&
9008 (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
9009	     hba->uic_link_state))
9010		goto out;
9012 if (pm_runtime_suspended(hba->dev)) {
9014		 * UFS device and/or UFS link low power states during runtime
9015		 * suspend seem to be different from what is expected during
9016		 * system suspend. Hence runtime resume the device & link and
9017		 * let the system suspend low power states take effect.
9018		 * TODO: If resume takes a long time, we might optimize it in
9019		 * the future by not resuming everything if possible.
9021 ret = ufshcd_runtime_resume(hba);
9026	ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
9027 out:
9028	trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
9029		ktime_to_us(ktime_sub(ktime_get(), start)),
9030		hba->curr_dev_pwr_mode, hba->uic_link_state);
9031	if (!ret)
9032		hba->is_sys_suspended = true;
9033	return ret;
9034 }
9035 EXPORT_SYMBOL(ufshcd_system_suspend);
9038 * ufshcd_system_resume - system resume routine
9039 * @hba: per adapter instance
9041 * Returns 0 for success and non-zero for failure
9044 int ufshcd_system_resume(struct ufs_hba *hba)
9047 ktime_t start = ktime_get();
9052	if (!hba->is_powered || pm_runtime_suspended(hba->dev))
9053		/*
9054		 * Let the runtime resume take care of resuming
9055		 * if runtime suspended.
9056		 */
9057		goto out;
9059	ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
9060 out:
9061 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
9062 ktime_to_us(ktime_sub(ktime_get(), start)),
9063 hba->curr_dev_pwr_mode, hba->uic_link_state);
9066 EXPORT_SYMBOL(ufshcd_system_resume);
9069 * ufshcd_runtime_suspend - runtime suspend routine
9070 * @hba: per adapter instance
9072 * Check the description of ufshcd_suspend() function for more details.
9074 * Returns 0 for success and non-zero for failure
9076 int ufshcd_runtime_suspend(struct ufs_hba *hba)
9079 ktime_t start = ktime_get();
9084	if (!hba->is_powered)
9085		goto out;
9086	else
9087		ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
9088 out:
9089 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
9090 ktime_to_us(ktime_sub(ktime_get(), start)),
9091 hba->curr_dev_pwr_mode,
9092 hba->uic_link_state);
9096 EXPORT_SYMBOL(ufshcd_runtime_suspend);
9099 * ufshcd_runtime_resume - runtime resume routine
9100 * @hba: per adapter instance
9102 * This function basically brings the UFS device, UniPro link and controller
9103 * to active state. Following operations are done in this function:
9105 * 1. Turn on all the controller related clocks
9106 * 2. Bring the UniPro link out of Hibernate state
9107 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
9108 *    to Active state.
9109 * 4. If auto-bkops is enabled on the device, disable it.
9111 * So the following would be the possible power state after this function returns:
9113 * S1: UFS device in Active state with VCC rail ON
9114 * UniPro link in Active state
9115 * All the UFS/UniPro controller clocks are ON
9117 * Returns 0 for success and non-zero for failure
9119 int ufshcd_runtime_resume(struct ufs_hba *hba)
9122 ktime_t start = ktime_get();
9127	if (!hba->is_powered)
9128		goto out;
9129	else
9130		ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
9131 out:
9132 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
9133 ktime_to_us(ktime_sub(ktime_get(), start)),
9134 hba->curr_dev_pwr_mode,
9135 hba->uic_link_state);
9138 EXPORT_SYMBOL(ufshcd_runtime_resume);
9140 int ufshcd_runtime_idle(struct ufs_hba *hba)
9141 {
9142	return 0;
9143 }
9144 EXPORT_SYMBOL(ufshcd_runtime_idle);
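/*
 * Usage sketch (hypothetical "foo" glue driver; these names are assumptions,
 * not part of this file): bus glue drivers typically forward their dev_pm_ops
 * callbacks to the helpers exported above, e.g.:
 *
 *	static int foo_ufs_system_suspend(struct device *dev)
 *	{
 *		return ufshcd_system_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static int foo_ufs_runtime_suspend(struct device *dev)
 *	{
 *		return ufshcd_runtime_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static const struct dev_pm_ops foo_ufs_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_ufs_system_suspend,
 *					foo_ufs_system_resume)
 *		SET_RUNTIME_PM_OPS(foo_ufs_runtime_suspend,
 *				   foo_ufs_runtime_resume,
 *				   foo_ufs_runtime_idle)
 *	};
 */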
9146 static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
9147 struct device_attribute *attr,
9148				    const char *buf, size_t count,
9149				    bool rpm)
9150 {
9151 struct ufs_hba *hba = dev_get_drvdata(dev);
9152 unsigned long flags, value;
9154	if (kstrtoul(buf, 0, &value))
9155		return -EINVAL;
9157	if (value >= UFS_PM_LVL_MAX)
9158		return -EINVAL;
9160	spin_lock_irqsave(hba->host->host_lock, flags);
9161	if (rpm)
9162		hba->rpm_lvl = value;
9163	else
9164		hba->spm_lvl = value;
9165 ufshcd_apply_pm_quirks(hba);
9166	spin_unlock_irqrestore(hba->host->host_lock, flags);
9168	return count;
9169 }
9170 static ssize_t ufshcd_rpm_lvl_show(struct device *dev,
9171 struct device_attribute *attr, char *buf)
9173 struct ufs_hba *hba = dev_get_drvdata(dev);
9177 curr_len = snprintf(buf, PAGE_SIZE,
9178 "\nCurrent Runtime PM level [%d] => dev_state [%s] link_state [%s]\n",
9180 ufschd_ufs_dev_pwr_mode_to_string(
9181 ufs_pm_lvl_states[hba->rpm_lvl].dev_state),
9182 ufschd_uic_link_state_to_string(
9183 ufs_pm_lvl_states[hba->rpm_lvl].link_state));
9185 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
9186 "\nAll available Runtime PM levels info:\n");
9187 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
9188 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
9189 "\tRuntime PM level [%d] => dev_state [%s] link_state [%s]\n",
9191 ufschd_ufs_dev_pwr_mode_to_string(
9192 ufs_pm_lvl_states[lvl].dev_state),
9193 ufschd_uic_link_state_to_string(
9194 ufs_pm_lvl_states[lvl].link_state));
9199 static ssize_t ufshcd_rpm_lvl_store(struct device *dev,
9200 struct device_attribute *attr, const char *buf, size_t count)
9202 return ufshcd_pm_lvl_store(dev, attr, buf, count, true);
9205 static void ufshcd_add_rpm_lvl_sysfs_nodes(struct ufs_hba *hba)
9207 hba->rpm_lvl_attr.show = ufshcd_rpm_lvl_show;
9208 hba->rpm_lvl_attr.store = ufshcd_rpm_lvl_store;
9209 sysfs_attr_init(&hba->rpm_lvl_attr.attr);
9210 hba->rpm_lvl_attr.attr.name = "rpm_lvl";
9211 hba->rpm_lvl_attr.attr.mode = S_IRUGO | S_IWUSR;
9212 if (device_create_file(hba->dev, &hba->rpm_lvl_attr))
9213 dev_err(hba->dev, "Failed to create sysfs for rpm_lvl\n");
9216 static ssize_t ufshcd_spm_lvl_show(struct device *dev,
9217 struct device_attribute *attr, char *buf)
9219 struct ufs_hba *hba = dev_get_drvdata(dev);
9223 curr_len = snprintf(buf, PAGE_SIZE,
9224 "\nCurrent System PM level [%d] => dev_state [%s] link_state [%s]\n",
9226 ufschd_ufs_dev_pwr_mode_to_string(
9227 ufs_pm_lvl_states[hba->spm_lvl].dev_state),
9228 ufschd_uic_link_state_to_string(
9229 ufs_pm_lvl_states[hba->spm_lvl].link_state));
9231 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
9232 "\nAll available System PM levels info:\n");
9233 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
9234 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
9235 "\tSystem PM level [%d] => dev_state [%s] link_state [%s]\n",
9237 ufschd_ufs_dev_pwr_mode_to_string(
9238 ufs_pm_lvl_states[lvl].dev_state),
9239 ufschd_uic_link_state_to_string(
9240 ufs_pm_lvl_states[lvl].link_state));
9245 static ssize_t ufshcd_spm_lvl_store(struct device *dev,
9246 struct device_attribute *attr, const char *buf, size_t count)
9248 return ufshcd_pm_lvl_store(dev, attr, buf, count, false);
9251 static void ufshcd_add_spm_lvl_sysfs_nodes(struct ufs_hba *hba)
9253 hba->spm_lvl_attr.show = ufshcd_spm_lvl_show;
9254 hba->spm_lvl_attr.store = ufshcd_spm_lvl_store;
9255 sysfs_attr_init(&hba->spm_lvl_attr.attr);
9256 hba->spm_lvl_attr.attr.name = "spm_lvl";
9257 hba->spm_lvl_attr.attr.mode = S_IRUGO | S_IWUSR;
9258 if (device_create_file(hba->dev, &hba->spm_lvl_attr))
9259		dev_err(hba->dev, "Failed to create sysfs for spm_lvl\n");
9260 }
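/*
 * Example usage from userspace (paths are illustrative):
 *
 *	# cat /sys/devices/.../rpm_lvl        (current level plus the full table)
 *	# echo 4 > /sys/devices/.../spm_lvl   (select level 4 for system PM)
 *
 * A written value must be in [0, UFS_PM_LVL_MAX); it selects the
 * dev_state/link_state pair applied on the next runtime/system suspend.
 */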
9262 static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
9263				  enum desc_idn desc_id,
9264				  u8 desc_index,
9265				  u8 param_offset,
9266				  u8 *sysfs_buf,
9267				  u8 param_size)
9268 {
9269	u8 desc_buf[8] = {0};
9270	int ret;
9272	if (param_size > 8)
9273		return -EINVAL;
9275 pm_runtime_get_sync(hba->dev);
9276 ret = ufshcd_read_desc_param(hba, desc_id, desc_index,
9277 param_offset, desc_buf, param_size);
9278	pm_runtime_put_sync(hba->dev);
9279	if (ret)
9280		return -EINVAL;
9282	switch (param_size) {
9283	case 1:
9284		ret = snprintf(sysfs_buf, PAGE_SIZE, "0x%02X\n", *desc_buf);
9285		break;
9286	case 2:
9287		ret = snprintf(sysfs_buf, PAGE_SIZE, "0x%04X\n",
9288			get_unaligned_be16(desc_buf));
9289		break;
9290	case 4:
9291		ret = snprintf(sysfs_buf, PAGE_SIZE, "0x%08X\n",
9292			get_unaligned_be32(desc_buf));
9293		break;
9294	case 8:
9295		ret = snprintf(sysfs_buf, PAGE_SIZE, "0x%016llX\n",
9296			get_unaligned_be64(desc_buf));
9297		break;
9298	}
9300	return ret;
9301 }
9304 #define UFS_DESC_PARAM(_name, _puname, _duname, _size) \
9305 static ssize_t _name##_show(struct device *dev, \
9306 struct device_attribute *attr, char *buf) \
9308 struct ufs_hba *hba = dev_get_drvdata(dev); \
9309 return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \
9310 0, _duname##_DESC_PARAM##_puname, buf, _size); \
9312 static DEVICE_ATTR_RO(_name)
9314 #define UFS_HEALTH_DESC_PARAM(_name, _uname, _size) \
9315 UFS_DESC_PARAM(_name, _uname, HEALTH, _size)
9317 UFS_HEALTH_DESC_PARAM(eol_info, _EOL_INFO, 1);
9318 UFS_HEALTH_DESC_PARAM(life_time_estimation_a, _LIFE_TIME_EST_A, 1);
9319 UFS_HEALTH_DESC_PARAM(life_time_estimation_b, _LIFE_TIME_EST_B, 1);
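/*
 * For illustration, UFS_HEALTH_DESC_PARAM(eol_info, _EOL_INFO, 1) expands
 * (via UFS_DESC_PARAM) to roughly:
 *
 *	static ssize_t eol_info_show(struct device *dev,
 *				     struct device_attribute *attr, char *buf)
 *	{
 *		struct ufs_hba *hba = dev_get_drvdata(dev);
 *		return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_HEALTH,
 *			0, HEALTH_DESC_PARAM_EOL_INFO, buf, 1);
 *	}
 *	static DEVICE_ATTR_RO(eol_info);
 */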
9321 static struct attribute *ufs_sysfs_health_descriptor[] = {
9322 &dev_attr_eol_info.attr,
9323 &dev_attr_life_time_estimation_a.attr,
9324	&dev_attr_life_time_estimation_b.attr,
9325	NULL,
9326 };
9328 static const struct attribute_group ufs_sysfs_health_descriptor_group = {
9329 .name = "health_descriptor",
9330 .attrs = ufs_sysfs_health_descriptor,
9333 static const struct attribute_group *ufs_sysfs_groups[] = {
9334	&ufs_sysfs_health_descriptor_group,
9335	NULL,
9336 };
9339 static void ufshcd_add_desc_sysfs_nodes(struct device *dev)
9343 ret = sysfs_create_groups(&dev->kobj, ufs_sysfs_groups);
9346 "%s: sysfs groups creation failed (err = %d)\n",
9350 static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
9352 ufshcd_add_rpm_lvl_sysfs_nodes(hba);
9353 ufshcd_add_spm_lvl_sysfs_nodes(hba);
9354 ufshcd_add_desc_sysfs_nodes(hba->dev);
9357 static void ufshcd_shutdown_clkscaling(struct ufs_hba *hba)
9359 bool suspend = false;
9360 unsigned long flags;
9362 spin_lock_irqsave(hba->host->host_lock, flags);
9363	if (hba->clk_scaling.is_allowed) {
9364		hba->clk_scaling.is_allowed = false;
9365		suspend = true;
9366	}
9367 spin_unlock_irqrestore(hba->host->host_lock, flags);
9370 * Scaling may be scheduled before, hence make sure it
9371 * doesn't race with shutdown
9373 if (ufshcd_is_clkscaling_supported(hba)) {
9374 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
9375 cancel_work_sync(&hba->clk_scaling.suspend_work);
9376		cancel_work_sync(&hba->clk_scaling.resume_work);
9377		if (suspend)
9378			ufshcd_suspend_clkscaling(hba);
9379	}
9381 /* Unregister so that devfreq_monitor can't race with shutdown */
9383 devfreq_remove_device(hba->devfreq);
9387 * ufshcd_shutdown - shutdown routine
9388 * @hba: per adapter instance
9390 * This function would power off both UFS device and UFS link.
9392 * Returns 0 always to allow force shutdown even in case of errors.
9394 int ufshcd_shutdown(struct ufs_hba *hba)
9398	if (!hba->is_powered)
9399		goto out;
9401	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
9402		goto out;
9404 pm_runtime_get_sync(hba->dev);
9405 ufshcd_hold_all(hba);
9406 ufshcd_mark_shutdown_ongoing(hba);
9407 ufshcd_shutdown_clkscaling(hba);
9409 * (1) Acquire the lock to stop any more requests
9410 * (2) Wait for all issued requests to complete
9412 ufshcd_get_write_lock(hba);
9413 ufshcd_scsi_block_requests(hba);
9414	ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
9415	if (ret)
9416		dev_err(hba->dev, "%s: waiting for DB clear: failed: %d\n",
9417			__func__, ret);
9418 /* Requests may have errored out above, let it be handled */
9419 flush_work(&hba->eh_work);
9420 /* reqs issued from contexts other than shutdown will fail from now */
9421 ufshcd_scsi_unblock_requests(hba);
9422 ufshcd_release_all(hba);
9423	ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
9424 out:
9425	if (ret)
9426		dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
9427	/* allow force shutdown even in case of errors */
9428	return 0;
9429 }
9430 EXPORT_SYMBOL(ufshcd_shutdown);
9432 /*
9433  * Values permitted 0, 1, 2.
9434  * 0 -> Disable IO latency histograms (default)
9435  * 1 -> Enable IO latency histograms
9436  * 2 -> Zero out IO latency histograms
9437  */
9438 static ssize_t
9439 latency_hist_store(struct device *dev, struct device_attribute *attr,
9440 const char *buf, size_t count)
9441 {
9442	struct ufs_hba *hba = dev_get_drvdata(dev);
9443	long value;
9445	if (kstrtol(buf, 0, &value))
9446		return -EINVAL;
9447 if (value == BLK_IO_LAT_HIST_ZERO) {
9448 memset(&hba->io_lat_read, 0, sizeof(hba->io_lat_read));
9449 memset(&hba->io_lat_write, 0, sizeof(hba->io_lat_write));
9450 } else if (value == BLK_IO_LAT_HIST_ENABLE ||
9451 value == BLK_IO_LAT_HIST_DISABLE)
9452		hba->latency_hist_enabled = value;
9453	return count;
9454 }
9456 static ssize_t
9457 latency_hist_show(struct device *dev, struct device_attribute *attr,
9458		char *buf)
9459 {
9460 struct ufs_hba *hba = dev_get_drvdata(dev);
9461 size_t written_bytes;
9463	written_bytes = blk_latency_hist_show("Read", &hba->io_lat_read,
9464			buf, PAGE_SIZE);
9465 written_bytes += blk_latency_hist_show("Write", &hba->io_lat_write,
9466 buf + written_bytes, PAGE_SIZE - written_bytes);
9468 return written_bytes;
9471 static DEVICE_ATTR(latency_hist, S_IRUGO | S_IWUSR,
9472 latency_hist_show, latency_hist_store);
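/*
 * Example usage (illustrative device path), matching the 0/1/2 values
 * documented above:
 *
 *	# echo 1 > /sys/devices/.../latency_hist   (start collecting)
 *	# cat /sys/devices/.../latency_hist        (dump read/write histograms)
 *	# echo 2 > /sys/devices/.../latency_hist   (zero the histograms)
 *	# echo 0 > /sys/devices/.../latency_hist   (stop collecting)
 */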
9474 static void
9475 ufshcd_init_latency_hist(struct ufs_hba *hba)
9476 {
9477 if (device_create_file(hba->dev, &dev_attr_latency_hist))
9478		dev_err(hba->dev, "Failed to create latency_hist sysfs entry\n");
9479 }
9481 static void
9482 ufshcd_exit_latency_hist(struct ufs_hba *hba)
9483 {
9484	device_remove_file(hba->dev, &dev_attr_latency_hist);
9485 }
9488 * ufshcd_remove - de-allocate SCSI host and host memory space
9489 * data structure memory
9490 * @hba - per adapter instance
9492 void ufshcd_remove(struct ufs_hba *hba)
9494 scsi_remove_host(hba->host);
9495 /* disable interrupts */
9496 ufshcd_disable_intr(hba, hba->intr_mask);
9497 ufshcd_hba_stop(hba, true);
9499 ufshcd_exit_clk_gating(hba);
9500 ufshcd_exit_hibern8_on_idle(hba);
9501 if (ufshcd_is_clkscaling_supported(hba)) {
9502 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
9503 ufshcd_exit_latency_hist(hba);
9504 devfreq_remove_device(hba->devfreq);
9506 ufshcd_hba_exit(hba);
9507 ufsdbg_remove_debugfs(hba);
9509 EXPORT_SYMBOL_GPL(ufshcd_remove);
9512 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
9513 * @hba: pointer to Host Bus Adapter (HBA)
9515 void ufshcd_dealloc_host(struct ufs_hba *hba)
9517 scsi_host_put(hba->host);
9519 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
9522 * ufshcd_set_dma_mask - Set dma mask based on the controller
9523 * addressing capability
9524 * @hba: per adapter instance
9526 * Returns 0 for success, non-zero for failure
9528 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
9530 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
9531 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
9534 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
9538 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
9539 * @dev: pointer to device handle
9540 * @hba_handle: driver private handle
9541 * Returns 0 on success, non-zero value on failure
9543 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
9545 struct Scsi_Host *host;
9546	struct ufs_hba *hba;
9547	int err = 0;
9549	if (!dev) {
9550		dev_err(dev,
9551			"Invalid memory reference, dev is NULL\n");
9552		err = -ENODEV;
9553		goto out_error;
9554	}
9556 host = scsi_host_alloc(&ufshcd_driver_template,
9557 sizeof(struct ufs_hba));
9558	if (!host) {
9559		dev_err(dev, "scsi_host_alloc failed\n");
9560		err = -ENOMEM;
9561		goto out_error;
9562	}
9563	hba = shost_priv(host);
9564	hba->host = host;
9565	hba->dev = dev;
9566	*hba_handle = hba;
9568 out_error:
9569	return err;
9570 }
9571 EXPORT_SYMBOL(ufshcd_alloc_host);
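/*
 * Probe-time usage sketch (hypothetical glue driver; the "foo_*" names and
 * the mmio_base/irq variables are assumptions, not part of this file):
 *
 *	struct ufs_hba *hba;
 *	int err;
 *
 *	err = ufshcd_alloc_host(dev, &hba);
 *	if (err)
 *		return err;
 *	hba->vops = &foo_ufs_hba_vops;
 *	err = ufshcd_init(hba, mmio_base, irq);
 *	if (err)
 *		ufshcd_dealloc_host(hba);
 *	return err;
 */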
9574 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
9575 * @hba: per adapter instance
9576 * @scale_up: True if scaling up and false if scaling down
9578 * Returns true if scaling is required, false otherwise.
9580 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
9583 struct ufs_clk_info *clki;
9584 struct list_head *head = &hba->clk_list_head;
9586	if (!head || list_empty(head))
9587		return false;
9589 list_for_each_entry(clki, head, list) {
9590 if (!IS_ERR_OR_NULL(clki->clk)) {
9591 if (scale_up && clki->max_freq) {
9592				if (clki->curr_freq == clki->max_freq)
9593					continue;
9594				return true;
9595			} else if (!scale_up && clki->min_freq) {
9596				if (clki->curr_freq == clki->min_freq)
9597					continue;
9598				return true;
9599			}
9600		}
9601	}
9603	return false;
9604 }
9607 * ufshcd_scale_gear - scale up/down UFS gear
9608 * @hba: per adapter instance
9609 * @scale_up: True for scaling up gear and false for scaling down
9611 * Returns 0 for success,
9612 * Returns -EBUSY if scaling can't happen at this time
9613 * Returns non-zero for any other errors
9615 static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
9616 {
9617	int ret = 0;
9618 struct ufs_pa_layer_attr new_pwr_info;
9619 u32 scale_down_gear = ufshcd_vops_get_scale_down_gear(hba);
9621	BUG_ON(!hba->clk_scaling.saved_pwr_info.is_valid);
9623	if (scale_up) {
9624		memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
9625 sizeof(struct ufs_pa_layer_attr));
9627 * Some UFS devices may stop responding after switching from
9628 * HS-G1 to HS-G3. Also, it is found that these devices work
9629 * fine if we do 2 steps switch: HS-G1 to HS-G2 followed by
9630 * HS-G2 to HS-G3. If UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH
9631 * quirk is enabled for such devices, this 2 steps gear switch
9632 * workaround will be applied.
9634 if ((hba->dev_quirks & UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH)
9635 && (hba->pwr_info.gear_tx == UFS_HS_G1)
9636 && (new_pwr_info.gear_tx == UFS_HS_G3)) {
9637 /* scale up to G2 first */
9638 new_pwr_info.gear_tx = UFS_HS_G2;
9639 new_pwr_info.gear_rx = UFS_HS_G2;
9640			ret = ufshcd_change_power_mode(hba, &new_pwr_info);
9641			if (ret)
9642				goto out;
9644 /* scale up to G3 now */
9645 new_pwr_info.gear_tx = UFS_HS_G3;
9646 new_pwr_info.gear_rx = UFS_HS_G3;
9647 /* now, fall through to set the HS-G3 */
9649		ret = ufshcd_change_power_mode(hba, &new_pwr_info);
9650		if (ret)
9651			goto out;
9652	} else {
9653		memcpy(&new_pwr_info, &hba->pwr_info,
9654 sizeof(struct ufs_pa_layer_attr));
9656 if (hba->pwr_info.gear_tx > scale_down_gear
9657 || hba->pwr_info.gear_rx > scale_down_gear) {
9658 /* save the current power mode */
9659 memcpy(&hba->clk_scaling.saved_pwr_info.info,
9661 sizeof(struct ufs_pa_layer_attr));
9663 /* scale down gear */
9664 new_pwr_info.gear_tx = scale_down_gear;
9665 new_pwr_info.gear_rx = scale_down_gear;
9666 if (!(hba->dev_quirks & UFS_DEVICE_NO_FASTAUTO)) {
9667 new_pwr_info.pwr_tx = FASTAUTO_MODE;
9668 new_pwr_info.pwr_rx = FASTAUTO_MODE;
9671		ret = ufshcd_change_power_mode(hba, &new_pwr_info);
9672	}
9674 out:
9675	if (ret)
9676		dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d), scale_up = %d",
9678 hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
9679			new_pwr_info.gear_tx, new_pwr_info.gear_rx,
9680			scale_up);
9682	return ret;
9683 }
9685 static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
9686 {
9687	#define DOORBELL_CLR_TOUT_US	(1000 * 1000) /* 1 sec */
9688	int ret = 0;
9689	/*
9690	 * make sure that there are no outstanding requests when
9691	 * clock scaling is in progress
9692	 */
9693 ufshcd_scsi_block_requests(hba);
9694 down_write(&hba->lock);
9695 if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
9696		ret = -EBUSY;
9697		up_write(&hba->lock);
9698		ufshcd_scsi_unblock_requests(hba);
9699	}
9701	return ret;
9702 }
9704 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
9706 up_write(&hba->lock);
9707 ufshcd_scsi_unblock_requests(hba);
9711 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
9712 * @hba: per adapter instance
9713 * @scale_up: True for scaling up and false for scaling down
9715 * Returns 0 for success,
9716 * Returns -EBUSY if scaling can't happen at this time
9717 * Returns non-zero for any other errors
9719 static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
9720 {
9721	int ret = 0;
9723 /* let's not get into low power until clock scaling is completed */
9724 hba->ufs_stats.clk_hold.ctx = CLK_SCALE_WORK;
9725 ufshcd_hold_all(hba);
9727	ret = ufshcd_clock_scaling_prepare(hba);
9728	if (ret)
9729		goto out;
9731	/* scale down the gear before scaling down clocks */
9732	if (!scale_up) {
9733		ret = ufshcd_scale_gear(hba, false);
9734		if (ret)
9735			goto clk_scaling_unprepare;
9736	}
9739 * If auto hibern8 is supported then put the link in
9740 * hibern8 manually, this is to avoid auto hibern8
9741 * racing during clock frequency scaling sequence.
9743 if (ufshcd_is_auto_hibern8_supported(hba)) {
9744		ret = ufshcd_uic_hibern8_enter(hba);
9745		if (ret)
9746			/* link will be bad state so no need to scale_up_gear */
9747			goto clk_scaling_unprepare;
9748	}
9750	ret = ufshcd_scale_clks(hba, scale_up);
9751	if (ret)
9752		goto scale_up_gear;
9754 if (ufshcd_is_auto_hibern8_supported(hba)) {
9755		ret = ufshcd_uic_hibern8_exit(hba);
9756		if (ret)
9757			/* link will be bad state so no need to scale_up_gear */
9758			goto scale_up_gear;
9759	}
9761	/* scale up the gear after scaling up clocks */
9762	if (scale_up) {
9763		ret = ufshcd_scale_gear(hba, true);
9764		if (ret) {
9765			ufshcd_scale_clks(hba, false);
9766			goto clk_scaling_unprepare;
9767		}
9768	}
9770	if (!ret) {
9771		hba->clk_scaling.is_scaled_up = scale_up;
9772		if (scale_up)
9773			hba->clk_gating.delay_ms =
9774				hba->clk_gating.delay_ms_perf;
9775		else
9776			hba->clk_gating.delay_ms =
9777				hba->clk_gating.delay_ms_pwr_save;
9778	}
9780	goto clk_scaling_unprepare;
9783 scale_up_gear:
9784	ufshcd_scale_gear(hba, true);
9785 clk_scaling_unprepare:
9786	ufshcd_clock_scaling_unprepare(hba);
9787 out:
9788	hba->ufs_stats.clk_rel.ctx = CLK_SCALE_WORK;
9789	ufshcd_release_all(hba);
9790	return ret;
9791 }
9793 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
9795 unsigned long flags;
9797 devfreq_suspend_device(hba->devfreq);
9798 spin_lock_irqsave(hba->host->host_lock, flags);
9799 hba->clk_scaling.window_start_t = 0;
9800 spin_unlock_irqrestore(hba->host->host_lock, flags);
9803 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
9805 unsigned long flags;
9806 bool suspend = false;
9808 if (!ufshcd_is_clkscaling_supported(hba))
9811 spin_lock_irqsave(hba->host->host_lock, flags);
9812	if (!hba->clk_scaling.is_suspended) {
9813		suspend = true;
9814		hba->clk_scaling.is_suspended = true;
9815	}
9816	spin_unlock_irqrestore(hba->host->host_lock, flags);
9818	if (suspend)
9819		__ufshcd_suspend_clkscaling(hba);
9820 }
9822 static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
9824 unsigned long flags;
9825 bool resume = false;
9827 if (!ufshcd_is_clkscaling_supported(hba))
9830 spin_lock_irqsave(hba->host->host_lock, flags);
9831	if (hba->clk_scaling.is_suspended) {
9832		resume = true;
9833		hba->clk_scaling.is_suspended = false;
9834	}
9835	spin_unlock_irqrestore(hba->host->host_lock, flags);
9837	if (resume)
9838		devfreq_resume_device(hba->devfreq);
9839 }
9841 static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
9842 struct device_attribute *attr, char *buf)
9844 struct ufs_hba *hba = dev_get_drvdata(dev);
9846 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
9849 static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
9850 struct device_attribute *attr, const char *buf, size_t count)
9852	struct ufs_hba *hba = dev_get_drvdata(dev);
9853	u32 value;
9854	int err = 0;
9856	if (kstrtou32(buf, 0, &value))
9857		return -EINVAL;
9859	value = !!value;
9860	if (value == hba->clk_scaling.is_allowed)
9861		goto out;
9863 pm_runtime_get_sync(hba->dev);
9864 ufshcd_hold(hba, false);
9866 cancel_work_sync(&hba->clk_scaling.suspend_work);
9867 cancel_work_sync(&hba->clk_scaling.resume_work);
9869	hba->clk_scaling.is_allowed = value;
9871	if (value) {
9872		ufshcd_resume_clkscaling(hba);
9873	} else {
9874		ufshcd_suspend_clkscaling(hba);
9875 err = ufshcd_devfreq_scale(hba, true);
9877			dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
9878					__func__, err);
9879	}
9880 out:
9881	ufshcd_release(hba, false);
9882	pm_runtime_put_sync(hba->dev);
9883	return count;
9884 }
9887 static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
9889 struct ufs_hba *hba = container_of(work, struct ufs_hba,
9890 clk_scaling.suspend_work);
9891 unsigned long irq_flags;
9893 spin_lock_irqsave(hba->host->host_lock, irq_flags);
9894 if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
9895 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9898 hba->clk_scaling.is_suspended = true;
9899 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9901 __ufshcd_suspend_clkscaling(hba);
9904 static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
9906 struct ufs_hba *hba = container_of(work, struct ufs_hba,
9907 clk_scaling.resume_work);
9908 unsigned long irq_flags;
9910 spin_lock_irqsave(hba->host->host_lock, irq_flags);
9911 if (!hba->clk_scaling.is_suspended) {
9912 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9915 hba->clk_scaling.is_suspended = false;
9916 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9918 devfreq_resume_device(hba->devfreq);
9921 static int ufshcd_devfreq_target(struct device *dev,
9922				unsigned long *freq, u32 flags)
9923 {
9924	int ret = 0;
9925 struct ufs_hba *hba = dev_get_drvdata(dev);
9926 unsigned long irq_flags;
9927	ktime_t start;
9928	bool scale_up, sched_clk_scaling_suspend_work = false;
9930 if (!ufshcd_is_clkscaling_supported(hba))
9933	if ((*freq > 0) && (*freq < UINT_MAX)) {
9934		dev_err(hba->dev, "%s: invalid freq = %lu\n", __func__, *freq);
9935		return -EINVAL;
9936	}
9938 spin_lock_irqsave(hba->host->host_lock, irq_flags);
9939 if (ufshcd_eh_in_progress(hba)) {
9940 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9944 if (!hba->clk_scaling.active_reqs)
9945 sched_clk_scaling_suspend_work = true;
9947 scale_up = (*freq == UINT_MAX) ? true : false;
9948 if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
9949 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9951 goto out; /* no state change required */
9953 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9955 start = ktime_get();
9956 ret = ufshcd_devfreq_scale(hba, scale_up);
9957 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
9958 (scale_up ? "up" : "down"),
9959 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
9961 out:
9962	if (sched_clk_scaling_suspend_work)
9963		queue_work(hba->clk_scaling.workq,
9964			   &hba->clk_scaling.suspend_work);
9966	return ret;
9967 }
9969 static int ufshcd_devfreq_get_dev_status(struct device *dev,
9970 struct devfreq_dev_status *stat)
9972 struct ufs_hba *hba = dev_get_drvdata(dev);
9973 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
9974 unsigned long flags;
9976 if (!ufshcd_is_clkscaling_supported(hba))
9979 memset(stat, 0, sizeof(*stat));
9981 spin_lock_irqsave(hba->host->host_lock, flags);
9982	if (!scaling->window_start_t)
9983		goto start_window;
9985 if (scaling->is_busy_started)
9986 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
9987 scaling->busy_start_t));
9989 stat->total_time = jiffies_to_usecs((long)jiffies -
9990 (long)scaling->window_start_t);
9991 stat->busy_time = scaling->tot_busy_t;
9992 start_window:
9993	scaling->window_start_t = jiffies;
9994 scaling->tot_busy_t = 0;
9996 if (hba->outstanding_reqs) {
9997 scaling->busy_start_t = ktime_get();
9998 scaling->is_busy_started = true;
9999	} else {
10000		scaling->busy_start_t = ktime_set(0, 0);
10001		scaling->is_busy_started = false;
10002	}
10003	spin_unlock_irqrestore(hba->host->host_lock, flags);
10005	return 0;
10006 }
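/*
 * The two callbacks above are what gets plugged into the devfreq framework
 * when the driver registers its devfreq device (registration happens during
 * probe, outside this excerpt). A representative profile, sketched here as
 * an illustration of the wiring rather than a quote of the actual one:
 *
 *	static struct devfreq_dev_profile ufs_devfreq_profile = {
 *		.polling_ms	= 100,
 *		.target		= ufshcd_devfreq_target,
 *		.get_dev_status	= ufshcd_devfreq_get_dev_status,
 *	};
 */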
10007 static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
10009 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
10010 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
10011 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
10012 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
10013 hba->clk_scaling.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
10014 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
10015		dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
10016 }
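/*
 * Example usage (illustrative path): clock scaling can be toggled at runtime:
 *
 *	# echo 0 > /sys/devices/.../clkscale_enable   (disable; clocks scaled up)
 *	# echo 1 > /sys/devices/.../clkscale_enable   (re-enable scaling)
 */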
10018 static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
10020 struct device *dev = hba->dev;
10023 ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
10024				   &hba->lanes_per_direction);
10025	if (ret) {
10026		dev_dbg(dev,
10027			"%s: failed to read lanes-per-direction, ret=%d\n",
10028			__func__, ret);
10029		hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
10030	}
10031 }
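/*
 * Device tree example (illustrative; node name, address and compatible are
 * assumptions) for the property parsed above:
 *
 *	ufshc@624000 {
 *		compatible = "jedec,ufs-2.0";
 *		...
 *		lanes-per-direction = <2>;
 *	};
 */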
10033 * ufshcd_init - Driver initialization routine
10034 * @hba: per-adapter instance
10035 * @mmio_base: base register address
10036 * @irq: Interrupt line of device
10037 * Returns 0 on success, non-zero value on failure
10039 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
10040 {
10041	int err;
10042 struct Scsi_Host *host = hba->host;
10043 struct device *dev = hba->dev;
10047 "Invalid memory reference for mmio_base is NULL\n");
10052 hba->mmio_base = mmio_base;
10055 ufshcd_init_lanes_per_dir(hba);
10057 err = ufshcd_hba_init(hba);
10061 /* Read capabilities registers */
10062 ufshcd_hba_capabilities(hba);
10064 /* Get UFS version supported by the controller */
10065 hba->ufs_version = ufshcd_get_ufs_version(hba);
10067 /* print error message if ufs_version is not valid */
10068 if ((hba->ufs_version != UFSHCI_VERSION_10) &&
10069 (hba->ufs_version != UFSHCI_VERSION_11) &&
10070 (hba->ufs_version != UFSHCI_VERSION_20) &&
10071 (hba->ufs_version != UFSHCI_VERSION_21))
10072 dev_err(hba->dev, "invalid UFS version 0x%x\n",
10075 /* Get Interrupt bit mask per version */
10076 hba->intr_mask = ufshcd_get_intr_mask(hba);
10078 /* Enable debug prints */
10079 hba->ufshcd_dbg_print = DEFAULT_UFSHCD_DBG_PRINT_EN;
10081	err = ufshcd_set_dma_mask(hba);
10082	if (err) {
10083		dev_err(hba->dev, "set dma mask failed\n");
10084		goto out_disable;
10085	}
10087 /* Allocate memory for host memory space */
10088	err = ufshcd_memory_alloc(hba);
10089	if (err) {
10090		dev_err(hba->dev, "Memory allocation failed\n");
10091		goto out_disable;
10092	}
10094 /* Configure LRB */
10095 ufshcd_host_memory_configure(hba);
10097 host->can_queue = hba->nutrs;
10098 host->cmd_per_lun = hba->nutrs;
10099 host->max_id = UFSHCD_MAX_ID;
10100 host->max_lun = UFS_MAX_LUNS;
10101 host->max_channel = UFSHCD_MAX_CHANNEL;
10102 host->unique_id = host->host_no;
10103 host->max_cmd_len = MAX_CDB_SIZE;
10104 host->set_dbd_for_caching = 1;
10106 hba->max_pwr_info.is_valid = false;
10108	/* Initialize wait queue for task management */
10109 init_waitqueue_head(&hba->tm_wq);
10110 init_waitqueue_head(&hba->tm_tag_wq);
10112 /* Initialize work queues */
10113 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
10114 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
10115 INIT_WORK(&hba->rls_work, ufshcd_rls_handler);
10117 /* Initialize UIC command mutex */
10118 mutex_init(&hba->uic_cmd_mutex);
10120 /* Initialize mutex for device management commands */
10121 mutex_init(&hba->dev_cmd.lock);
10123 init_rwsem(&hba->lock);
10125 /* Initialize device management tag acquire wait queue */
10126 init_waitqueue_head(&hba->dev_cmd.tag_wq);
10128 ufshcd_init_clk_gating(hba);
10129 ufshcd_init_hibern8_on_idle(hba);
10132 * In order to avoid any spurious interrupt immediately after
10133 * registering UFS controller interrupt handler, clear any pending UFS
10134 * interrupt status and disable all the UFS interrupts.
10136 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
10137 REG_INTERRUPT_STATUS);
10138 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
10139	/*
10140	 * Make sure that UFS interrupts are disabled and any pending interrupt
10141	 * status is cleared before registering UFS interrupt handler.
10142	 */
10143	mb();
10145 /* IRQ registration */
10146 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
10148 dev_err(hba->dev, "request irq failed\n");
10151 hba->is_irq_enabled = true;
10154	err = scsi_add_host(host, hba->dev);
10155	if (err) {
10156		dev_err(hba->dev, "scsi_add_host failed\n");
10157		goto exit_gating;
10158	}
10160 /* Reset controller to power on reset (POR) state */
10161 ufshcd_vops_full_reset(hba);
10163 /* reset connected UFS device */
10164 err = ufshcd_reset_device(hba);
10166 dev_warn(hba->dev, "%s: device reset failed. err %d\n",
10169 /* Host controller enable */
10170	err = ufshcd_hba_enable(hba);
10171	if (err) {
10172		dev_err(hba->dev, "Host controller enable failed\n");
10173 ufshcd_print_host_regs(hba);
10174 ufshcd_print_host_state(hba);
10175 goto out_remove_scsi_host;
10178 if (ufshcd_is_clkscaling_supported(hba)) {
10179 char wq_name[sizeof("ufs_clkscaling_00")];
10181 INIT_WORK(&hba->clk_scaling.suspend_work,
10182 ufshcd_clk_scaling_suspend_work);
10183 INIT_WORK(&hba->clk_scaling.resume_work,
10184 ufshcd_clk_scaling_resume_work);
10186 snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clkscaling_%d",
10188 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
10190 ufshcd_clkscaling_init_sysfs(hba);
10194	 * If rpm_lvl and spm_lvl are not already set to valid levels,
10195 * set the default power management level for UFS runtime and system
10196 * suspend. Default power saving mode selected is keeping UFS link in
10197 * Hibern8 state and UFS device in sleep.
10199 if (!ufshcd_is_valid_pm_lvl(hba->rpm_lvl))
10200 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
10201 UFS_SLEEP_PWR_MODE,
10202 UIC_LINK_HIBERN8_STATE);
10203 if (!ufshcd_is_valid_pm_lvl(hba->spm_lvl))
10204 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
10205 UFS_SLEEP_PWR_MODE,
10206 UIC_LINK_HIBERN8_STATE);
10208 /* Hold auto suspend until async scan completes */
10209 pm_runtime_get_sync(dev);
10211 ufshcd_init_latency_hist(hba);
10214 * We are assuming that device wasn't put in sleep/power-down
10215 * state exclusively during the boot stage before kernel.
10216 * This assumption helps avoid doing link startup twice during
10217 * ufshcd_probe_hba().
10219 ufshcd_set_ufs_dev_active(hba);
10221 ufshcd_cmd_log_init(hba);
10223 async_schedule(ufshcd_async_scan, hba);
10225 ufsdbg_add_debugfs(hba);
10227	ufshcd_add_sysfs_nodes(hba);
10229	return 0;
10231 out_remove_scsi_host:
10232	scsi_remove_host(hba->host);
10233 exit_gating:
10234	ufshcd_exit_clk_gating(hba);
10235	ufshcd_exit_latency_hist(hba);
10236 out_disable:
10237	hba->is_irq_enabled = false;
10238	ufshcd_hba_exit(hba);
10239 out_error:
10240	return err;
10241 }
10242 EXPORT_SYMBOL_GPL(ufshcd_init);
10244 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
10245 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
10246 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
10247 MODULE_LICENSE("GPL");
10248 MODULE_VERSION(UFSHCD_DRIVER_VERSION);