/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2
 * license terms, and distributes only under these terms.
 */

#include <linux/async.h>
#include <scsi/ufs/ioctl.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
#include <linux/blkdev.h>

#include "ufshcd.h"
#include "ufshci.h"
#include "ufs_quirks.h"
#include "ufs-debugfs.h"
#include "ufs-qcom.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>

#ifdef CONFIG_DEBUG_FS

static int ufshcd_tag_req_type(struct request *rq)
{
	int rq_type = TS_WRITE;

	if (!rq || !(rq->cmd_type & REQ_TYPE_FS))
		rq_type = TS_NOT_SUPPORTED;
	else if (rq->cmd_flags & REQ_FLUSH)
		rq_type = TS_FLUSH;
	else if (rq_data_dir(rq) == READ)
		rq_type = (rq->cmd_flags & REQ_URGENT) ?
			TS_URGENT_READ : TS_READ;
	else if (rq->cmd_flags & REQ_URGENT)
		rq_type = TS_URGENT_WRITE;

	return rq_type;
}

static void ufshcd_update_error_stats(struct ufs_hba *hba, int type)
{
	ufsdbg_set_err_state(hba);
	if (type < UFS_ERR_MAX)
		hba->ufs_stats.err_stats[type]++;
}

static void ufshcd_update_tag_stats(struct ufs_hba *hba, int tag)
{
	struct request *rq =
		hba->lrb[tag].cmd ? hba->lrb[tag].cmd->request : NULL;
	u64 **tag_stats = hba->ufs_stats.tag_stats;
	int rq_type;

	if (!hba->ufs_stats.enabled)
		return;

	tag_stats[tag][TS_TAG]++;
	if (!rq || !(rq->cmd_type & REQ_TYPE_FS))
		return;

	WARN_ON(hba->ufs_stats.q_depth > hba->nutrs);
	rq_type = ufshcd_tag_req_type(rq);
	if (!(rq_type < 0 || rq_type > TS_NUM_STATS))
		tag_stats[hba->ufs_stats.q_depth++][rq_type]++;
}

static void ufshcd_update_tag_stats_completion(struct ufs_hba *hba,
		struct scsi_cmnd *cmd)
{
	struct request *rq = cmd ? cmd->request : NULL;

	if (rq && rq->cmd_type & REQ_TYPE_FS)
		hba->ufs_stats.q_depth--;
}

static void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	int rq_type;
	struct request *rq = lrbp->cmd ? lrbp->cmd->request : NULL;
	s64 delta = ktime_us_delta(lrbp->complete_time_stamp,
		lrbp->issue_time_stamp);

	/* update general request statistics */
	if (hba->ufs_stats.req_stats[TS_TAG].count == 0)
		hba->ufs_stats.req_stats[TS_TAG].min = delta;
	hba->ufs_stats.req_stats[TS_TAG].count++;
	hba->ufs_stats.req_stats[TS_TAG].sum += delta;
	if (delta > hba->ufs_stats.req_stats[TS_TAG].max)
		hba->ufs_stats.req_stats[TS_TAG].max = delta;
	if (delta < hba->ufs_stats.req_stats[TS_TAG].min)
		hba->ufs_stats.req_stats[TS_TAG].min = delta;

	rq_type = ufshcd_tag_req_type(rq);
	if (rq_type == TS_NOT_SUPPORTED)
		return;

	/* update request type specific statistics */
	if (hba->ufs_stats.req_stats[rq_type].count == 0)
		hba->ufs_stats.req_stats[rq_type].min = delta;
	hba->ufs_stats.req_stats[rq_type].count++;
	hba->ufs_stats.req_stats[rq_type].sum += delta;
	if (delta > hba->ufs_stats.req_stats[rq_type].max)
		hba->ufs_stats.req_stats[rq_type].max = delta;
	if (delta < hba->ufs_stats.req_stats[rq_type].min)
		hba->ufs_stats.req_stats[rq_type].min = delta;
}
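
/*
 * Note (illustrative, not part of the original code): from the counters
 * above, the average latency per request type can be derived as
 * sum / count (in microseconds), e.g., assuming count != 0:
 *
 *	avg_us = hba->ufs_stats.req_stats[TS_READ].sum /
 *		 hba->ufs_stats.req_stats[TS_READ].count;
 *
 * min and max track the extremes since the statistics were last reset.
 */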

static void
ufshcd_update_query_stats(struct ufs_hba *hba, enum query_opcode opcode, u8 idn)
{
	if (opcode < UPIU_QUERY_OPCODE_MAX && idn < MAX_QUERY_IDN)
		hba->ufs_stats.query_stats_arr[opcode][idn]++;
}

#else
static inline void ufshcd_update_tag_stats(struct ufs_hba *hba, int tag)
{
}

static inline void ufshcd_update_tag_stats_completion(struct ufs_hba *hba,
		struct scsi_cmnd *cmd)
{
}

static inline void ufshcd_update_error_stats(struct ufs_hba *hba, int type)
{
}

static inline
void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
}

static inline
void ufshcd_update_query_stats(struct ufs_hba *hba,
			       enum query_opcode opcode, u8 idn)
{
}
#endif

#define PWR_INFO_MASK	0xF
#define PWR_RX_OFFSET	4

#define UFSHCD_REQ_SENSE_SIZE	18

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* Maximum retries for Hibern8 enter */
#define UIC_HIBERN8_ENTER_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

/* default value of auto suspend is 3 seconds */
#define UFSHCD_AUTO_SUSPEND_DELAY_MS 3000 /* millisecs */

#define UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE	10
#define UFSHCD_CLK_GATING_DELAY_MS_PERF		50

/* IOCTL opcode for command - ufs set device read only */
#define UFS_IOCTL_BLKROSET	BLKROSET

#define UFSHCD_DEFAULT_LANES_PER_DIRECTION	2

#define ufshcd_toggle_vreg(_dev, _vreg, _on)			\
	({							\
		int _ret;					\
		if (_on)					\
			_ret = ufshcd_enable_vreg(_dev, _vreg);	\
		else						\
			_ret = ufshcd_disable_vreg(_dev, _vreg);\
		_ret;						\
	})
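
/*
 * Usage sketch (illustrative, not from the original source): the macro is a
 * GCC statement expression, so it evaluates to the enable/disable return
 * code and can be used directly in an assignment, e.g.
 *
 *	ret = ufshcd_toggle_vreg(hba->dev, vreg, true);
 *
 * where 'vreg' is a hypothetical struct ufs_vreg pointer.
 */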

#define ufshcd_hex_dump(prefix_str, buf, len) \
print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)

static u32 ufs_query_desc_max_size[] = {
	QUERY_DESC_DEVICE_MAX_SIZE,
	QUERY_DESC_CONFIGURAION_MAX_SIZE,
	QUERY_DESC_UNIT_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
	QUERY_DESC_INTERCONNECT_MAX_SIZE,
	QUERY_DESC_STRING_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
	QUERY_DESC_GEOMETRY_MAZ_SIZE,
	QUERY_DESC_POWER_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
};

enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
	UFSHCD_STATE_OPERATIONAL,
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
};

/* Interrupt configuration options */
enum {
	UFSHCD_INT_DISABLE,
	UFSHCD_INT_ENABLE,
	UFSHCD_INT_CLEAR,
};

#define DEFAULT_UFSHCD_DBG_PRINT_EN	UFSHCD_DBG_PRINT_ALL

#define ufshcd_set_eh_in_progress(h) \
	(h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	(h->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	(h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)

static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};

static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
					enum uic_link_state link_state)
{
	enum ufs_pm_level lvl;

	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
		if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
			(ufs_pm_lvl_states[lvl].link_state == link_state))
			return lvl;
	}

	/* if no match is found, return level 0 */
	return UFS_PM_LVL_0;
}
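
/*
 * Example (illustrative): with the ufs_pm_lvl_states[] table above, and
 * assuming the UFS_PM_LVL_* enumerators are consecutive starting at
 * UFS_PM_LVL_0, the call
 *
 *	ufs_get_desired_pm_lvl_for_dev_link_state(UFS_SLEEP_PWR_MODE,
 *						  UIC_LINK_HIBERN8_STATE)
 *
 * matches the fourth table entry and returns UFS_PM_LVL_3.
 */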

static inline bool ufshcd_is_valid_pm_lvl(int lvl)
{
	if (lvl >= 0 && lvl < ARRAY_SIZE(ufs_pm_lvl_states))
		return true;
	else
		return false;
}

static irqreturn_t ufshcd_intr(int irq, void *__hba);
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba);
static int ufshcd_enable_clocks(struct ufs_hba *hba);
static int ufshcd_disable_clocks(struct ufs_hba *hba,
				 bool is_gating_context);
static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
					      bool is_gating_context);
static void ufshcd_hold_all(struct ufs_hba *hba);
static void ufshcd_release_all(struct ufs_hba *hba);
static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static inline void ufshcd_save_tstamp_of_last_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void ufshcd_release_all(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
static int ufshcd_devfreq_target(struct device *dev,
				unsigned long *freq, u32 flags);
static int ufshcd_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *stat);

#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
static struct devfreq_simple_ondemand_data ufshcd_ondemand_data = {
	.upthreshold = 35,
	.downdifferential = 30,
	.simple_scaling = 1,
};

static void *gov_data = &ufshcd_ondemand_data;
#else
static void *gov_data;
#endif

static struct devfreq_dev_profile ufs_devfreq_profile = {
	.polling_ms	= 40,
	.target		= ufshcd_devfreq_target,
	.get_dev_status	= ufshcd_devfreq_get_dev_status,
};

static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
	return tag >= 0 && tag < hba->nutrs;
}

static inline void ufshcd_enable_irq(struct ufs_hba *hba)
{
	if (!hba->is_irq_enabled) {
		enable_irq(hba->irq);
		hba->is_irq_enabled = true;
	}
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		disable_irq(hba->irq);
		hba->is_irq_enabled = false;
	}
}

void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
	unsigned long flags;
	bool unblock = false;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->scsi_block_reqs_cnt--;
	unblock = !hba->scsi_block_reqs_cnt;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	if (unblock)
		scsi_unblock_requests(hba->host);
}
EXPORT_SYMBOL(ufshcd_scsi_unblock_requests);

static inline void __ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
	if (!hba->scsi_block_reqs_cnt++)
		scsi_block_requests(hba->host);
}

void ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_scsi_block_requests(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL(ufshcd_scsi_block_requests);
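
/*
 * Block/unblock are reference counted, so calls must be paired
 * (illustrative sketch, not from the original source):
 *
 *	ufshcd_scsi_block_requests(hba);
 *	... perform error handling / recovery ...
 *	ufshcd_scsi_unblock_requests(hba);
 *
 * SCSI requests are resumed only when scsi_block_reqs_cnt drops back to zero.
 */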

static int ufshcd_device_reset_ctrl(struct ufs_hba *hba, bool ctrl)
{
	int ret = 0;

	if (!hba->pctrl)
		return 0;

	/* Assert reset if ctrl == true */
	if (ctrl)
		ret = pinctrl_select_state(hba->pctrl,
			pinctrl_lookup_state(hba->pctrl, "dev-reset-assert"));
	else
		ret = pinctrl_select_state(hba->pctrl,
			pinctrl_lookup_state(hba->pctrl, "dev-reset-deassert"));

	if (ret < 0)
		dev_err(hba->dev, "%s: %s failed with err %d\n",
			__func__, ctrl ? "Assert" : "Deassert", ret);

	return ret;
}

static inline int ufshcd_assert_device_reset(struct ufs_hba *hba)
{
	return ufshcd_device_reset_ctrl(hba, true);
}

static inline int ufshcd_deassert_device_reset(struct ufs_hba *hba)
{
	return ufshcd_device_reset_ctrl(hba, false);
}

static int ufshcd_reset_device(struct ufs_hba *hba)
{
	int ret;

	/* reset the connected UFS device */
	ret = ufshcd_assert_device_reset(hba);
	if (ret)
		goto out;
	/*
	 * The reset signal is active low.
	 * The UFS device shall detect a positive or negative RST_n pulse
	 * width of 1us or more.
	 * To be on the safe side, keep the reset low for at least 10us.
	 */
	usleep_range(10, 15);

	ret = ufshcd_deassert_device_reset(hba);
	if (ret)
		goto out;
	/* same as assert, wait for at least 10us after deassert */
	usleep_range(10, 15);
out:
	return ret;
}

/* replace non-printable or non-ASCII characters with spaces */
static inline void ufshcd_remove_non_printable(char *val)
{
	if (!val || !*val)
		return;

	if (*val < 0x20 || *val > 0x7e)
		*val = ' ';
}

#define UFSHCD_MAX_CMD_LOGGING	200

#ifdef CONFIG_TRACEPOINTS
static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
			struct ufshcd_cmd_log_entry *entry, u8 opcode)
{
	if (trace_ufshcd_command_enabled()) {
		u32 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);

		trace_ufshcd_command(dev_name(hba->dev), entry->str, entry->tag,
				     entry->doorbell, entry->transfer_len, intr,
				     entry->lba, opcode);
	}
}
#else
static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
			struct ufshcd_cmd_log_entry *entry, u8 opcode)
{
}
#endif

#ifdef CONFIG_SCSI_UFSHCD_CMD_LOGGING
static void ufshcd_cmd_log_init(struct ufs_hba *hba)
{
	/* Allocate log entries */
	if (!hba->cmd_log.entries) {
		hba->cmd_log.entries = kzalloc(UFSHCD_MAX_CMD_LOGGING *
			sizeof(struct ufshcd_cmd_log_entry), GFP_KERNEL);
		if (!hba->cmd_log.entries)
			return;
		dev_dbg(hba->dev, "%s: cmd_log.entries initialized\n",
				__func__);
	}
}

static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
			     unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
			     sector_t lba, int transfer_len, u8 opcode)
{
	struct ufshcd_cmd_log_entry *entry;

	if (!hba->cmd_log.entries)
		return;

	entry = &hba->cmd_log.entries[hba->cmd_log.pos];
	entry->lun = lun;
	entry->str = str;
	entry->cmd_type = cmd_type;
	entry->cmd_id = cmd_id;
	entry->lba = lba;
	entry->transfer_len = transfer_len;
	entry->idn = idn;
	entry->doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	entry->tag = tag;
	entry->tstamp = ktime_get();
	entry->outstanding_reqs = hba->outstanding_reqs;
	entry->seq_num = hba->cmd_log.seq_num;
	hba->cmd_log.seq_num++;
	hba->cmd_log.pos =
			(hba->cmd_log.pos + 1) % UFSHCD_MAX_CMD_LOGGING;

	ufshcd_add_command_trace(hba, entry, opcode);
}

static void ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
	unsigned int tag, u8 cmd_id, u8 idn)
{
	__ufshcd_cmd_log(hba, str, cmd_type, tag, cmd_id, idn,
			 0xff, (sector_t)-1, -1, -1);
}

static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
{
	ufshcd_cmd_log(hba, str, "dme", 0xff, cmd_id, 0xff);
}

static void ufshcd_print_cmd_log(struct ufs_hba *hba)
{
	int i;
	int pos;
	struct ufshcd_cmd_log_entry *p;

	if (!hba->cmd_log.entries)
		return;

	pos = hba->cmd_log.pos;
	for (i = 0; i < UFSHCD_MAX_CMD_LOGGING; i++) {
		p = &hba->cmd_log.entries[pos];
		pos = (pos + 1) % UFSHCD_MAX_CMD_LOGGING;

		if (ktime_to_us(p->tstamp)) {
			pr_err("%s: %s: seq_no=%u lun=0x%x cmd_id=0x%02x lba=0x%llx txfer_len=%d tag=%u, doorbell=0x%x outstanding=0x%x idn=%d time=%lld us\n",
				p->cmd_type, p->str, p->seq_num,
				p->lun, p->cmd_id, (unsigned long long)p->lba,
				p->transfer_len, p->tag, p->doorbell,
				p->outstanding_reqs, p->idn,
				ktime_to_us(p->tstamp));
			usleep_range(1000, 1100);
		}
	}
}
#else
static void ufshcd_cmd_log_init(struct ufs_hba *hba)
{
}

static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
			     unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
			     sector_t lba, int transfer_len, u8 opcode)
{
	struct ufshcd_cmd_log_entry entry;

	entry.str = str;
	entry.lba = lba;
	entry.cmd_id = cmd_id;
	entry.transfer_len = transfer_len;
	entry.doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	entry.tag = tag;

	ufshcd_add_command_trace(hba, &entry, opcode);
}

static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
{
}

static void ufshcd_print_cmd_log(struct ufs_hba *hba)
{
}
#endif

#ifdef CONFIG_TRACEPOINTS
static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
					unsigned int tag, const char *str)
{
	struct ufshcd_lrb *lrbp;
	char *cmd_type = NULL;
	u8 opcode = 0;
	u8 cmd_id = 0, idn = 0;
	sector_t lba = -1;
	int transfer_len = -1;

	lrbp = &hba->lrb[tag];

	if (lrbp->cmd) { /* data phase exists */
		opcode = (u8)(*lrbp->cmd->cmnd);
		if ((opcode == READ_10) || (opcode == WRITE_10)) {
			/*
			 * Currently we only fully trace read(10) and write(10)
			 * commands
			 */
			if (lrbp->cmd->request && lrbp->cmd->request->bio)
				lba =
				lrbp->cmd->request->bio->bi_iter.bi_sector;
			transfer_len = be32_to_cpu(
				lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
		}
	}

	if (lrbp->cmd && (lrbp->command_type == UTP_CMD_TYPE_SCSI)) {
		cmd_type = "scsi";
		cmd_id = (u8)(*lrbp->cmd->cmnd);
	} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
		if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) {
			cmd_type = "nop";
			cmd_id = 0;
		} else if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) {
			cmd_type = "query";
			cmd_id = hba->dev_cmd.query.request.upiu_req.opcode;
			idn = hba->dev_cmd.query.request.upiu_req.idn;
		}
	}

	__ufshcd_cmd_log(hba, (char *) str, cmd_type, tag, cmd_id, idn,
			 lrbp->lun, lba, transfer_len, opcode);
}
#else
static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
					unsigned int tag, const char *str)
{
}
#endif

static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_CLK_FREQ_EN))
		return;

	if (!head || list_empty(head))
		return;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
				clki->max_freq)
			dev_err(hba->dev, "clk: %s, rate: %u\n",
					clki->name, clki->curr_freq);
	}
}

static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
		struct ufs_uic_err_reg_hist *err_hist, char *err_name)
{
	int i;

	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_UIC_ERR_HIST_EN))
		return;

	for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
		int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;

		if (err_hist->reg[p] == 0)
			continue;
		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us", err_name, i,
			err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
	}
}

static inline void __ufshcd_print_host_regs(struct ufs_hba *hba, bool no_sleep)
{
	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_REGS_EN))
		return;

	/*
	 * hex_dump reads its data without the readl macro. This might
	 * cause inconsistency issues on some platforms, as the printed
	 * values may be from cache and not the most recent value.
	 * To know whether you are looking at an un-cached version, verify
	 * that the IORESOURCE_MEM flag is set when xxx_get_resource() is
	 * invoked during the platform/pci probe function.
	 */
	ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
	dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x",
		hba->ufs_version, hba->capabilities);
	dev_err(hba->dev,
		"hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x",
		(u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
	dev_err(hba->dev,
		"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d",
		ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
		hba->ufs_stats.hibern8_exit_cnt);

	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");

	ufshcd_print_clk_freqs(hba);

	ufshcd_vops_dbg_register_dump(hba, no_sleep);
}

static void ufshcd_print_host_regs(struct ufs_hba *hba)
{
	__ufshcd_print_host_regs(hba, false);
}

static
void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
{
	struct ufshcd_lrb *lrbp;
	int prdt_length;
	int tag;

	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_TRS_EN))
		return;

	for_each_set_bit(tag, &bitmap, hba->nutrs) {
		lrbp = &hba->lrb[tag];

		dev_err(hba->dev, "UPIU[%d] - issue time %lld us",
				tag, ktime_to_us(lrbp->issue_time_stamp));
		dev_err(hba->dev,
			"UPIU[%d] - Transfer Request Descriptor phys@0x%llx",
			tag, (u64)lrbp->utrd_dma_addr);
		ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
				sizeof(struct utp_transfer_req_desc));
		dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx", tag,
			(u64)lrbp->ucd_req_dma_addr);
		ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
				sizeof(struct utp_upiu_req));
		dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx", tag,
			(u64)lrbp->ucd_rsp_dma_addr);
		ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
				sizeof(struct utp_upiu_rsp));
		prdt_length =
			le16_to_cpu(lrbp->utr_descriptor_ptr->prd_table_length);
		dev_err(hba->dev, "UPIU[%d] - PRDT - %d entries  phys@0x%llx",
			tag, prdt_length, (u64)lrbp->ucd_prdt_dma_addr);
		if (pr_prdt)
			ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
				sizeof(struct ufshcd_sg_entry) * prdt_length);
	}
}

static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
	struct utp_task_req_desc *tmrdp;
	int tag;

	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_TMRS_EN))
		return;

	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
		tmrdp = &hba->utmrdl_base_addr[tag];
		dev_err(hba->dev, "TM[%d] - Task Management Header", tag);
		ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
				sizeof(struct request_desc_header));
		dev_err(hba->dev, "TM[%d] - Task Management Request UPIU",
				tag);
		ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
				sizeof(struct utp_upiu_req));
		dev_err(hba->dev, "TM[%d] - Task Management Response UPIU",
				tag);
		ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
				sizeof(struct utp_task_req_desc));
	}
}

static void ufshcd_print_fsm_state(struct ufs_hba *hba)
{
	int err = 0, tx_fsm_val = 0, rx_fsm_val = 0;

	err = ufshcd_dme_get(hba,
			UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
			UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
			&tx_fsm_val);
	dev_err(hba->dev, "%s: TX_FSM_STATE = %u, err = %d\n", __func__,
			tx_fsm_val, err);
	err = ufshcd_dme_get(hba,
			UIC_ARG_MIB_SEL(MPHY_RX_FSM_STATE,
			UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
			&rx_fsm_val);
	dev_err(hba->dev, "%s: RX_FSM_STATE = %u, err = %d\n", __func__,
			rx_fsm_val, err);
}

static void ufshcd_print_host_state(struct ufs_hba *hba)
{
	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_STATE_EN))
		return;

	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
	dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
		hba->lrb_in_use, hba->outstanding_tasks, hba->outstanding_reqs);
	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x, saved_ce_err=0x%x\n",
		hba->saved_err, hba->saved_uic_err, hba->saved_ce_err);
	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
		hba->pm_op_in_progress, hba->is_sys_suspended);
	dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
		hba->auto_bkops_enabled, hba->host->host_self_blocked);
	dev_err(hba->dev, "Clk gate=%d, hibern8 on idle=%d\n",
		hba->clk_gating.state, hba->hibern8_on_idle.state);
	dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
		hba->eh_flags, hba->req_abort_count);
	dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
		hba->capabilities, hba->caps);
	dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
		hba->dev_quirks);
}

/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 * @hba: per-adapter instance
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	char *names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_PWR_EN))
		return;

	dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		 __func__,
		 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		 names[hba->pwr_info.pwr_rx],
		 names[hba->pwr_info.pwr_tx],
		 hba->pwr_info.hs_rate);
}

/*
 * ufshcd_wait_for_register - wait for a register value to change
 * @hba - per-adapter interface
 * @reg - mmio register offset
 * @mask - mask to apply to the read register value
 * @val - wait condition
 * @interval_us - polling interval in microsecs
 * @timeout_ms - timeout in millisecs
 * @can_sleep - perform sleep or just spin
 * Returns -ETIMEDOUT on error, zero on success
 */
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms, bool can_sleep)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		if (can_sleep)
			usleep_range(interval_us, interval_us + 50);
		else
			udelay(interval_us);
		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}
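
/*
 * Usage sketch (illustrative, not from the original source): wait up to
 * 1000 ms, polling every 1000 us and sleeping between polls, for a transfer
 * request doorbell bit to clear ('tag' is a hypothetical slot number):
 *
 *	err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *				       1 << tag, 0, 1000, 1000, true);
 */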

/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba - Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	u32 intr_mask = 0;

	switch (hba->ufs_version) {
	case UFSHCI_VERSION_10:
		intr_mask = INTERRUPT_MASK_ALL_VER_10;
		break;
	/* allow fall through */
	case UFSHCI_VERSION_11:
	case UFSHCI_VERSION_20:
		intr_mask = INTERRUPT_MASK_ALL_VER_11;
		break;
	/* allow fall through */
	case UFSHCI_VERSION_21:
	default:
		intr_mask = INTERRUPT_MASK_ALL_VER_21;
	}

	if (!ufshcd_is_crypto_supported(hba))
		intr_mask &= ~CRYPTO_ENGINE_FATAL_ERROR;

	return intr_mask;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba - Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		return ufshcd_vops_get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_is_device_present - Check if any device is connected to
 *                            the host controller
 * @hba: pointer to adapter instance
 *
 * Returns 1 if device present, 0 if no device detected
 */
static inline int ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
						DEVICE_PRESENT) ? 1 : 0;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 * @task_req_descp: pointer to utp_task_req_desc structure
 *
 * This function is used to get the OCS field from UTMRD
 * Returns the OCS field in the UTMRD
 */
static inline int
ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
{
	return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tm_free_slot - get a free slot for a task management request
 * @hba: per adapter instance
 * @free_slot: pointer to variable with available slot value
 *
 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 * Returns false if a free slot is not available, otherwise returns true
 * with the tag value in @free_slot.
 */
static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
{
	int tag;
	bool ret = false;

	if (!free_slot)
		goto out;

	do {
		tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
		if (tag >= hba->nutmrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));

	*free_slot = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
{
	clear_bit_unlock(slot, &hba->tm_slots_in_use);
}
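
/*
 * Typical pairing (illustrative, not from the original source): a slot is
 * taken before issuing a task management request and released once the
 * response has been consumed:
 *
 *	if (!ufshcd_get_tm_free_slot(hba, &free_slot))
 *		goto busy;		(hypothetical error path)
 *	... build and issue the TM request in slot 'free_slot' ...
 *	ufshcd_put_tm_slot(hba, free_slot);
 */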

/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
	ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
 * @hba: per adapter instance
 * @tag: position of the bit to be cleared
 */
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
	__clear_bit(tag, &hba->outstanding_reqs);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	/*
	 * The mask 0xFF is for the following HCS register bits
	 * Bit		Description
	 *  0		Device Present
	 *  1		UTRLRDY
	 *  2		UTMRLRDY
	 *  3		UCRDY
	 * 4-7		reserved
	 */
	return ((reg & 0xFF) >> 1) ^ 0x07;
}
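
/*
 * Worked example: with Device Present, UTRLRDY, UTMRLRDY and UCRDY all set,
 * reg & 0xFF = 0x0F, (0x0F >> 1) = 0x07 and 0x07 ^ 0x07 = 0 (ready). If,
 * say, UCRDY is still 0, the expression yields 0x04, i.e. a non-zero
 * "not ready" value.
 */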
1111
1112 /**
1113  * ufshcd_get_uic_cmd_result - Get the UIC command result
1114  * @hba: Pointer to adapter instance
1115  *
1116  * This function gets the result of UIC command completion
1117  * Returns 0 on success, non zero value on error
1118  */
1119 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
1120 {
1121         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
1122                MASK_UIC_COMMAND_RESULT;
1123 }
1124
1125 /**
1126  * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
1127  * @hba: Pointer to adapter instance
1128  *
1129  * This function gets UIC command argument3
1130  * Returns 0 on success, non zero value on error
1131  */
1132 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
1133 {
1134         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
1135 }
1136
1137 /**
1138  * ufshcd_get_req_rsp - returns the TR response transaction type
1139  * @ucd_rsp_ptr: pointer to response UPIU
1140  */
1141 static inline int
1142 ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
1143 {
1144         return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
1145 }
1146
1147 /**
1148  * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
1149  * @ucd_rsp_ptr: pointer to response UPIU
1150  *
1151  * This function gets the response status and scsi_status from response UPIU
1152  * Returns the response result code.
1153  */
1154 static inline int
1155 ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
1156 {
1157         return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
1158 }
1159
1160 /*
1161  * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
1162  *                              from response UPIU
1163  * @ucd_rsp_ptr: pointer to response UPIU
1164  *
1165  * Return the data segment length.
1166  */
1167 static inline unsigned int
1168 ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
1169 {
1170         return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
1171                 MASK_RSP_UPIU_DATA_SEG_LEN;
1172 }
1173
1174 /**
1175  * ufshcd_is_exception_event - Check if the device raised an exception event
1176  * @ucd_rsp_ptr: pointer to response UPIU
1177  *
1178  * The function checks if the device raised an exception event indicated in
1179  * the Device Information field of response UPIU.
1180  *
1181  * Returns true if exception is raised, false otherwise.
1182  */
1183 static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
1184 {
1185         return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
1186                         MASK_RSP_EXCEPTION_EVENT ? true : false;
1187 }
1188
1189 /**
1190  * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
1191  * @hba: per adapter instance
1192  */
1193 static inline void
1194 ufshcd_reset_intr_aggr(struct ufs_hba *hba)
1195 {
1196         ufshcd_writel(hba, INT_AGGR_ENABLE |
1197                       INT_AGGR_COUNTER_AND_TIMER_RESET,
1198                       REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
1199 }
1200
1201 /**
1202  * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
1203  * @hba: per adapter instance
1204  * @cnt: Interrupt aggregation counter threshold
1205  * @tmout: Interrupt aggregation timeout value
1206  */
1207 static inline void
1208 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
1209 {
1210         ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
1211                       INT_AGGR_COUNTER_THLD_VAL(cnt) |
1212                       INT_AGGR_TIMEOUT_VAL(tmout),
1213                       REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
1214 }
1215
1216 /**
1217  * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
1218  * @hba: per adapter instance
1219  */
1220 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
1221 {
1222         ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
1223 }
1224
1225 /**
1226  * ufshcd_enable_run_stop_reg - Enable run-stop registers,
1227  *                      When run-stop registers are set to 1, it indicates the
1228  *                      host controller that it can process the requests
1229  * @hba: per adapter instance
1230  */
1231 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
1232 {
1233         ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
1234                       REG_UTP_TASK_REQ_LIST_RUN_STOP);
1235         ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
1236                       REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
1237 }
1238
1239 /**
1240  * ufshcd_hba_start - Start controller initialization sequence
1241  * @hba: per adapter instance
1242  */
1243 static inline void ufshcd_hba_start(struct ufs_hba *hba)
1244 {
1245         u32 val = CONTROLLER_ENABLE;
1246
1247         if (ufshcd_is_crypto_supported(hba))
1248                 val |= CRYPTO_GENERAL_ENABLE;
1249         ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
1250 }
1251
1252 /**
1253  * ufshcd_is_hba_active - Get controller state
1254  * @hba: per adapter instance
1255  *
1256  * Returns zero if controller is active, 1 otherwise
1257  */
1258 static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
1259 {
1260         return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
1261 }
1262
1263 static const char *ufschd_uic_link_state_to_string(
1264                         enum uic_link_state state)
1265 {
1266         switch (state) {
1267         case UIC_LINK_OFF_STATE:        return "OFF";
1268         case UIC_LINK_ACTIVE_STATE:     return "ACTIVE";
1269         case UIC_LINK_HIBERN8_STATE:    return "HIBERN8";
1270         default:                        return "UNKNOWN";
1271         }
1272 }
1273
1274 static const char *ufschd_ufs_dev_pwr_mode_to_string(
1275                         enum ufs_dev_pwr_mode state)
1276 {
1277         switch (state) {
1278         case UFS_ACTIVE_PWR_MODE:       return "ACTIVE";
1279         case UFS_SLEEP_PWR_MODE:        return "SLEEP";
1280         case UFS_POWERDOWN_PWR_MODE:    return "POWERDOWN";
1281         default:                        return "UNKNOWN";
1282         }
1283 }
1284
1285 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
1286 {
1287         /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
1288         if ((hba->ufs_version == UFSHCI_VERSION_10) ||
1289             (hba->ufs_version == UFSHCI_VERSION_11))
1290                 return UFS_UNIPRO_VER_1_41;
1291         else
1292                 return UFS_UNIPRO_VER_1_6;
1293 }
1294 EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
1295
1296 static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
1297 {
1298         /*
1299          * If both host and device support UniPro ver1.6 or later, PA layer
1300          * parameters tuning happens during link startup itself.
1301          *
1302          * We can manually tune PA layer parameters if either host or device
1303          * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
1304          * logic simple, we will only do manual tuning if local unipro version
1305          * doesn't support ver1.6 or later.
1306          */
1307         if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
1308                 return true;
1309         else
1310                 return false;
1311 }
1312
1313 /**
1314  * ufshcd_set_clk_freq - set UFS controller clock frequencies
1315  * @hba: per adapter instance
1316  * @scale_up: If True, set max possible frequency othewise set low frequency
1317  *
1318  * Returns 0 if successful
1319  * Returns < 0 for any other errors
1320  */
1321 static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
1322 {
1323         int ret = 0;
1324         struct ufs_clk_info *clki;
1325         struct list_head *head = &hba->clk_list_head;
1326
1327         if (!head || list_empty(head))
1328                 goto out;
1329
1330         list_for_each_entry(clki, head, list) {
1331                 if (!IS_ERR_OR_NULL(clki->clk)) {
1332                         if (scale_up && clki->max_freq) {
1333                                 if (clki->curr_freq == clki->max_freq)
1334                                         continue;
1335
1336                                 ret = clk_set_rate(clki->clk, clki->max_freq);
1337                                 if (ret) {
1338                                         dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
1339                                                 __func__, clki->name,
1340                                                 clki->max_freq, ret);
1341                                         break;
1342                                 }
1343                                 trace_ufshcd_clk_scaling(dev_name(hba->dev),
1344                                                 "scaled up", clki->name,
1345                                                 clki->curr_freq,
1346                                                 clki->max_freq);
1347                                 clki->curr_freq = clki->max_freq;
1348
1349                         } else if (!scale_up && clki->min_freq) {
1350                                 if (clki->curr_freq == clki->min_freq)
1351                                         continue;
1352
1353                                 ret = clk_set_rate(clki->clk, clki->min_freq);
1354                                 if (ret) {
1355                                         dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
1356                                                 __func__, clki->name,
1357                                                 clki->min_freq, ret);
1358                                         break;
1359                                 }
1360                                 trace_ufshcd_clk_scaling(dev_name(hba->dev),
1361                                                 "scaled down", clki->name,
1362                                                 clki->curr_freq,
1363                                                 clki->min_freq);
1364                                 clki->curr_freq = clki->min_freq;
1365                         }
1366                 }
1367                 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
1368                                 clki->name, clk_get_rate(clki->clk));
1369         }
1370
1371 out:
1372         return ret;
1373 }
1374
1375 /**
1376  * ufshcd_scale_clks - scale up or scale down UFS controller clocks
1377  * @hba: per adapter instance
1378  * @scale_up: True if scaling up and false if scaling down
1379  *
1380  * Returns 0 if successful
1381  * Returns < 0 for any other errors
1382  */
1383 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
1384 {
1385         int ret = 0;
1386
1387         ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
1388         if (ret)
1389                 return ret;
1390
1391         ret = ufshcd_set_clk_freq(hba, scale_up);
1392         if (ret)
1393                 return ret;
1394
1395         ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1396         if (ret) {
1397                 ufshcd_set_clk_freq(hba, !scale_up);
1398                 return ret;
1399         }
1400
1401         return ret;
1402 }
1403
1404 static inline void ufshcd_cancel_gate_work(struct ufs_hba *hba)
1405 {
1406         hrtimer_cancel(&hba->clk_gating.gate_hrtimer);
1407         cancel_work_sync(&hba->clk_gating.gate_work);
1408 }
1409
1410 static void ufshcd_ungate_work(struct work_struct *work)
1411 {
1412         int ret;
1413         unsigned long flags;
1414         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1415                         clk_gating.ungate_work);
1416
1417         ufshcd_cancel_gate_work(hba);
1418
1419         spin_lock_irqsave(hba->host->host_lock, flags);
1420         if (hba->clk_gating.state == CLKS_ON) {
1421                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1422                 goto unblock_reqs;
1423         }
1424
1425         spin_unlock_irqrestore(hba->host->host_lock, flags);
1426         ufshcd_hba_vreg_set_hpm(hba);
1427         ufshcd_enable_clocks(hba);
1428
1429         /* Exit from hibern8 */
1430         if (ufshcd_can_hibern8_during_gating(hba)) {
1431                 /* Prevent gating in this path */
1432                 hba->clk_gating.is_suspended = true;
1433                 if (ufshcd_is_link_hibern8(hba)) {
1434                         ret = ufshcd_uic_hibern8_exit(hba);
1435                         if (ret)
1436                                 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1437                                         __func__, ret);
1438                         else
1439                                 ufshcd_set_link_active(hba);
1440                 }
1441                 hba->clk_gating.is_suspended = false;
1442         }
1443 unblock_reqs:
1444         ufshcd_scsi_unblock_requests(hba);
1445 }
1446
1447 /**
1448  * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1449  * Also, exit from hibern8 mode and set the link as active.
1450  * @hba: per adapter instance
1451  * @async: This indicates whether caller should ungate clocks asynchronously.
1452  */
1453 int ufshcd_hold(struct ufs_hba *hba, bool async)
1454 {
1455         int rc = 0;
1456         unsigned long flags;
1457
1458         if (!ufshcd_is_clkgating_allowed(hba))
1459                 goto out;
1460         spin_lock_irqsave(hba->host->host_lock, flags);
1461         hba->clk_gating.active_reqs++;
1462
1463         if (ufshcd_eh_in_progress(hba)) {
1464                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1465                 return 0;
1466         }
1467
1468 start:
1469         switch (hba->clk_gating.state) {
1470         case CLKS_ON:
1471                 /*
1472                  * Wait for the ungate work to complete if in progress.
1473                  * Though the clocks may be in ON state, the link could
1474                  * still be in hibern8 state if hibern8 is allowed
1475                  * during clock gating.
1476                  * Make sure we exit hibern8 state also in addition to
1477                  * clocks being ON.
1478                  */
1479                 if (ufshcd_can_hibern8_during_gating(hba) &&
1480                     ufshcd_is_link_hibern8(hba)) {
1481                         spin_unlock_irqrestore(hba->host->host_lock, flags);
1482                         flush_work(&hba->clk_gating.ungate_work);
1483                         spin_lock_irqsave(hba->host->host_lock, flags);
1484                         goto start;
1485                 }
1486                 break;
1487         case REQ_CLKS_OFF:
1488                 /*
1489                  * If the timer was active but the callback was not running
1490                  * we have nothing to do, just change state and return.
1491                  */
1492                 if (hrtimer_try_to_cancel(&hba->clk_gating.gate_hrtimer) == 1) {
1493                         hba->clk_gating.state = CLKS_ON;
1494                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1495                                 hba->clk_gating.state);
1496                         break;
1497                 }
1498                 /*
1499                  * If we are here, it means gating work is either done or
1500                  * currently running. Hence, fall through to cancel gating
1501                  * work and to enable clocks.
1502                  */
1503         case CLKS_OFF:
1504                 __ufshcd_scsi_block_requests(hba);
1505                 hba->clk_gating.state = REQ_CLKS_ON;
1506                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1507                         hba->clk_gating.state);
1508                 queue_work(hba->clk_gating.clk_gating_workq,
1509                                 &hba->clk_gating.ungate_work);
1510                 /*
1511                  * fall through to check if we should wait for this
1512                  * work to be done or not.
1513                  */
1514         case REQ_CLKS_ON:
1515                 if (async) {
1516                         rc = -EAGAIN;
1517                         hba->clk_gating.active_reqs--;
1518                         break;
1519                 }
1520
1521                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1522                 flush_work(&hba->clk_gating.ungate_work);
1523                 /* Make sure state is CLKS_ON before returning */
1524                 spin_lock_irqsave(hba->host->host_lock, flags);
1525                 goto start;
1526         default:
1527                 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1528                                 __func__, hba->clk_gating.state);
1529                 break;
1530         }
1531         spin_unlock_irqrestore(hba->host->host_lock, flags);
1532 out:
1533         hba->ufs_stats.clk_hold.ts = ktime_get();
1534         return rc;
1535 }
1536 EXPORT_SYMBOL_GPL(ufshcd_hold);
1537
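     /**
      * ufshcd_gate_work - work handler that actually gates the clocks
      * @work: pointer to the clk_gating.gate_work work_struct
      *
      * Queued from the gate hrtimer once the gating delay expires. Bails
      * out if gating is no longer required (suspended, an ungate request
      * raced in, or requests are outstanding); otherwise it puts the link
      * into hibern8 when allowed, turns the clocks off and puts the host
      * regulators into low power mode.
      */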
1538 static void ufshcd_gate_work(struct work_struct *work)
1539 {
1540         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1541                                                 clk_gating.gate_work);
1542         unsigned long flags;
1543
1544         spin_lock_irqsave(hba->host->host_lock, flags);
1545         /*
1546          * If an ungate request raced with this work, the gating state
1547          * will already be marked as REQ_CLKS_ON. In that case save time
1548          * by skipping the gating work and exit after changing the clock
1549          * state to CLKS_ON.
1550          */
1551         if (hba->clk_gating.is_suspended ||
1552                 (hba->clk_gating.state != REQ_CLKS_OFF)) {
1553                 hba->clk_gating.state = CLKS_ON;
1554                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1555                         hba->clk_gating.state);
1556                 goto rel_lock;
1557         }
1558
1559         if (hba->clk_gating.active_reqs
1560                 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1561                 || hba->lrb_in_use || hba->outstanding_tasks
1562                 || hba->active_uic_cmd || hba->uic_async_done)
1563                 goto rel_lock;
1564
1565         spin_unlock_irqrestore(hba->host->host_lock, flags);
1566
1567         if (ufshcd_is_hibern8_on_idle_allowed(hba) &&
1568             hba->hibern8_on_idle.is_enabled)
1569                 /*
1570                  * Hibern8 enter work (on Idle) needs clocks to be ON, hence
1571                  * make sure that it is flushed before turning off the clocks.
1572                  */
1573                 flush_delayed_work(&hba->hibern8_on_idle.enter_work);
1574
1575         /* put the link into hibern8 mode before turning off clocks */
1576         if (ufshcd_can_hibern8_during_gating(hba)) {
1577                 if (ufshcd_uic_hibern8_enter(hba)) {
1578                         hba->clk_gating.state = CLKS_ON;
1579                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1580                                 hba->clk_gating.state);
1581                         goto out;
1582                 }
1583                 ufshcd_set_link_hibern8(hba);
1584         }
1585
1586         /*
1587          * If auto hibern8 is supported then the link will already
1588          * be in hibern8 state and the ref clock can be gated.
1589          */
1590         if ((ufshcd_is_auto_hibern8_supported(hba) ||
1591              !ufshcd_is_link_active(hba)) && !hba->no_ref_clk_gating)
1592                 ufshcd_disable_clocks(hba, true);
1593         else
1594                 /* If link is active, device ref_clk can't be switched off */
1595                 ufshcd_disable_clocks_skip_ref_clk(hba, true);
1596
1597         /* Put the host controller in low power mode if possible */
1598         ufshcd_hba_vreg_set_lpm(hba);
1599
1600         /*
1601          * If an ungate request raced with this work, the gating state
1602          * will already be marked as REQ_CLKS_ON. In that case keep the
1603          * state as REQ_CLKS_ON, which still correctly implies that the
1604          * clocks are off and a request to turn them on is pending. This
1605          * keeps the state machine intact and ultimately prevents the
1606          * cancel work from running multiple times when new requests
1607          * arrive before the current cancel work is done.
1608          */
1609         spin_lock_irqsave(hba->host->host_lock, flags);
1610         if (hba->clk_gating.state == REQ_CLKS_OFF) {
1611                 hba->clk_gating.state = CLKS_OFF;
1612                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1613                         hba->clk_gating.state);
1614         }
1615 rel_lock:
1616         spin_unlock_irqrestore(hba->host->host_lock, flags);
1617 out:
1618         return;
1619 }
1620
1621 /* host lock must be held before calling this variant */
1622 static void __ufshcd_release(struct ufs_hba *hba, bool no_sched)
1623 {
1624         if (!ufshcd_is_clkgating_allowed(hba))
1625                 return;
1626
1627         hba->clk_gating.active_reqs--;
1628
1629         if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
1630                 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1631                 || hba->lrb_in_use || hba->outstanding_tasks
1632                 || hba->active_uic_cmd || hba->uic_async_done
1633                 || ufshcd_eh_in_progress(hba) || no_sched)
1634                 return;
1635
1636         hba->clk_gating.state = REQ_CLKS_OFF;
1637         trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
1638         hba->ufs_stats.clk_rel.ts = ktime_get();
1639
1640         hrtimer_start(&hba->clk_gating.gate_hrtimer,
1641                         ms_to_ktime(hba->clk_gating.delay_ms),
1642                         HRTIMER_MODE_REL);
1643 }
1644
1645 void ufshcd_release(struct ufs_hba *hba, bool no_sched)
1646 {
1647         unsigned long flags;
1648
1649         spin_lock_irqsave(hba->host->host_lock, flags);
1650         __ufshcd_release(hba, no_sched);
1651         spin_unlock_irqrestore(hba->host->host_lock, flags);
1652 }
1653 EXPORT_SYMBOL_GPL(ufshcd_release);
1654
1655 static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1656                 struct device_attribute *attr, char *buf)
1657 {
1658         struct ufs_hba *hba = dev_get_drvdata(dev);
1659
1660         return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
1661 }
1662
1663 static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1664                 struct device_attribute *attr, const char *buf, size_t count)
1665 {
1666         struct ufs_hba *hba = dev_get_drvdata(dev);
1667         unsigned long flags, value;
1668
1669         if (kstrtoul(buf, 0, &value))
1670                 return -EINVAL;
1671
1672         spin_lock_irqsave(hba->host->host_lock, flags);
1673         hba->clk_gating.delay_ms = value;
1674         spin_unlock_irqrestore(hba->host->host_lock, flags);
1675         return count;
1676 }
1677
1678 static ssize_t ufshcd_clkgate_delay_pwr_save_show(struct device *dev,
1679                 struct device_attribute *attr, char *buf)
1680 {
1681         struct ufs_hba *hba = dev_get_drvdata(dev);
1682
1683         return snprintf(buf, PAGE_SIZE, "%lu\n",
1684                         hba->clk_gating.delay_ms_pwr_save);
1685 }
1686
1687 static ssize_t ufshcd_clkgate_delay_pwr_save_store(struct device *dev,
1688                 struct device_attribute *attr, const char *buf, size_t count)
1689 {
1690         struct ufs_hba *hba = dev_get_drvdata(dev);
1691         unsigned long flags, value;
1692
1693         if (kstrtoul(buf, 0, &value))
1694                 return -EINVAL;
1695
1696         spin_lock_irqsave(hba->host->host_lock, flags);
1697
1698         hba->clk_gating.delay_ms_pwr_save = value;
1699         if (ufshcd_is_clkscaling_supported(hba) &&
1700             !hba->clk_scaling.is_scaled_up)
1701                 hba->clk_gating.delay_ms = hba->clk_gating.delay_ms_pwr_save;
1702
1703         spin_unlock_irqrestore(hba->host->host_lock, flags);
1704         return count;
1705 }
1706
1707 static ssize_t ufshcd_clkgate_delay_perf_show(struct device *dev,
1708                 struct device_attribute *attr, char *buf)
1709 {
1710         struct ufs_hba *hba = dev_get_drvdata(dev);
1711
1712         return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms_perf);
1713 }
1714
1715 static ssize_t ufshcd_clkgate_delay_perf_store(struct device *dev,
1716                 struct device_attribute *attr, const char *buf, size_t count)
1717 {
1718         struct ufs_hba *hba = dev_get_drvdata(dev);
1719         unsigned long flags, value;
1720
1721         if (kstrtoul(buf, 0, &value))
1722                 return -EINVAL;
1723
1724         spin_lock_irqsave(hba->host->host_lock, flags);
1725
1726         hba->clk_gating.delay_ms_perf = value;
1727         if (ufshcd_is_clkscaling_supported(hba) &&
1728             hba->clk_scaling.is_scaled_up)
1729                 hba->clk_gating.delay_ms = hba->clk_gating.delay_ms_perf;
1730
1731         spin_unlock_irqrestore(hba->host->host_lock, flags);
1732         return count;
1733 }
1734
1735 static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1736                 struct device_attribute *attr, char *buf)
1737 {
1738         struct ufs_hba *hba = dev_get_drvdata(dev);
1739
1740         return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
1741 }
1742
1743 static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1744                 struct device_attribute *attr, const char *buf, size_t count)
1745 {
1746         struct ufs_hba *hba = dev_get_drvdata(dev);
1747         unsigned long flags;
1748         u32 value;
1749
1750         if (kstrtou32(buf, 0, &value))
1751                 return -EINVAL;
1752
1753         value = !!value;
1754         if (value == hba->clk_gating.is_enabled)
1755                 goto out;
1756
1757         if (value) {
1758                 ufshcd_release(hba, false);
1759         } else {
1760                 spin_lock_irqsave(hba->host->host_lock, flags);
1761                 hba->clk_gating.active_reqs++;
1762                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1763         }
1764
1765         hba->clk_gating.is_enabled = value;
1766 out:
1767         return count;
1768 }
1769
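     /*
      * hrtimer callback fired when the clock gating delay expires. It only
      * queues the gate work on the dedicated clk gating workqueue, since
      * the actual gating may sleep and cannot run in timer context.
      */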
1770 static enum hrtimer_restart ufshcd_clkgate_hrtimer_handler(
1771                                         struct hrtimer *timer)
1772 {
1773         struct ufs_hba *hba = container_of(timer, struct ufs_hba,
1774                                            clk_gating.gate_hrtimer);
1775
1776         queue_work(hba->clk_gating.clk_gating_workq,
1777                                 &hba->clk_gating.gate_work);
1778
1779         return HRTIMER_NORESTART;
1780 }
1781
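     /**
      * ufshcd_init_clk_gating - set up the clock gating infrastructure
      * @hba: per adapter instance
      *
      * Initializes the gate/ungate work items, the high resolution gate
      * timer and a dedicated single threaded workqueue, and creates the
      * related sysfs attributes: per-mode gating delays when clock scaling
      * is supported, a single delay otherwise, plus the clkgate_enable knob.
      */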
1782 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1783 {
1784         struct ufs_clk_gating *gating = &hba->clk_gating;
1785         char wq_name[sizeof("ufs_clk_gating_00")];
1786
1787         hba->clk_gating.state = CLKS_ON;
1788
1789         if (!ufshcd_is_clkgating_allowed(hba))
1790                 return;
1791
1792         /*
1793          * Disable hibern8 during clk gating if
1794          * auto hibern8 is supported
1795          */
1796         if (ufshcd_is_auto_hibern8_supported(hba))
1797                 hba->caps &= ~UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
1798
1799         INIT_WORK(&gating->gate_work, ufshcd_gate_work);
1800         INIT_WORK(&gating->ungate_work, ufshcd_ungate_work);
1801         /*
1802          * Clock gating work must be executed only after auto hibern8
1803          * timeout has expired in the hardware or after aggressive
1804          * hibern8 on idle software timeout. A jiffy based low resolution
1805          * delayed work cannot reliably guarantee this, hence use a high
1806          * resolution timer to make sure the gate work is scheduled
1807          * strictly after the hibern8 timeout.
1808          *
1809          * Always make sure gating->delay_ms > hibern8_on_idle->delay_ms
1810          */
1811         hrtimer_init(&gating->gate_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1812         gating->gate_hrtimer.function = ufshcd_clkgate_hrtimer_handler;
1813
1814         snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
1815                         hba->host->host_no);
1816         hba->clk_gating.clk_gating_workq =
1817                 create_singlethread_workqueue(wq_name);
1818
1819         gating->is_enabled = true;
1820
1821         gating->delay_ms_pwr_save = UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE;
1822         gating->delay_ms_perf = UFSHCD_CLK_GATING_DELAY_MS_PERF;
1823
1824         /* start with performance mode */
1825         gating->delay_ms = gating->delay_ms_perf;
1826
1827         if (!ufshcd_is_clkscaling_supported(hba))
1828                 goto scaling_not_supported;
1829
1830         gating->delay_pwr_save_attr.show = ufshcd_clkgate_delay_pwr_save_show;
1831         gating->delay_pwr_save_attr.store = ufshcd_clkgate_delay_pwr_save_store;
1832         sysfs_attr_init(&gating->delay_pwr_save_attr.attr);
1833         gating->delay_pwr_save_attr.attr.name = "clkgate_delay_ms_pwr_save";
1834         gating->delay_pwr_save_attr.attr.mode = S_IRUGO | S_IWUSR;
1835         if (device_create_file(hba->dev, &gating->delay_pwr_save_attr))
1836                 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay_ms_pwr_save\n");
1837
1838         gating->delay_perf_attr.show = ufshcd_clkgate_delay_perf_show;
1839         gating->delay_perf_attr.store = ufshcd_clkgate_delay_perf_store;
1840         sysfs_attr_init(&gating->delay_perf_attr.attr);
1841         gating->delay_perf_attr.attr.name = "clkgate_delay_ms_perf";
1842         gating->delay_perf_attr.attr.mode = S_IRUGO | S_IWUSR;
1843         if (device_create_file(hba->dev, &gating->delay_perf_attr))
1844                 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay_ms_perf\n");
1845
1846         goto add_clkgate_enable;
1847
1848 scaling_not_supported:
1849         hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1850         hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1851         sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1852         hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1853         hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
1854         if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1855                 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
1856
1857 add_clkgate_enable:
1858         gating->enable_attr.show = ufshcd_clkgate_enable_show;
1859         gating->enable_attr.store = ufshcd_clkgate_enable_store;
1860         sysfs_attr_init(&gating->enable_attr.attr);
1861         gating->enable_attr.attr.name = "clkgate_enable";
1862         gating->enable_attr.attr.mode = S_IRUGO | S_IWUSR;
1863         if (device_create_file(hba->dev, &gating->enable_attr))
1864                 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
1865 }
1866
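     /*
      * Tear down what ufshcd_init_clk_gating() set up: the sysfs attributes,
      * any pending gate/ungate work and the clk gating workqueue.
      */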
1867 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1868 {
1869         if (!ufshcd_is_clkgating_allowed(hba))
1870                 return;
1871         if (ufshcd_is_clkscaling_supported(hba)) {
1872                 device_remove_file(hba->dev,
1873                                    &hba->clk_gating.delay_pwr_save_attr);
1874                 device_remove_file(hba->dev, &hba->clk_gating.delay_perf_attr);
1875         } else {
1876                 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1877         }
1878         device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1879         ufshcd_cancel_gate_work(hba);
1880         cancel_work_sync(&hba->clk_gating.ungate_work);
1881         destroy_workqueue(hba->clk_gating.clk_gating_workq);
1882 }
1883
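     /*
      * Program the auto-hibern8 idle timer register with the given delay
      * using a 1 ms timer scale.
      */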
1884 static void ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba, u32 delay)
1885 {
1886         ufshcd_rmwl(hba, AUTO_HIBERN8_TIMER_SCALE_MASK |
1887                          AUTO_HIBERN8_IDLE_TIMER_MASK,
1888                         AUTO_HIBERN8_TIMER_SCALE_1_MS | delay,
1889                         REG_AUTO_HIBERN8_IDLE_TIMER);
1890         /* Make sure the timer gets applied before further operations */
1891         mb();
1892 }
1893
1894 /**
1895  * ufshcd_hibern8_hold - Make sure that link is not in hibern8.
1896  *
1897  * @hba: per adapter instance
1898  * @async: This indicates whether caller wants to exit hibern8 asynchronously.
1899  *
1900  * Exit from hibern8 mode and set the link as active.
1901  *
1902  * Return 0 on success, non-zero on failure.
1903  */
1904 static int ufshcd_hibern8_hold(struct ufs_hba *hba, bool async)
1905 {
1906         int rc = 0;
1907         unsigned long flags;
1908
1909         if (!ufshcd_is_hibern8_on_idle_allowed(hba))
1910                 goto out;
1911
1912         spin_lock_irqsave(hba->host->host_lock, flags);
1913         hba->hibern8_on_idle.active_reqs++;
1914
1915         if (ufshcd_eh_in_progress(hba)) {
1916                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1917                 return 0;
1918         }
1919
1920 start:
1921         switch (hba->hibern8_on_idle.state) {
1922         case HIBERN8_EXITED:
1923                 break;
1924         case REQ_HIBERN8_ENTER:
1925                 if (cancel_delayed_work(&hba->hibern8_on_idle.enter_work)) {
1926                         hba->hibern8_on_idle.state = HIBERN8_EXITED;
1927                         trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
1928                                 hba->hibern8_on_idle.state);
1929                         break;
1930                 }
1931                 /*
1932                  * If we are here, it means Hibern8 enter work is either done or
1933                  * currently running. Hence, fall through to cancel hibern8
1934                  * work and exit hibern8.
1935                  */
1936         case HIBERN8_ENTERED:
1937                 __ufshcd_scsi_block_requests(hba);
1938                 hba->hibern8_on_idle.state = REQ_HIBERN8_EXIT;
1939                 trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
1940                         hba->hibern8_on_idle.state);
1941                 schedule_work(&hba->hibern8_on_idle.exit_work);
1942                 /*
1943                  * fall through to check if we should wait for this
1944                  * work to be done or not.
1945                  */
1946         case REQ_HIBERN8_EXIT:
1947                 if (async) {
1948                         rc = -EAGAIN;
1949                         hba->hibern8_on_idle.active_reqs--;
1950                         break;
1951                 } else {
1952                         spin_unlock_irqrestore(hba->host->host_lock, flags);
1953                         flush_work(&hba->hibern8_on_idle.exit_work);
1954                         /* Make sure state is HIBERN8_EXITED before returning */
1955                         spin_lock_irqsave(hba->host->host_lock, flags);
1956                         goto start;
1957                 }
1958         default:
1959                 dev_err(hba->dev, "%s: H8 is in invalid state %d\n",
1960                                 __func__, hba->hibern8_on_idle.state);
1961                 break;
1962         }
1963         spin_unlock_irqrestore(hba->host->host_lock, flags);
1964 out:
1965         return rc;
1966 }
1967
1968 /* host lock must be held before calling this variant */
1969 static void __ufshcd_hibern8_release(struct ufs_hba *hba, bool no_sched)
1970 {
1971         unsigned long delay_in_jiffies;
1972
1973         if (!ufshcd_is_hibern8_on_idle_allowed(hba))
1974                 return;
1975
1976         hba->hibern8_on_idle.active_reqs--;
1977         BUG_ON(hba->hibern8_on_idle.active_reqs < 0);
1978
1979         if (hba->hibern8_on_idle.active_reqs
1980                 || hba->hibern8_on_idle.is_suspended
1981                 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1982                 || hba->lrb_in_use || hba->outstanding_tasks
1983                 || hba->active_uic_cmd || hba->uic_async_done
1984                 || ufshcd_eh_in_progress(hba) || no_sched)
1985                 return;
1986
1987         hba->hibern8_on_idle.state = REQ_HIBERN8_ENTER;
1988         trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
1989                 hba->hibern8_on_idle.state);
1990         /*
1991          * Scheduling the delayed work after 1 jiffy would allow the work to
1992          * run any time between 0 ms and 1000/HZ ms, which is not desirable
1993          * for the hibern8 enter work as it may hurt performance if it gets
1994          * scheduled almost immediately. Hence make sure that the hibern8
1995          * enter work gets scheduled at least 2 jiffies out (any time between
1996          * 1000/HZ ms and 2000/HZ ms).
1997          */
1998         delay_in_jiffies = msecs_to_jiffies(hba->hibern8_on_idle.delay_ms);
1999         if (delay_in_jiffies == 1)
2000                 delay_in_jiffies++;
2001
2002         schedule_delayed_work(&hba->hibern8_on_idle.enter_work,
2003                               delay_in_jiffies);
2004 }
2005
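     /* Takes the host lock and drops the hibern8-on-idle hold. */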
2006 static void ufshcd_hibern8_release(struct ufs_hba *hba, bool no_sched)
2007 {
2008         unsigned long flags;
2009
2010         spin_lock_irqsave(hba->host->host_lock, flags);
2011         __ufshcd_hibern8_release(hba, no_sched);
2012         spin_unlock_irqrestore(hba->host->host_lock, flags);
2013 }
2014
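     /**
      * ufshcd_hibern8_enter_work - delayed work to enter hibern8 on idle
      * @work: pointer to the hibern8_on_idle.enter_work work_struct
      *
      * Bails out if hibern8 on idle is suspended (resetting the state to
      * HIBERN8_EXITED) or if the controller is still busy; otherwise it
      * puts the link into hibern8 and, unless an exit request raced with
      * this work, marks the state as HIBERN8_ENTERED.
      */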
2015 static void ufshcd_hibern8_enter_work(struct work_struct *work)
2016 {
2017         struct ufs_hba *hba = container_of(work, struct ufs_hba,
2018                                            hibern8_on_idle.enter_work.work);
2019         unsigned long flags;
2020
2021         spin_lock_irqsave(hba->host->host_lock, flags);
2022         if (hba->hibern8_on_idle.is_suspended) {
2023                 hba->hibern8_on_idle.state = HIBERN8_EXITED;
2024                 trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
2025                         hba->hibern8_on_idle.state);
2026                 goto rel_lock;
2027         }
2028
2029         if (hba->hibern8_on_idle.active_reqs
2030                 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
2031                 || hba->lrb_in_use || hba->outstanding_tasks
2032                 || hba->active_uic_cmd || hba->uic_async_done)
2033                 goto rel_lock;
2034
2035         spin_unlock_irqrestore(hba->host->host_lock, flags);
2036
2037         if (ufshcd_is_link_active(hba) && ufshcd_uic_hibern8_enter(hba)) {
2038                 /* Enter failed */
2039                 hba->hibern8_on_idle.state = HIBERN8_EXITED;
2040                 trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
2041                         hba->hibern8_on_idle.state);
2042                 goto out;
2043         }
2044         ufshcd_set_link_hibern8(hba);
2045
2046         /*
2047          * If an exit request raced with this work, hibern8_on_idle.state
2048          * will already be marked as REQ_HIBERN8_EXIT. In that case keep
2049          * the state as REQ_HIBERN8_EXIT, which still correctly implies
2050          * that we are in hibern8 and a request to exit from it is pending.
2051          * This keeps the state machine intact and ultimately prevents the
2052          * cancel work from running multiple times when new requests
2053          * arrive before the current cancel work is done.
2054          */
2055         spin_lock_irqsave(hba->host->host_lock, flags);
2056         if (hba->hibern8_on_idle.state == REQ_HIBERN8_ENTER) {
2057                 hba->hibern8_on_idle.state = HIBERN8_ENTERED;
2058                 trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
2059                         hba->hibern8_on_idle.state);
2060         }
2061 rel_lock:
2062         spin_unlock_irqrestore(hba->host->host_lock, flags);
2063 out:
2064         return;
2065 }
2066
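     /*
      * Quiesce the host (block SCSI requests and wait for the doorbell
      * registers to clear) before reprogramming the auto-hibern8 idle
      * timer, then resume normal operation.
      */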
2067 static void __ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba,
2068                                             unsigned long delay_ms)
2069 {
2070         pm_runtime_get_sync(hba->dev);
2071         ufshcd_hold_all(hba);
2072         ufshcd_scsi_block_requests(hba);
2073         down_write(&hba->lock);
2074         /* wait for all the outstanding requests to finish */
2075         ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
2076         ufshcd_set_auto_hibern8_timer(hba, delay_ms);
2077         up_write(&hba->lock);
2078         ufshcd_scsi_unblock_requests(hba);
2079         ufshcd_release_all(hba);
2080         pm_runtime_put_sync(hba->dev);
2081 }
2082
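     /**
      * ufshcd_hibern8_exit_work - work handler to bring the link out of hibern8
      * @work: pointer to the hibern8_on_idle.exit_work work_struct
      *
      * Cancels any pending hibern8 enter work and, if the link is still in
      * hibern8, exits hibern8 and marks the state as HIBERN8_EXITED before
      * unblocking SCSI request processing.
      */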
2083 static void ufshcd_hibern8_exit_work(struct work_struct *work)
2084 {
2085         int ret;
2086         unsigned long flags;
2087         struct ufs_hba *hba = container_of(work, struct ufs_hba,
2088                                            hibern8_on_idle.exit_work);
2089
2090         cancel_delayed_work_sync(&hba->hibern8_on_idle.enter_work);
2091
2092         spin_lock_irqsave(hba->host->host_lock, flags);
2093         if ((hba->hibern8_on_idle.state == HIBERN8_EXITED)
2094              || ufshcd_is_link_active(hba)) {
2095                 hba->hibern8_on_idle.state = HIBERN8_EXITED;
2096                 spin_unlock_irqrestore(hba->host->host_lock, flags);
2097                 goto unblock_reqs;
2098         }
2099         spin_unlock_irqrestore(hba->host->host_lock, flags);
2100
2101         /* Exit from hibern8 */
2102         if (ufshcd_is_link_hibern8(hba)) {
2103                 hba->ufs_stats.clk_hold.ctx = H8_EXIT_WORK;
2104                 ufshcd_hold(hba, false);
2105                 ret = ufshcd_uic_hibern8_exit(hba);
2106                 hba->ufs_stats.clk_rel.ctx = H8_EXIT_WORK;
2107                 ufshcd_release(hba, false);
2108                 if (!ret) {
2109                         spin_lock_irqsave(hba->host->host_lock, flags);
2110                         ufshcd_set_link_active(hba);
2111                         hba->hibern8_on_idle.state = HIBERN8_EXITED;
2112                         trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
2113                                 hba->hibern8_on_idle.state);
2114                         spin_unlock_irqrestore(hba->host->host_lock, flags);
2115                 }
2116         }
2117 unblock_reqs:
2118         ufshcd_scsi_unblock_requests(hba);
2119 }
2120
2121 static ssize_t ufshcd_hibern8_on_idle_delay_show(struct device *dev,
2122                 struct device_attribute *attr, char *buf)
2123 {
2124         struct ufs_hba *hba = dev_get_drvdata(dev);
2125
2126         return snprintf(buf, PAGE_SIZE, "%lu\n", hba->hibern8_on_idle.delay_ms);
2127 }
2128
2129 static ssize_t ufshcd_hibern8_on_idle_delay_store(struct device *dev,
2130                 struct device_attribute *attr, const char *buf, size_t count)
2131 {
2132         struct ufs_hba *hba = dev_get_drvdata(dev);
2133         unsigned long flags, value;
2134         bool change = true;
2135
2136         if (kstrtoul(buf, 0, &value))
2137                 return -EINVAL;
2138
2139         spin_lock_irqsave(hba->host->host_lock, flags);
2140         if (hba->hibern8_on_idle.delay_ms == value)
2141                 change = false;
2142
2143         if (value >= hba->clk_gating.delay_ms_pwr_save ||
2144             value >= hba->clk_gating.delay_ms_perf) {
2145                 dev_err(hba->dev, "hibern8_on_idle_delay (%lu) must be less than both clkgate_delay_ms_pwr_save (%lu) and clkgate_delay_ms_perf (%lu)\n",
2146                         value, hba->clk_gating.delay_ms_pwr_save,
2147                         hba->clk_gating.delay_ms_perf);
2148                 spin_unlock_irqrestore(hba->host->host_lock, flags);
2149                 return -EINVAL;
2150         }
2151
2152         hba->hibern8_on_idle.delay_ms = value;
2153         spin_unlock_irqrestore(hba->host->host_lock, flags);
2154
2155         /* Update auto hibern8 timer value if supported */
2156         if (change && ufshcd_is_auto_hibern8_supported(hba) &&
2157             hba->hibern8_on_idle.is_enabled)
2158                 __ufshcd_set_auto_hibern8_timer(hba,
2159                                                 hba->hibern8_on_idle.delay_ms);
2160
2161         return count;
2162 }
2163
2164 static ssize_t ufshcd_hibern8_on_idle_enable_show(struct device *dev,
2165                 struct device_attribute *attr, char *buf)
2166 {
2167         struct ufs_hba *hba = dev_get_drvdata(dev);
2168
2169         return snprintf(buf, PAGE_SIZE, "%d\n",
2170                         hba->hibern8_on_idle.is_enabled);
2171 }
2172
2173 static ssize_t ufshcd_hibern8_on_idle_enable_store(struct device *dev,
2174                 struct device_attribute *attr, const char *buf, size_t count)
2175 {
2176         struct ufs_hba *hba = dev_get_drvdata(dev);
2177         unsigned long flags;
2178         u32 value;
2179
2180         if (kstrtou32(buf, 0, &value))
2181                 return -EINVAL;
2182
2183         value = !!value;
2184         if (value == hba->hibern8_on_idle.is_enabled)
2185                 goto out;
2186
2187         /* Update auto hibern8 timer value if supported */
2188         if (ufshcd_is_auto_hibern8_supported(hba)) {
2189                 __ufshcd_set_auto_hibern8_timer(hba,
2190                         value ? hba->hibern8_on_idle.delay_ms : value);
2191                 goto update;
2192         }
2193
2194         if (value) {
2195                 /*
2196                  * As clock gating work would wait for the hibern8 enter work
2197                  * to finish, clocks would remain on during hibern8 enter work.
2198                  */
2199                 ufshcd_hold(hba, false);
2200                 ufshcd_release_all(hba);
2201         } else {
2202                 spin_lock_irqsave(hba->host->host_lock, flags);
2203                 hba->hibern8_on_idle.active_reqs++;
2204                 spin_unlock_irqrestore(hba->host->host_lock, flags);
2205         }
2206
2207 update:
2208         hba->hibern8_on_idle.is_enabled = value;
2209 out:
2210         return count;
2211 }
2212
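     /*
      * Initialize hibern8-on-idle support: either rely on the controller's
      * auto-hibern8 capability (with a 1 ms idle timer) or set up the
      * software enter/exit work items (with a 10 ms idle delay), and create
      * the related sysfs attributes.
      */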
2213 static void ufshcd_init_hibern8_on_idle(struct ufs_hba *hba)
2214 {
2215         /* initialize the state variable here */
2216         hba->hibern8_on_idle.state = HIBERN8_EXITED;
2217
2218         if (!ufshcd_is_hibern8_on_idle_allowed(hba) &&
2219             !ufshcd_is_auto_hibern8_supported(hba))
2220                 return;
2221
2222         if (ufshcd_is_auto_hibern8_supported(hba)) {
2223                 hba->hibern8_on_idle.delay_ms = 1;
2224                 hba->hibern8_on_idle.state = AUTO_HIBERN8;
2225                 /*
2226                  * Disable SW hibern8 enter on idle in case
2227                  * auto hibern8 is supported
2228                  */
2229                 hba->caps &= ~UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE;
2230         } else {
2231                 hba->hibern8_on_idle.delay_ms = 10;
2232                 INIT_DELAYED_WORK(&hba->hibern8_on_idle.enter_work,
2233                                   ufshcd_hibern8_enter_work);
2234                 INIT_WORK(&hba->hibern8_on_idle.exit_work,
2235                           ufshcd_hibern8_exit_work);
2236         }
2237
2238         hba->hibern8_on_idle.is_enabled = true;
2239
2240         hba->hibern8_on_idle.delay_attr.show =
2241                                         ufshcd_hibern8_on_idle_delay_show;
2242         hba->hibern8_on_idle.delay_attr.store =
2243                                         ufshcd_hibern8_on_idle_delay_store;
2244         sysfs_attr_init(&hba->hibern8_on_idle.delay_attr.attr);
2245         hba->hibern8_on_idle.delay_attr.attr.name = "hibern8_on_idle_delay_ms";
2246         hba->hibern8_on_idle.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
2247         if (device_create_file(hba->dev, &hba->hibern8_on_idle.delay_attr))
2248                 dev_err(hba->dev, "Failed to create sysfs for hibern8_on_idle_delay\n");
2249
2250         hba->hibern8_on_idle.enable_attr.show =
2251                                         ufshcd_hibern8_on_idle_enable_show;
2252         hba->hibern8_on_idle.enable_attr.store =
2253                                         ufshcd_hibern8_on_idle_enable_store;
2254         sysfs_attr_init(&hba->hibern8_on_idle.enable_attr.attr);
2255         hba->hibern8_on_idle.enable_attr.attr.name = "hibern8_on_idle_enable";
2256         hba->hibern8_on_idle.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
2257         if (device_create_file(hba->dev, &hba->hibern8_on_idle.enable_attr))
2258                 dev_err(hba->dev, "Failed to create sysfs for hibern8_on_idle_enable\n");
2259 }
2260
2261 static void ufshcd_exit_hibern8_on_idle(struct ufs_hba *hba)
2262 {
2263         if (!ufshcd_is_hibern8_on_idle_allowed(hba) &&
2264             !ufshcd_is_auto_hibern8_supported(hba))
2265                 return;
2266         device_remove_file(hba->dev, &hba->hibern8_on_idle.delay_attr);
2267         device_remove_file(hba->dev, &hba->hibern8_on_idle.enable_attr);
2268 }
2269
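     /*
      * Convenience helpers to synchronously grab (or drop) both the clock
      * gating and the hibern8-on-idle references.
      */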
2270 static void ufshcd_hold_all(struct ufs_hba *hba)
2271 {
2272         ufshcd_hold(hba, false);
2273         ufshcd_hibern8_hold(hba, false);
2274 }
2275
2276 static void ufshcd_release_all(struct ufs_hba *hba)
2277 {
2278         ufshcd_hibern8_release(hba, false);
2279         ufshcd_release(hba, false);
2280 }
2281
2282 /* Must be called with host lock acquired */
2283 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
2284 {
2285         bool queue_resume_work = false;
2286
2287         if (!ufshcd_is_clkscaling_supported(hba))
2288                 return;
2289
2290         if (!hba->clk_scaling.active_reqs++)
2291                 queue_resume_work = true;
2292
2293         if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
2294                 return;
2295
2296         if (queue_resume_work)
2297                 queue_work(hba->clk_scaling.workq,
2298                            &hba->clk_scaling.resume_work);
2299
2300         if (!hba->clk_scaling.window_start_t) {
2301                 hba->clk_scaling.window_start_t = jiffies;
2302                 hba->clk_scaling.tot_busy_t = 0;
2303                 hba->clk_scaling.is_busy_started = false;
2304         }
2305
2306         if (!hba->clk_scaling.is_busy_started) {
2307                 hba->clk_scaling.busy_start_t = ktime_get();
2308                 hba->clk_scaling.is_busy_started = true;
2309         }
2310 }
2311
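     /*
      * Once there are no more outstanding requests, accumulate the elapsed
      * busy time tracked for clock scaling and close the current busy
      * interval.
      */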
2312 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
2313 {
2314         struct ufs_clk_scaling *scaling = &hba->clk_scaling;
2315
2316         if (!ufshcd_is_clkscaling_supported(hba))
2317                 return;
2318
2319         if (!hba->outstanding_reqs && scaling->is_busy_started) {
2320                 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
2321                                         scaling->busy_start_t));
2322                 scaling->busy_start_t = ktime_set(0, 0);
2323                 scaling->is_busy_started = false;
2324         }
2325 }
2326
2327 /**
2328  * ufshcd_send_command - Send SCSI or device management commands
2329  * @hba: per adapter instance
2330  * @task_tag: Task tag of the command
2331  */
2332 static inline
2333 int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
2334 {
2335         int ret = 0;
2336
2337         hba->lrb[task_tag].issue_time_stamp = ktime_get();
2338         hba->lrb[task_tag].complete_time_stamp = ktime_set(0, 0);
2339         ufshcd_clk_scaling_start_busy(hba);
2340         __set_bit(task_tag, &hba->outstanding_reqs);
2341         ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
2342         /* Make sure that doorbell is committed immediately */
2343         wmb();
2344         ufshcd_cond_add_cmd_trace(hba, task_tag, "send");
2345         ufshcd_update_tag_stats(hba, task_tag);
2346         return ret;
2347 }
2348
2349 /**
2350  * ufshcd_copy_sense_data - Copy sense data in case of check condition
2351  * @lrbp: pointer to local reference block
2352  */
2353 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
2354 {
2355         int len;
2356         if (lrbp->sense_buffer &&
2357             ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
2358                 int len_to_copy;
2359
2360                 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
2361                 len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
2362
2363                 memcpy(lrbp->sense_buffer,
2364                         lrbp->ucd_rsp_ptr->sr.sense_data,
2365                         min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
2366         }
2367 }
2368
2369 /**
2370  * ufshcd_copy_query_response() - Copy the Query Response and the data
2371  * descriptor
2372  * @hba: per adapter instance
2373  * @lrbp: pointer to local reference block
2374  */
2375 static
2376 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2377 {
2378         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2379
2380         memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
2381
2382         /* Get the descriptor */
2383         if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
2384                 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
2385                                 GENERAL_UPIU_REQUEST_SIZE;
2386                 u16 resp_len;
2387                 u16 buf_len;
2388
2389                 /* data segment length */
2390                 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
2391                                                 MASK_QUERY_DATA_SEG_LEN;
2392                 buf_len = be16_to_cpu(
2393                                 hba->dev_cmd.query.request.upiu_req.length);
2394                 if (likely(buf_len >= resp_len)) {
2395                         memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
2396                 } else {
2397                         dev_warn(hba->dev,
2398                                 "%s: Response size is bigger than buffer",
2399                                 __func__);
2400                         return -EINVAL;
2401                 }
2402         }
2403
2404         return 0;
2405 }
2406
2407 /**
2408  * ufshcd_hba_capabilities - Read controller capabilities
2409  * @hba: per adapter instance
2410  */
2411 static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
2412 {
2413         hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
2414
2415         /* nutrs and nutmrs are 0 based values */
2416         hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
2417         hba->nutmrs =
2418         ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
2419 }
2420
2421 /**
2422  * ufshcd_ready_for_uic_cmd - Check if controller is ready
2423  *                            to accept UIC commands
2424  * @hba: per adapter instance
2425  * Return true on success, else false
2426  */
2427 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2428 {
2429         if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
2430                 return true;
2431         else
2432                 return false;
2433 }
2434
2435 /**
2436  * ufshcd_get_upmcrs - Get the power mode change request status
2437  * @hba: Pointer to adapter instance
2438  *
2439  * This function gets the UPMCRS field of HCS register
2440  * Returns value of UPMCRS field
2441  */
2442 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
2443 {
2444         return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
2445 }
2446
2447 /**
2448  * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
2449  * @hba: per adapter instance
2450  * @uic_cmd: UIC command
2451  *
2452  * Mutex must be held.
2453  */
2454 static inline void
2455 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2456 {
2457         WARN_ON(hba->active_uic_cmd);
2458
2459         hba->active_uic_cmd = uic_cmd;
2460
2461         ufshcd_dme_cmd_log(hba, "send", hba->active_uic_cmd->command);
2462         /* Write Args */
2463         ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2464         ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2465         ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
2466
2467         /* Write UIC Cmd */
2468         ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
2469                       REG_UIC_COMMAND);
2470 }
2471
2472 /**
2473  * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
2474  * @hba: per adapter instance
2475  * @uic_cmd: UIC command
2476  *
2477  * Must be called with mutex held.
2478  * Returns 0 only if success.
2479  */
2480 static int
2481 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2482 {
2483         int ret;
2484         unsigned long flags;
2485
2486         if (wait_for_completion_timeout(&uic_cmd->done,
2487                                         msecs_to_jiffies(UIC_CMD_TIMEOUT)))
2488                 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2489         else
2490                 ret = -ETIMEDOUT;
2491
2492         if (ret)
2493                 ufsdbg_set_err_state(hba);
2494
2495         ufshcd_dme_cmd_log(hba, "cmp1", hba->active_uic_cmd->command);
2496
2497         spin_lock_irqsave(hba->host->host_lock, flags);
2498         hba->active_uic_cmd = NULL;
2499         spin_unlock_irqrestore(hba->host->host_lock, flags);
2500
2501         return ret;
2502 }
2503
2504 /**
2505  * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2506  * @hba: per adapter instance
2507  * @uic_cmd: UIC command
2508  * @completion: initialize the completion only if this is set to true
2509  *
2510  * Identical to ufshcd_send_uic_cmd() except that it does not take the
2511  * mutex itself. Must be called with the mutex held and host_lock locked.
2512  * Returns 0 only if success.
2513  */
2514 static int
2515 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2516                       bool completion)
2517 {
2518         if (!ufshcd_ready_for_uic_cmd(hba)) {
2519                 dev_err(hba->dev,
2520                         "Controller not ready to accept UIC commands\n");
2521                 return -EIO;
2522         }
2523
2524         if (completion)
2525                 init_completion(&uic_cmd->done);
2526
2527         ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2528
2529         return 0;
2530 }
2531
2532 /**
2533  * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2534  * @hba: per adapter instance
2535  * @uic_cmd: UIC command
2536  *
2537  * Returns 0 only if success.
2538  */
2539 static int
2540 ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2541 {
2542         int ret;
2543         unsigned long flags;
2544
2545         hba->ufs_stats.clk_hold.ctx = UIC_CMD_SEND;
2546         ufshcd_hold_all(hba);
2547         mutex_lock(&hba->uic_cmd_mutex);
2548         ufshcd_add_delay_before_dme_cmd(hba);
2549
2550         spin_lock_irqsave(hba->host->host_lock, flags);
2551         ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2552         spin_unlock_irqrestore(hba->host->host_lock, flags);
2553         if (!ret)
2554                 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2555
2556         ufshcd_save_tstamp_of_last_dme_cmd(hba);
2557         mutex_unlock(&hba->uic_cmd_mutex);
2558         ufshcd_release_all(hba);
2559         hba->ufs_stats.clk_rel.ctx = UIC_CMD_SEND;
2560
2561         ufsdbg_error_inject_dispatcher(hba,
2562                 ERR_INJECT_UIC, 0, &ret);
2563
2564         return ret;
2565 }
2566
2567 /**
2568  * ufshcd_map_sg - Map scatter-gather list to prdt
2569  * @lrbp: pointer to local reference block
2570  *
2571  * Returns 0 in case of success, non-zero value in case of failure
2572  */
2573 static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
2574 {
2575         struct ufshcd_sg_entry *prd_table;
2576         struct scatterlist *sg;
2577         struct scsi_cmnd *cmd;
2578         int sg_segments;
2579         int i;
2580
2581         cmd = lrbp->cmd;
2582         sg_segments = scsi_dma_map(cmd);
2583         if (sg_segments < 0)
2584                 return sg_segments;
2585
2586         if (sg_segments) {
2587                 lrbp->utr_descriptor_ptr->prd_table_length =
2588                                         cpu_to_le16((u16) (sg_segments));
2589
2590                 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2591
2592                 scsi_for_each_sg(cmd, sg, sg_segments, i) {
2593                         prd_table[i].size  =
2594                                 cpu_to_le32(((u32) sg_dma_len(sg))-1);
2595                         prd_table[i].base_addr =
2596                                 cpu_to_le32(lower_32_bits(sg->dma_address));
2597                         prd_table[i].upper_addr =
2598                                 cpu_to_le32(upper_32_bits(sg->dma_address));
2599                         prd_table[i].reserved = 0;
2600                 }
2601         } else {
2602                 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2603         }
2604
2605         return 0;
2606 }
2607
2608 /**
2609  * ufshcd_enable_intr - enable interrupts
2610  * @hba: per adapter instance
2611  * @intrs: interrupt bits
2612  */
2613 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2614 {
2615         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2616
2617         if (hba->ufs_version == UFSHCI_VERSION_10) {
2618                 u32 rw;
2619                 rw = set & INTERRUPT_MASK_RW_VER_10;
2620                 set = rw | ((set ^ intrs) & intrs);
2621         } else {
2622                 set |= intrs;
2623         }
2624
2625         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2626 }
2627
2628 /**
2629  * ufshcd_disable_intr - disable interrupts
2630  * @hba: per adapter instance
2631  * @intrs: interrupt bits
2632  */
2633 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2634 {
2635         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2636
2637         if (hba->ufs_version == UFSHCI_VERSION_10) {
2638                 u32 rw;
2639                 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2640                         ~(intrs & INTERRUPT_MASK_RW_VER_10);
2641                 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2642
2643         } else {
2644                 set &= ~intrs;
2645         }
2646
2647         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2648 }
2649
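     /*
      * Fill the inline crypto fields of the UTP transfer request descriptor.
      * The vendor hook supplies the crypto config index, the enable flag and
      * the DUN; if crypto is not enabled for this request the descriptor is
      * left untouched.
      */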
2650 static int ufshcd_prepare_crypto_utrd(struct ufs_hba *hba,
2651                 struct ufshcd_lrb *lrbp)
2652 {
2653         struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2654         u8 cc_index = 0;
2655         bool enable = false;
2656         u64 dun = 0;
2657         int ret;
2658
2659         /*
2660          * Call vendor specific code to get crypto info for this request:
2661          * enable, crypto config. index, DUN.
2662          * If bypass is set, don't bother setting the other fields.
2663          */
2664         ret = ufshcd_vops_crypto_req_setup(hba, lrbp, &cc_index, &enable, &dun);
2665         if (ret) {
2666                 if (ret != -EAGAIN) {
2667                         dev_err(hba->dev,
2668                                 "%s: failed to setup crypto request (%d)\n",
2669                                 __func__, ret);
2670                 }
2671
2672                 return ret;
2673         }
2674
2675         if (!enable)
2676                 goto out;
2677
2678         req_desc->header.dword_0 |= cc_index | UTRD_CRYPTO_ENABLE;
2679         req_desc->header.dword_1 = (u32)(dun & 0xFFFFFFFF);
2680         req_desc->header.dword_3 = (u32)((dun >> 32) & 0xFFFFFFFF);
2681 out:
2682         return 0;
2683 }
2684
2685 /**
2686  * ufshcd_prepare_req_desc_hdr() - Fills the request's header
2687  * descriptor according to the request
2688  * @hba: per adapter instance
2689  * @lrbp: pointer to local reference block
2690  * @upiu_flags: flags required in the header
2691  * @cmd_dir: requests data direction
2692  */
2693 static int ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba,
2694         struct ufshcd_lrb *lrbp, u32 *upiu_flags,
2695         enum dma_data_direction cmd_dir)
2696 {
2697         struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2698         u32 data_direction;
2699         u32 dword_0;
2700
2701         if (cmd_dir == DMA_FROM_DEVICE) {
2702                 data_direction = UTP_DEVICE_TO_HOST;
2703                 *upiu_flags = UPIU_CMD_FLAGS_READ;
2704         } else if (cmd_dir == DMA_TO_DEVICE) {
2705                 data_direction = UTP_HOST_TO_DEVICE;
2706                 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2707         } else {
2708                 data_direction = UTP_NO_DATA_TRANSFER;
2709                 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2710         }
2711
2712         dword_0 = data_direction | (lrbp->command_type
2713                                 << UPIU_COMMAND_TYPE_OFFSET);
2714         if (lrbp->intr_cmd)
2715                 dword_0 |= UTP_REQ_DESC_INT_CMD;
2716
2717         /* Transfer request descriptor header fields */
2718         req_desc->header.dword_0 = cpu_to_le32(dword_0);
2719         /* dword_1 is reserved, hence it is set to 0 */
2720         req_desc->header.dword_1 = 0;
2721         /*
2722          * assigning invalid value for command status. Controller
2723          * updates OCS on command completion, with the command
2724          * status
2725          */
2726         req_desc->header.dword_2 =
2727                 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2728         /* dword_3 is reserved, hence it is set to 0 */
2729         req_desc->header.dword_3 = 0;
2730
2731         req_desc->prd_table_length = 0;
2732
2733         if (ufshcd_is_crypto_supported(hba))
2734                 return ufshcd_prepare_crypto_utrd(hba, lrbp);
2735
2736         return 0;
2737 }
2738
2739 /**
2740  * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the command UPIU
2741  * for SCSI commands
2742  * @lrbp: local reference block pointer
2743  * @upiu_flags: flags
2744  */
2745 static
2746 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
2747 {
2748         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2749         unsigned short cdb_len;
2750
2751         /* command descriptor fields */
2752         ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2753                                 UPIU_TRANSACTION_COMMAND, upiu_flags,
2754                                 lrbp->lun, lrbp->task_tag);
2755         ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2756                                 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2757
2758         /* Total EHS length and Data segment length will be zero */
2759         ucd_req_ptr->header.dword_2 = 0;
2760
2761         ucd_req_ptr->sc.exp_data_transfer_len =
2762                 cpu_to_be32(lrbp->cmd->sdb.length);
2763
2764         cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
2765         memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
2766         if (cdb_len < MAX_CDB_SIZE)
2767                 memset(ucd_req_ptr->sc.cdb + cdb_len, 0,
2768                        (MAX_CDB_SIZE - cdb_len));
2769         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2770 }
2771
2772 /**
2773  * ufshcd_prepare_utp_query_req_upiu() - fills the request UPIU
2774  * for query requests
2775  * @hba: UFS hba
2776  * @lrbp: local reference block pointer
2777  * @upiu_flags: flags
2778  */
2779 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2780                                 struct ufshcd_lrb *lrbp, u32 upiu_flags)
2781 {
2782         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2783         struct ufs_query *query = &hba->dev_cmd.query;
2784         u16 len = be16_to_cpu(query->request.upiu_req.length);
2785         u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
2786
2787         /* Query request header */
2788         ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2789                         UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2790                         lrbp->lun, lrbp->task_tag);
2791         ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2792                         0, query->request.query_func, 0, 0);
2793
2794         /* Data segment length */
2795         ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD(
2796                         0, 0, len >> 8, (u8)len);
2797
2798         /* Copy the Query Request buffer as is */
2799         memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2800                         QUERY_OSF_SIZE);
2801
2802         /* Copy the Descriptor */
2803         if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2804                 memcpy(descp, query->descriptor, len);
2805
2806         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2807 }
2808
2809 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2810 {
2811         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2812
2813         memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2814
2815         /* command descriptor fields */
2816         ucd_req_ptr->header.dword_0 =
2817                 UPIU_HEADER_DWORD(
2818                         UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
2819         /* clear rest of the fields of basic header */
2820         ucd_req_ptr->header.dword_1 = 0;
2821         ucd_req_ptr->header.dword_2 = 0;
2822
2823         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2824 }
2825
2826 /**
2827  * ufshcd_compose_upiu - form a UFS Protocol Information Unit (UPIU)
2828  * @hba: per adapter instance
2829  * @lrbp: pointer to local reference block
2830  */
2831 static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2832 {
2833         u32 upiu_flags;
2834         int ret = 0;
2835
2836         switch (lrbp->command_type) {
2837         case UTP_CMD_TYPE_SCSI:
2838                 if (likely(lrbp->cmd)) {
2839                         ret = ufshcd_prepare_req_desc_hdr(hba, lrbp,
2840                                 &upiu_flags, lrbp->cmd->sc_data_direction);
2841                         ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2842                 } else {
2843                         ret = -EINVAL;
2844                 }
2845                 break;
2846         case UTP_CMD_TYPE_DEV_MANAGE:
2847                 ret = ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags,
2848                         DMA_NONE);
2849                 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2850                         ufshcd_prepare_utp_query_req_upiu(
2851                                         hba, lrbp, upiu_flags);
2852                 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2853                         ufshcd_prepare_utp_nop_upiu(lrbp);
2854                 else
2855                         ret = -EINVAL;
2856                 break;
2857         case UTP_CMD_TYPE_UFS:
2858                 /* For UFS native command implementation */
2859                 ret = -ENOTSUPP;
2860                 dev_err(hba->dev, "%s: UFS native commands are not supported\n",
2861                         __func__);
2862                 break;
2863         default:
2864                 ret = -ENOTSUPP;
2865                 dev_err(hba->dev, "%s: unknown command type: 0x%x\n",
2866                                 __func__, lrbp->command_type);
2867                 break;
2868         } /* end of switch */
2869
2870         return ret;
2871 }
2872
2873 /**
2874  * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
2875  * @scsi_lun: scsi LUN id
2876  *
2877  * Returns UPIU LUN id
2878  */
2879 static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
2880 {
2881         if (scsi_is_wlun(scsi_lun))
2882                 return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
2883                         | UFS_UPIU_WLUN_ID;
2884         else
2885                 return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
2886 }
2887
2888 /**
2889  * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2890  * @upiu_wlun_id: UPIU W-LUN id
2891  *
2892  * Returns SCSI W-LUN id
2893  */
2894 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2895 {
2896         return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2897 }
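/*
 * Editorial illustration (not part of the original source): assuming the
 * usual constant values (UFS_UPIU_WLUN_ID = 0x80, SCSI_W_LUN_BASE = 0xc100,
 * UFS_UPIU_UFS_DEVICE_WLUN = 0xD0), the two helpers above are inverses of
 * each other for well-known LUs, e.g.:
 *
 *   ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN)  yields 0xC150
 *   ufshcd_scsi_to_upiu_lun(0xC150)                          yields 0xD0
 */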
2898
2899 /**
2900  * ufshcd_get_write_lock - synchronize between shutdown, scaling &
2901  * arrival of requests
2902  * @hba: ufs host
2903  *
2904  * Lock is predominantly held by shutdown context, thus ensuring
2905  * that no requests from any other context may sneak through.
2906  */
2907 static inline void ufshcd_get_write_lock(struct ufs_hba *hba)
2908 {
2909         down_write(&hba->lock);
2910 }
2911
2912 /**
2913  * ufshcd_get_read_lock - synchronize between shutdown, scaling &
2914  * arrival of requests
2915  * @hba: ufs host
2916  *
2917  * Returns a positive value if the read lock is acquired, 0 if the request
2918  * may proceed without the lock, and < 0 on contention.
2919  * After shutdown is initiated, only requests directed to the well known
2920  * device lun are allowed through. The sync between scaling & issue is
2921  * maintained as is and this restructuring syncs shutdown with these too.
2922  */
2923 static int ufshcd_get_read_lock(struct ufs_hba *hba, u64 lun)
2924 {
2925         int err = 0;
2926
2927         err = down_read_trylock(&hba->lock);
2928         if (err > 0)
2929                 goto out;
2930         /* let requests for well known device lun to go through */
2931         if (ufshcd_scsi_to_upiu_lun(lun) == UFS_UPIU_UFS_DEVICE_WLUN)
2932                 return 0;
2933         else if (!ufshcd_is_shutdown_ongoing(hba))
2934                 return -EAGAIN;
2935         else
2936                 return -EPERM;
2937
2938 out:
2939         return err;
2940 }
2941
2942 /**
2943  * ufshcd_put_read_lock - synchronize between shutdown, scaling &
2944  * arrival of requests
2945  * @hba: ufs host
2946  *
2947  * Returns none
2948  */
2949 static inline void ufshcd_put_read_lock(struct ufs_hba *hba)
2950 {
2951         up_read(&hba->lock);
2952 }
2953
2954 /**
2955  * ufshcd_queuecommand - main entry point for SCSI requests
2956  * @host: SCSI host pointer
2957  * @cmd: command from SCSI Midlayer
2958  *
2959  * Returns 0 for success, non-zero in case of failure
2960  */
2961 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2962 {
2963         struct ufshcd_lrb *lrbp;
2964         struct ufs_hba *hba;
2965         unsigned long flags;
2966         int tag;
2967         int err = 0;
2968         bool has_read_lock = false;
2969
2970         hba = shost_priv(host);
2971
2972         if (!cmd || !cmd->request || !hba)
2973                 return -EINVAL;
2974
2975         tag = cmd->request->tag;
2976         if (!ufshcd_valid_tag(hba, tag)) {
2977                 dev_err(hba->dev,
2978                         "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
2979                         __func__, tag, cmd, cmd->request);
2980                 BUG();
2981         }
2982
2983         err = ufshcd_get_read_lock(hba, cmd->device->lun);
2984         if (unlikely(err < 0)) {
2985                 if (err == -EPERM) {
2986                         set_host_byte(cmd, DID_ERROR);
2987                         cmd->scsi_done(cmd);
2988                         return 0;
2989                 }
2990                 if (err == -EAGAIN)
2991                         return SCSI_MLQUEUE_HOST_BUSY;
2992         } else if (err == 1) {
2993                 has_read_lock = true;
2994         }
2995
2996         spin_lock_irqsave(hba->host->host_lock, flags);
2997
2998         /* if error handling is in progress, return host busy */
2999         if (ufshcd_eh_in_progress(hba)) {
3000                 err = SCSI_MLQUEUE_HOST_BUSY;
3001                 goto out_unlock;
3002         }
3003
3004         switch (hba->ufshcd_state) {
3005         case UFSHCD_STATE_OPERATIONAL:
3006                 break;
3007         case UFSHCD_STATE_RESET:
3008                 err = SCSI_MLQUEUE_HOST_BUSY;
3009                 goto out_unlock;
3010         case UFSHCD_STATE_ERROR:
3011                 set_host_byte(cmd, DID_ERROR);
3012                 cmd->scsi_done(cmd);
3013                 goto out_unlock;
3014         default:
3015                 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
3016                                 __func__, hba->ufshcd_state);
3017                 set_host_byte(cmd, DID_BAD_TARGET);
3018                 cmd->scsi_done(cmd);
3019                 goto out_unlock;
3020         }
3021         spin_unlock_irqrestore(hba->host->host_lock, flags);
3022
3023         hba->req_abort_count = 0;
3024
3025         /* acquire the tag to make sure device cmds don't use it */
3026         if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
3027                 /*
3028                  * Dev manage command in progress, requeue the command.
3029                  * Requeuing the command helps in cases where the request *may*
3030                  * find a different tag instead of waiting for the dev manage command
3031                  * completion.
3032                  */
3033                 err = SCSI_MLQUEUE_HOST_BUSY;
3034                 goto out;
3035         }
3036
3037         hba->ufs_stats.clk_hold.ctx = QUEUE_CMD;
3038         err = ufshcd_hold(hba, true);
3039         if (err) {
3040                 err = SCSI_MLQUEUE_HOST_BUSY;
3041                 clear_bit_unlock(tag, &hba->lrb_in_use);
3042                 goto out;
3043         }
3044         if (ufshcd_is_clkgating_allowed(hba))
3045                 WARN_ON(hba->clk_gating.state != CLKS_ON);
3046
3047         err = ufshcd_hibern8_hold(hba, true);
3048         if (err) {
3049                 clear_bit_unlock(tag, &hba->lrb_in_use);
3050                 err = SCSI_MLQUEUE_HOST_BUSY;
3051                 hba->ufs_stats.clk_rel.ctx = QUEUE_CMD;
3052                 ufshcd_release(hba, true);
3053                 goto out;
3054         }
3055         if (ufshcd_is_hibern8_on_idle_allowed(hba))
3056                 WARN_ON(hba->hibern8_on_idle.state != HIBERN8_EXITED);
3057
3058         /* Vote PM QoS for the request */
3059         ufshcd_vops_pm_qos_req_start(hba, cmd->request);
3060
3061         /* IO svc time latency histogram */
3062         if (hba->latency_hist_enabled &&
3063             (cmd->request->cmd_type == REQ_TYPE_FS)) {
3064                 cmd->request->lat_hist_io_start = ktime_get();
3065                 cmd->request->lat_hist_enabled = 1;
3066         } else {
3067                 cmd->request->lat_hist_enabled = 0;
3068         }
3069
3070         WARN_ON(hba->clk_gating.state != CLKS_ON);
3071
3072         lrbp = &hba->lrb[tag];
3073
3074         WARN_ON(lrbp->cmd);
3075         lrbp->cmd = cmd;
3076         lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
3077         lrbp->sense_buffer = cmd->sense_buffer;
3078         lrbp->task_tag = tag;
3079         lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
3080         lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
3081         lrbp->command_type = UTP_CMD_TYPE_SCSI;
3082         lrbp->req_abort_skip = false;
3083
3084         /* form UPIU before issuing the command */
3085         err = ufshcd_compose_upiu(hba, lrbp);
3086         if (err) {
3087                 if (err != -EAGAIN)
3088                         dev_err(hba->dev,
3089                                 "%s: failed to compose upiu %d\n",
3090                                 __func__, err);
3091
3092                 lrbp->cmd = NULL;
3093                 clear_bit_unlock(tag, &hba->lrb_in_use);
3094                 ufshcd_release_all(hba);
3095                 ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
3096                 goto out;
3097         }
3098
3099         err = ufshcd_map_sg(lrbp);
3100         if (err) {
3101                 lrbp->cmd = NULL;
3102                 clear_bit_unlock(tag, &hba->lrb_in_use);
3103                 ufshcd_release_all(hba);
3104                 ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
3105                 goto out;
3106         }
3107
3108         err = ufshcd_vops_crypto_engine_cfg_start(hba, tag);
3109         if (err) {
3110                 if (err != -EAGAIN)
3111                         dev_err(hba->dev,
3112                                 "%s: failed to configure crypto engine %d\n",
3113                                 __func__, err);
3114
3115                 scsi_dma_unmap(lrbp->cmd);
3116                 lrbp->cmd = NULL;
3117                 clear_bit_unlock(tag, &hba->lrb_in_use);
3118                 ufshcd_release_all(hba);
3119                 ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
3120
3121                 goto out;
3122         }
3123
3124         /* Make sure descriptors are ready before ringing the doorbell */
3125         wmb();
3126         /* issue command to the controller */
3127         spin_lock_irqsave(hba->host->host_lock, flags);
3128
3129         err = ufshcd_send_command(hba, tag);
3130         if (err) {
3131                 spin_unlock_irqrestore(hba->host->host_lock, flags);
3132                 scsi_dma_unmap(lrbp->cmd);
3133                 lrbp->cmd = NULL;
3134                 clear_bit_unlock(tag, &hba->lrb_in_use);
3135                 ufshcd_release_all(hba);
3136                 ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
3137                 ufshcd_vops_crypto_engine_cfg_end(hba, lrbp, cmd->request);
3138                 dev_err(hba->dev, "%s: failed sending command, %d\n",
3139                                                         __func__, err);
3140                 err = DID_ERROR;
3141                 goto out;
3142         }
3143
3144 out_unlock:
3145         spin_unlock_irqrestore(hba->host->host_lock, flags);
3146 out:
3147         if (has_read_lock)
3148                 ufshcd_put_read_lock(hba);
3149         return err;
3150 }
3151
3152 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
3153                 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
3154 {
3155         lrbp->cmd = NULL;
3156         lrbp->sense_bufflen = 0;
3157         lrbp->sense_buffer = NULL;
3158         lrbp->task_tag = tag;
3159         lrbp->lun = 0; /* device management cmd is not specific to any LUN */
3160         lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
3161         lrbp->intr_cmd = true; /* No interrupt aggregation */
3162         hba->dev_cmd.type = cmd_type;
3163
3164         return ufshcd_compose_upiu(hba, lrbp);
3165 }
3166
3167 static int
3168 ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
3169 {
3170         int err = 0;
3171         unsigned long flags;
3172         u32 mask = 1 << tag;
3173
3174         /* clear outstanding transaction before retry */
3175         spin_lock_irqsave(hba->host->host_lock, flags);
3176         ufshcd_utrl_clear(hba, tag);
3177         spin_unlock_irqrestore(hba->host->host_lock, flags);
3178
3179         /*
3180          * wait for h/w to clear the corresponding bit in the door-bell.
3181          * max. wait is 1 sec.
3182          */
3183         err = ufshcd_wait_for_register(hba,
3184                         REG_UTP_TRANSFER_REQ_DOOR_BELL,
3185                         mask, ~mask, 1000, 1000, true);
3186
3187         return err;
3188 }
3189
3190 static int
3191 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
3192 {
3193         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
3194
3195         /* Get the UPIU response */
3196         query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
3197                                 UPIU_RSP_CODE_OFFSET;
3198         return query_res->response;
3199 }
3200
3201 /**
3202  * ufshcd_dev_cmd_completion() - handles device management command responses
3203  * @hba: per adapter instance
3204  * @lrbp: pointer to local reference block
3205  */
3206 static int
3207 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
3208 {
3209         int resp;
3210         int err = 0;
3211
3212         hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
3213         resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
3214
3215         switch (resp) {
3216         case UPIU_TRANSACTION_NOP_IN:
3217                 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
3218                         err = -EINVAL;
3219                         dev_err(hba->dev, "%s: unexpected response %x\n",
3220                                         __func__, resp);
3221                 }
3222                 break;
3223         case UPIU_TRANSACTION_QUERY_RSP:
3224                 err = ufshcd_check_query_response(hba, lrbp);
3225                 if (!err)
3226                         err = ufshcd_copy_query_response(hba, lrbp);
3227                 break;
3228         case UPIU_TRANSACTION_REJECT_UPIU:
3229                 /* TODO: handle Reject UPIU Response */
3230                 err = -EPERM;
3231                 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
3232                                 __func__);
3233                 break;
3234         default:
3235                 err = -EINVAL;
3236                 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
3237                                 __func__, resp);
3238                 break;
3239         }
3240
3241         return err;
3242 }
3243
3244 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
3245                 struct ufshcd_lrb *lrbp, int max_timeout)
3246 {
3247         int err = 0;
3248         unsigned long time_left;
3249         unsigned long flags;
3250
3251         time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
3252                         msecs_to_jiffies(max_timeout));
3253
3254         spin_lock_irqsave(hba->host->host_lock, flags);
3255         hba->dev_cmd.complete = NULL;
3256         if (likely(time_left)) {
3257                 err = ufshcd_get_tr_ocs(lrbp);
3258                 if (!err)
3259                         err = ufshcd_dev_cmd_completion(hba, lrbp);
3260         }
3261         spin_unlock_irqrestore(hba->host->host_lock, flags);
3262
3263         if (!time_left) {
3264                 err = -ETIMEDOUT;
3265                 dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
3266                         __func__, lrbp->task_tag);
3267                 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
3268                         /* successfully cleared the command, retry if needed */
3269                         err = -EAGAIN;
3270                 /*
3271                  * in case of an error, after clearing the doorbell,
3272                  * we also need to clear the outstanding_request
3273                  * field in hba
3274                  */
3275                 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
3276         }
3277
3278         if (err)
3279                 ufsdbg_set_err_state(hba);
3280
3281         return err;
3282 }
3283
3284 /**
3285  * ufshcd_get_dev_cmd_tag - Get device management command tag
3286  * @hba: per-adapter instance
3287  * @tag_out: pointer to variable with available slot value
3288  *
3289  * Get a free slot and lock it until device management command
3290  * completes.
3291  *
3292  * Returns false if a free slot is unavailable for locking, else
3293  * returns true with the tag value in @tag_out.
3294  */
3295 static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
3296 {
3297         int tag;
3298         bool ret = false;
3299         unsigned long tmp;
3300
3301         if (!tag_out)
3302                 goto out;
3303
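        /*
         * Search the complement of the in-use bitmap for the highest-numbered
         * free slot and try to claim it atomically; if another context wins
         * the race (test_and_set_bit_lock() returns non-zero), search again.
         */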
3304         do {
3305                 tmp = ~hba->lrb_in_use;
3306                 tag = find_last_bit(&tmp, hba->nutrs);
3307                 if (tag >= hba->nutrs)
3308                         goto out;
3309         } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
3310
3311         *tag_out = tag;
3312         ret = true;
3313 out:
3314         return ret;
3315 }
3316
3317 static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
3318 {
3319         clear_bit_unlock(tag, &hba->lrb_in_use);
3320 }
3321
3322 /**
3323  * ufshcd_exec_dev_cmd - API for sending device management requests
3324  * @hba - UFS hba
3325  * @cmd_type - specifies the type (NOP, Query...)
3326  * @timeout - timeout in milliseconds
3327  *
3328  * NOTE: Since there is only one available tag for device management commands,
3329  * it is expected you hold the hba->dev_cmd.lock mutex.
3330  */
3331 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
3332                 enum dev_cmd_type cmd_type, int timeout)
3333 {
3334         struct ufshcd_lrb *lrbp;
3335         int err;
3336         int tag;
3337         struct completion wait;
3338         unsigned long flags;
3339
3340         /*
3341          * May get invoked from shutdown and IOCTL contexts.
3342          * In shutdown context, it comes in with lock acquired.
3343          * In error recovery context, it may come with lock acquired.
3344          */
3345
3346         if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
3347                 down_read(&hba->lock);
3348
3349         /*
3350          * Get free slot, sleep if slots are unavailable.
3351          * Even though we use wait_event() which sleeps indefinitely,
3352          * the maximum wait time is bounded by SCSI request timeout.
3353          */
3354         wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
3355
3356         init_completion(&wait);
3357         lrbp = &hba->lrb[tag];
3358         WARN_ON(lrbp->cmd);
3359         err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
3360         if (unlikely(err))
3361                 goto out_put_tag;
3362
3363         hba->dev_cmd.complete = &wait;
3364
3365         /* Make sure descriptors are ready before ringing the doorbell */
3366         wmb();
3367         spin_lock_irqsave(hba->host->host_lock, flags);
3368         err = ufshcd_send_command(hba, tag);
3369         spin_unlock_irqrestore(hba->host->host_lock, flags);
3370         if (err) {
3371                 dev_err(hba->dev, "%s: failed sending command, %d\n",
3372                                                         __func__, err);
3373                 goto out_put_tag;
3374         }
3375         err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
3376
3377 out_put_tag:
3378         ufshcd_put_dev_cmd_tag(hba, tag);
3379         wake_up(&hba->dev_cmd.tag_wq);
3380         if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
3381                 up_read(&hba->lock);
3382         return err;
3383 }
3384
3385 /**
3386  * ufshcd_init_query() - init the query response and request parameters
3387  * @hba: per-adapter instance
3388  * @request: address of the request pointer to be initialized
3389  * @response: address of the response pointer to be initialized
3390  * @opcode: operation to perform
3391  * @idn: flag idn to access
3392  * @index: LU number to access
3393  * @selector: query/flag/descriptor further identification
3394  */
3395 static inline void ufshcd_init_query(struct ufs_hba *hba,
3396                 struct ufs_query_req **request, struct ufs_query_res **response,
3397                 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
3398 {
3399         int idn_t = (int)idn;
3400
3401         ufsdbg_error_inject_dispatcher(hba,
3402                 ERR_INJECT_QUERY, idn_t, (int *)&idn_t);
3403         idn = idn_t;
3404
3405         *request = &hba->dev_cmd.query.request;
3406         *response = &hba->dev_cmd.query.response;
3407         memset(*request, 0, sizeof(struct ufs_query_req));
3408         memset(*response, 0, sizeof(struct ufs_query_res));
3409         (*request)->upiu_req.opcode = opcode;
3410         (*request)->upiu_req.idn = idn;
3411         (*request)->upiu_req.index = index;
3412         (*request)->upiu_req.selector = selector;
3413
3414         ufshcd_update_query_stats(hba, opcode, idn);
3415 }
3416
3417 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
3418         enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
3419 {
3420         int ret;
3421         int retries;
3422
3423         for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
3424                 ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
3425                 if (ret)
3426                         dev_dbg(hba->dev,
3427                                 "%s: failed with error %d, retries %d\n",
3428                                 __func__, ret, retries);
3429                 else
3430                         break;
3431         }
3432
3433         if (ret)
3434                 dev_err(hba->dev,
3435                         "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
3436                         __func__, opcode, idn, ret, retries);
3437         return ret;
3438 }
3439
3440 /**
3441  * ufshcd_query_flag() - API function for sending flag query requests
3442  * @hba: per-adapter instance
3443  * @opcode: flag query to perform
3444  * @idn: flag idn to access
3445  * @flag_res: the flag value after the query request completes
3446  *
3447  * Returns 0 for success, non-zero in case of failure
3448  */
3449 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
3450                         enum flag_idn idn, bool *flag_res)
3451 {
3452         struct ufs_query_req *request = NULL;
3453         struct ufs_query_res *response = NULL;
3454         int err, index = 0, selector = 0;
3455         int timeout = QUERY_REQ_TIMEOUT;
3456
3457         BUG_ON(!hba);
3458
3459         ufshcd_hold_all(hba);
3460         mutex_lock(&hba->dev_cmd.lock);
3461         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3462                         selector);
3463
3464         switch (opcode) {
3465         case UPIU_QUERY_OPCODE_SET_FLAG:
3466         case UPIU_QUERY_OPCODE_CLEAR_FLAG:
3467         case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
3468                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3469                 break;
3470         case UPIU_QUERY_OPCODE_READ_FLAG:
3471                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3472                 if (!flag_res) {
3473                         /* No dummy reads */
3474                         dev_err(hba->dev, "%s: Invalid argument for read request\n",
3475                                         __func__);
3476                         err = -EINVAL;
3477                         goto out_unlock;
3478                 }
3479                 break;
3480         default:
3481                 dev_err(hba->dev,
3482                         "%s: Expected query flag opcode but got = %d\n",
3483                         __func__, opcode);
3484                 err = -EINVAL;
3485                 goto out_unlock;
3486         }
3487
3488         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
3489
3490         if (err) {
3491                 dev_err(hba->dev,
3492                         "%s: Sending flag query for idn %d failed, err = %d\n",
3493                         __func__, request->upiu_req.idn, err);
3494                 goto out_unlock;
3495         }
3496
3497         if (flag_res)
3498                 *flag_res = (be32_to_cpu(response->upiu_res.value) &
3499                                 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
3500
3501 out_unlock:
3502         mutex_unlock(&hba->dev_cmd.lock);
3503         ufshcd_release_all(hba);
3504         return err;
3505 }
3506 EXPORT_SYMBOL(ufshcd_query_flag);
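/*
 * Example usage (editorial sketch, not from the original source; assumes a
 * valid hba and that QUERY_FLAG_IDN_FDEVICEINIT is the flag of interest):
 *
 *   bool flag_res = false;
 *   int err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *                               QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 *
 * On success, flag_res holds the current value of the queried flag.
 */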
3507
3508 /**
3509  * ufshcd_query_attr - API function for sending attribute requests
3510  * @hba: per-adapter instance
3511  * @opcode: attribute opcode
3512  * @idn: attribute idn to access
3513  * @index: index field
3514  * @selector: selector field
3515  * @attr_val: the attribute value after the query request completes
3516  *
3517  * Returns 0 for success, non-zero in case of failure
3518  */
3519 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
3520                         enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
3521 {
3522         struct ufs_query_req *request = NULL;
3523         struct ufs_query_res *response = NULL;
3524         int err;
3525
3526         BUG_ON(!hba);
3527
3528         ufshcd_hold_all(hba);
3529         if (!attr_val) {
3530                 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
3531                                 __func__, opcode);
3532                 err = -EINVAL;
3533                 goto out;
3534         }
3535
3536         mutex_lock(&hba->dev_cmd.lock);
3537         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3538                         selector);
3539
3540         switch (opcode) {
3541         case UPIU_QUERY_OPCODE_WRITE_ATTR:
3542                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3543                 request->upiu_req.value = cpu_to_be32(*attr_val);
3544                 break;
3545         case UPIU_QUERY_OPCODE_READ_ATTR:
3546                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3547                 break;
3548         default:
3549                 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
3550                                 __func__, opcode);
3551                 err = -EINVAL;
3552                 goto out_unlock;
3553         }
3554
3555         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3556
3557         if (err) {
3558                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3559                                 __func__, opcode,
3560                                 request->upiu_req.idn, index, err);
3561                 goto out_unlock;
3562         }
3563
3564         *attr_val = be32_to_cpu(response->upiu_res.value);
3565
3566 out_unlock:
3567         mutex_unlock(&hba->dev_cmd.lock);
3568 out:
3569         ufshcd_release_all(hba);
3570         return err;
3571 }
3572 EXPORT_SYMBOL(ufshcd_query_attr);
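/*
 * Example usage (editorial sketch; assumes a valid hba and that
 * QUERY_ATTR_IDN_BKOPS_STATUS is defined in ufs.h as usual):
 *
 *   u32 bkops_status = 0;
 *   int err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *                               QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0,
 *                               &bkops_status);
 *
 * On success, the attribute value is returned in bkops_status.
 */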
3573
3574 /**
3575  * ufshcd_query_attr_retry() - API function for sending query
3576  * attribute with retries
3577  * @hba: per-adapter instance
3578  * @opcode: attribute opcode
3579  * @idn: attribute idn to access
3580  * @index: index field
3581  * @selector: selector field
3582  * @attr_val: the attribute value after the query request
3583  * completes
3584  *
3585  * Returns 0 for success, non-zero in case of failure
3586  */
3587 static int ufshcd_query_attr_retry(struct ufs_hba *hba,
3588         enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
3589         u32 *attr_val)
3590 {
3591         int ret = 0;
3592         u32 retries;
3593
3594         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3595                 ret = ufshcd_query_attr(hba, opcode, idn, index,
3596                                                 selector, attr_val);
3597                 if (ret)
3598                         dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
3599                                 __func__, ret, retries);
3600                 else
3601                         break;
3602         }
3603
3604         if (ret)
3605                 dev_err(hba->dev,
3606                         "%s: query attribute, idn %d, failed with error %d after %d retries\n",
3607                         __func__, idn, ret, retries);
3608         return ret;
3609 }
3610
3611 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
3612                         enum query_opcode opcode, enum desc_idn idn, u8 index,
3613                         u8 selector, u8 *desc_buf, int *buf_len)
3614 {
3615         struct ufs_query_req *request = NULL;
3616         struct ufs_query_res *response = NULL;
3617         int err;
3618
3619         BUG_ON(!hba);
3620
3621         ufshcd_hold_all(hba);
3622         if (!desc_buf) {
3623                 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
3624                                 __func__, opcode);
3625                 err = -EINVAL;
3626                 goto out;
3627         }
3628
3629         if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
3630                 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
3631                                 __func__, *buf_len);
3632                 err = -EINVAL;
3633                 goto out;
3634         }
3635
3636         mutex_lock(&hba->dev_cmd.lock);
3637         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3638                         selector);
3639         hba->dev_cmd.query.descriptor = desc_buf;
3640         request->upiu_req.length = cpu_to_be16(*buf_len);
3641
3642         switch (opcode) {
3643         case UPIU_QUERY_OPCODE_WRITE_DESC:
3644                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3645                 break;
3646         case UPIU_QUERY_OPCODE_READ_DESC:
3647                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3648                 break;
3649         default:
3650                 dev_err(hba->dev,
3651                                 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3652                                 __func__, opcode);
3653                 err = -EINVAL;
3654                 goto out_unlock;
3655         }
3656
3657         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3658
3659         if (err) {
3660                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3661                                 __func__, opcode,
3662                                 request->upiu_req.idn, index, err);
3663                 goto out_unlock;
3664         }
3665
3666         hba->dev_cmd.query.descriptor = NULL;
3667         *buf_len = be16_to_cpu(response->upiu_res.length);
3668
3669 out_unlock:
3670         mutex_unlock(&hba->dev_cmd.lock);
3671 out:
3672         ufshcd_release_all(hba);
3673         return err;
3674 }
3675
3676 /**
3677  * ufshcd_query_descriptor - API function for sending descriptor requests
3678  * @hba: per-adapter instance
3679  * @opcode: descriptor opcode
3680  * @idn: descriptor idn to access
3681  * @index: index field
3682  * @selector: selector field
3683  * @desc_buf: the buffer that contains the descriptor
3684  * @buf_len: length parameter passed to the device
3685  *
3686  * Returns 0 for success, non-zero in case of failure.
3687  * The buf_len parameter will contain, on return, the length parameter
3688  * received on the response.
3689  */
3690 int ufshcd_query_descriptor(struct ufs_hba *hba,
3691                         enum query_opcode opcode, enum desc_idn idn, u8 index,
3692                         u8 selector, u8 *desc_buf, int *buf_len)
3693 {
3694         int err;
3695         int retries;
3696
3697         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3698                 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3699                                                 selector, desc_buf, buf_len);
3700                 if (!err || err == -EINVAL)
3701                         break;
3702         }
3703
3704         return err;
3705 }
3706 EXPORT_SYMBOL(ufshcd_query_descriptor);
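/*
 * Example usage (editorial sketch; the 0x40 buffer size is only an
 * assumption, real callers size the buffer from ufs_query_desc_max_size[]):
 *
 *   u8 desc_buf[0x40];
 *   int buf_len = sizeof(desc_buf);
 *   int err = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
 *                                     QUERY_DESC_IDN_DEVICE, 0, 0,
 *                                     desc_buf, &buf_len);
 *
 * On success, buf_len is updated with the length reported in the response.
 */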
3707
3708 /**
3709  * ufshcd_read_desc_param - read the specified descriptor parameter
3710  * @hba: Pointer to adapter instance
3711  * @desc_id: descriptor idn value
3712  * @desc_index: descriptor index
3713  * @param_offset: offset of the parameter to read
3714  * @param_read_buf: pointer to buffer where parameter would be read
3715  * @param_size: sizeof(param_read_buf)
3716  *
3717  * Return 0 in case of success, non-zero otherwise
3718  */
3719 static int ufshcd_read_desc_param(struct ufs_hba *hba,
3720                                   enum desc_idn desc_id,
3721                                   int desc_index,
3722                                   u32 param_offset,
3723                                   u8 *param_read_buf,
3724                                   u32 param_size)
3725 {
3726         int ret;
3727         u8 *desc_buf;
3728         u32 buff_len;
3729         bool is_kmalloc = true;
3730
3731         /* safety checks */
3732         if (desc_id >= QUERY_DESC_IDN_MAX)
3733                 return -EINVAL;
3734
3735         buff_len = ufs_query_desc_max_size[desc_id];
3736         if ((param_offset + param_size) > buff_len)
3737                 return -EINVAL;
3738
3739         if (!param_offset && (param_size == buff_len)) {
3740                 /* memory space already available to hold full descriptor */
3741                 desc_buf = param_read_buf;
3742                 is_kmalloc = false;
3743         } else {
3744                 /* allocate memory to hold full descriptor */
3745                 desc_buf = kmalloc(buff_len, GFP_KERNEL);
3746                 if (!desc_buf)
3747                         return -ENOMEM;
3748         }
3749
3750         ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
3751                                       desc_id, desc_index, 0, desc_buf,
3752                                       &buff_len);
3753
3754         if (ret) {
3755                 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
3756                         __func__, desc_id, desc_index, param_offset, ret);
3757
3758                 goto out;
3759         }
3760
3761         /* Sanity check */
3762         if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3763                 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
3764                         __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3765                 ret = -EINVAL;
3766                 goto out;
3767         }
3768
3769         /*
3770          * While reading variable size descriptors (like string descriptor),
3771          * some UFS devices may report the "LENGTH" (field in "Transaction
3772          * Specific fields" of Query Response UPIU) same as what was requested
3773          * in Query Request UPIU instead of reporting the actual size of the
3774          * variable size descriptor.
3775          * It is safe to ignore the "LENGTH" field for variable size
3776          * descriptors, as we can always derive the length of the descriptor
3777          * from the descriptor header fields. Hence this check imposes the
3778          * length match only for fixed size descriptors (for which we always
3779          * request the correct size as part of the Query Request UPIU).
3780          */
3781         if ((desc_id != QUERY_DESC_IDN_STRING) &&
3782             (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
3783                 dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
3784                         __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
3785                 ret = -EINVAL;
3786                 goto out;
3787         }
3788
3789         if (is_kmalloc)
3790                 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3791 out:
3792         if (is_kmalloc)
3793                 kfree(desc_buf);
3794         return ret;
3795 }
3796
3797 static inline int ufshcd_read_desc(struct ufs_hba *hba,
3798                                    enum desc_idn desc_id,
3799                                    int desc_index,
3800                                    u8 *buf,
3801                                    u32 size)
3802 {
3803         return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
3804 }
3805
3806 static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
3807                                          u8 *buf,
3808                                          u32 size)
3809 {
3810         return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
3811 }
3812
3813 int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
3814 {
3815         return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
3816 }
3817
3818 /**
3819  * ufshcd_read_string_desc - read string descriptor
3820  * @hba: pointer to adapter instance
3821  * @desc_index: descriptor index
3822  * @buf: pointer to buffer where descriptor would be read
3823  * @size: size of buf
3824  * @ascii: if true convert from unicode to ascii characters
3825  *
3826  * Return 0 in case of success, non-zero otherwise
3827  */
3828 int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
3829                                 u32 size, bool ascii)
3830 {
3831         int err = 0;
3832
3833         err = ufshcd_read_desc(hba,
3834                                 QUERY_DESC_IDN_STRING, desc_index, buf, size);
3835
3836         if (err) {
3837                 dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
3838                         __func__, QUERY_REQ_RETRIES, err);
3839                 goto out;
3840         }
3841
3842         if (ascii) {
3843                 int desc_len;
3844                 int ascii_len;
3845                 int i;
3846                 char *buff_ascii;
3847
3848                 desc_len = buf[0];
3849                 /* remove header and divide by 2 to move from UTF16 to UTF8 */
3850                 ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3851                 if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
3852                         dev_err(hba->dev, "%s: buffer allocated size is too small\n",
3853                                         __func__);
3854                         err = -ENOMEM;
3855                         goto out;
3856                 }
3857
3858                 buff_ascii = kzalloc(ascii_len, GFP_KERNEL);
3859                 if (!buff_ascii) {
3860                         dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
3861                                         __func__, ascii_len);
3862                         err = -ENOMEM;
3863                         goto out_free_buff;
3864                 }
3865
3866                 /*
3867                  * the descriptor contains a string in UTF-16 format;
3868                  * we need to convert it to UTF-8 so it can be displayed
3869                  */
3870                 utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
3871                                 desc_len - QUERY_DESC_HDR_SIZE,
3872                                 UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
3873
3874                 /* replace non-printable or non-ASCII characters with spaces */
3875                 for (i = 0; i < ascii_len; i++)
3876                         ufshcd_remove_non_printable(&buff_ascii[i]);
3877
3878                 memset(buf + QUERY_DESC_HDR_SIZE, 0,
3879                                 size - QUERY_DESC_HDR_SIZE);
3880                 memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
3881                 buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
3882 out_free_buff:
3883                 kfree(buff_ascii);
3884         }
3885 out:
3886         return err;
3887 }
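/*
 * Example usage (editorial sketch; str_index is a hypothetical string
 * descriptor index, e.g. one previously read out of the device descriptor):
 *
 *   u8 model[33] = {0};
 *   int err = ufshcd_read_string_desc(hba, str_index, model,
 *                                     sizeof(model), true);
 *
 * With ascii == true, the UTF-16 payload in the buffer is converted to ASCII.
 */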
3888
3889 /**
3890  * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3891  * @hba: Pointer to adapter instance
3892  * @lun: lun id
3893  * @param_offset: offset of the parameter to read
3894  * @param_read_buf: pointer to buffer where parameter would be read
3895  * @param_size: sizeof(param_read_buf)
3896  *
3897  * Return 0 in case of success, non-zero otherwise
3898  */
3899 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3900                                               int lun,
3901                                               enum unit_desc_param param_offset,
3902                                               u8 *param_read_buf,
3903                                               u32 param_size)
3904 {
3905         /*
3906          * Unit descriptors are only available for general purpose LUs (LUN id
3907          * from 0 to 7) and RPMB Well known LU.
3908          */
3909         if (!ufs_is_valid_unit_desc_lun(lun))
3910                 return -EOPNOTSUPP;
3911
3912         return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3913                                       param_offset, param_read_buf, param_size);
3914 }
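/*
 * Example usage (editorial sketch; assumes lun refers to a general purpose
 * LU and that UNIT_DESC_PARAM_LU_Q_DEPTH is defined in ufs.h as usual):
 *
 *   u8 lun_qdepth = 0;
 *   int err = ufshcd_read_unit_desc_param(hba, lun,
 *                                         UNIT_DESC_PARAM_LU_Q_DEPTH,
 *                                         &lun_qdepth,
 *                                         sizeof(lun_qdepth));
 */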
3915
3916 /**
3917  * ufshcd_memory_alloc - allocate memory for host memory space data structures
3918  * @hba: per adapter instance
3919  *
3920  * 1. Allocate DMA memory for Command Descriptor array
3921  *      Each command descriptor consists of Command UPIU, Response UPIU and PRDT
3922  * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3923  * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3924  *      (UTMRDL)
3925  * 4. Allocate memory for local reference block(lrb).
3926  *
3927  * Returns 0 for success, non-zero in case of failure
3928  */
3929 static int ufshcd_memory_alloc(struct ufs_hba *hba)
3930 {
3931         size_t utmrdl_size, utrdl_size, ucdl_size;
3932
3933         /* Allocate memory for UTP command descriptors */
3934         ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
3935         hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3936                                                   ucdl_size,
3937                                                   &hba->ucdl_dma_addr,
3938                                                   GFP_KERNEL);
3939
3940         /*
3941          * UFSHCI requires the UTP command descriptor to be 128 byte aligned.
3942          * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE;
3943          * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
3944          * be aligned to 128 bytes as well.
3945          */
3946         if (!hba->ucdl_base_addr ||
3947             WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3948                 dev_err(hba->dev,
3949                         "Command Descriptor Memory allocation failed\n");
3950                 goto out;
3951         }
3952
3953         /*
3954          * Allocate memory for UTP Transfer descriptors
3955          * UFSHCI requires 1024 byte alignment of UTRD
3956          */
3957         utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3958         hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3959                                                    utrdl_size,
3960                                                    &hba->utrdl_dma_addr,
3961                                                    GFP_KERNEL);
3962         if (!hba->utrdl_base_addr ||
3963             WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3964                 dev_err(hba->dev,
3965                         "Transfer Descriptor Memory allocation failed\n");
3966                 goto out;
3967         }
3968
3969         /*
3970          * Allocate memory for UTP Task Management descriptors
3971          * UFSHCI requires 1024 byte alignment of UTMRD
3972          */
3973         utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3974         hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3975                                                     utmrdl_size,
3976                                                     &hba->utmrdl_dma_addr,
3977                                                     GFP_KERNEL);
3978         if (!hba->utmrdl_base_addr ||
3979             WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3980                 dev_err(hba->dev,
3981                 "Task Management Descriptor Memory allocation failed\n");
3982                 goto out;
3983         }
3984
3985         /* Allocate memory for local reference block */
3986         hba->lrb = devm_kzalloc(hba->dev,
3987                                 hba->nutrs * sizeof(struct ufshcd_lrb),
3988                                 GFP_KERNEL);
3989         if (!hba->lrb) {
3990                 dev_err(hba->dev, "LRB Memory allocation failed\n");
3991                 goto out;
3992         }
3993         return 0;
3994 out:
3995         return -ENOMEM;
3996 }
3997
3998 /**
3999  * ufshcd_host_memory_configure - configure local reference block with
4000  *                              memory offsets
4001  * @hba: per adapter instance
4002  *
4003  * Configure Host memory space
4004  * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
4005  * address.
4006  * 2. Update each UTRD with Response UPIU offset, Response UPIU length
4007  * and PRDT offset.
4008  * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
4009  * into local reference block.
4010  */
4011 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
4012 {
4013         struct utp_transfer_cmd_desc *cmd_descp;
4014         struct utp_transfer_req_desc *utrdlp;
4015         dma_addr_t cmd_desc_dma_addr;
4016         dma_addr_t cmd_desc_element_addr;
4017         u16 response_offset;
4018         u16 prdt_offset;
4019         int cmd_desc_size;
4020         int i;
4021
4022         utrdlp = hba->utrdl_base_addr;
4023         cmd_descp = hba->ucdl_base_addr;
4024
4025         response_offset =
4026                 offsetof(struct utp_transfer_cmd_desc, response_upiu);
4027         prdt_offset =
4028                 offsetof(struct utp_transfer_cmd_desc, prd_table);
4029
4030         cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
4031         cmd_desc_dma_addr = hba->ucdl_dma_addr;
4032
4033         for (i = 0; i < hba->nutrs; i++) {
4034                 /* Configure UTRD with command descriptor base address */
4035                 cmd_desc_element_addr =
4036                                 (cmd_desc_dma_addr + (cmd_desc_size * i));
4037                 utrdlp[i].command_desc_base_addr_lo =
4038                                 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
4039                 utrdlp[i].command_desc_base_addr_hi =
4040                                 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
4041
4042                 /* Response upiu and prdt offset should be in double words */
4043                 utrdlp[i].response_upiu_offset =
4044                                 cpu_to_le16((response_offset >> 2));
4045                 utrdlp[i].prd_table_offset =
4046                                 cpu_to_le16((prdt_offset >> 2));
4047                 utrdlp[i].response_upiu_length =
4048                                 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
4049
4050                 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
4051                 hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
4052                                 (i * sizeof(struct utp_transfer_req_desc));
4053                 hba->lrb[i].ucd_req_ptr =
4054                         (struct utp_upiu_req *)(cmd_descp + i);
4055                 hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
4056                 hba->lrb[i].ucd_rsp_ptr =
4057                         (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
4058                 hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
4059                                 response_offset;
4060                 hba->lrb[i].ucd_prdt_ptr =
4061                         (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
4062                 hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
4063                                 prdt_offset;
4064         }
4065 }
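/*
 * Editorial note: per the offsets programmed above, each per-tag UTP command
 * descriptor is laid out roughly as
 *
 *   +-------------------+  <- cmd_desc_element_addr (UTRD.UCDBA/UCDBAU)
 *   | Command UPIU      |
 *   +-------------------+  <- + response_offset (response_upiu_offset)
 *   | Response UPIU     |
 *   +-------------------+  <- + prdt_offset (prd_table_offset)
 *   | PRDT              |
 *   +-------------------+
 *
 * with both offsets stored in double-word (32-bit) units, hence the ">> 2"
 * conversions above.
 */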
4066
4067 /**
4068  * ufshcd_dme_link_startup - Notify Unipro to perform link startup
4069  * @hba: per adapter instance
4070  *
4071  * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
4072  * in order to initialize the Unipro link startup procedure.
4073  * Once the Unipro links are up, the device connected to the controller
4074  * is detected.
4075  *
4076  * Returns 0 on success, non-zero value on failure
4077  */
4078 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
4079 {
4080         struct uic_command uic_cmd = {0};
4081         int ret;
4082
4083         uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
4084
4085         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4086         if (ret)
4087                 dev_dbg(hba->dev,
4088                         "dme-link-startup: error code %d\n", ret);
4089         return ret;
4090 }
4091
4092 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
4093 {
4094         #define MIN_DELAY_BEFORE_DME_CMDS_US    1000
4095         unsigned long min_sleep_time_us;
4096
4097         if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
4098                 return;
4099
4100         /*
4101          * last_dme_cmd_tstamp will be 0 only for 1st call to
4102          * this function
4103          */
4104         if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
4105                 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
4106         } else {
4107                 unsigned long delta =
4108                         (unsigned long) ktime_to_us(
4109                                 ktime_sub(ktime_get(),
4110                                 hba->last_dme_cmd_tstamp));
4111
4112                 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
4113                         min_sleep_time_us =
4114                                 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
4115                 else
4116                         return; /* no more delay required */
4117         }
4118
4119         /* allow sleep for extra 50us if needed */
4120         usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
4121 }
4122
4123 static inline void ufshcd_save_tstamp_of_last_dme_cmd(
4124                         struct ufs_hba *hba)
4125 {
4126         if (hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)
4127                 hba->last_dme_cmd_tstamp = ktime_get();
4128 }
4129
4130 /**
4131  * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
4132  * @hba: per adapter instance
4133  * @attr_sel: uic command argument1
4134  * @attr_set: attribute set type as uic command argument2
4135  * @mib_val: setting value as uic command argument3
4136  * @peer: indicate whether peer or local
4137  *
4138  * Returns 0 on success, non-zero value on failure
4139  */
4140 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
4141                         u8 attr_set, u32 mib_val, u8 peer)
4142 {
4143         struct uic_command uic_cmd = {0};
4144         static const char *const action[] = {
4145                 "dme-set",
4146                 "dme-peer-set"
4147         };
4148         const char *set = action[!!peer];
4149         int ret;
4150         int retries = UFS_UIC_COMMAND_RETRIES;
4151
4152         ufsdbg_error_inject_dispatcher(hba,
4153                 ERR_INJECT_DME_ATTR, attr_sel, &attr_sel);
4154
4155         uic_cmd.command = peer ?
4156                 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
4157         uic_cmd.argument1 = attr_sel;
4158         uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
4159         uic_cmd.argument3 = mib_val;
4160
4161         do {
4162                 /* for peer attributes we retry upon failure */
4163                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4164                 if (ret)
4165                         dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
4166                                 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
4167         } while (ret && peer && --retries);
4168
4169         if (ret)
4170                 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
4171                         set, UIC_GET_ATTR_ID(attr_sel), mib_val,
4172                         UFS_UIC_COMMAND_RETRIES - retries);
4173
4174         return ret;
4175 }
4176 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
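/*
 * Example usage (editorial sketch; callers normally go through the
 * ufshcd_dme_set()/ufshcd_dme_peer_set() wrappers declared in ufshcd.h,
 * assuming those helpers and PA_TACTIVATE are available as usual):
 *
 *   int err = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
 */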
4177
4178 /**
4179  * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
4180  * @hba: per adapter instance
4181  * @attr_sel: uic command argument1
4182  * @mib_val: the value of the attribute as returned by the UIC command
4183  * @peer: indicate whether peer or local
4184  *
4185  * Returns 0 on success, non-zero value on failure
4186  */
4187 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
4188                         u32 *mib_val, u8 peer)
4189 {
4190         struct uic_command uic_cmd = {0};
4191         static const char *const action[] = {
4192                 "dme-get",
4193                 "dme-peer-get"
4194         };
4195         const char *get = action[!!peer];
4196         int ret;
4197         int retries = UFS_UIC_COMMAND_RETRIES;
4198         struct ufs_pa_layer_attr orig_pwr_info;
4199         struct ufs_pa_layer_attr temp_pwr_info;
4200         bool pwr_mode_change = false;
4201
4202         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
4203                 orig_pwr_info = hba->pwr_info;
4204                 temp_pwr_info = orig_pwr_info;
4205
4206                 if (orig_pwr_info.pwr_tx == FAST_MODE ||
4207                     orig_pwr_info.pwr_rx == FAST_MODE) {
4208                         temp_pwr_info.pwr_tx = FASTAUTO_MODE;
4209                         temp_pwr_info.pwr_rx = FASTAUTO_MODE;
4210                         pwr_mode_change = true;
4211                 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
4212                     orig_pwr_info.pwr_rx == SLOW_MODE) {
4213                         temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
4214                         temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
4215                         pwr_mode_change = true;
4216                 }
4217                 if (pwr_mode_change) {
4218                         ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
4219                         if (ret)
4220                                 goto out;
4221                 }
4222         }
4223
4224         uic_cmd.command = peer ?
4225                 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
4226
4227         ufsdbg_error_inject_dispatcher(hba,
4228                 ERR_INJECT_DME_ATTR, attr_sel, &attr_sel);
4229
4230         uic_cmd.argument1 = attr_sel;
4231
4232         do {
4233                 /* for peer attributes we retry upon failure */
4234                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4235                 if (ret)
4236                         dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
4237                                 get, UIC_GET_ATTR_ID(attr_sel), ret);
4238         } while (ret && peer && --retries);
4239
4240         if (ret)
4241                 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
4242                         get, UIC_GET_ATTR_ID(attr_sel),
4243                         UFS_UIC_COMMAND_RETRIES - retries);
4244
4245         if (mib_val && !ret)
4246                 *mib_val = uic_cmd.argument3;
4247
4248         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
4249             && pwr_mode_change)
4250                 ufshcd_change_power_mode(hba, &orig_pwr_info);
4251 out:
4252         return ret;
4253 }
4254 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
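/*
 * Example usage (editorial sketch; assumes the ufshcd_dme_get() wrapper from
 * ufshcd.h and the PA_CONNECTEDRXDATALANES attribute from unipro.h):
 *
 *   u32 rx_lanes = 0;
 *   int err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
 *                            &rx_lanes);
 */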
4255
4256 /**
4257  * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
4258  * state) and waits for it to take effect.
4259  *
4260  * @hba: per adapter instance
4261  * @cmd: UIC command to execute
4262  *
4263  * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER and
4264  * DME_HIBERNATE_EXIT take some time to take effect on both the host and
4265  * device UniPro link, hence their final completion is indicated by dedicated
4266  * status bits in the Interrupt Status register (UPMS, UHES, UHXS) in
4267  * addition to the normal UIC command completion status (UCCS). This function
4268  * only returns after the relevant status bits indicate completion.
4269  *
4270  * Returns 0 on success, non-zero value on failure
4271  */
4272 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
4273 {
4274         struct completion uic_async_done;
4275         unsigned long flags;
4276         u8 status;
4277         int ret;
4278         bool reenable_intr = false;
4279
4280         mutex_lock(&hba->uic_cmd_mutex);
4281         init_completion(&uic_async_done);
4282         ufshcd_add_delay_before_dme_cmd(hba);
4283
4284         spin_lock_irqsave(hba->host->host_lock, flags);
4285         hba->uic_async_done = &uic_async_done;
4286         if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
4287                 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
4288                 /*
4289                  * Make sure UIC command completion interrupt is disabled before
4290                  * issuing UIC command.
4291                  */
4292                 wmb();
4293                 reenable_intr = true;
4294         }
4295         ret = __ufshcd_send_uic_cmd(hba, cmd, false);
4296         spin_unlock_irqrestore(hba->host->host_lock, flags);
4297         if (ret) {
4298                 dev_err(hba->dev,
4299                         "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
4300                         cmd->command, cmd->argument3, ret);
4301                 goto out;
4302         }
4303
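        /*
         * The completion is signalled from the interrupt path:
         * ufshcd_uic_cmd_compl() completes hba->uic_async_done when one of
         * the UFSHCD_UIC_PWR_MASK status bits (UPMS/UHES/UHXS) is raised.
         */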
4304         if (!wait_for_completion_timeout(hba->uic_async_done,
4305                                          msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
4306                 dev_err(hba->dev,
4307                         "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
4308                         cmd->command, cmd->argument3);
4309                 ret = -ETIMEDOUT;
4310                 goto out;
4311         }
4312
4313         status = ufshcd_get_upmcrs(hba);
4314         if (status != PWR_LOCAL) {
4315                 dev_err(hba->dev,
4316                         "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
4317                         cmd->command, status);
4318                 ret = (status != PWR_OK) ? status : -1;
4319         }
4320         ufshcd_dme_cmd_log(hba, "cmp2", hba->active_uic_cmd->command);
4321
4322 out:
4323         if (ret) {
4324                 ufsdbg_set_err_state(hba);
4325                 ufshcd_print_host_state(hba);
4326                 ufshcd_print_pwr_info(hba);
4327                 ufshcd_print_host_regs(hba);
4328                 ufshcd_print_cmd_log(hba);
4329         }
4330
4331         ufshcd_save_tstamp_of_last_dme_cmd(hba);
4332         spin_lock_irqsave(hba->host->host_lock, flags);
4333         hba->active_uic_cmd = NULL;
4334         hba->uic_async_done = NULL;
4335         if (reenable_intr)
4336                 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
4337         spin_unlock_irqrestore(hba->host->host_lock, flags);
4338         mutex_unlock(&hba->uic_cmd_mutex);
4339         return ret;
4340 }
4341
4342 int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us)
4343 {
4344         unsigned long flags;
4345         int ret = 0;
4346         u32 tm_doorbell;
4347         u32 tr_doorbell;
4348         bool timeout = false, do_last_check = false;
4349         ktime_t start;
4350
4351         ufshcd_hold_all(hba);
4352         spin_lock_irqsave(hba->host->host_lock, flags);
4353         /*
4354          * Wait for all the outstanding tasks/transfer requests.
4355          * Verify by checking the doorbell registers are clear.
4356          */
4357         start = ktime_get();
4358         do {
4359                 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
4360                         ret = -EBUSY;
4361                         goto out;
4362                 }
4363
4364                 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
4365                 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
4366                 if (!tm_doorbell && !tr_doorbell) {
4367                         timeout = false;
4368                         break;
4369                 } else if (do_last_check) {
4370                         break;
4371                 }
4372
4373                 spin_unlock_irqrestore(hba->host->host_lock, flags);
4374                 schedule();
4375                 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
4376                     wait_timeout_us) {
4377                         timeout = true;
4378                         /*
4379                          * We might have been scheduled out for a long time, so
4380                          * make one last check whether the doorbells have cleared
4381                          * by now.
4382                          */
4383                         do_last_check = true;
4384                 }
4385                 spin_lock_irqsave(hba->host->host_lock, flags);
4386         } while (tm_doorbell || tr_doorbell);
4387
4388         if (timeout) {
4389                 dev_err(hba->dev,
4390                         "%s: timed out waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
4391                         __func__, tm_doorbell, tr_doorbell);
4392                 ret = -EBUSY;
4393         }
4394 out:
4395         spin_unlock_irqrestore(hba->host->host_lock, flags);
4396         ufshcd_release_all(hba);
4397         return ret;
4398 }
4399
4400 /**
4401  * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
4402  *                              using DME_SET primitives.
4403  * @hba: per adapter instance
4404  * @mode: power mode value
4405  *
4406  * Returns 0 on success, non-zero value on failure
4407  */
4408 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
4409 {
4410         struct uic_command uic_cmd = {0};
4411         int ret;
4412
4413         if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
4414                 ret = ufshcd_dme_set(hba,
4415                                 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
4416                 if (ret) {
4417                         dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
4418                                                 __func__, ret);
4419                         goto out;
4420                 }
4421         }
4422
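        /*
         * PA_PWRMODE is written via a regular DME_SET; the mode byte packs
         * the RX power mode in the upper nibble and the TX power mode in
         * the lower nibble (see ufshcd_change_power_mode(), which passes
         * pwr_rx << 4 | pwr_tx).
         */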
4423         uic_cmd.command = UIC_CMD_DME_SET;
4424         uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
4425         uic_cmd.argument3 = mode;
4426         hba->ufs_stats.clk_hold.ctx = PWRCTL_CMD_SEND;
4427         ufshcd_hold_all(hba);
4428         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4429         hba->ufs_stats.clk_rel.ctx = PWRCTL_CMD_SEND;
4430         ufshcd_release_all(hba);
4431 out:
4432         return ret;
4433 }
4434
4435 static int ufshcd_link_recovery(struct ufs_hba *hba)
4436 {
4437         int ret = 0;
4438         unsigned long flags;
4439
4440         /*
4441          * Check if there is any race with fatal error handling.
4442          * If so, wait for it to complete. Even though fatal error
4443          * handling does reset and restore in some cases, don't assume
4444          * anything out of it. We are just avoiding race here.
4445          */
4446         do {
4447                 spin_lock_irqsave(hba->host->host_lock, flags);
4448                 if (!(work_pending(&hba->eh_work) ||
4449                                 hba->ufshcd_state == UFSHCD_STATE_RESET))
4450                         break;
4451                 spin_unlock_irqrestore(hba->host->host_lock, flags);
4452                 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
4453                 flush_work(&hba->eh_work);
4454         } while (1);
4455
4456
4457         /*
4458          * We don't know whether the previous reset actually reset the host
4459          * controller, so force a reset here to be sure.
4460          */
4461         hba->ufshcd_state = UFSHCD_STATE_ERROR;
4462         hba->force_host_reset = true;
4463         schedule_work(&hba->eh_work);
4464
4465         /* wait for the reset work to finish */
4466         do {
4467                 if (!(work_pending(&hba->eh_work) ||
4468                                 hba->ufshcd_state == UFSHCD_STATE_RESET))
4469                         break;
4470                 spin_unlock_irqrestore(hba->host->host_lock, flags);
4471                 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
4472                 flush_work(&hba->eh_work);
4473                 spin_lock_irqsave(hba->host->host_lock, flags);
4474         } while (1);
4475
4476         if (!((hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) &&
4477               ufshcd_is_link_active(hba)))
4478                 ret = -ENOLINK;
4479         spin_unlock_irqrestore(hba->host->host_lock, flags);
4480
4481         return ret;
4482 }
4483
4484 static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
4485 {
4486         int ret;
4487         struct uic_command uic_cmd = {0};
4488         ktime_t start = ktime_get();
4489
4490         uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
4491         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4492         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
4493                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4494
4495         /*
4496          * Do full reinit if enter failed or if LINERESET was detected during
4497          * Hibern8 operation. After LINERESET, link moves to default PWM-G1
4498          * mode hence full reinit is required to move link to HS speeds.
4499          */
4500         if (ret || hba->full_init_linereset) {
4501                 int err;
4502
4503                 hba->full_init_linereset = false;
4504                 ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_ENTER);
4505                 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d",
4506                         __func__, ret);
4507                 /*
4508                  * If link recovery fails, return the error code (-ENOLINK)
4509                  * returned by ufshcd_link_recovery().
4510                  * If link recovery succeeds, return -EAGAIN so that the
4511                  * hibern8 enter is retried.
4512                  */
4513                 err = ufshcd_link_recovery(hba);
4514                 if (err) {
4515                         dev_err(hba->dev, "%s: link recovery failed", __func__);
4516                         ret = err;
4517                 } else {
4518                         ret = -EAGAIN;
4519                 }
4520         } else {
4521                 dev_dbg(hba->dev, "%s: Hibern8 Enter at %lld us", __func__,
4522                         ktime_to_us(ktime_get()));
4523         }
4524
4525         return ret;
4526 }
4527
4528 int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
4529 {
4530         int ret = 0, retries;
4531
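        /*
         * __ufshcd_uic_hibern8_enter() returns -EAGAIN when the enter
         * attempt failed but link recovery succeeded, so retry a bounded
         * number of times; any other error means the link could not be
         * recovered and is treated as fatal.
         */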
4532         for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
4533                 ret = __ufshcd_uic_hibern8_enter(hba);
4534                 if (!ret)
4535                         goto out;
4536                 else if (ret != -EAGAIN)
4537                         /* Unable to recover the link, so no point proceeding */
4538                         BUG();
4539         }
4540 out:
4541         return ret;
4542 }
4543
4544 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
4545 {
4546         struct uic_command uic_cmd = {0};
4547         int ret;
4548         ktime_t start = ktime_get();
4549
4550         uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
4551         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4552         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
4553                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4554
4555         /* Do full reinit if exit failed */
4556         if (ret) {
4557                 ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_EXIT);
4558                 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d",
4559                         __func__, ret);
4560                 ret = ufshcd_link_recovery(hba);
4561                 /* Unable to recover the link, so no point proceeding */
4562                 if (ret)
4563                         BUG();
4564         } else {
4565                 dev_dbg(hba->dev, "%s: Hibern8 Exit at %lld us", __func__,
4566                         ktime_to_us(ktime_get()));
4567                 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
4568                 hba->ufs_stats.hibern8_exit_cnt++;
4569         }
4570
4571         return ret;
4572 }
4573
4574 /**
4575  * ufshcd_init_pwr_info - setting the POR (power on reset)
4576  * values in hba power info
4577  * @hba: per-adapter instance
4578  */
4579 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
4580 {
4581         hba->pwr_info.gear_rx = UFS_PWM_G1;
4582         hba->pwr_info.gear_tx = UFS_PWM_G1;
4583         hba->pwr_info.lane_rx = 1;
4584         hba->pwr_info.lane_tx = 1;
4585         hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
4586         hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
4587         hba->pwr_info.hs_rate = 0;
4588 }
4589
4590 /**
4591  * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
4592  * @hba: per-adapter instance
4593  */
4594 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
4595 {
4596         struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4597
4598         if (hba->max_pwr_info.is_valid)
4599                 return 0;
4600
4601         pwr_info->pwr_tx = FAST_MODE;
4602         pwr_info->pwr_rx = FAST_MODE;
4603         pwr_info->hs_rate = PA_HS_MODE_B;
4604
4605         /* Get the connected lane count */
4606         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4607                         &pwr_info->lane_rx);
4608         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4609                         &pwr_info->lane_tx);
4610
4611         if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4612                 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4613                                 __func__,
4614                                 pwr_info->lane_rx,
4615                                 pwr_info->lane_tx);
4616                 return -EINVAL;
4617         }
4618
4619         /*
4620          * First, get the maximum gears of HS speed.
4621          * If a zero value, it means there is no HSGEAR capability.
4622          * Then, get the maximum gears of PWM speed.
4623          */
4624         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4625         if (!pwr_info->gear_rx) {
4626                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4627                                 &pwr_info->gear_rx);
4628                 if (!pwr_info->gear_rx) {
4629                         dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4630                                 __func__, pwr_info->gear_rx);
4631                         return -EINVAL;
4632                 }
4633                 pwr_info->pwr_rx = SLOW_MODE;
4634         }
4635
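        /*
         * The host's maximum TX gear is limited by the device's (peer's) RX
         * capability, so read the peer RX gear attributes to fill in
         * gear_tx/pwr_tx.
         */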
4636         ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4637                         &pwr_info->gear_tx);
4638         if (!pwr_info->gear_tx) {
4639                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4640                                 &pwr_info->gear_tx);
4641                 if (!pwr_info->gear_tx) {
4642                         dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4643                                 __func__, pwr_info->gear_tx);
4644                         return -EINVAL;
4645                 }
4646                 pwr_info->pwr_tx = SLOW_MODE;
4647         }
4648
4649         hba->max_pwr_info.is_valid = true;
4650         return 0;
4651 }
4652
4653 int ufshcd_change_power_mode(struct ufs_hba *hba,
4654                              struct ufs_pa_layer_attr *pwr_mode)
4655 {
4656         int ret = 0;
4657
4658         /* if already configured to the requested pwr_mode */
4659         if (!hba->restore_needed &&
4660             pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4661             pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4662             pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4663             pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4664             pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4665             pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4666             pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4667                 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4668                 return 0;
4669         }
4670
4671         ufsdbg_error_inject_dispatcher(hba, ERR_INJECT_PWR_CHANGE, 0, &ret);
4672         if (ret)
4673                 return ret;
4674
4675         /*
4676          * Configure attributes for power mode change with below.
4677          * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4678          * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4679          * - PA_HSSERIES
4680          */
4681         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4682         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4683                         pwr_mode->lane_rx);
4684         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4685                         pwr_mode->pwr_rx == FAST_MODE)
4686                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
4687         else
4688                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
4689
4690         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4691         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4692                         pwr_mode->lane_tx);
4693         if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4694                         pwr_mode->pwr_tx == FAST_MODE)
4695                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
4696         else
4697                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
4698
4699         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4700             pwr_mode->pwr_tx == FASTAUTO_MODE ||
4701             pwr_mode->pwr_rx == FAST_MODE ||
4702             pwr_mode->pwr_tx == FAST_MODE)
4703                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4704                                                 pwr_mode->hs_rate);
4705
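        /*
         * Program the PA_PWRMODEUSERDATA attributes and the corresponding
         * local DME timers with the default data-link layer timeout values
         * (FC0 protection, TC0 replay and AFC0 request timeouts) for the
         * new power mode.
         */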
4706         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4707                         DL_FC0ProtectionTimeOutVal_Default);
4708         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4709                         DL_TC0ReplayTimeOutVal_Default);
4710         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4711                         DL_AFC0ReqTimeOutVal_Default);
4712
4713         ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4714                         DL_FC0ProtectionTimeOutVal_Default);
4715         ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4716                         DL_TC0ReplayTimeOutVal_Default);
4717         ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4718                         DL_AFC0ReqTimeOutVal_Default);
4719
4720         ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4721                         | pwr_mode->pwr_tx);
4722
4723         if (ret) {
4724                 ufshcd_update_error_stats(hba, UFS_ERR_POWER_MODE_CHANGE);
4725                 dev_err(hba->dev,
4726                         "%s: power mode change failed %d\n", __func__, ret);
4727         } else {
4728                 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4729                                                                 pwr_mode);
4730
4731                 memcpy(&hba->pwr_info, pwr_mode,
4732                         sizeof(struct ufs_pa_layer_attr));
4733                 hba->ufs_stats.power_mode_change_cnt++;
4734         }
4735
4736         return ret;
4737 }
4738
4739 /**
4740  * ufshcd_config_pwr_mode - configure a new power mode
4741  * @hba: per-adapter instance
4742  * @desired_pwr_mode: desired power configuration
4743  */
4744 static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4745                 struct ufs_pa_layer_attr *desired_pwr_mode)
4746 {
4747         struct ufs_pa_layer_attr final_params = { 0 };
4748         int ret;
4749
4750         ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4751                                         desired_pwr_mode, &final_params);
4752
4753         if (ret)
4754                 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4755
4756         ret = ufshcd_change_power_mode(hba, &final_params);
4757         if (!ret)
4758                 ufshcd_print_pwr_info(hba);
4759
4760         return ret;
4761 }
4762
4763 /**
4764  * ufshcd_complete_dev_init() - checks device readiness
4765  * @hba: per-adapter instance
4766  *
4767  * Set fDeviceInit flag and poll until device toggles it.
4768  */
4769 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4770 {
4771         int i;
4772         int err;
4773         bool flag_res = 1;
4774
4775         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4776                 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
4777         if (err) {
4778                 dev_err(hba->dev,
4779                         "%s setting fDeviceInit flag failed with error %d\n",
4780                         __func__, err);
4781                 goto out;
4782         }
4783
4784         /* poll for max. 1000 iterations for fDeviceInit flag to clear */
4785         for (i = 0; i < 1000 && !err && flag_res; i++)
4786                 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4787                         QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
4788
4789         if (err)
4790                 dev_err(hba->dev,
4791                         "%s reading fDeviceInit flag failed with error %d\n",
4792                         __func__, err);
4793         else if (flag_res)
4794                 dev_err(hba->dev,
4795                         "%s fDeviceInit was not cleared by the device\n",
4796                         __func__);
4797
4798 out:
4799         return err;
4800 }
4801
4802 /**
4803  * ufshcd_make_hba_operational - Make UFS controller operational
4804  * @hba: per adapter instance
4805  *
4806  * To bring UFS host controller to operational state,
4807  * 1. Enable required interrupts
4808  * 2. Configure interrupt aggregation
4809  * 3. Program UTRL and UTMRL base address
4810  * 4. Configure run-stop-registers
4811  *
4812  * Returns 0 on success, non-zero value on failure
4813  */
4814 static int ufshcd_make_hba_operational(struct ufs_hba *hba)
4815 {
4816         int err = 0;
4817         u32 reg;
4818
4819         /* Enable required interrupts */
4820         ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4821
4822         /* Configure interrupt aggregation */
4823         if (ufshcd_is_intr_aggr_allowed(hba))
4824                 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4825         else
4826                 ufshcd_disable_intr_aggr(hba);
4827
4828         /* Configure UTRL and UTMRL base address registers */
4829         ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4830                         REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4831         ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4832                         REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4833         ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4834                         REG_UTP_TASK_REQ_LIST_BASE_L);
4835         ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4836                         REG_UTP_TASK_REQ_LIST_BASE_H);
4837
4838         /*
4839          * Make sure base address and interrupt setup are updated before
4840          * enabling the run/stop registers below.
4841          */
4842         wmb();
4843
4844         /*
4845          * UCRDY, UTMRLDY and UTRLRDY bits must be 1
4846          */
4847         reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4848         if (!(ufshcd_get_lists_status(reg))) {
4849                 ufshcd_enable_run_stop_reg(hba);
4850         } else {
4851                 dev_err(hba->dev,
4852                         "Host controller not ready to process requests");
4853                 err = -EIO;
4854                 goto out;
4855         }
4856
4857 out:
4858         return err;
4859 }
4860
4861 /**
4862  * ufshcd_hba_stop - Send controller to reset state
4863  * @hba: per adapter instance
4864  * @can_sleep: perform sleep or just spin
4865  */
4866 static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
4867 {
4868         int err;
4869
4870         ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
4871         err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4872                                         CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4873                                         10, 1, can_sleep);
4874         if (err)
4875                 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4876 }
4877
4878 /**
4879  * ufshcd_hba_enable - initialize the controller
4880  * @hba: per adapter instance
4881  *
4882  * The controller resets itself and controller firmware initialization
4883  * sequence kicks off. When controller is ready it will set
4884  * the Host Controller Enable bit to 1.
4885  *
4886  * Returns 0 on success, non-zero value on failure
4887  */
4888 static int ufshcd_hba_enable(struct ufs_hba *hba)
4889 {
4890         int retry;
4891
4892         /*
4893          * msleep of 1 and 5 used in this function might result in msleep(20),
4894          * but it was necessary to send the UFS FPGA to reset mode during
4895          * development and testing of this driver. msleep can be changed to
4896          * mdelay and retry count can be reduced based on the controller.
4897          */
4898         if (!ufshcd_is_hba_active(hba))
4899                 /* change controller state to "reset state" */
4900                 ufshcd_hba_stop(hba, true);
4901
4902         /* UniPro link is disabled at this point */
4903         ufshcd_set_link_off(hba);
4904
4905         ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4906
4907         /* start controller initialization sequence */
4908         ufshcd_hba_start(hba);
4909
4910         /*
4911          * To initialize a UFS host controller HCE bit must be set to 1.
4912          * During initialization the HCE bit value changes from 1->0->1.
4913          * When the host controller completes initialization sequence
4914          * it sets the value of HCE bit to 1. The same HCE bit is read back
4915          * to check if the controller has completed initialization sequence.
4916          * So without this delay the value HCE = 1, set in the previous
4917          * instruction might be read back.
4918          * This delay can be changed based on the controller.
4919          */
4920         msleep(1);
4921
4922         /* wait for the host controller to complete initialization */
4923         retry = 10;
4924         while (ufshcd_is_hba_active(hba)) {
4925                 if (retry) {
4926                         retry--;
4927                 } else {
4928                         dev_err(hba->dev,
4929                                 "Controller enable failed\n");
4930                         return -EIO;
4931                 }
4932                 msleep(5);
4933         }
4934
4935         /* enable UIC related interrupts */
4936         ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4937
4938         ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4939
4940         return 0;
4941 }
4942
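/*
 * ufshcd_disable_tx_lcc - clear TX_LCC_ENABLE on every connected TX lane,
 * either on the local (host) side or on the peer (device) side. Used during
 * link startup when one side's LCC implementation is broken (see the
 * UFSHCD_QUIRK_BROKEN_LCC / UFS_DEVICE_QUIRK_BROKEN_LCC handling in
 * ufshcd_link_startup()).
 */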
4943 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4944 {
4945         int tx_lanes, i, err = 0;
4946
4947         if (!peer)
4948                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4949                                &tx_lanes);
4950         else
4951                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4952                                     &tx_lanes);
4953         for (i = 0; i < tx_lanes; i++) {
4954                 if (!peer)
4955                         err = ufshcd_dme_set(hba,
4956                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4957                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4958                                         0);
4959                 else
4960                         err = ufshcd_dme_peer_set(hba,
4961                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4962                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4963                                         0);
4964                 if (err) {
4965                         dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4966                                 __func__, peer, i, err);
4967                         break;
4968                 }
4969         }
4970
4971         return err;
4972 }
4973
4974 static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
4975 {
4976         return ufshcd_disable_tx_lcc(hba, false);
4977 }
4978
4979 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4980 {
4981         return ufshcd_disable_tx_lcc(hba, true);
4982 }
4983
4984 /**
4985  * ufshcd_link_startup - Initialize unipro link startup
4986  * @hba: per adapter instance
4987  *
4988  * Returns 0 for success, non-zero in case of failure
4989  */
4990 static int ufshcd_link_startup(struct ufs_hba *hba)
4991 {
4992         int ret;
4993         int retries = DME_LINKSTARTUP_RETRIES;
4994         bool link_startup_again = false;
4995
4996         /*
4997          * If the UFS device isn't active, we will have to issue link startup
4998          * twice to make sure the device state moves to active.
4999          */
5000         if (!ufshcd_is_ufs_dev_active(hba))
5001                 link_startup_again = true;
5002
5003 link_startup:
5004         do {
5005                 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
5006
5007                 ret = ufshcd_dme_link_startup(hba);
5008                 if (ret)
5009                         ufshcd_update_error_stats(hba, UFS_ERR_LINKSTARTUP);
5010
5011                 /* check if device is detected by inter-connect layer */
5012                 if (!ret && !ufshcd_is_device_present(hba)) {
5013                         ufshcd_update_error_stats(hba, UFS_ERR_LINKSTARTUP);
5014                         dev_err(hba->dev, "%s: Device not present\n", __func__);
5015                         ret = -ENXIO;
5016                         goto out;
5017                 }
5018
5019                 /*
5020                  * DME link lost indication is only received when link is up,
5021                  * but we can't be sure if the link is up until link startup
5022                  * succeeds. So reset the local Uni-Pro and try again.
5023                  */
5024                 if (ret && ufshcd_hba_enable(hba))
5025                         goto out;
5026         } while (ret && retries--);
5027
5028         if (ret)
5029                 /* failed to get the link up... retire */
5030                 goto out;
5031
5032         if (link_startup_again) {
5033                 link_startup_again = false;
5034                 retries = DME_LINKSTARTUP_RETRIES;
5035                 goto link_startup;
5036         }
5037
5038         /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
5039         ufshcd_init_pwr_info(hba);
5040         ufshcd_print_pwr_info(hba);
5041
5042         if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
5043                 ret = ufshcd_disable_device_tx_lcc(hba);
5044                 if (ret)
5045                         goto out;
5046         }
5047
5048         if (hba->dev_quirks & UFS_DEVICE_QUIRK_BROKEN_LCC) {
5049                 ret = ufshcd_disable_host_tx_lcc(hba);
5050                 if (ret)
5051                         goto out;
5052         }
5053
5054         /* Include any host controller configuration via UIC commands */
5055         ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
5056         if (ret)
5057                 goto out;
5058
5059         ret = ufshcd_make_hba_operational(hba);
5060 out:
5061         if (ret) {
5062                 dev_err(hba->dev, "link startup failed %d\n", ret);
5063                 ufshcd_print_host_state(hba);
5064                 ufshcd_print_pwr_info(hba);
5065                 ufshcd_print_host_regs(hba);
5066         }
5067         return ret;
5068 }
5069
5070 /**
5071  * ufshcd_verify_dev_init() - Verify device initialization
5072  * @hba: per-adapter instance
5073  *
5074  * Send NOP OUT UPIU and wait for NOP IN response to check whether the
5075  * device Transport Protocol (UTP) layer is ready after a reset.
5076  * If the UTP layer at the device side is not initialized, it may
5077  * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
5078  * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
5079  */
5080 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
5081 {
5082         int err = 0;
5083         int retries;
5084
5085         ufshcd_hold_all(hba);
5086         mutex_lock(&hba->dev_cmd.lock);
5087         for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
5088                 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
5089                                                NOP_OUT_TIMEOUT);
5090
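                /*
                 * A timeout means the device did not answer the NOP OUT at
                 * all, so treat it as final; other errors are retried up to
                 * NOP_OUT_RETRIES times.
                 */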
5091                 if (!err || err == -ETIMEDOUT)
5092                         break;
5093
5094                 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
5095         }
5096         mutex_unlock(&hba->dev_cmd.lock);
5097         ufshcd_release_all(hba);
5098
5099         if (err)
5100                 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
5101         return err;
5102 }
5103
5104 /**
5105  * ufshcd_set_queue_depth - set lun queue depth
5106  * @sdev: pointer to SCSI device
5107  *
5108  * Read bLUQueueDepth value and activate scsi tagged command
5109  * queueing. For WLUN, queue depth is set to 1. For best-effort
5110  * cases (bLUQueueDepth = 0) the queue depth is set to a maximum
5111  * value that host can queue.
5112  */
5113 static void ufshcd_set_queue_depth(struct scsi_device *sdev)
5114 {
5115         int ret = 0;
5116         u8 lun_qdepth;
5117         struct ufs_hba *hba;
5118
5119         hba = shost_priv(sdev->host);
5120
5121         lun_qdepth = hba->nutrs;
5122         ret = ufshcd_read_unit_desc_param(hba,
5123                           ufshcd_scsi_to_upiu_lun(sdev->lun),
5124                           UNIT_DESC_PARAM_LU_Q_DEPTH,
5125                           &lun_qdepth,
5126                           sizeof(lun_qdepth));
5127
5128         /* Some WLUNs don't support the unit descriptor */
5129         if (ret == -EOPNOTSUPP)
5130                 lun_qdepth = 1;
5131         else if (!lun_qdepth)
5132                 /* eventually, we can figure out the real queue depth */
5133                 lun_qdepth = hba->nutrs;
5134         else
5135                 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
5136
5137         dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
5138                         __func__, lun_qdepth);
5139         scsi_change_queue_depth(sdev, lun_qdepth);
5140 }
5141
5142 /*
5143  * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
5144  * @hba: per-adapter instance
5145  * @lun: UFS device lun id
5146  * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
5147  *
5148  * Returns 0 in case of success and the b_lu_write_protect status is returned
5149  * in the @b_lu_write_protect parameter.
5150  * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
5151  * Returns -EINVAL in case of invalid parameters passed to this function.
5152  */
5153 static int ufshcd_get_lu_wp(struct ufs_hba *hba,
5154                             u8 lun,
5155                             u8 *b_lu_write_protect)
5156 {
5157         int ret;
5158
5159         if (!b_lu_write_protect)
5160                 ret = -EINVAL;
5161         /*
5162          * According to UFS device spec, RPMB LU can't be write
5163          * protected so skip reading bLUWriteProtect parameter for
5164          * it. For other W-LUs, UNIT DESCRIPTOR is not available.
5165          */
5166         else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
5167                 ret = -ENOTSUPP;
5168         else
5169                 ret = ufshcd_read_unit_desc_param(hba,
5170                                           lun,
5171                                           UNIT_DESC_PARAM_LU_WR_PROTECT,
5172                                           b_lu_write_protect,
5173                                           sizeof(*b_lu_write_protect));
5174         return ret;
5175 }
5176
5177 /**
5178  * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
5179  * status
5180  * @hba: per-adapter instance
5181  * @sdev: pointer to SCSI device
5182  *
5183  */
5184 static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
5185                                                     struct scsi_device *sdev)
5186 {
5187         if (hba->dev_info.f_power_on_wp_en &&
5188             !hba->dev_info.is_lu_power_on_wp) {
5189                 u8 b_lu_write_protect;
5190
5191                 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
5192                                       &b_lu_write_protect) &&
5193                     (b_lu_write_protect == UFS_LU_POWER_ON_WP))
5194                         hba->dev_info.is_lu_power_on_wp = true;
5195         }
5196 }
5197
5198 /**
5199  * ufshcd_slave_alloc - handle initial SCSI device configurations
5200  * @sdev: pointer to SCSI device
5201  *
5202  * Returns success
5203  */
5204 static int ufshcd_slave_alloc(struct scsi_device *sdev)
5205 {
5206         struct ufs_hba *hba;
5207
5208         hba = shost_priv(sdev->host);
5209
5210         /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
5211         sdev->use_10_for_ms = 1;
5212
5213         /* allow SCSI layer to restart the device in case of errors */
5214         sdev->allow_restart = 1;
5215
5216         /* REPORT SUPPORTED OPERATION CODES is not supported */
5217         sdev->no_report_opcodes = 1;
5218
5219         /* WRITE_SAME command is not supported */
5220         sdev->no_write_same = 1;
5221
5222         ufshcd_set_queue_depth(sdev);
5223
5224         ufshcd_get_lu_power_on_wp_status(hba, sdev);
5225
5226         return 0;
5227 }
5228
5229 /**
5230  * ufshcd_change_queue_depth - change queue depth
5231  * @sdev: pointer to SCSI device
5232  * @depth: required depth to set
5233  *
5234  * Change queue depth and make sure the max. limits are not crossed.
5235  */
5236 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
5237 {
5238         struct ufs_hba *hba = shost_priv(sdev->host);
5239
5240         if (depth > hba->nutrs)
5241                 depth = hba->nutrs;
5242         return scsi_change_queue_depth(sdev, depth);
5243 }
5244
5245 /**
5246  * ufshcd_slave_configure - adjust SCSI device configurations
5247  * @sdev: pointer to SCSI device
5248  */
5249 static int ufshcd_slave_configure(struct scsi_device *sdev)
5250 {
5251         struct request_queue *q = sdev->request_queue;
5252
5253         blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
5254         blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
5255
5256         sdev->autosuspend_delay = UFSHCD_AUTO_SUSPEND_DELAY_MS;
5257         sdev->use_rpm_auto = 1;
5258
5259         return 0;
5260 }
5261
5262 /**
5263  * ufshcd_slave_destroy - remove SCSI device configurations
5264  * @sdev: pointer to SCSI device
5265  */
5266 static void ufshcd_slave_destroy(struct scsi_device *sdev)
5267 {
5268         struct ufs_hba *hba;
5269
5270         hba = shost_priv(sdev->host);
5271         /* Drop the reference as it won't be needed anymore */
5272         if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
5273                 unsigned long flags;
5274
5275                 spin_lock_irqsave(hba->host->host_lock, flags);
5276                 hba->sdev_ufs_device = NULL;
5277                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5278         }
5279 }
5280
5281 /**
5282  * ufshcd_task_req_compl - handle task management request completion
5283  * @hba: per adapter instance
5284  * @index: index of the completed request
5285  * @resp: task management service response
5286  *
5287  * Returns non-zero value on error, zero on success
5288  */
5289 static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
5290 {
5291         struct utp_task_req_desc *task_req_descp;
5292         struct utp_upiu_task_rsp *task_rsp_upiup;
5293         unsigned long flags;
5294         int ocs_value;
5295         int task_result;
5296
5297         spin_lock_irqsave(hba->host->host_lock, flags);
5298
5299         /* Clear completed tasks from outstanding_tasks */
5300         __clear_bit(index, &hba->outstanding_tasks);
5301
5302         task_req_descp = hba->utmrdl_base_addr;
5303         ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
5304
5305         if (ocs_value == OCS_SUCCESS) {
5306                 task_rsp_upiup = (struct utp_upiu_task_rsp *)
5307                                 task_req_descp[index].task_rsp_upiu;
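                /*
                 * The task management service response is carried in bits
                 * 15:8 of dword_1 of the response UPIU header, hence the
                 * MASK_TASK_RESPONSE mask and shift below.
                 */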
5308                 task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
5309                 task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
5310                 if (resp)
5311                         *resp = (u8)task_result;
5312         } else {
5313                 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
5314                                 __func__, ocs_value);
5315         }
5316         spin_unlock_irqrestore(hba->host->host_lock, flags);
5317
5318         return ocs_value;
5319 }
5320
5321 /**
5322  * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
5323  * @lrbp: pointer to local reference block of completed command
5324  * @scsi_status: SCSI command status
5325  *
5326  * Returns value based on SCSI command status
5327  */
5328 static inline int
5329 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
5330 {
5331         int result = 0;
5332
5333         switch (scsi_status) {
5334         case SAM_STAT_CHECK_CONDITION:
5335                 ufshcd_copy_sense_data(lrbp);
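                /*
                 * Fall through: a CHECK CONDITION is still completed as
                 * DID_OK, with the sense data copied above for the midlayer.
                 */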
5336         case SAM_STAT_GOOD:
5337                 result |= DID_OK << 16 |
5338                           COMMAND_COMPLETE << 8 |
5339                           scsi_status;
5340                 break;
5341         case SAM_STAT_TASK_SET_FULL:
5342         case SAM_STAT_BUSY:
5343         case SAM_STAT_TASK_ABORTED:
5344                 ufshcd_copy_sense_data(lrbp);
5345                 result |= scsi_status;
5346                 break;
5347         default:
5348                 result |= DID_ERROR << 16;
5349                 break;
5350         } /* end of switch */
5351
5352         return result;
5353 }
5354
5355 /**
5356  * ufshcd_transfer_rsp_status - Get overall status of the response
5357  * @hba: per adapter instance
5358  * @lrbp: pointer to local reference block of completed command
5359  *
5360  * Returns result of the command to notify SCSI midlayer
5361  */
5362 static inline int
5363 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
5364 {
5365         int result = 0;
5366         int scsi_status;
5367         int ocs;
5368         bool print_prdt;
5369
5370         /* overall command status of utrd */
5371         ocs = ufshcd_get_tr_ocs(lrbp);
5372
5373         switch (ocs) {
5374         case OCS_SUCCESS:
5375                 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
5376                 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
5377                 switch (result) {
5378                 case UPIU_TRANSACTION_RESPONSE:
5379                         /*
5380                          * get the response UPIU result to extract
5381                          * the SCSI command status
5382                          */
5383                         result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
5384
5385                         /*
5386                          * get the result based on SCSI status response
5387                          * to notify the SCSI midlayer of the command status
5388                          */
5389                         scsi_status = result & MASK_SCSI_STATUS;
5390                         result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
5391
5392                         /*
5393                          * Currently we only support BKOPs exception events,
5394                          * hence we can ignore BKOPs exception events during
5395                          * power management callbacks. A BKOPs exception event
5396                          * is not expected to be raised in the runtime suspend
5397                          * callback as it allows urgent bkops.
5398                          * During system suspend we forcefully disable bkops
5399                          * anyway, and if urgent bkops is needed it will be
5400                          * enabled on system resume. A long term solution
5401                          * could be to abort the system suspend if the UFS
5402                          * device needs urgent BKOPs.
5403                          */
5404                         if (!hba->pm_op_in_progress &&
5405                             ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
5406                                 schedule_work(&hba->eeh_work);
5407                         break;
5408                 case UPIU_TRANSACTION_REJECT_UPIU:
5409                         /* TODO: handle Reject UPIU Response */
5410                         result = DID_ERROR << 16;
5411                         dev_err(hba->dev,
5412                                 "Reject UPIU not fully implemented\n");
5413                         break;
5414                 default:
5415                         dev_err(hba->dev,
5416                                 "Unexpected request response code = %x\n",
5417                                 result);
5418                         result = DID_ERROR << 16;
5419                         break;
5420                 }
5421                 break;
5422         case OCS_ABORTED:
5423                 result |= DID_ABORT << 16;
5424                 break;
5425         case OCS_INVALID_COMMAND_STATUS:
5426                 result |= DID_REQUEUE << 16;
5427                 break;
5428         case OCS_INVALID_CMD_TABLE_ATTR:
5429         case OCS_INVALID_PRDT_ATTR:
5430         case OCS_MISMATCH_DATA_BUF_SIZE:
5431         case OCS_MISMATCH_RESP_UPIU_SIZE:
5432         case OCS_PEER_COMM_FAILURE:
5433         case OCS_FATAL_ERROR:
5434         case OCS_DEVICE_FATAL_ERROR:
5435         case OCS_INVALID_CRYPTO_CONFIG:
5436         case OCS_GENERAL_CRYPTO_ERROR:
5437         default:
5438                 result |= DID_ERROR << 16;
5439                 dev_err(hba->dev,
5440                                 "OCS error from controller = %x for tag %d\n",
5441                                 ocs, lrbp->task_tag);
5442                 /*
5443                  * This is called in interrupt context, hence avoid sleep
5444                  * while printing debug registers. Also print only the minimum
5445                  * debug registers needed to debug OCS failure.
5446                  */
5447                 __ufshcd_print_host_regs(hba, true);
5448                 ufshcd_print_host_state(hba);
5449                 break;
5450         } /* end of switch */
5451
5452         if ((host_byte(result) != DID_OK) && !hba->silence_err_logs) {
5453                 print_prdt = (ocs == OCS_INVALID_PRDT_ATTR ||
5454                         ocs == OCS_MISMATCH_DATA_BUF_SIZE);
5455                 ufshcd_print_trs(hba, 1 << lrbp->task_tag, print_prdt);
5456         }
5457
5458         if ((host_byte(result) == DID_ERROR) ||
5459             (host_byte(result) == DID_ABORT))
5460                 ufsdbg_set_err_state(hba);
5461
5462         return result;
5463 }
5464
5465 /**
5466  * ufshcd_uic_cmd_compl - handle completion of uic command
5467  * @hba: per adapter instance
5468  * @intr_status: interrupt status generated by the controller
5469  *
5470  * Returns
5471  *  IRQ_HANDLED - If interrupt is valid
5472  *  IRQ_NONE    - If invalid interrupt
5473  */
5474 static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
5475 {
5476         irqreturn_t retval = IRQ_NONE;
5477
5478         if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
5479                 hba->active_uic_cmd->argument2 |=
5480                         ufshcd_get_uic_cmd_result(hba);
5481                 hba->active_uic_cmd->argument3 =
5482                         ufshcd_get_dme_attr_val(hba);
5483                 complete(&hba->active_uic_cmd->done);
5484                 retval = IRQ_HANDLED;
5485         }
5486
5487         if (intr_status & UFSHCD_UIC_PWR_MASK) {
5488                 if (hba->uic_async_done) {
5489                         complete(hba->uic_async_done);
5490                         retval = IRQ_HANDLED;
5491                 } else if (ufshcd_is_auto_hibern8_supported(hba)) {
5492                         /*
5493                          * If uic_async_done flag is not set then this
5494                          * is an Auto hibern8 err interrupt.
5495                          * Perform a host reset followed by a full
5496                          * link recovery.
5497                          */
5498                         hba->ufshcd_state = UFSHCD_STATE_ERROR;
5499                         hba->force_host_reset = true;
5500                         dev_err(hba->dev, "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
5501                                 __func__, (intr_status & UIC_HIBERNATE_ENTER) ?
5502                                 "Enter" : "Exit",
5503                                 intr_status, ufshcd_get_upmcrs(hba));
5504                         __ufshcd_print_host_regs(hba, true);
5505                         ufshcd_print_host_state(hba);
5506                         schedule_work(&hba->eh_work);
5507                         retval = IRQ_HANDLED;
5508                 }
5509         }
5510         return retval;
5511 }
5512
5513 /**
5514  * ufshcd_abort_outstanding_transfer_requests - abort all outstanding transfer requests.
5515  * @hba: per adapter instance
5516  * @result: error result to inform scsi layer about
5517  */
5518 void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
5519 {
5520         u8 index;
5521         struct ufshcd_lrb *lrbp;
5522         struct scsi_cmnd *cmd;
5523
5524         if (!hba->outstanding_reqs)
5525                 return;
5526
5527         for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
5528                 lrbp = &hba->lrb[index];
5529                 cmd = lrbp->cmd;
5530                 if (cmd) {
5531                         ufshcd_cond_add_cmd_trace(hba, index, "failed");
5532                         ufshcd_update_error_stats(hba,
5533                                         UFS_ERR_INT_FATAL_ERRORS);
5534                         scsi_dma_unmap(cmd);
5535                         cmd->result = result;
5536                         /* Clear pending transfer requests */
5537                         ufshcd_clear_cmd(hba, index);
5538                         ufshcd_outstanding_req_clear(hba, index);
5539                         clear_bit_unlock(index, &hba->lrb_in_use);
5540                         lrbp->complete_time_stamp = ktime_get();
5541                         update_req_stats(hba, lrbp);
5542                         /* Mark completed command as NULL in LRB */
5543                         lrbp->cmd = NULL;
5544                         ufshcd_release_all(hba);
5545                         if (cmd->request) {
5546                                 /*
5547                                  * As we are accessing the "request" structure,
5548                                  * this must be called before calling
5549                                  * ->scsi_done() callback.
5550                                  */
5551                                 ufshcd_vops_pm_qos_req_end(hba, cmd->request,
5552                                         true);
5553                                 ufshcd_vops_crypto_engine_cfg_end(hba,
5554                                                 lrbp, cmd->request);
5555                         }
5556                         /* Do not touch lrbp after scsi done */
5557                         cmd->scsi_done(cmd);
5558                 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
5559                         if (hba->dev_cmd.complete) {
5560                                 ufshcd_cond_add_cmd_trace(hba, index,
5561                                                         "dev_failed");
5562                                 ufshcd_outstanding_req_clear(hba, index);
5563                                 complete(hba->dev_cmd.complete);
5564                         }
5565                 }
5566                 if (ufshcd_is_clkscaling_supported(hba))
5567                         hba->clk_scaling.active_reqs--;
5568         }
5569 }
5570
5571 /**
5572  * __ufshcd_transfer_req_compl - handle SCSI and query command completion
5573  * @hba: per adapter instance
5574  * @completed_reqs: requests to complete
5575  */
5576 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
5577                                         unsigned long completed_reqs)
5578 {
5579         struct ufshcd_lrb *lrbp;
5580         struct scsi_cmnd *cmd;
5581         int result;
5582         int index;
5583         struct request *req;
5584
5585         for_each_set_bit(index, &completed_reqs, hba->nutrs) {
5586                 lrbp = &hba->lrb[index];
5587                 cmd = lrbp->cmd;
5588                 if (cmd) {
5589                         ufshcd_cond_add_cmd_trace(hba, index, "complete");
5590                         ufshcd_update_tag_stats_completion(hba, cmd);
5591                         result = ufshcd_transfer_rsp_status(hba, lrbp);
5592                         scsi_dma_unmap(cmd);
5593                         cmd->result = result;
5594                         clear_bit_unlock(index, &hba->lrb_in_use);
5595                         lrbp->complete_time_stamp = ktime_get();
5596                         update_req_stats(hba, lrbp);
5597                         /* Mark completed command as NULL in LRB */
5598                         lrbp->cmd = NULL;
5599                         hba->ufs_stats.clk_rel.ctx = XFR_REQ_COMPL;
5600                         __ufshcd_release(hba, false);
5601                         __ufshcd_hibern8_release(hba, false);
5602                         if (cmd->request) {
5603                                 /*
5604                                  * As we are accessing the "request" structure,
5605                                  * this must be called before calling
5606                                  * ->scsi_done() callback.
5607                                  */
5608                                 ufshcd_vops_pm_qos_req_end(hba, cmd->request,
5609                                         false);
5610                                 ufshcd_vops_crypto_engine_cfg_end(hba,
5611                                         lrbp, cmd->request);
5612                         }
5613
5614                         req = cmd->request;
5615                         if (req) {
5616                                 /* Update IO svc time latency histogram */
5617                                 if (req->lat_hist_enabled) {
5618                                         ktime_t completion;
5619                                         u_int64_t delta_us;
5620
5621                                         completion = ktime_get();
5622                                         delta_us = ktime_us_delta(completion,
5623                                                   req->lat_hist_io_start);
5624                                         blk_update_latency_hist(
5625                                                 (rq_data_dir(req) == READ) ?
5626                                                 &hba->io_lat_read :
5627                                                 &hba->io_lat_write, delta_us);
5628                                 }
5629                         }
5630                         /* Do not touch lrbp after scsi done */
5631                         cmd->scsi_done(cmd);
5632                 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
5633                         if (hba->dev_cmd.complete) {
5634                                 ufshcd_cond_add_cmd_trace(hba, index,
5635                                                 "dcmp");
5636                                 complete(hba->dev_cmd.complete);
5637                         }
5638                 }
5639                 if (ufshcd_is_clkscaling_supported(hba))
5640                         hba->clk_scaling.active_reqs--;
5641         }
5642
5643         /* clear corresponding bits of completed commands */
5644         hba->outstanding_reqs ^= completed_reqs;
5645
5646         ufshcd_clk_scaling_update_busy(hba);
5647
5648         /* we might have freed some tags above */
5649         wake_up(&hba->dev_cmd.tag_wq);
5650 }
5651
5652 /**
5653  * ufshcd_transfer_req_compl - handle SCSI and query command completion
5654  * @hba: per adapter instance
5655  *
5656  * Returns
5657  *  IRQ_HANDLED - If interrupt is valid
5658  *  IRQ_NONE    - If invalid interrupt
5659  */
5660 static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
5661 {
5662         unsigned long completed_reqs;
5663         u32 tr_doorbell;
5664
5665         /* Resetting the interrupt aggregation counters first and reading the
5666          * DOOR_BELL afterward allows us to handle all the completed requests.
5667          * To prevent starving other interrupts, the DB is read once
5668          * after reset. The downside of this approach is the possibility of a
5669          * false interrupt if the device completes another request after resetting
5670          * aggregation and before reading the DB.
5671          */
5672         if (ufshcd_is_intr_aggr_allowed(hba))
5673                 ufshcd_reset_intr_aggr(hba);
5674
5675         tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5676         completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
5677
5678         if (completed_reqs) {
5679                 __ufshcd_transfer_req_compl(hba, completed_reqs);
5680                 return IRQ_HANDLED;
5681         } else {
5682                 return IRQ_NONE;
5683         }
5684 }
5685
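/*
 * Worked example for the completion math above (illustrative only, it adds
 * no driver logic): assume an 8-slot queue where software has tags 0, 2 and
 * 5 outstanding and the hardware has since cleared tags 0 and 5 from the
 * doorbell register:
 *
 *      outstanding_reqs = 0x25   (0b00100101)
 *      tr_doorbell      = 0x04   (0b00000100, only tag 2 still pending)
 *      completed_reqs   = tr_doorbell ^ outstanding_reqs
 *                       = 0x21   (0b00100001, i.e. tags 0 and 5)
 *
 * __ufshcd_transfer_req_compl() then walks exactly those two tags with
 * for_each_set_bit().
 */
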
5686 /**
5687  * ufshcd_disable_ee - disable exception event
5688  * @hba: per-adapter instance
5689  * @mask: exception event to disable
5690  *
5691  * Disables exception event in the device so that the EVENT_ALERT
5692  * bit is not set.
5693  *
5694  * Returns zero on success, non-zero error value on failure.
5695  */
5696 static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
5697 {
5698         int err = 0;
5699         u32 val;
5700
5701         if (!(hba->ee_ctrl_mask & mask))
5702                 goto out;
5703
5704         val = hba->ee_ctrl_mask & ~mask;
5705         val &= 0xFFFF; /* 2 bytes */
5706         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5707                         QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
5708         if (!err)
5709                 hba->ee_ctrl_mask &= ~mask;
5710 out:
5711         return err;
5712 }
5713
5714 /**
5715  * ufshcd_enable_ee - enable exception event
5716  * @hba: per-adapter instance
5717  * @mask: exception event to enable
5718  *
5719  * Enable the corresponding exception event in the device to allow the
5720  * device to alert the host in critical scenarios.
5721  *
5722  * Returns zero on success, non-zero error value on failure.
5723  */
5724 static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
5725 {
5726         int err = 0;
5727         u32 val;
5728
5729         if (hba->ee_ctrl_mask & mask)
5730                 goto out;
5731
5732         val = hba->ee_ctrl_mask | mask;
5733         val &= 0xFFFF; /* 2 bytes */
5734         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5735                         QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
5736         if (!err)
5737                 hba->ee_ctrl_mask |= mask;
5738 out:
5739         return err;
5740 }
5741
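/*
 * Illustrative example for ufshcd_enable_ee()/ufshcd_disable_ee() above (it
 * adds no driver logic): the attribute written is simply the cached 16-bit
 * mask with the requested bit set or cleared. Starting from
 * ee_ctrl_mask == 0:
 *
 *      ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
 *              writes (0 | MASK_EE_URGENT_BKOPS) & 0xFFFF to
 *              QUERY_ATTR_IDN_EE_CONTROL and, only if the query succeeds,
 *              sets the bit in hba->ee_ctrl_mask;
 *
 *      ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
 *              writes the mask with that bit cleared and likewise updates
 *              the cached mask only on success, so the cache never drifts
 *              from the device state when a query fails.
 */
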
5742 /**
5743  * ufshcd_enable_auto_bkops - Allow device managed BKOPS
5744  * @hba: per-adapter instance
5745  *
5746  * Allow the device to manage background operations on its own. Enabling
5747  * this might lead to inconsistent latencies during normal data transfers
5748  * as the device is free to schedule its background operations as it
5749  * sees fit.
5750  *
5751  * Returns zero on success, non-zero on failure.
5752  */
5753 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5754 {
5755         int err = 0;
5756
5757         if (hba->auto_bkops_enabled)
5758                 goto out;
5759
5760         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5761                         QUERY_FLAG_IDN_BKOPS_EN, NULL);
5762         if (err) {
5763                 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5764                                 __func__, err);
5765                 goto out;
5766         }
5767
5768         hba->auto_bkops_enabled = true;
5769         trace_ufshcd_auto_bkops_state(dev_name(hba->dev), 1);
5770
5771         /* No need of URGENT_BKOPS exception from the device */
5772         err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5773         if (err)
5774                 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5775                                 __func__, err);
5776 out:
5777         return err;
5778 }
5779
5780 /**
5781  * ufshcd_disable_auto_bkops - block the device from doing background operations
5782  * @hba: per-adapter instance
5783  *
5784  * Disabling background operations improves command response latency but
5785  * has the drawback of the device moving into a critical state in which it is
5786  * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
5787  * host is idle so that BKOPS are managed effectively without any negative
5788  * impacts.
5789  *
5790  * Returns zero on success, non-zero on failure.
5791  */
5792 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5793 {
5794         int err = 0;
5795
5796         if (!hba->auto_bkops_enabled)
5797                 goto out;
5798
5799         /*
5800          * If host-assisted BKOPS is to be enabled, make sure the
5801          * urgent bkops exception is allowed.
5802          */
5803         err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5804         if (err) {
5805                 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5806                                 __func__, err);
5807                 goto out;
5808         }
5809
5810         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5811                         QUERY_FLAG_IDN_BKOPS_EN, NULL);
5812         if (err) {
5813                 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5814                                 __func__, err);
5815                 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5816                 goto out;
5817         }
5818
5819         hba->auto_bkops_enabled = false;
5820         trace_ufshcd_auto_bkops_state(dev_name(hba->dev), 0);
5821 out:
5822         return err;
5823 }
5824
5825 /**
5826  * ufshcd_force_reset_auto_bkops - force reset auto bkops state
5827  * @hba: per adapter instance
5828  *
5829  * After a device reset, the device may revert the BKOPS_EN flag
5830  * to its default value, so the s/w tracking variables should be updated
5831  * as well. This function changes the auto-bkops state based on
5832  * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
5833  */
5834 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5835 {
5836         if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5837                 hba->auto_bkops_enabled = false;
5838                 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5839                 ufshcd_enable_auto_bkops(hba);
5840         } else {
5841                 hba->auto_bkops_enabled = true;
5842                 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5843                 ufshcd_disable_auto_bkops(hba);
5844         }
5845 }
5846
5847 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5848 {
5849         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5850                         QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5851 }
5852
5853 /**
5854  * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5855  * @hba: per-adapter instance
5856  * @status: bkops_status value
5857  *
5858  * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
5859  * flag in the device to permit background operations if the device
5860  * bkops_status is greater than or equal to "status" argument passed to
5861  * this function, disable otherwise.
5862  *
5863  * Returns 0 for success, non-zero in case of failure.
5864  *
5865  * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5866  * to know whether auto bkops is enabled or disabled after this function
5867  * returns control to it.
5868  */
5869 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5870                              enum bkops_status status)
5871 {
5872         int err;
5873         u32 curr_status = 0;
5874
5875         err = ufshcd_get_bkops_status(hba, &curr_status);
5876         if (err) {
5877                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5878                                 __func__, err);
5879                 goto out;
5880         } else if (curr_status > BKOPS_STATUS_MAX) {
5881                 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5882                                 __func__, curr_status);
5883                 err = -EINVAL;
5884                 goto out;
5885         }
5886
5887         if (curr_status >= status)
5888                 err = ufshcd_enable_auto_bkops(hba);
5889         else
5890                 err = ufshcd_disable_auto_bkops(hba);
5891 out:
5892         return err;
5893 }
5894
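/*
 * Example of the threshold check in ufshcd_bkops_ctrl() above (illustrative
 * only, not additional driver logic):
 *
 *      err = ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
 *              curr_status >= BKOPS_STATUS_PERF_IMPACT  -> auto-bkops enabled
 *              curr_status <  BKOPS_STATUS_PERF_IMPACT  -> auto-bkops disabled
 *              curr_status >  BKOPS_STATUS_MAX          -> -EINVAL, no change
 */
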
5895 /**
5896  * ufshcd_urgent_bkops - handle urgent bkops exception event
5897  * @hba: per-adapter instance
5898  *
5899  * Enable fBackgroundOpsEn flag in the device to permit background
5900  * operations.
5901  *
5902  * If BKOPS is enabled, this function returns 0; it returns 1 if BKOPS is not
5903  * enabled, and a negative error value for any other failure.
5904  */
5905 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5906 {
5907         return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
5908 }
5909
5910 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5911 {
5912         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5913                         QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5914 }
5915
5916 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5917 {
5918         int err;
5919         u32 curr_status = 0;
5920
5921         if (hba->is_urgent_bkops_lvl_checked)
5922                 goto enable_auto_bkops;
5923
5924         err = ufshcd_get_bkops_status(hba, &curr_status);
5925         if (err) {
5926                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5927                                 __func__, err);
5928                 goto out;
5929         }
5930
5931         /*
5932          * We are seeing that some devices raise the urgent bkops
5933          * exception event even when the BKOPS status doesn't indicate a
5934          * performance impact or critical level. Handle these devices by
5935          * determining their urgent bkops status at runtime.
5936          */
5937         if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5938                 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5939                                 __func__, curr_status);
5940                 /* update the current status as the urgent bkops level */
5941                 hba->urgent_bkops_lvl = curr_status;
5942                 hba->is_urgent_bkops_lvl_checked = true;
5943         }
5944
5945 enable_auto_bkops:
5946         err = ufshcd_enable_auto_bkops(hba);
5947 out:
5948         if (err < 0)
5949                 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5950                                 __func__, err);
5951 }
5952
5953 /**
5954  * ufshcd_exception_event_handler - handle exceptions raised by device
5955  * @work: pointer to work data
5956  *
5957  * Read bExceptionEventStatus attribute from the device and handle the
5958  * exception event accordingly.
5959  */
5960 static void ufshcd_exception_event_handler(struct work_struct *work)
5961 {
5962         struct ufs_hba *hba;
5963         int err;
5964         u32 status = 0;
5965         hba = container_of(work, struct ufs_hba, eeh_work);
5966
5967         pm_runtime_get_sync(hba->dev);
5968         ufshcd_scsi_block_requests(hba);
5969         err = ufshcd_get_ee_status(hba, &status);
5970         if (err) {
5971                 dev_err(hba->dev, "%s: failed to get exception status %d\n",
5972                                 __func__, err);
5973                 goto out;
5974         }
5975
5976         status &= hba->ee_ctrl_mask;
5977
5978         if (status & MASK_EE_URGENT_BKOPS)
5979                 ufshcd_bkops_exception_event_handler(hba);
5980
5981 out:
5982         ufshcd_scsi_unblock_requests(hba);
5983         pm_runtime_put(hba->dev);
5984         return;
5985 }
5986
5987 /* Complete requests that have door-bell cleared */
5988 static void ufshcd_complete_requests(struct ufs_hba *hba)
5989 {
5990         ufshcd_transfer_req_compl(hba);
5991         ufshcd_tmc_handler(hba);
5992 }
5993
5994 /**
5995  * ufshcd_quirk_dl_nac_errors - check whether error handling is required
5996  *                              to recover from the DL NAC errors.
5997  * @hba: per-adapter instance
5998  *
5999  * Returns true if error handling is required, false otherwise
6000  */
6001 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
6002 {
6003         unsigned long flags;
6004         bool err_handling = true;
6005
6006         spin_lock_irqsave(hba->host->host_lock, flags);
6007         /*
6008          * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around
6009          * device fatal errors and/or DL NAC & REPLAY timeout errors.
6010          */
6011         if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
6012                 goto out;
6013
6014         if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
6015             ((hba->saved_err & UIC_ERROR) &&
6016              (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) {
6017                 /*
6018                  * we have to do error recovery but at least silence the error
6019                  * logs.
6020                  */
6021                 hba->silence_err_logs = true;
6022                 goto out;
6023         }
6024
6025         if ((hba->saved_err & UIC_ERROR) &&
6026             (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
6027                 int err;
6028                 /*
6029                  * wait for 50ms to see whether any other errors surface.
6030                  */
6031                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6032                 msleep(50);
6033                 spin_lock_irqsave(hba->host->host_lock, flags);
6034
6035                 /*
6036                  * Now check whether we have received any severe errors other
6037                  * than the DL NAC error.
6038                  */
6039                 if ((hba->saved_err & INT_FATAL_ERRORS) ||
6040                     ((hba->saved_err & UIC_ERROR) &&
6041                     (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) {
6042                         if (((hba->saved_err & INT_FATAL_ERRORS) ==
6043                                 DEVICE_FATAL_ERROR) || (hba->saved_uic_err &
6044                                         ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))
6045                                 hba->silence_err_logs = true;
6046                         goto out;
6047                 }
6048
6049                 /*
6050                  * As DL NAC is the only error received so far, send out a NOP
6051                  * command to confirm whether the link is still active.
6052                  *   - If we don't get any response then do error recovery.
6053                  *   - If we get response then clear the DL NAC error bit.
6054                  */
6055
6056                 /* silence the error logs from NOP command */
6057                 hba->silence_err_logs = true;
6058                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6059                 err = ufshcd_verify_dev_init(hba);
6060                 spin_lock_irqsave(hba->host->host_lock, flags);
6061                 hba->silence_err_logs = false;
6062
6063                 if (err) {
6064                         hba->silence_err_logs = true;
6065                         goto out;
6066                 }
6067
6068                 /* Link seems to be alive hence ignore the DL NAC errors */
6069                 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
6070                         hba->saved_err &= ~UIC_ERROR;
6071                 /* clear NAC error */
6072                 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6073                 if (!hba->saved_uic_err) {
6074                         err_handling = false;
6075                         goto out;
6076                 }
6077                 /*
6078                  * there seem to be errors other than NAC, so do error
6079                  * recovery
6080                  */
6081                 hba->silence_err_logs = true;
6082         }
6083 out:
6084         spin_unlock_irqrestore(hba->host->host_lock, flags);
6085         return err_handling;
6086 }
6087
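/*
 * Decision flow of ufshcd_quirk_dl_nac_errors() above, summarised
 * (descriptive sketch only):
 *
 *      controller / system-bus fatal error       -> recover (return true)
 *      device fatal or DL TCx REPLAY timeout     -> recover, logs silenced
 *      DL NAC only:
 *              wait 50ms and re-check the saved errors
 *              another severe error appeared     -> recover
 *              NOP (ufshcd_verify_dev_init) fails -> recover, logs silenced
 *              NOP succeeds and NAC was the only
 *              UIC error                         -> skip recovery (false)
 */
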
6088 /**
6089  * ufshcd_err_handler - handle UFS errors that require s/w attention
6090  * @work: pointer to work structure
6091  */
6092 static void ufshcd_err_handler(struct work_struct *work)
6093 {
6094         struct ufs_hba *hba;
6095         unsigned long flags;
6096         bool err_xfer = false, err_tm = false;
6097         int err = 0;
6098         int tag;
6099         bool needs_reset = false;
6100         bool clks_enabled = false;
6101
6102         hba = container_of(work, struct ufs_hba, eh_work);
6103
6104         spin_lock_irqsave(hba->host->host_lock, flags);
6105         ufsdbg_set_err_state(hba);
6106
6107         if (hba->ufshcd_state == UFSHCD_STATE_RESET)
6108                 goto out;
6109
6110         /*
6111          * Make sure the clocks are ON before we proceed with err
6112          * handling. In the majority of cases the err handler runs
6113          * with clocks ON. There is a possibility that the err
6114          * handler was scheduled due to auto hibern8 error interrupt,
6115          * in which case the clocks could be gated or be in the
6116          * process of gating when the err handler runs.
6117          */
6118         if (unlikely((hba->clk_gating.state != CLKS_ON) &&
6119             ufshcd_is_auto_hibern8_supported(hba))) {
6120                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6121                 hba->ufs_stats.clk_hold.ctx = ERR_HNDLR_WORK;
6122                 ufshcd_hold(hba, false);
6123                 spin_lock_irqsave(hba->host->host_lock, flags);
6124                 clks_enabled = true;
6125         }
6126
6127         hba->ufshcd_state = UFSHCD_STATE_RESET;
6128         ufshcd_set_eh_in_progress(hba);
6129
6130         /* Complete requests that have door-bell cleared by h/w */
6131         ufshcd_complete_requests(hba);
6132
6133         if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6134                 bool ret;
6135
6136                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6137                 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
6138                 ret = ufshcd_quirk_dl_nac_errors(hba);
6139                 spin_lock_irqsave(hba->host->host_lock, flags);
6140                 if (!ret)
6141                         goto skip_err_handling;
6142         }
6143
6144         /*
6145          * Dump the controller state before resetting. The transfer request state
6146          * will be dumped as part of request completion.
6147          */
6148         if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
6149                 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x",
6150                         __func__, hba->saved_err, hba->saved_uic_err);
6151                 if (!hba->silence_err_logs) {
6152                         /* release lock as print host regs sleeps */
6153                         spin_unlock_irqrestore(hba->host->host_lock, flags);
6154                         ufshcd_print_host_regs(hba);
6155                         ufshcd_print_host_state(hba);
6156                         ufshcd_print_pwr_info(hba);
6157                         ufshcd_print_tmrs(hba, hba->outstanding_tasks);
6158                         ufshcd_print_cmd_log(hba);
6159                         spin_lock_irqsave(hba->host->host_lock, flags);
6160                 }
6161         }
6162
6163         if ((hba->saved_err & INT_FATAL_ERRORS)
6164             || hba->saved_ce_err || hba->force_host_reset ||
6165             ((hba->saved_err & UIC_ERROR) &&
6166             (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
6167                                    UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
6168                                    UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
6169                 needs_reset = true;
6170
6171         /*
6172          * If a host reset is required, skip forcefully clearing the pending
6173          * transfers because they will automatically get
6174          * cleared after link startup.
6175          */
6176         if (needs_reset)
6177                 goto skip_pending_xfer_clear;
6178
6179         /* release lock as clear command might sleep */
6180         spin_unlock_irqrestore(hba->host->host_lock, flags);
6181         /* Clear pending transfer requests */
6182         for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
6183                 if (ufshcd_clear_cmd(hba, tag)) {
6184                         err_xfer = true;
6185                         goto lock_skip_pending_xfer_clear;
6186                 }
6187         }
6188
6189         /* Clear pending task management requests */
6190         for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
6191                 if (ufshcd_clear_tm_cmd(hba, tag)) {
6192                         err_tm = true;
6193                         goto lock_skip_pending_xfer_clear;
6194                 }
6195         }
6196
6197 lock_skip_pending_xfer_clear:
6198         spin_lock_irqsave(hba->host->host_lock, flags);
6199
6200         /* Complete the requests that are cleared by s/w */
6201         ufshcd_complete_requests(hba);
6202
6203         if (err_xfer || err_tm)
6204                 needs_reset = true;
6205
6206 skip_pending_xfer_clear:
6207         /* Fatal errors need reset */
6208         if (needs_reset) {
6209                 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
6210
6211                 if (hba->saved_err & INT_FATAL_ERRORS)
6212                         ufshcd_update_error_stats(hba,
6213                                                   UFS_ERR_INT_FATAL_ERRORS);
6214                 if (hba->saved_ce_err)
6215                         ufshcd_update_error_stats(hba, UFS_ERR_CRYPTO_ENGINE);
6216
6217                 if (hba->saved_err & UIC_ERROR)
6218                         ufshcd_update_error_stats(hba,
6219                                                   UFS_ERR_INT_UIC_ERROR);
6220
6221                 if (err_xfer || err_tm)
6222                         ufshcd_update_error_stats(hba,
6223                                                   UFS_ERR_CLEAR_PEND_XFER_TM);
6224
6225                 /*
6226                  * ufshcd_reset_and_restore() does the link reinitialization,
6227                  * which needs at least one empty doorbell slot to send the
6228                  * device management commands (NOP and query commands).
6229                  * If no slot is empty at this moment, free up the last
6230                  * slot forcefully.
6231                  */
6232                 if (hba->outstanding_reqs == max_doorbells)
6233                         __ufshcd_transfer_req_compl(hba,
6234                                                     (1UL << (hba->nutrs - 1)));
6235
6236                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6237                 err = ufshcd_reset_and_restore(hba);
6238                 spin_lock_irqsave(hba->host->host_lock, flags);
6239                 if (err) {
6240                         dev_err(hba->dev, "%s: reset and restore failed\n",
6241                                         __func__);
6242                         hba->ufshcd_state = UFSHCD_STATE_ERROR;
6243                 }
6244                 /*
6245                  * Inform the SCSI mid-layer that we did a reset so it can handle
6246                  * Unit Attention properly.
6247                  */
6248                 scsi_report_bus_reset(hba->host, 0);
6249                 hba->saved_err = 0;
6250                 hba->saved_uic_err = 0;
6251                 hba->saved_ce_err = 0;
6252                 hba->force_host_reset = false;
6253         }
6254
6255 skip_err_handling:
6256         if (!needs_reset) {
6257                 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6258                 if (hba->saved_err || hba->saved_uic_err)
6259                         dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
6260                             __func__, hba->saved_err, hba->saved_uic_err);
6261         }
6262
6263         hba->silence_err_logs = false;
6264
6265         if (clks_enabled) {
6266                 __ufshcd_release(hba, false);
6267                 hba->ufs_stats.clk_rel.ctx = ERR_HNDLR_WORK;
6268         }
6269 out:
6270         ufshcd_clear_eh_in_progress(hba);
6271         spin_unlock_irqrestore(hba->host->host_lock, flags);
6272 }
6273
6274 static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
6275                 u32 reg)
6276 {
6277         reg_hist->reg[reg_hist->pos] = reg;
6278         reg_hist->tstamp[reg_hist->pos] = ktime_get();
6279         reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
6280 }
6281
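/*
 * Sketch of the ring-buffer behaviour of ufshcd_update_uic_reg_hist() above
 * (the length 8 below is assumed purely for illustration; the real bound is
 * UIC_ERR_REG_HIST_LENGTH): entries fill slots 0, 1, ... 7 and the next
 * error wraps back to slot 0, overwriting the oldest record, so the history
 * always holds the most recent error codes together with their timestamps.
 */
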
6282 static void ufshcd_rls_handler(struct work_struct *work)
6283 {
6284         struct ufs_hba *hba;
6285         int ret = 0;
6286         u32 mode;
6287
6288         hba = container_of(work, struct ufs_hba, rls_work);
6289         ufshcd_scsi_block_requests(hba);
6290         pm_runtime_get_sync(hba->dev);
6291         ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
6292         if (ret) {
6293                 dev_err(hba->dev,
6294                         "Timed out (%d) waiting for DB to clear\n",
6295                         ret);
6296                 goto out;
6297         }
6298
6299         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
6300         if (hba->pwr_info.pwr_rx != ((mode >> PWR_RX_OFFSET) & PWR_INFO_MASK))
6301                 hba->restore_needed = true;
6302
6303         if (hba->pwr_info.pwr_tx != (mode & PWR_INFO_MASK))
6304                 hba->restore_needed = true;
6305
6306         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_RXGEAR), &mode);
6307         if (hba->pwr_info.gear_rx != mode)
6308                 hba->restore_needed = true;
6309
6310         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TXGEAR), &mode);
6311         if (hba->pwr_info.gear_tx != mode)
6312                 hba->restore_needed = true;
6313
6314         if (hba->restore_needed)
6315                 ret = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
6316
6317         if (ret)
6318                 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
6319                         __func__, ret);
6320         else
6321                 hba->restore_needed = false;
6322
6323 out:
6324         ufshcd_scsi_unblock_requests(hba);
6325         pm_runtime_put_sync(hba->dev);
6326 }
6327
6328 /**
6329  * ufshcd_update_uic_error - check and set fatal UIC error flags.
6330  * @hba: per-adapter instance
6331  *
6332  * Returns
6333  *  IRQ_HANDLED - If interrupt is valid
6334  *  IRQ_NONE    - If invalid interrupt
6335  */
6336 static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
6337 {
6338         u32 reg;
6339         irqreturn_t retval = IRQ_NONE;
6340
6341         /* PHY layer lane error */
6342         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
6343         if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
6344             (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
6345                 /*
6346                  * To know whether this error is fatal, the DB timeout
6347                  * must be checked, but that error is handled separately.
6348                  */
6349                 dev_dbg(hba->dev, "%s: UIC Lane error reported, reg 0x%x\n",
6350                                 __func__, reg);
6351                 ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
6352
6353                 /*
6354                  * Don't ignore LINERESET indication during hibern8
6355                  * enter operation.
6356                  */
6357                 if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
6358                         struct uic_command *cmd = hba->active_uic_cmd;
6359
6360                         if (cmd) {
6361                                 if (cmd->command == UIC_CMD_DME_HIBER_ENTER) {
6362                                         dev_err(hba->dev, "%s: LINERESET during hibern8 enter, reg 0x%x\n",
6363                                                 __func__, reg);
6364                                         hba->full_init_linereset = true;
6365                                 }
6366                         }
6367                         if (!hba->full_init_linereset)
6368                                 schedule_work(&hba->rls_work);
6369                 }
6370                 retval |= IRQ_HANDLED;
6371         }
6372
6373         /* PA_INIT_ERROR is fatal and needs UIC reset */
6374         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
6375         if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
6376             (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
6377                 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
6378
6379                 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) {
6380                         hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
6381                 } else if (hba->dev_quirks &
6382                            UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6383                         if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
6384                                 hba->uic_error |=
6385                                         UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6386                         else if (reg &
6387                                  UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
6388                                 hba->uic_error |=
6389                                         UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
6390                 }
6391                 retval |= IRQ_HANDLED;
6392         }
6393
6394         /* UIC NL/TL/DME errors need software retry */
6395         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
6396         if ((reg & UIC_NETWORK_LAYER_ERROR) &&
6397             (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
6398                 ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
6399                 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
6400                 retval |= IRQ_HANDLED;
6401         }
6402
6403         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
6404         if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
6405             (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
6406                 ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
6407                 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
6408                 retval |= IRQ_HANDLED;
6409         }
6410
6411         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
6412         if ((reg & UIC_DME_ERROR) &&
6413             (reg & UIC_DME_ERROR_CODE_MASK)) {
6414                 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
6415                 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
6416                 retval |= IRQ_HANDLED;
6417         }
6418
6419         dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
6420                         __func__, hba->uic_error);
6421         return retval;
6422 }
6423
6424 /**
6425  * ufshcd_check_errors - Check for errors that need s/w attention
6426  * @hba: per-adapter instance
6427  *
6428  * Returns
6429  *  IRQ_HANDLED - If interrupt is valid
6430  *  IRQ_NONE    - If invalid interrupt
6431  */
6432 static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
6433 {
6434         bool queue_eh_work = false;
6435         irqreturn_t retval = IRQ_NONE;
6436
6437         if (hba->errors & INT_FATAL_ERRORS || hba->ce_error)
6438                 queue_eh_work = true;
6439
6440         if (hba->errors & UIC_ERROR) {
6441                 hba->uic_error = 0;
6442                 retval = ufshcd_update_uic_error(hba);
6443                 if (hba->uic_error)
6444                         queue_eh_work = true;
6445         }
6446
6447         if (queue_eh_work) {
6448                 /*
6449                  * Update the transfer error masks to sticky bits; do this
6450                  * irrespective of the current ufshcd_state.
6451                  */
6452                 hba->saved_err |= hba->errors;
6453                 hba->saved_uic_err |= hba->uic_error;
6454                 hba->saved_ce_err |= hba->ce_error;
6455
6456                 /* handle fatal errors only when link is functional */
6457                 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
6458                         /*
6459                          * Set error handling in progress flag early so that we
6460                          * don't issue new requests any more.
6461                          */
6462                         ufshcd_set_eh_in_progress(hba);
6463
6464                         hba->ufshcd_state = UFSHCD_STATE_ERROR;
6465                         schedule_work(&hba->eh_work);
6466                 }
6467                 retval |= IRQ_HANDLED;
6468         }
6469         /*
6470          * if (!queue_eh_work) -
6471          * Other errors are either non-fatal, where the host recovers
6472          * by itself without s/w intervention, or errors that will be
6473          * handled by the SCSI core layer.
6474          */
6475         return retval;
6476 }
6477
6478 /**
6479  * ufshcd_tmc_handler - handle task management function completion
6480  * @hba: per adapter instance
6481  *
6482  * Returns
6483  *  IRQ_HANDLED - If interrupt is valid
6484  *  IRQ_NONE    - If invalid interrupt
6485  */
6486 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
6487 {
6488         u32 tm_doorbell;
6489
6490         tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
6491         hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
6492         if (hba->tm_condition) {
6493                 wake_up(&hba->tm_wq);
6494                 return IRQ_HANDLED;
6495         } else {
6496                 return IRQ_NONE;
6497         }
6498 }
6499
6500 /**
6501  * ufshcd_sl_intr - Interrupt service routine
6502  * @hba: per adapter instance
6503  * @intr_status: contains interrupts generated by the controller
6504  *
6505  * Returns
6506  *  IRQ_HANDLED - If interrupt is valid
6507  *  IRQ_NONE    - If invalid interrupt
6508  */
6509 static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
6510 {
6511         irqreturn_t retval = IRQ_NONE;
6512
6513         ufsdbg_error_inject_dispatcher(hba,
6514                 ERR_INJECT_INTR, intr_status, &intr_status);
6515
6516         ufshcd_vops_crypto_engine_get_status(hba, &hba->ce_error);
6517
6518         hba->errors = UFSHCD_ERROR_MASK & intr_status;
6519         if (hba->errors || hba->ce_error)
6520                 retval |= ufshcd_check_errors(hba);
6521
6522         if (intr_status & UFSHCD_UIC_MASK)
6523                 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
6524
6525         if (intr_status & UTP_TASK_REQ_COMPL)
6526                 retval |= ufshcd_tmc_handler(hba);
6527
6528         if (intr_status & UTP_TRANSFER_REQ_COMPL)
6529                 retval |= ufshcd_transfer_req_compl(hba);
6530
6531         return retval;
6532 }
6533
6534 /**
6535  * ufshcd_intr - Main interrupt service routine
6536  * @irq: irq number
6537  * @__hba: pointer to adapter instance
6538  *
6539  * Returns
6540  *  IRQ_HANDLED - If interrupt is valid
6541  *  IRQ_NONE    - If invalid interrupt
6542  */
6543 static irqreturn_t ufshcd_intr(int irq, void *__hba)
6544 {
6545         u32 intr_status, enabled_intr_status;
6546         irqreturn_t retval = IRQ_NONE;
6547         struct ufs_hba *hba = __hba;
6548         int retries = hba->nutrs;
6549
6550         spin_lock(hba->host->host_lock);
6551         intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6552         hba->ufs_stats.last_intr_status = intr_status;
6553         hba->ufs_stats.last_intr_ts = ktime_get();
6554         /*
6555          * There could be at most hba->nutrs reqs in flight. In the worst case,
6556          * if the reqs finish one by one after the interrupt status is
6557          * read, make sure we handle them by checking the interrupt status
6558          * again in a loop until all of the reqs are processed before returning.
6559          */
6560         do {
6561                 enabled_intr_status =
6562                         intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
6563                 if (intr_status)
6564                         ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
6565                 if (enabled_intr_status)
6566                         retval |= ufshcd_sl_intr(hba, enabled_intr_status);
6567
6568                 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6569         } while (intr_status && --retries);
6570
6571         if (retval == IRQ_NONE) {
6572                 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
6573                                         __func__, intr_status);
6574                 ufshcd_hex_dump("host regs: ", hba->mmio_base,
6575                                         UFSHCI_REG_SPACE_SIZE);
6576         }
6577
6578         spin_unlock(hba->host->host_lock);
6579         return retval;
6580 }
6581
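/*
 * Note on the loop in ufshcd_intr() above (descriptive only): the handler
 * writes back only the bits it observed in REG_INTERRUPT_STATUS (the
 * register is write-1-to-clear) and then re-reads it, so a status bit that
 * is raised between the read and the write is handled on the next pass
 * rather than being acknowledged by mistake; the loop makes at most
 * hba->nutrs passes before returning.
 */
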
6582 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
6583 {
6584         int err = 0;
6585         u32 mask = 1 << tag;
6586         unsigned long flags;
6587
6588         if (!test_bit(tag, &hba->outstanding_tasks))
6589                 goto out;
6590
6591         spin_lock_irqsave(hba->host->host_lock, flags);
6592         ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
6593         spin_unlock_irqrestore(hba->host->host_lock, flags);
6594
6595         /* poll for max. 1 sec for the doorbell register to be cleared by h/w */
6596         err = ufshcd_wait_for_register(hba,
6597                         REG_UTP_TASK_REQ_DOOR_BELL,
6598                         mask, 0, 1000, 1000, true);
6599 out:
6600         return err;
6601 }
6602
6603 /**
6604  * ufshcd_issue_tm_cmd - issues task management commands to controller
6605  * @hba: per adapter instance
6606  * @lun_id: LUN ID to which TM command is sent
6607  * @task_id: task ID to which the TM command is applicable
6608  * @tm_function: task management function opcode
6609  * @tm_response: task management service response return value
6610  *
6611  * Returns non-zero value on error, zero on success.
6612  */
6613 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
6614                 u8 tm_function, u8 *tm_response)
6615 {
6616         struct utp_task_req_desc *task_req_descp;
6617         struct utp_upiu_task_req *task_req_upiup;
6618         struct Scsi_Host *host;
6619         unsigned long flags;
6620         int free_slot;
6621         int err;
6622         int task_tag;
6623
6624         host = hba->host;
6625
6626         /*
6627          * Get free slot, sleep if slots are unavailable.
6628          * Even though we use wait_event() which sleeps indefinitely,
6629          * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
6630          */
6631         wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
6632         hba->ufs_stats.clk_hold.ctx = TM_CMD_SEND;
6633         ufshcd_hold_all(hba);
6634
6635         spin_lock_irqsave(host->host_lock, flags);
6636         task_req_descp = hba->utmrdl_base_addr;
6637         task_req_descp += free_slot;
6638
6639         /* Configure task request descriptor */
6640         task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6641         task_req_descp->header.dword_2 =
6642                         cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6643
6644         /* Configure task request UPIU */
6645         task_req_upiup =
6646                 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
6647         task_tag = hba->nutrs + free_slot;
6648         task_req_upiup->header.dword_0 =
6649                 UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
6650                                               lun_id, task_tag);
6651         task_req_upiup->header.dword_1 =
6652                 UPIU_HEADER_DWORD(0, tm_function, 0, 0);
6653         /*
6654          * The host shall provide the same value for the LUN field in the basic
6655          * header and for Input Parameter 1.
6656          */
6657         task_req_upiup->input_param1 = cpu_to_be32(lun_id);
6658         task_req_upiup->input_param2 = cpu_to_be32(task_id);
6659
6660         /* send command to the controller */
6661         __set_bit(free_slot, &hba->outstanding_tasks);
6662
6663         /* Make sure descriptors are ready before ringing the task doorbell */
6664         wmb();
6665
6666         ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
6667         /* Make sure that doorbell is committed immediately */
6668         wmb();
6669
6670         spin_unlock_irqrestore(host->host_lock, flags);
6671
6672         /* wait until the task management command is completed */
6673         err = wait_event_timeout(hba->tm_wq,
6674                         test_bit(free_slot, &hba->tm_condition),
6675                         msecs_to_jiffies(TM_CMD_TIMEOUT));
6676         if (!err) {
6677                 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
6678                                 __func__, tm_function);
6679                 if (ufshcd_clear_tm_cmd(hba, free_slot))
6680                         dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n",
6681                                         __func__, free_slot);
6682                 err = -ETIMEDOUT;
6683         } else {
6684                 err = ufshcd_task_req_compl(hba, free_slot, tm_response);
6685         }
6686
6687         clear_bit(free_slot, &hba->tm_condition);
6688         ufshcd_put_tm_slot(hba, free_slot);
6689         wake_up(&hba->tm_tag_wq);
6690         hba->ufs_stats.clk_rel.ctx = TM_CMD_SEND;
6691
6692         ufshcd_release_all(hba);
6693         return err;
6694 }
6695
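/*
 * Usage sketch for ufshcd_issue_tm_cmd() (it mirrors
 * ufshcd_eh_device_reset_handler() below and is shown for illustration
 * only): issue a LOGICAL UNIT RESET for the LUN of a failed command and
 * treat anything other than a "function complete" service response as an
 * error.
 *
 *      u8 resp = 0xF;
 *      int err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0,
 *                                    UFS_LOGICAL_RESET, &resp);
 *      if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL)
 *              err = err ? err : resp;
 */
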
6696 /**
6697  * ufshcd_eh_device_reset_handler - device reset handler registered to
6698  *                                    scsi layer.
6699  * @cmd: SCSI command pointer
6700  *
6701  * Returns SUCCESS/FAILED
6702  */
6703 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
6704 {
6705         struct Scsi_Host *host;
6706         struct ufs_hba *hba;
6707         unsigned int tag;
6708         u32 pos;
6709         int err;
6710         u8 resp = 0xF;
6711         struct ufshcd_lrb *lrbp;
6712         unsigned long flags;
6713
6714         host = cmd->device->host;
6715         hba = shost_priv(host);
6716         tag = cmd->request->tag;
6717
6718         ufshcd_print_cmd_log(hba);
6719         lrbp = &hba->lrb[tag];
6720         err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
6721         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6722                 if (!err)
6723                         err = resp;
6724                 goto out;
6725         }
6726
6727         /* clear the commands that were pending for corresponding LUN */
6728         for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
6729                 if (hba->lrb[pos].lun == lrbp->lun) {
6730                         err = ufshcd_clear_cmd(hba, pos);
6731                         if (err)
6732                                 break;
6733                 }
6734         }
6735         spin_lock_irqsave(host->host_lock, flags);
6736         ufshcd_transfer_req_compl(hba);
6737         spin_unlock_irqrestore(host->host_lock, flags);
6738
6739 out:
6740         hba->req_abort_count = 0;
6741         if (!err) {
6742                 err = SUCCESS;
6743         } else {
6744                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6745                 err = FAILED;
6746         }
6747         return err;
6748 }
6749
6750 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
6751 {
6752         struct ufshcd_lrb *lrbp;
6753         int tag;
6754
6755         for_each_set_bit(tag, &bitmap, hba->nutrs) {
6756                 lrbp = &hba->lrb[tag];
6757                 lrbp->req_abort_skip = true;
6758         }
6759 }
6760
6761 /**
6762  * ufshcd_abort - abort a specific command
6763  * @cmd: SCSI command pointer
6764  *
6765  * Abort the pending command in the device by sending a UFS_ABORT_TASK task
6766  * management command, and in the host controller by clearing the door-bell
6767  * register. There can be a race between the controller sending the command to
6768  * the device and the abort being issued. To avoid that, first issue
6769  * UFS_QUERY_TASK to check whether the command was really issued, then abort it.
6770  *
6771  * Returns SUCCESS/FAILED
6772  */
6773 static int ufshcd_abort(struct scsi_cmnd *cmd)
6774 {
6775         struct Scsi_Host *host;
6776         struct ufs_hba *hba;
6777         unsigned long flags;
6778         unsigned int tag;
6779         int err = 0;
6780         int poll_cnt;
6781         u8 resp = 0xF;
6782         struct ufshcd_lrb *lrbp;
6783         u32 reg;
6784
6785         host = cmd->device->host;
6786         hba = shost_priv(host);
6787         tag = cmd->request->tag;
6788         if (!ufshcd_valid_tag(hba, tag)) {
6789                 dev_err(hba->dev,
6790                         "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
6791                         __func__, tag, cmd, cmd->request);
6792                 BUG();
6793         }
6794
6795         lrbp = &hba->lrb[tag];
6796
6797         ufshcd_update_error_stats(hba, UFS_ERR_TASK_ABORT);
6798
6799         /*
6800          * Task abort to the device W-LUN is illegal. When this command
6801          * fails due to the spec violation, the next scsi err handling step
6802          * will be to send a LU reset which, again, is a spec violation.
6803          * To avoid these unnecessary/illegal steps we skip to the last error
6804          * handling stage: reset and restore.
6805          */
6806         if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
6807                 return ufshcd_eh_host_reset_handler(cmd);
6808
6809         ufshcd_hold_all(hba);
6810         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6811         /* If command is already aborted/completed, return SUCCESS */
6812         if (!(test_bit(tag, &hba->outstanding_reqs))) {
6813                 dev_err(hba->dev,
6814                         "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
6815                         __func__, tag, hba->outstanding_reqs, reg);
6816                 goto out;
6817         }
6818
6819         if (!(reg & (1 << tag))) {
6820                 dev_err(hba->dev,
6821                 "%s: cmd was completed, but without a notifying intr, tag = %d",
6822                 __func__, tag);
6823         }
6824
6825         /* Print Transfer Request of aborted task */
6826         dev_err(hba->dev, "%s: Device abort task at tag %d", __func__, tag);
6827
6828         /*
6829          * Print detailed info about aborted request.
6830          * As more than one request might get aborted at the same time,
6831          * print full information only for the first aborted request in order
6832          * to reduce repeated printouts. For other aborted requests only print
6833          * basic details.
6834          */
6835         scsi_print_command(cmd);
6836         if (!hba->req_abort_count) {
6837                 ufshcd_print_fsm_state(hba);
6838                 ufshcd_print_host_regs(hba);
6839                 ufshcd_print_host_state(hba);
6840                 ufshcd_print_pwr_info(hba);
6841                 ufshcd_print_trs(hba, 1 << tag, true);
6842         } else {
6843                 ufshcd_print_trs(hba, 1 << tag, false);
6844         }
6845         hba->req_abort_count++;
6846
6847
6848         /* Skip task abort in case previous aborts failed and report failure */
6849         if (lrbp->req_abort_skip) {
6850                 err = -EIO;
6851                 goto out;
6852         }
6853
6854         for (poll_cnt = 100; poll_cnt; poll_cnt--) {
6855                 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6856                                 UFS_QUERY_TASK, &resp);
6857                 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
6858                         /* cmd pending in the device */
6859                         dev_err(hba->dev, "%s: cmd pending in the device. tag = %d",
6860                                 __func__, tag);
6861                         break;
6862                 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6863                         /*
6864                          * cmd not pending in the device, check if it is
6865                          * in transition.
6866                          */
6867                         dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.",
6868                                 __func__, tag);
6869                         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6870                         if (reg & (1 << tag)) {
6871                                 /* sleep for max. 200us to stabilize */
6872                                 usleep_range(100, 200);
6873                                 continue;
6874                         }
6875                         /* command completed already */
6876                         dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.",
6877                                 __func__, tag);
6878                         goto out;
6879                 } else {
6880                         dev_err(hba->dev,
6881                                 "%s: no response from device. tag = %d, err %d",
6882                                 __func__, tag, err);
6883                         if (!err)
6884                                 err = resp; /* service response error */
6885                         goto out;
6886                 }
6887         }
6888
6889         if (!poll_cnt) {
6890                 err = -EBUSY;
6891                 goto out;
6892         }
6893
6894         err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6895                         UFS_ABORT_TASK, &resp);
6896         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6897                 if (!err) {
6898                         err = resp; /* service response error */
6899                         dev_err(hba->dev, "%s: issued. tag = %d, err %d",
6900                                 __func__, tag, err);
6901                 }
6902                 goto out;
6903         }
6904
6905         err = ufshcd_clear_cmd(hba, tag);
6906         if (err) {
6907                 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d",
6908                         __func__, tag, err);
6909                 goto out;
6910         }
6911
6912         scsi_dma_unmap(cmd);
6913
6914         spin_lock_irqsave(host->host_lock, flags);
6915         ufshcd_outstanding_req_clear(hba, tag);
6916         hba->lrb[tag].cmd = NULL;
6917         spin_unlock_irqrestore(host->host_lock, flags);
6918
6919         clear_bit_unlock(tag, &hba->lrb_in_use);
6920         wake_up(&hba->dev_cmd.tag_wq);
6921
6922 out:
6923         if (!err) {
6924                 err = SUCCESS;
6925         } else {
6926                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6927                 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
6928                 err = FAILED;
6929         }
6930
6931         /*
6932          * This ufshcd_release_all() corresponds to the original scsi cmd that
6933          * got aborted here (as we won't get any IRQ for it).
6934          */
6935         ufshcd_release_all(hba);
6936         return err;
6937 }
6938
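/*
 * Abort sequence implemented by ufshcd_abort() above, summarised
 * (descriptive sketch only):
 *
 *      1. UFS_QUERY_TASK (polled up to 100 times) to learn whether the
 *         command is still pending in the device or merely in transition;
 *      2. UFS_ABORT_TASK once the device confirms the command is pending;
 *      3. ufshcd_clear_cmd() to clear the corresponding doorbell bit in the
 *         host controller;
 *      4. release the tag/LRB and wake waiters on dev_cmd.tag_wq.
 */
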
6939 /**
6940  * ufshcd_host_reset_and_restore - reset and restore host controller
6941  * @hba: per-adapter instance
6942  *
6943  * Note that the host controller reset may issue DME_RESET to
6944  * the local and remote (device) Uni-Pro stacks, and the attributes
6945  * are reset to their default state.
6946  *
6947  * Returns zero on success, non-zero on failure
6948  */
6949 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
6950 {
6951         int err;
6952         unsigned long flags;
6953
6954         /* Reset the host controller */
6955         spin_lock_irqsave(hba->host->host_lock, flags);
6956         ufshcd_hba_stop(hba, false);
6957         spin_unlock_irqrestore(hba->host->host_lock, flags);
6958
6959         /* scale up clocks to max frequency before full reinitialization */
6960         ufshcd_set_clk_freq(hba, true);
6961
6962         err = ufshcd_hba_enable(hba);
6963         if (err)
6964                 goto out;
6965
6966         /* Establish the link again and restore the device */
6967         err = ufshcd_probe_hba(hba);
6968
6969         if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) {
6970                 err = -EIO;
6971                 goto out;
6972         }
6973
6974         if (!err) {
6975                 err = ufshcd_vops_crypto_engine_reset(hba);
6976                 if (err) {
6977                         dev_err(hba->dev,
6978                                 "%s: failed to reset crypto engine %d\n",
6979                                 __func__, err);
6980                         goto out;
6981                 }
6982         }
6983
6984 out:
6985         if (err)
6986                 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
6987
6988         return err;
6989 }
6990
6991 /**
6992  * ufshcd_reset_and_restore - reset and re-initialize host/device
6993  * @hba: per-adapter instance
6994  *
6995  * Reset and recover device, host and re-establish link. This
6996  * is helpful to recover the communication in fatal error conditions.
6997  *
6998  * Returns zero on success, non-zero on failure
6999  */
7000 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
7001 {
7002         int err = 0;
7003         unsigned long flags;
7004         int retries = MAX_HOST_RESET_RETRIES;
7005
7006         do {
7007                 err = ufshcd_vops_full_reset(hba);
7008                 if (err)
7009                         dev_warn(hba->dev, "%s: full reset returned %d\n",
7010                                  __func__, err);
7011
7012                 err = ufshcd_reset_device(hba);
7013                 if (err)
7014                         dev_warn(hba->dev, "%s: device reset failed. err %d\n",
7015                                  __func__, err);
7016
7017                 err = ufshcd_host_reset_and_restore(hba);
7018         } while (err && --retries);
7019
7020         /*
7021          * There is no point in proceeding if we have failed
7022          * to recover after multiple retries.
7023          */
7024         if (err)
7025                 BUG();
7026         /*
7027          * After reset the door-bell might be cleared, complete
7028          * outstanding requests in s/w here.
7029          */
7030         spin_lock_irqsave(hba->host->host_lock, flags);
7031         ufshcd_transfer_req_compl(hba);
7032         ufshcd_tmc_handler(hba);
7033         spin_unlock_irqrestore(hba->host->host_lock, flags);
7034
7035         return err;
7036 }
7037
7038 /**
7039  * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
7040  * @cmd: SCSI command pointer
7041  *
7042  * Returns SUCCESS/FAILED
7043  */
7044 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
7045 {
7046         int err = SUCCESS;
7047         unsigned long flags;
7048         struct ufs_hba *hba;
7049
7050         hba = shost_priv(cmd->device->host);
7051
7052         /*
7053          * Check if there is any race with fatal error handling.
7054          * If so, wait for it to complete. Even though fatal error
7055          * handling does reset and restore in some cases, don't assume
7056          * anything out of it. We are just avoiding a race here.
7057          */
7058         do {
7059                 spin_lock_irqsave(hba->host->host_lock, flags);
7060                 if (!(work_pending(&hba->eh_work) ||
7061                                 hba->ufshcd_state == UFSHCD_STATE_RESET))
7062                         break;
7063                 spin_unlock_irqrestore(hba->host->host_lock, flags);
7064                 dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
7065                 flush_work(&hba->eh_work);
7066         } while (1);
7067
7068         /*
7069          * We don't know whether the previous reset really reset the host
7070          * controller or not, so force a reset here to be sure.
7071          */
7072         hba->ufshcd_state = UFSHCD_STATE_ERROR;
7073         hba->force_host_reset = true;
7074         schedule_work(&hba->eh_work);
7075
7076         /* wait for the reset work to finish */
7077         do {
7078                 if (!(work_pending(&hba->eh_work) ||
7079                                 hba->ufshcd_state == UFSHCD_STATE_RESET))
7080                         break;
7081                 spin_unlock_irqrestore(hba->host->host_lock, flags);
7082                 dev_err(hba->dev, "%s: reset in progress - 2\n", __func__);
7083                 flush_work(&hba->eh_work);
7084                 spin_lock_irqsave(hba->host->host_lock, flags);
7085         } while (1);
7086
7087         if (!((hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) &&
7088               ufshcd_is_link_active(hba))) {
7089                 err = FAILED;
7090                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
7091         }
7092
7093         spin_unlock_irqrestore(hba->host->host_lock, flags);
7094
7095         return err;
7096 }
7097
7098 /**
7099  * ufshcd_get_max_icc_level - calculate the ICC level
7100  * @sup_curr_uA: max. current supported by the regulator
7101  * @start_scan: row of the descriptor table to start the scan from
7102  * @buff: power descriptor buffer
7103  *
7104  * Returns calculated max ICC level for specific regulator
7105  */
7106 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
7107 {
7108         int i;
7109         int curr_uA;
7110         u16 data;
7111         u16 unit;
7112
7113         for (i = start_scan; i >= 0; i--) {
7114                 data = be16_to_cpu(*((u16 *)(buff + 2*i)));
7115                 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
7116                                                 ATTR_ICC_LVL_UNIT_OFFSET;
7117                 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
7118                 switch (unit) {
7119                 case UFSHCD_NANO_AMP:
7120                         curr_uA = curr_uA / 1000;
7121                         break;
7122                 case UFSHCD_MILI_AMP:
7123                         curr_uA = curr_uA * 1000;
7124                         break;
7125                 case UFSHCD_AMP:
7126                         curr_uA = curr_uA * 1000 * 1000;
7127                         break;
7128                 case UFSHCD_MICRO_AMP:
7129                 default:
7130                         break;
7131                 }
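                /*
                 * Example (illustrative values): an entry that decodes to
                 * unit == UFSHCD_MILI_AMP with value 100 is normalized to
                 * 100 * 1000 = 100000 uA before being compared against the
                 * regulator's supported current below.
                 */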
7132                 if (sup_curr_uA >= curr_uA)
7133                         break;
7134         }
7135         if (i < 0) {
7136                 i = 0;
7137                 pr_err("%s: Couldn't find valid icc_level, using default %d\n", __func__, i);
7138         }
7139
7140         return (u32)i;
7141 }
7142
7143 /**
7144  * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
7145  * In case the regulators are not initialized, 0 is returned.
7146  * @hba: per-adapter instance
7147  * @desc_buf: power descriptor buffer to extract ICC levels from.
7148  * @len: length of desc_buf
7149  *
7150  * Returns calculated ICC level
7151  */
7152 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
7153                                                         u8 *desc_buf, int len)
7154 {
7155         u32 icc_level = 0;
7156
7157         if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
7158                                                 !hba->vreg_info.vccq2) {
7159                 dev_err(hba->dev,
7160                         "%s: Regulator capability was not set, actvIccLevel=%d",
7161                                                         __func__, icc_level);
7162                 goto out;
7163         }
7164
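        /*
         * Scan each rail in turn: the level found for VCC is used as the
         * starting row for the VCCQ scan, and that result in turn bounds the
         * VCCQ2 scan, so the final icc_level is the highest level whose
         * current draw fits within all three regulators.
         */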
7165         if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
7166                 icc_level = ufshcd_get_max_icc_level(
7167                                 hba->vreg_info.vcc->max_uA,
7168                                 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
7169                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
7170
7171         if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
7172                 icc_level = ufshcd_get_max_icc_level(
7173                                 hba->vreg_info.vccq->max_uA,
7174                                 icc_level,
7175                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
7176
7177         if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
7178                 icc_level = ufshcd_get_max_icc_level(
7179                                 hba->vreg_info.vccq2->max_uA,
7180                                 icc_level,
7181                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
7182 out:
7183         return icc_level;
7184 }
7185
7186 static void ufshcd_init_icc_levels(struct ufs_hba *hba)
7187 {
7188         int ret;
7189         int buff_len = QUERY_DESC_POWER_MAX_SIZE;
7190         u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
7191
7192         ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
7193         if (ret) {
7194                 dev_err(hba->dev,
7195                         "%s: Failed reading power descriptor. len = %d ret = %d",
7196                         __func__, buff_len, ret);
7197                 return;
7198         }
7199
7200         hba->init_prefetch_data.icc_level =
7201                         ufshcd_find_max_sup_active_icc_level(hba,
7202                         desc_buf, buff_len);
7203         dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
7204                         __func__, hba->init_prefetch_data.icc_level);
7205
7206         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7207                 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
7208                 &hba->init_prefetch_data.icc_level);
7209
7210         if (ret)
7211                 dev_err(hba->dev,
7212                         "%s: Failed configuring bActiveICCLevel = %d ret = %d",
7213                         __func__, hba->init_prefetch_data.icc_level, ret);
7214
7215 }
7216
7217 /**
7218  * ufshcd_scsi_add_wlus - Adds required W-LUs
7219  * @hba: per-adapter instance
7220  *
7221  * UFS device specification requires the UFS devices to support 4 well known
7222  * logical units:
7223  *      "REPORT_LUNS" (address: 01h)
7224  *      "UFS Device" (address: 50h)
7225  *      "RPMB" (address: 44h)
7226  *      "BOOT" (address: 30h)
7227  * UFS device's power management needs to be controlled by "POWER CONDITION"
7228  * field of SSU (START STOP UNIT) command. But this "power condition" field
7229  * will take effect only when it's sent to the "UFS device" well known logical unit,
7230  * hence we require the scsi_device instance to represent this logical unit in
7231  * order for the UFS host driver to send the SSU command for power management.
7232  *
7233  * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
7234  * Block) LU so that user space can control this LU. User space may also
7235  * want to have access to the BOOT LU.
7236  *
7237  * This function adds scsi device instances for all of the well known LUs
7238  * (except "REPORT LUNS" LU).
7239  *
7240  * Returns zero on success (all required W-LUs are added successfully),
7241  * non-zero error value on failure (if failed to add any of the required W-LU).
7242  */
7243 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
7244 {
7245         int ret = 0;
7246         struct scsi_device *sdev_rpmb;
7247         struct scsi_device *sdev_boot;
7248
7249         hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
7250                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
7251         if (IS_ERR(hba->sdev_ufs_device)) {
7252                 ret = PTR_ERR(hba->sdev_ufs_device);
7253                 hba->sdev_ufs_device = NULL;
7254                 goto out;
7255         }
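        /*
         * __scsi_add_device() returns the device with a reference held for
         * the caller; drop it here, the W-LU itself stays registered with
         * the SCSI midlayer.
         */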
7256         scsi_device_put(hba->sdev_ufs_device);
7257
7258         sdev_boot = __scsi_add_device(hba->host, 0, 0,
7259                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
7260         if (IS_ERR(sdev_boot)) {
7261                 ret = PTR_ERR(sdev_boot);
7262                 goto remove_sdev_ufs_device;
7263         }
7264         scsi_device_put(sdev_boot);
7265
7266         sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
7267                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
7268         if (IS_ERR(sdev_rpmb)) {
7269                 ret = PTR_ERR(sdev_rpmb);
7270                 goto remove_sdev_boot;
7271         }
7272         scsi_device_put(sdev_rpmb);
7273         goto out;
7274
7275 remove_sdev_boot:
7276         scsi_remove_device(sdev_boot);
7277 remove_sdev_ufs_device:
7278         scsi_remove_device(hba->sdev_ufs_device);
7279 out:
7280         return ret;
7281 }
7282
7283 /**
7284  * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
7285  * @hba: per-adapter instance
7286  *
7287  * PA_TActivate parameter can be tuned manually if UniPro version is less than
7288  * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
7289  * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
7290  * the hibern8 exit latency.
7291  *
7292  * Returns zero on success, non-zero error value on failure.
7293  */
7294 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
7295 {
7296         int ret = 0;
7297         u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
7298
7299         if (!ufshcd_is_unipro_pa_params_tuning_req(hba))
7300                 return 0;
7301
7302         ret = ufshcd_dme_peer_get(hba,
7303                                   UIC_ARG_MIB_SEL(
7304                                         RX_MIN_ACTIVATETIME_CAPABILITY,
7305                                         UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7306                                   &peer_rx_min_activatetime);
7307         if (ret)
7308                 goto out;
7309
7310         /* make sure proper unit conversion is applied */
7311         tuned_pa_tactivate =
7312                 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
7313                  / PA_TACTIVATE_TIME_UNIT_US);
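        /*
         * The peer reports its minimum activate time in units of
         * RX_MIN_ACTIVATETIME_UNIT_US, while PA_TActivate is programmed in
         * units of PA_TACTIVATE_TIME_UNIT_US, hence the rescaling above
         * (the integer division rounds down).
         */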
7314         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7315                              tuned_pa_tactivate);
7316
7317 out:
7318         return ret;
7319 }
7320
7321 /**
7322  * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
7323  * @hba: per-adapter instance
7324  *
7325  * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
7326  * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
7327  * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
7328  * This optimal value can help reduce the hibern8 exit latency.
7329  *
7330  * Returns zero on success, non-zero error value on failure.
7331  */
7332 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
7333 {
7334         int ret = 0;
7335         u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
7336         u32 max_hibern8_time, tuned_pa_hibern8time;
7337
7338         ret = ufshcd_dme_get(hba,
7339                              UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
7340                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
7341                                   &local_tx_hibern8_time_cap);
7342         if (ret)
7343                 goto out;
7344
7345         ret = ufshcd_dme_peer_get(hba,
7346                                   UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
7347                                         UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7348                                   &peer_rx_hibern8_time_cap);
7349         if (ret)
7350                 goto out;
7351
7352         max_hibern8_time = max(local_tx_hibern8_time_cap,
7353                                peer_rx_hibern8_time_cap);
7354         /* make sure proper unit conversion is applied */
7355         tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
7356                                 / PA_HIBERN8_TIME_UNIT_US);
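        /*
         * As with PA_TActivate above, the larger of the two capabilities
         * (reported in HIBERN8TIME_UNIT_US units) is rescaled into
         * PA_Hibern8Time's own PA_HIBERN8_TIME_UNIT_US granularity before
         * being programmed.
         */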
7357         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
7358                              tuned_pa_hibern8time);
7359 out:
7360         return ret;
7361 }
7362
7363 /**
7364  * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
7365  * less than device PA_TACTIVATE time.
7366  * @hba: per-adapter instance
7367  *
7368  * Some UFS devices require host PA_TACTIVATE to be lower than device
7369  * PA_TACTIVATE; the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk needs to be
7370  * enabled for such devices.
7371  *
7372  * Returns zero on success, non-zero error value on failure.
7373  */
7374 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
7375 {
7376         int ret = 0;
7377         u32 granularity, peer_granularity;
7378         u32 pa_tactivate, peer_pa_tactivate;
7379         u32 pa_tactivate_us, peer_pa_tactivate_us;
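        /* microseconds per PA_TACTIVATE step, indexed by PA_GRANULARITY code (1-based) */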
7380         u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
7381
7382         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7383                                   &granularity);
7384         if (ret)
7385                 goto out;
7386
7387         ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7388                                   &peer_granularity);
7389         if (ret)
7390                 goto out;
7391
7392         if ((granularity < PA_GRANULARITY_MIN_VAL) ||
7393             (granularity > PA_GRANULARITY_MAX_VAL)) {
7394                 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
7395                         __func__, granularity);
7396                 return -EINVAL;
7397         }
7398
7399         if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
7400             (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
7401                 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
7402                         __func__, peer_granularity);
7403                 return -EINVAL;
7404         }
7405
7406         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
7407         if (ret)
7408                 goto out;
7409
7410         ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
7411                                   &peer_pa_tactivate);
7412         if (ret)
7413                 goto out;
7414
7415         pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
7416         peer_pa_tactivate_us = peer_pa_tactivate *
7417                              gran_to_us_table[peer_granularity - 1];
7418
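        /*
         * Example (illustrative values): host granularity code 1 (1 us steps)
         * with PA_TACTIVATE == 64 gives 64 us; a peer at code 4 (16 us steps)
         * reporting less than that is bumped to 64 / 16 + 1 == 5 steps
         * (80 us), keeping the device value strictly above the host's.
         */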
7419         if (pa_tactivate_us > peer_pa_tactivate_us) {
7420                 u32 new_peer_pa_tactivate;
7421
7422                 new_peer_pa_tactivate = pa_tactivate_us /
7423                                       gran_to_us_table[peer_granularity - 1];
7424                 new_peer_pa_tactivate++;
7425                 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7426                                           new_peer_pa_tactivate);
7427         }
7428
7429 out:
7430         return ret;
7431 }
7432
7433 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
7434 {
7435         if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
7436                 ufshcd_tune_pa_tactivate(hba);
7437                 ufshcd_tune_pa_hibern8time(hba);
7438         }
7439
7440         if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
7441                 /* set 1ms timeout for PA_TACTIVATE */
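                /*
                 * PA_TACTIVATE counts PA_GRANULARITY steps; 10 steps of a
                 * 100 us step correspond to the 1 ms mentioned above (the
                 * exact time depends on the configured granularity).
                 */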
7442                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
7443
7444         if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
7445                 ufshcd_quirk_tune_host_pa_tactivate(hba);
7446
7447         ufshcd_vops_apply_dev_quirks(hba);
7448 }
7449
7450 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
7451 {
7452         int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
7453
7454         memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
7455         memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
7456         memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
7457         memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
7458         memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
7459
7460         hba->req_abort_count = 0;
7461 }
7462
7463 static void ufshcd_apply_pm_quirks(struct ufs_hba *hba)
7464 {
7465         if (hba->dev_quirks & UFS_DEVICE_QUIRK_NO_LINK_OFF) {
7466                 if (ufs_get_pm_lvl_to_link_pwr_state(hba->rpm_lvl) ==
7467                     UIC_LINK_OFF_STATE) {
7468                         hba->rpm_lvl =
7469                                 ufs_get_desired_pm_lvl_for_dev_link_state(
7470                                                 UFS_SLEEP_PWR_MODE,
7471                                                 UIC_LINK_HIBERN8_STATE);
7472                         dev_info(hba->dev, "UFS_DEVICE_QUIRK_NO_LINK_OFF enabled, changed rpm_lvl to %d\n",
7473                                 hba->rpm_lvl);
7474                 }
7475                 if (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
7476                     UIC_LINK_OFF_STATE) {
7477                         hba->spm_lvl =
7478                                 ufs_get_desired_pm_lvl_for_dev_link_state(
7479                                                 UFS_SLEEP_PWR_MODE,
7480                                                 UIC_LINK_HIBERN8_STATE);
7481                         dev_info(hba->dev, "UFS_DEVICE_QUIRK_NO_LINK_OFF enabled, changed spm_lvl to %d\n",
7482                                 hba->spm_lvl);
7483                 }
7484         }
7485 }
7486
7487 /**
7488  * ufshcd_probe_hba - probe hba to detect device and initialize
7489  * @hba: per-adapter instance
7490  *
7491  * Execute link-startup and verify device initialization
7492  */
7493 static int ufshcd_probe_hba(struct ufs_hba *hba)
7494 {
7495         int ret;
7496         ktime_t start = ktime_get();
7497
7498         ret = ufshcd_link_startup(hba);
7499         if (ret)
7500                 goto out;
7501
7502         /* Debug counters initialization */
7503         ufshcd_clear_dbg_ufs_stats(hba);
7504         /* set the default level for urgent bkops */
7505         hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
7506         hba->is_urgent_bkops_lvl_checked = false;
7507
7508         /* UniPro link is active now */
7509         ufshcd_set_link_active(hba);
7510
7511         ret = ufshcd_verify_dev_init(hba);
7512         if (ret)
7513                 goto out;
7514
7515         ret = ufshcd_complete_dev_init(hba);
7516         if (ret)
7517                 goto out;
7518
7519         ufs_advertise_fixup_device(hba);
7520         ufshcd_tune_unipro_params(hba);
7521
7522         ufshcd_apply_pm_quirks(hba);
7523         ret = ufshcd_set_vccq_rail_unused(hba,
7524                 (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
7525         if (ret)
7526                 goto out;
7527
7528         /* UFS device is also active now */
7529         ufshcd_set_ufs_dev_active(hba);
7530         ufshcd_force_reset_auto_bkops(hba);
7531         hba->wlun_dev_clr_ua = true;
7532
7533         if (ufshcd_get_max_pwr_mode(hba)) {
7534                 dev_err(hba->dev,
7535                         "%s: Failed getting max supported power mode\n",
7536                         __func__);
7537         } else {
7538                 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
7539                 if (ret) {
7540                         dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
7541                                         __func__, ret);
7542                         goto out;
7543                 }
7544         }
7545
7546         /* set the state as operational after switching to desired gear */
7547         hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
7548         /*
7549          * If we are in error handling context or in power management callback
7550          * context, there is no need to scan the host
7551          */
7552         if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
7553                 bool flag;
7554
7555                 /* clear any previous UFS device information */
7556                 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
7557                 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
7558                                 QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
7559                         hba->dev_info.f_power_on_wp_en = flag;
7560
7561                 if (!hba->is_init_prefetch)
7562                         ufshcd_init_icc_levels(hba);
7563
7564                 /* Add required well known logical units to scsi mid layer */
7565                 if (ufshcd_scsi_add_wlus(hba))
7566                         goto out;
7567
7568                 /* Initialize devfreq after UFS device is detected */
7569                 if (ufshcd_is_clkscaling_supported(hba)) {
7570                         memcpy(&hba->clk_scaling.saved_pwr_info.info,
7571                             &hba->pwr_info, sizeof(struct ufs_pa_layer_attr));
7572                         hba->clk_scaling.saved_pwr_info.is_valid = true;
7573                         hba->clk_scaling.is_scaled_up = true;
7574                         if (!hba->devfreq) {
7575                                 hba->devfreq = devfreq_add_device(hba->dev,
7576                                                         &ufs_devfreq_profile,
7577                                                         "simple_ondemand",
7578                                                         gov_data);
7579                                 if (IS_ERR(hba->devfreq)) {
7580                                         ret = PTR_ERR(hba->devfreq);
7581                                         dev_err(hba->dev, "Unable to register with devfreq %d\n",
7582                                                 ret);
7583                                         goto out;
7584                                 }
7585                         }
7586                         hba->clk_scaling.is_allowed = true;
7587                 }
7588
7589                 scsi_scan_host(hba->host);
7590                 pm_runtime_put_sync(hba->dev);
7591         }
7592
7593         if (!hba->is_init_prefetch)
7594                 hba->is_init_prefetch = true;
7595
7596         /*
7597          * Enable auto hibern8 if supported, after full host and
7598          * device initialization.
7599          */
7600         if (ufshcd_is_auto_hibern8_supported(hba))
7601                 ufshcd_set_auto_hibern8_timer(hba,
7602                                       hba->hibern8_on_idle.delay_ms);
7603 out:
7604         /*
7605          * If we failed to initialize the device or the device is not
7606          * present, turn off the power/clocks etc.
7607          */
7608         if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
7609                 pm_runtime_put_sync(hba->dev);
7610                 ufshcd_hba_exit(hba);
7611         }
7612
7613         trace_ufshcd_init(dev_name(hba->dev), ret,
7614                 ktime_to_us(ktime_sub(ktime_get(), start)),
7615                 hba->curr_dev_pwr_mode, hba->uic_link_state);
7616         return ret;
7617 }
7618
7619 /**
7620  * ufshcd_async_scan - asynchronous execution for probing hba
7621  * @data: data pointer to pass to this function
7622  * @cookie: cookie data
7623  */
7624 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
7625 {
7626         struct ufs_hba *hba = (struct ufs_hba *)data;
7627
7628         /*
7629          * Don't allow clock gating and hibern8 entry, for faster device
7630          * detection.
7631          */
7632         ufshcd_hold_all(hba);
7633         ufshcd_probe_hba(hba);
7634         ufshcd_release_all(hba);
7635 }
7636
7637 /**
7638  * ufshcd_query_ioctl - perform user read queries
7639  * @hba: per-adapter instance
7640  * @lun: used for lun specific queries
7641  * @buffer: user space buffer for reading and submitting query data and params
7642  * @return: 0 for success, negative error code otherwise
7643  *
7644  * Expected/Submitted buffer structure is struct ufs_ioctl_query_data.
7645  * It will read the opcode, idn and buf_size parameters, and put the
7646  * response in the buffer field while updating the used size in buf_size.
7647  */
7648 static int ufshcd_query_ioctl(struct ufs_hba *hba, u8 lun, void __user *buffer)
7649 {
7650         struct ufs_ioctl_query_data *ioctl_data;
7651         int err = 0;
7652         int length = 0;
7653         void *data_ptr;
7654         bool flag;
7655         u32 att;
7656         u8 index;
7657         u8 *desc = NULL;
7658
7659         ioctl_data = kzalloc(sizeof(struct ufs_ioctl_query_data), GFP_KERNEL);
7660         if (!ioctl_data) {
7661                 dev_err(hba->dev, "%s: Failed allocating %zu bytes\n", __func__,
7662                                 sizeof(struct ufs_ioctl_query_data));
7663                 err = -ENOMEM;
7664                 goto out;
7665         }
7666
7667         /* extract params from user buffer */
7668         err = copy_from_user(ioctl_data, buffer,
7669                         sizeof(struct ufs_ioctl_query_data));
7670         if (err) {
7671                 dev_err(hba->dev,
7672                         "%s: Failed copying buffer from user, err %d\n",
7673                         __func__, err);
7674                 goto out_release_mem;
7675         }
7676
7677         /* verify legal parameters & send query */
7678         switch (ioctl_data->opcode) {
7679         case UPIU_QUERY_OPCODE_READ_DESC:
7680                 switch (ioctl_data->idn) {
7681                 case QUERY_DESC_IDN_DEVICE:
7682                 case QUERY_DESC_IDN_CONFIGURAION:
7683                 case QUERY_DESC_IDN_INTERCONNECT:
7684                 case QUERY_DESC_IDN_GEOMETRY:
7685                 case QUERY_DESC_IDN_POWER:
7686                         index = 0;
7687                         break;
7688                 case QUERY_DESC_IDN_UNIT:
7689                         if (!ufs_is_valid_unit_desc_lun(lun)) {
7690                                 dev_err(hba->dev,
7691                                         "%s: No unit descriptor for lun 0x%x\n",
7692                                         __func__, lun);
7693                                 err = -EINVAL;
7694                                 goto out_release_mem;
7695                         }
7696                         index = lun;
7697                         break;
7698                 default:
7699                         goto out_einval;
7700                 }
7701                 length = min_t(int, QUERY_DESC_MAX_SIZE,
7702                                 ioctl_data->buf_size);
7703                 desc = kzalloc(length, GFP_KERNEL);
7704                 if (!desc) {
7705                         dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
7706                                         __func__, length);
7707                         err = -ENOMEM;
7708                         goto out_release_mem;
7709                 }
7710                 err = ufshcd_query_descriptor(hba, ioctl_data->opcode,
7711                                 ioctl_data->idn, index, 0, desc, &length);
7712                 break;
7713         case UPIU_QUERY_OPCODE_READ_ATTR:
7714                 switch (ioctl_data->idn) {
7715                 case QUERY_ATTR_IDN_BOOT_LU_EN:
7716                 case QUERY_ATTR_IDN_POWER_MODE:
7717                 case QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
7718                 case QUERY_ATTR_IDN_OOO_DATA_EN:
7719                 case QUERY_ATTR_IDN_BKOPS_STATUS:
7720                 case QUERY_ATTR_IDN_PURGE_STATUS:
7721                 case QUERY_ATTR_IDN_MAX_DATA_IN:
7722                 case QUERY_ATTR_IDN_MAX_DATA_OUT:
7723                 case QUERY_ATTR_IDN_REF_CLK_FREQ:
7724                 case QUERY_ATTR_IDN_CONF_DESC_LOCK:
7725                 case QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
7726                 case QUERY_ATTR_IDN_EE_CONTROL:
7727                 case QUERY_ATTR_IDN_EE_STATUS:
7728                 case QUERY_ATTR_IDN_SECONDS_PASSED:
7729                         index = 0;
7730                         break;
7731                 case QUERY_ATTR_IDN_DYN_CAP_NEEDED:
7732                 case QUERY_ATTR_IDN_CORR_PRG_BLK_NUM:
7733                         index = lun;
7734                         break;
7735                 default:
7736                         goto out_einval;
7737                 }
7738                 err = ufshcd_query_attr(hba, ioctl_data->opcode, ioctl_data->idn,
7739                                         index, 0, &att);
7740                 break;
7741
7742         case UPIU_QUERY_OPCODE_WRITE_ATTR:
7743                 err = copy_from_user(&att,
7744                                 buffer + sizeof(struct ufs_ioctl_query_data),
7745                                 sizeof(u32));
7746                 if (err) {
7747                         dev_err(hba->dev,
7748                                 "%s: Failed copying buffer from user, err %d\n",
7749                                 __func__, err);
7750                         goto out_release_mem;
7751                 }
7752
7753                 switch (ioctl_data->idn) {
7754                 case QUERY_ATTR_IDN_BOOT_LU_EN:
7755                         index = 0;
7756                         if (att > QUERY_ATTR_IDN_BOOT_LU_EN_MAX) {
7757                                 dev_err(hba->dev,
7758                                         "%s: Illegal ufs query ioctl data, opcode 0x%x, idn 0x%x, att 0x%x\n",
7759                                         __func__, ioctl_data->opcode,
7760                                         (unsigned int)ioctl_data->idn, att);
7761                                 err = -EINVAL;
7762                                 goto out_release_mem;
7763                         }
7764                         break;
7765                 default:
7766                         goto out_einval;
7767                 }
7768                 err = ufshcd_query_attr(hba, ioctl_data->opcode,
7769                                         ioctl_data->idn, index, 0, &att);
7770                 break;
7771
7772         case UPIU_QUERY_OPCODE_READ_FLAG:
7773                 switch (ioctl_data->idn) {
7774                 case QUERY_FLAG_IDN_FDEVICEINIT:
7775                 case QUERY_FLAG_IDN_PERMANENT_WPE:
7776                 case QUERY_FLAG_IDN_PWR_ON_WPE:
7777                 case QUERY_FLAG_IDN_BKOPS_EN:
7778                 case QUERY_FLAG_IDN_PURGE_ENABLE:
7779                 case QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL:
7780                 case QUERY_FLAG_IDN_BUSY_RTC:
7781                         break;
7782                 default:
7783                         goto out_einval;
7784                 }
7785                 err = ufshcd_query_flag_retry(hba, ioctl_data->opcode,
7786                                 ioctl_data->idn, &flag);
7787                 break;
7788         default:
7789                 goto out_einval;
7790         }
7791
7792         if (err) {
7793                 dev_err(hba->dev, "%s: Query for idn %d failed\n", __func__,
7794                                 ioctl_data->idn);
7795                 goto out_release_mem;
7796         }
7797
7798         /*
7799          * copy response data
7800          * As we might end up reading less data than what is specified in
7801          * "ioctl_data->buf_size", we update "ioctl_data->buf_size" to
7802          * the number of bytes actually read.
7803          */
7804         switch (ioctl_data->opcode) {
7805         case UPIU_QUERY_OPCODE_READ_DESC:
7806                 ioctl_data->buf_size = min_t(int, ioctl_data->buf_size, length);
7807                 data_ptr = desc;
7808                 break;
7809         case UPIU_QUERY_OPCODE_READ_ATTR:
7810                 ioctl_data->buf_size = sizeof(u32);
7811                 data_ptr = &att;
7812                 break;
7813         case UPIU_QUERY_OPCODE_READ_FLAG:
7814                 ioctl_data->buf_size = 1;
7815                 data_ptr = &flag;
7816                 break;
7817         case UPIU_QUERY_OPCODE_WRITE_ATTR:
7818                 goto out_release_mem;
7819         default:
7820                 goto out_einval;
7821         }
7822
7823         /* copy to user */
7824         err = copy_to_user(buffer, ioctl_data,
7825                         sizeof(struct ufs_ioctl_query_data));
7826         if (err)
7827                 dev_err(hba->dev, "%s: Failed copying back to user.\n",
7828                         __func__);
7829         err = copy_to_user(buffer + sizeof(struct ufs_ioctl_query_data),
7830                         data_ptr, ioctl_data->buf_size);
7831         if (err)
7832                 dev_err(hba->dev, "%s: err %d copying back to user.\n",
7833                                 __func__, err);
7834         goto out_release_mem;
7835
7836 out_einval:
7837         dev_err(hba->dev,
7838                 "%s: illegal ufs query ioctl data, opcode 0x%x, idn 0x%x\n",
7839                 __func__, ioctl_data->opcode, (unsigned int)ioctl_data->idn);
7840         err = -EINVAL;
7841 out_release_mem:
7842         kfree(ioctl_data);
7843         kfree(desc);
7844 out:
7845         return err;
7846 }
7847
7848 /**
7849  * ufshcd_ioctl - ufs ioctl callback registered in scsi_host
7850  * @dev: scsi device required for per LUN queries
7851  * @cmd: command opcode
7852  * @buffer: user space buffer for transferring data
7853  *
7854  * Supported commands:
7855  * UFS_IOCTL_QUERY
7856  */
7857 static int ufshcd_ioctl(struct scsi_device *dev, int cmd, void __user *buffer)
7858 {
7859         struct ufs_hba *hba = shost_priv(dev->host);
7860         int err = 0;
7861
7862         BUG_ON(!hba);
7863         if (!buffer) {
7864                 dev_err(hba->dev, "%s: User buffer is NULL!\n", __func__);
7865                 return -EINVAL;
7866         }
7867
7868         switch (cmd) {
7869         case UFS_IOCTL_QUERY:
7870                 pm_runtime_get_sync(hba->dev);
7871                 err = ufshcd_query_ioctl(hba, ufshcd_scsi_to_upiu_lun(dev->lun),
7872                                 buffer);
7873                 pm_runtime_put_sync(hba->dev);
7874                 break;
7875         default:
7876                 err = -ENOIOCTLCMD;
7877                 dev_dbg(hba->dev, "%s: Unsupported ioctl cmd %d\n", __func__,
7878                         cmd);
7879                 break;
7880         }
7881
7882         return err;
7883 }
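/*
 * Typical user space flow for UFS_IOCTL_QUERY (illustrative sketch; see
 * scsi/ufs/ioctl.h for the exact layout): fill a struct ufs_ioctl_query_data
 * header with opcode, idn and buf_size, place any request payload (e.g. the
 * attribute value for WRITE_ATTR) immediately after the header in the same
 * buffer, and issue ioctl(fd, UFS_IOCTL_QUERY, buf) on the UFS SCSI device
 * node. On return, buf_size holds the number of response bytes that follow
 * the header.
 */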
7884
7885 static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
7886 {
7887         unsigned long flags;
7888         struct Scsi_Host *host;
7889         struct ufs_hba *hba;
7890         int index;
7891         bool found = false;
7892
7893         if (!scmd || !scmd->device || !scmd->device->host)
7894                 return BLK_EH_NOT_HANDLED;
7895
7896         host = scmd->device->host;
7897         hba = shost_priv(host);
7898         if (!hba)
7899                 return BLK_EH_NOT_HANDLED;
7900
7901         spin_lock_irqsave(host->host_lock, flags);
7902
7903         for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
7904                 if (hba->lrb[index].cmd == scmd) {
7905                         found = true;
7906                         break;
7907                 }
7908         }
7909
7910         spin_unlock_irqrestore(host->host_lock, flags);
7911
7912         /*
7913          * Bypass SCSI error handling and reset the block layer timer if this
7914          * SCSI command was not actually dispatched to the UFS driver, otherwise
7915          * let the SCSI layer handle the error as usual.
7916          */
7917         return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
7918 }
7919
7920 static struct scsi_host_template ufshcd_driver_template = {
7921         .module                 = THIS_MODULE,
7922         .name                   = UFSHCD,
7923         .proc_name              = UFSHCD,
7924         .queuecommand           = ufshcd_queuecommand,
7925         .slave_alloc            = ufshcd_slave_alloc,
7926         .slave_configure        = ufshcd_slave_configure,
7927         .slave_destroy          = ufshcd_slave_destroy,
7928         .change_queue_depth     = ufshcd_change_queue_depth,
7929         .eh_abort_handler       = ufshcd_abort,
7930         .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
7931         .eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
7932         .eh_timed_out           = ufshcd_eh_timed_out,
7933         .ioctl                  = ufshcd_ioctl,
7934 #ifdef CONFIG_COMPAT
7935         .compat_ioctl           = ufshcd_ioctl,
7936 #endif
7937         .this_id                = -1,
7938         .sg_tablesize           = SG_ALL,
7939         .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
7940         .can_queue              = UFSHCD_CAN_QUEUE,
7941         .max_host_blocked       = 1,
7942         .track_queue_depth      = 1,
7943 };
7944
7945 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
7946                                    int ua)
7947 {
7948         int ret;
7949
7950         if (!vreg)
7951                 return 0;
7952
7953         /*
7954          * The "set_load" operation is only required for regulators that
7955          * have an explicitly configured current limit. Otherwise a zero
7956          * max_uA may cause unexpected behavior when the regulator is
7957          * enabled or set to high power mode.
7958          */
7959         if (!vreg->max_uA)
7960                 return 0;
7961
7962         ret = regulator_set_load(vreg->reg, ua);
7963         if (ret < 0) {
7964                 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
7965                                 __func__, vreg->name, ua, ret);
7966         }
7967
7968         return ret;
7969 }
7970
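/*
 * The two helpers below switch a rail between its low power load
 * (UFS_VREG_LPM_LOAD_UA) and its full load (max_uA); rails marked unused
 * are left untouched.
 */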
7971 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
7972                                          struct ufs_vreg *vreg)
7973 {
7974         if (!vreg)
7975                 return 0;
7976         else if (vreg->unused)
7977                 return 0;
7978         else
7979                 return ufshcd_config_vreg_load(hba->dev, vreg,
7980                                                UFS_VREG_LPM_LOAD_UA);
7981 }
7982
7983 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
7984                                          struct ufs_vreg *vreg)
7985 {
7986         if (!vreg)
7987                 return 0;
7988         else if (vreg->unused)
7989                 return 0;
7990         else
7991                 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
7992 }
7993
7994 static int ufshcd_config_vreg(struct device *dev,
7995                 struct ufs_vreg *vreg, bool on)
7996 {
7997         int ret = 0;
7998         struct regulator *reg;
7999         const char *name;
8000         int min_uV, uA_load;
8001
8002         BUG_ON(!vreg);
8003
8004         reg = vreg->reg;
8005         name = vreg->name;
8006
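        /* only regulators that expose voltage control need load/voltage setup */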
8007         if (regulator_count_voltages(reg) > 0) {
8008                 uA_load = on ? vreg->max_uA : 0;
8009                 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
8010                 if (ret)
8011                         goto out;
8012
8013                 if (vreg->min_uV && vreg->max_uV) {
8014                         min_uV = on ? vreg->min_uV : 0;
8015                         ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
8016                         if (ret) {
8017                                 dev_err(dev,
8018                                         "%s: %s set voltage failed, err=%d\n",
8019                                         __func__, name, ret);
8020                                 goto out;
8021                         }
8022                 }
8023         }
8024 out:
8025         return ret;
8026 }
8027
8028 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
8029 {
8030         int ret = 0;
8031
8032         if (!vreg)
8033                 goto out;
8034         else if (vreg->enabled || vreg->unused)
8035                 goto out;
8036
8037         ret = ufshcd_config_vreg(dev, vreg, true);
8038         if (!ret)
8039                 ret = regulator_enable(vreg->reg);
8040
8041         if (!ret)
8042                 vreg->enabled = true;
8043         else
8044                 dev_err(dev, "%s: %s enable failed, err=%d\n",
8045                                 __func__, vreg->name, ret);
8046 out:
8047         return ret;
8048 }
8049
8050 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
8051 {
8052         int ret = 0;
8053
8054         if (!vreg)
8055                 goto out;
8056         else if (!vreg->enabled || vreg->unused)
8057                 goto out;
8058
8059         ret = regulator_disable(vreg->reg);
8060
8061         if (!ret) {
8062                 /* ignore errors on applying disable config */
8063                 ufshcd_config_vreg(dev, vreg, false);
8064                 vreg->enabled = false;
8065         } else {
8066                 dev_err(dev, "%s: %s disable failed, err=%d\n",
8067                                 __func__, vreg->name, ret);
8068         }
8069 out:
8070         return ret;
8071 }
8072
8073 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
8074 {
8075         int ret = 0;
8076         struct device *dev = hba->dev;
8077         struct ufs_vreg_info *info = &hba->vreg_info;
8078
8079         if (!info)
8080                 goto out;
8081
8082         ret = ufshcd_toggle_vreg(dev, info->vcc, on);
8083         if (ret)
8084                 goto out;
8085
8086         ret = ufshcd_toggle_vreg(dev, info->vccq, on);
8087         if (ret)
8088                 goto out;
8089
8090         ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
8091         if (ret)
8092                 goto out;
8093
8094 out:
8095         if (ret) {
8096                 ufshcd_toggle_vreg(dev, info->vccq2, false);
8097                 ufshcd_toggle_vreg(dev, info->vccq, false);
8098                 ufshcd_toggle_vreg(dev, info->vcc, false);
8099         }
8100         return ret;
8101 }
8102
8103 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
8104 {
8105         struct ufs_vreg_info *info = &hba->vreg_info;
8106         int ret = 0;
8107
8108         if (info->vdd_hba) {
8109                 ret = ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
8110
8111                 if (!ret)
8112                         ufshcd_vops_update_sec_cfg(hba, on);
8113         }
8114
8115         return ret;
8116 }
8117
8118 static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
8119 {
8120         int ret = 0;
8121
8122         if (!vreg)
8123                 goto out;
8124
8125         vreg->reg = devm_regulator_get(dev, vreg->name);
8126         if (IS_ERR(vreg->reg)) {
8127                 ret = PTR_ERR(vreg->reg);
8128                 dev_err(dev, "%s: %s get failed, err=%d\n",
8129                                 __func__, vreg->name, ret);
8130         }
8131 out:
8132         return ret;
8133 }
8134
8135 static int ufshcd_init_vreg(struct ufs_hba *hba)
8136 {
8137         int ret = 0;
8138         struct device *dev = hba->dev;
8139         struct ufs_vreg_info *info = &hba->vreg_info;
8140
8141         if (!info)
8142                 goto out;
8143
8144         ret = ufshcd_get_vreg(dev, info->vcc);
8145         if (ret)
8146                 goto out;
8147
8148         ret = ufshcd_get_vreg(dev, info->vccq);
8149         if (ret)
8150                 goto out;
8151
8152         ret = ufshcd_get_vreg(dev, info->vccq2);
8153 out:
8154         return ret;
8155 }
8156
8157 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
8158 {
8159         struct ufs_vreg_info *info = &hba->vreg_info;
8160
8161         if (info)
8162                 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
8163
8164         return 0;
8165 }
8166
8167 static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
8168 {
8169         int ret = 0;
8170         struct ufs_vreg_info *info = &hba->vreg_info;
8171
8172         if (!info)
8173                 goto out;
8174         else if (!info->vccq)
8175                 goto out;
8176
8177         if (unused) {
8178                 /* shut off the rail here */
8179                 ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
8180                 /*
8181                  * Mark this rail as no longer used, so it doesn't get enabled
8182                  * later by mistake
8183                  */
8184                 if (!ret)
8185                         info->vccq->unused = true;
8186         } else {
8187                 /*
8188                  * The rail should already have been enabled, so just make
8189                  * sure that the unused flag is cleared.
8190                  */
8191                 info->vccq->unused = false;
8192         }
8193 out:
8194         return ret;
8195 }
8196
8197 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
8198                                bool skip_ref_clk, bool is_gating_context)
8199 {
8200         int ret = 0;
8201         struct ufs_clk_info *clki;
8202         struct list_head *head = &hba->clk_list_head;
8203         unsigned long flags;
8204         ktime_t start = ktime_get();
8205         bool clk_state_changed = false;
8206
8207         if (!head || list_empty(head))
8208                 goto out;
8209
8210         /* call vendor specific bus vote before enabling the clocks */
8211         if (on) {
8212                 ret = ufshcd_vops_set_bus_vote(hba, on);
8213                 if (ret)
8214                         return ret;
8215         }
8216
8217         /*
8218          * vendor specific setup_clocks ops may depend on clocks managed by
8219          * this standard driver hence call the vendor specific setup_clocks
8220          * before disabling the clocks managed here.
8221          */
8222         if (!on) {
8223                 ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context);
8224                 if (ret)
8225                         return ret;
8226         }
8227
8228         list_for_each_entry(clki, head, list) {
8229                 if (!IS_ERR_OR_NULL(clki->clk)) {
8230                         if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
8231                                 continue;
8232
8233                         clk_state_changed = on ^ clki->enabled;
8234                         if (on && !clki->enabled) {
8235                                 ret = clk_prepare_enable(clki->clk);
8236                                 if (ret) {
8237                                         dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
8238                                                 __func__, clki->name, ret);
8239                                         goto out;
8240                                 }
8241                         } else if (!on && clki->enabled) {
8242                                 clk_disable_unprepare(clki->clk);
8243                         }
8244                         clki->enabled = on;
8245                         dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
8246                                         clki->name, on ? "en" : "dis");
8247                 }
8248         }
8249
8250         /*
8251          * vendor specific setup_clocks ops may depend on clocks managed by
8252          * this standard driver hence call the vendor specific setup_clocks
8253          * after enabling the clocks managed here.
8254          */
8255         if (on) {
8256                 ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context);
8257                 if (ret)
8258                         goto out;
8259         }
8260
8261         /*
8262          * call vendor specific bus vote to remove the vote after
8263          * disabling the clocks.
8264          */
8265         if (!on)
8266                 ret = ufshcd_vops_set_bus_vote(hba, on);
8267
8268 out:
8269         if (ret) {
8270                 if (on)
8271                         /* Can't do much if this fails */
8272                         (void) ufshcd_vops_set_bus_vote(hba, false);
8273                 list_for_each_entry(clki, head, list) {
8274                         if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
8275                                 clk_disable_unprepare(clki->clk);
8276                 }
8277         } else if (!ret && on) {
8278                 spin_lock_irqsave(hba->host->host_lock, flags);
8279                 hba->clk_gating.state = CLKS_ON;
8280                 trace_ufshcd_clk_gating(dev_name(hba->dev),
8281                         hba->clk_gating.state);
8282                 spin_unlock_irqrestore(hba->host->host_lock, flags);
8283                 /* restore the secure configuration as clocks are enabled */
8284                 ufshcd_vops_update_sec_cfg(hba, true);
8285         }
8286
8287         if (clk_state_changed)
8288                 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
8289                         (on ? "on" : "off"),
8290                         ktime_to_us(ktime_sub(ktime_get(), start)), ret);
8291         return ret;
8292 }
8293
8294 static int ufshcd_enable_clocks(struct ufs_hba *hba)
8295 {
8296         return  ufshcd_setup_clocks(hba, true, false, false);
8297 }
8298
8299 static int ufshcd_disable_clocks(struct ufs_hba *hba,
8300                                  bool is_gating_context)
8301 {
8302         return  ufshcd_setup_clocks(hba, false, false, is_gating_context);
8303 }
8304
8305 static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
8306                                               bool is_gating_context)
8307 {
8308         return  ufshcd_setup_clocks(hba, false, true, is_gating_context);
8309 }
8310
8311 static int ufshcd_init_clocks(struct ufs_hba *hba)
8312 {
8313         int ret = 0;
8314         struct ufs_clk_info *clki;
8315         struct device *dev = hba->dev;
8316         struct list_head *head = &hba->clk_list_head;
8317
8318         if (!head || list_empty(head))
8319                 goto out;
8320
8321         list_for_each_entry(clki, head, list) {
8322                 if (!clki->name)
8323                         continue;
8324
8325                 clki->clk = devm_clk_get(dev, clki->name);
8326                 if (IS_ERR(clki->clk)) {
8327                         ret = PTR_ERR(clki->clk);
8328                         dev_err(dev, "%s: %s clk get failed, %d\n",
8329                                         __func__, clki->name, ret);
8330                         goto out;
8331                 }
8332
8333                 if (clki->max_freq) {
8334                         ret = clk_set_rate(clki->clk, clki->max_freq);
8335                         if (ret) {
8336                                 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
8337                                         __func__, clki->name,
8338                                         clki->max_freq, ret);
8339                                 goto out;
8340                         }
8341                         clki->curr_freq = clki->max_freq;
8342                 }
8343                 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
8344                                 clki->name, clk_get_rate(clki->clk));
8345         }
8346 out:
8347         return ret;
8348 }
8349
8350 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
8351 {
8352         int err = 0;
8353
8354         if (!hba->var || !hba->var->vops)
8355                 goto out;
8356
8357         err = ufshcd_vops_init(hba);
8358         if (err)
8359                 goto out;
8360
8361         err = ufshcd_vops_setup_regulators(hba, true);
8362         if (err)
8363                 goto out_exit;
8364
8365         goto out;
8366
8367 out_exit:
8368         ufshcd_vops_exit(hba);
8369 out:
8370         if (err)
8371                 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
8372                         __func__, ufshcd_get_var_name(hba), err);
8373         return err;
8374 }
8375
8376 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
8377 {
8378         if (!hba->var || !hba->var->vops)
8379                 return;
8380
8381         ufshcd_vops_setup_regulators(hba, false);
8382
8383         ufshcd_vops_exit(hba);
8384 }
8385
8386 static int ufshcd_hba_init(struct ufs_hba *hba)
8387 {
8388         int err;
8389
8390         /*
8391          * Handle host controller power separately from the UFS device power
8392          * rails, as this makes it easier to control host controller power
8393          * collapse, which is different from UFS device power collapse.
8394          * Also, enable the host controller power before going ahead with the
8395          * rest of the initialization here.
8396          */
8397         err = ufshcd_init_hba_vreg(hba);
8398         if (err)
8399                 goto out;
8400
8401         err = ufshcd_setup_hba_vreg(hba, true);
8402         if (err)
8403                 goto out;
8404
8405         err = ufshcd_init_clocks(hba);
8406         if (err)
8407                 goto out_disable_hba_vreg;
8408
8409         err = ufshcd_enable_clocks(hba);
8410         if (err)
8411                 goto out_disable_hba_vreg;
8412
8413         err = ufshcd_init_vreg(hba);
8414         if (err)
8415                 goto out_disable_clks;
8416
8417         err = ufshcd_setup_vreg(hba, true);
8418         if (err)
8419                 goto out_disable_clks;
8420
8421         err = ufshcd_variant_hba_init(hba);
8422         if (err)
8423                 goto out_disable_vreg;
8424
8425         hba->is_powered = true;
8426         goto out;
8427
8428 out_disable_vreg:
8429         ufshcd_setup_vreg(hba, false);
8430 out_disable_clks:
8431         ufshcd_disable_clocks(hba, false);
8432 out_disable_hba_vreg:
8433         ufshcd_setup_hba_vreg(hba, false);
8434 out:
8435         return err;
8436 }
8437
8438 static void ufshcd_hba_exit(struct ufs_hba *hba)
8439 {
8440         if (hba->is_powered) {
8441                 ufshcd_variant_hba_exit(hba);
8442                 ufshcd_setup_vreg(hba, false);
8443                 if (ufshcd_is_clkscaling_supported(hba)) {
8444                         if (hba->devfreq)
8445                                 ufshcd_suspend_clkscaling(hba);
8446                         if (hba->clk_scaling.workq)
8447                                 destroy_workqueue(hba->clk_scaling.workq);
8448                 }
8449                 ufshcd_disable_clocks(hba, false);
8450                 ufshcd_setup_hba_vreg(hba, false);
8451                 hba->is_powered = false;
8452         }
8453 }
8454
8455 static int
8456 ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
8457 {
8458         unsigned char cmd[6] = {REQUEST_SENSE,
8459                                 0,
8460                                 0,
8461                                 0,
8462                                 UFSHCD_REQ_SENSE_SIZE,
8463                                 0};
8464         char *buffer;
8465         int ret;
8466
8467         buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
8468         if (!buffer) {
8469                 ret = -ENOMEM;
8470                 goto out;
8471         }
8472
8473         ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
8474                                 UFSHCD_REQ_SENSE_SIZE, NULL,
8475                                 msecs_to_jiffies(1000), 3, NULL, REQ_PM);
8476         if (ret)
8477                 pr_err("%s: failed with err %d\n", __func__, ret);
8478
8479         kfree(buffer);
8480 out:
8481         return ret;
8482 }
8483
8484 /**
8485  * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
8486  *                           power mode
8487  * @hba: per adapter instance
8488  * @pwr_mode: device power mode to set
8489  *
8490  * Returns 0 if requested power mode is set successfully
8491  * Returns non-zero if failed to set the requested power mode
8492  */
8493 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
8494                                      enum ufs_dev_pwr_mode pwr_mode)
8495 {
8496         unsigned char cmd[6] = { START_STOP };
8497         struct scsi_sense_hdr sshdr;
8498         struct scsi_device *sdp;
8499         unsigned long flags;
8500         int ret;
8501
8502         spin_lock_irqsave(hba->host->host_lock, flags);
8503         sdp = hba->sdev_ufs_device;
8504         if (sdp) {
8505                 ret = scsi_device_get(sdp);
8506                 if (!ret && !scsi_device_online(sdp)) {
8507                         ret = -ENODEV;
8508                         scsi_device_put(sdp);
8509                 }
8510         } else {
8511                 ret = -ENODEV;
8512         }
8513         spin_unlock_irqrestore(hba->host->host_lock, flags);
8514
8515         if (ret)
8516                 return ret;
8517
8518         /*
8519          * If scsi commands fail, the scsi mid-layer schedules scsi error-
8520          * handling, which would wait for host to be resumed. Since we know
8521          * we are functional while we are here, skip host resume in error
8522          * handling context.
8523          */
8524         hba->host->eh_noresume = 1;
8525         if (hba->wlun_dev_clr_ua) {
8526                 ret = ufshcd_send_request_sense(hba, sdp);
8527                 if (ret)
8528                         goto out;
8529                 /* Unit attention condition is cleared now */
8530                 hba->wlun_dev_clr_ua = false;
8531         }
8532
8533         cmd[4] = pwr_mode << 4;
8534
8535         /*
8536          * This function is generally called from the power management
8537          * callbacks, hence set the REQ_PM flag so that it doesn't resume the
8538          * already suspended children.
8539          */
8540         ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
8541                                      START_STOP_TIMEOUT, 0, NULL, REQ_PM);
8542         if (ret) {
8543                 sdev_printk(KERN_WARNING, sdp,
8544                             "START_STOP failed for power mode: %d, result %x\n",
8545                             pwr_mode, ret);
8546                 if (driver_byte(ret) & DRIVER_SENSE)
8547                         scsi_print_sense_hdr(sdp, NULL, &sshdr);
8548         }
8549
8550         if (!ret)
8551                 hba->curr_dev_pwr_mode = pwr_mode;
8552 out:
8553         scsi_device_put(sdp);
8554         hba->host->eh_noresume = 0;
8555         return ret;
8556 }
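
/*
 * Worked example (illustrative only): per SPC/SBC the power condition field
 * of START STOP UNIT occupies bits 7:4 of CDB byte 4, which is why the code
 * above shifts pwr_mode left by 4. Assuming the usual enum ufs_dev_pwr_mode
 * encoding (UFS_ACTIVE_PWR_MODE = 1, UFS_SLEEP_PWR_MODE = 2,
 * UFS_POWERDOWN_PWR_MODE = 3), a request for UFS_SLEEP_PWR_MODE yields:
 *
 *      cmd[4] = UFS_SLEEP_PWR_MODE << 4;       => 0x20
 *
 * with the remaining CDB bytes left at zero, as initialized above.
 */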
8557
8558 static int ufshcd_link_state_transition(struct ufs_hba *hba,
8559                                         enum uic_link_state req_link_state,
8560                                         int check_for_bkops)
8561 {
8562         int ret = 0;
8563
8564         if (req_link_state == hba->uic_link_state)
8565                 return 0;
8566
8567         if (req_link_state == UIC_LINK_HIBERN8_STATE) {
8568                 ret = ufshcd_uic_hibern8_enter(hba);
8569                 if (!ret)
8570                         ufshcd_set_link_hibern8(hba);
8571                 else
8572                         goto out;
8573         }
8574         /*
8575          * If autobkops is enabled, link can't be turned off because
8576          * turning off the link would also turn off the device.
8577          */
8578         else if ((req_link_state == UIC_LINK_OFF_STATE) &&
8579                    (!check_for_bkops || (check_for_bkops &&
8580                     !hba->auto_bkops_enabled))) {
8581                 /*
8582                  * Let's make sure that the link is in low power mode. We currently
8583                  * do this by putting the link in Hibern8. Another way to put the
8584                  * link in low power mode is to send a DME end point reset to the
8585                  * device and then send the DME reset command to the local UniPro,
8586                  * but putting the link in Hibern8 is much faster.
8587                  */
8588                 ret = ufshcd_uic_hibern8_enter(hba);
8589                 if (ret)
8590                         goto out;
8591                 /*
8592                  * Change controller state to "reset state" which
8593                  * should also put the link in off/reset state
8594                  */
8595                 ufshcd_hba_stop(hba, true);
8596                 /*
8597                  * TODO: Check if we need any delay to make sure that
8598                  * controller is reset
8599                  */
8600                 ufshcd_set_link_off(hba);
8601         }
8602
8603 out:
8604         return ret;
8605 }
8606
8607 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
8608 {
8609         /*
8610          * It seems some UFS devices may keep drawing more than sleep current
8611          * (at least for 500us) from UFS rails (especially from VCCQ rail).
8612          * To avoid this situation, add a 2ms delay before putting these UFS
8613          * rails in LPM mode.
8614          */
8615         if (!ufshcd_is_link_active(hba))
8616                 usleep_range(2000, 2100);
8617
8618         /*
8619          * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
8620          * save some power.
8621          *
8622          * If the UFS device and link are both in the OFF state, all power
8623          * supplies (VCC, VCCQ, VCCQ2) can be turned off if power on write
8624          * protect is not required. If the UFS link is inactive (Hibern8 or OFF
8625          * state) and the device is in sleep state, put VCCQ & VCCQ2 in LPM mode.
8626          *
8627          * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
8628          * in low power state which would save some power.
8629          */
8630         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8631             !hba->dev_info.is_lu_power_on_wp) {
8632                 ufshcd_setup_vreg(hba, false);
8633         } else if (!ufshcd_is_ufs_dev_active(hba)) {
8634                 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8635                 if (!ufshcd_is_link_active(hba)) {
8636                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8637                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
8638                 }
8639         }
8640 }
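
/*
 * Quick reference (derived purely from the checks in ufshcd_vreg_set_lpm()
 * above, listed here only for readability):
 *
 *   Device state        Link state              Rail action
 *   ------------        ----------              -----------------------------
 *   PowerDown           OFF (no power-on WP)    all supplies (VCC/VCCQ/VCCQ2) off
 *   Sleep/PowerDown     Active                  VCC off only
 *   Sleep/PowerDown     Hibern8/OFF             VCC off, VCCQ & VCCQ2 in LPM
 */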
8641
8642 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
8643 {
8644         int ret = 0;
8645
8646         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8647             !hba->dev_info.is_lu_power_on_wp) {
8648                 ret = ufshcd_setup_vreg(hba, true);
8649         } else if (!ufshcd_is_ufs_dev_active(hba)) {
8650                 if (!ret && !ufshcd_is_link_active(hba)) {
8651                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
8652                         if (ret)
8653                                 goto vcc_disable;
8654                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
8655                         if (ret)
8656                                 goto vccq_lpm;
8657                 }
8658                 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
8659         }
8660         goto out;
8661
8662 vccq_lpm:
8663         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8664 vcc_disable:
8665         ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8666 out:
8667         return ret;
8668 }
8669
8670 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
8671 {
8672         if (ufshcd_is_link_off(hba) ||
8673             (ufshcd_is_link_hibern8(hba)
8674              && ufshcd_is_power_collapse_during_hibern8_allowed(hba)))
8675                 ufshcd_setup_hba_vreg(hba, false);
8676 }
8677
8678 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
8679 {
8680         if (ufshcd_is_link_off(hba) ||
8681             (ufshcd_is_link_hibern8(hba)
8682              && ufshcd_is_power_collapse_during_hibern8_allowed(hba)))
8683                 ufshcd_setup_hba_vreg(hba, true);
8684 }
8685
8686 /**
8687  * ufshcd_suspend - helper function for suspend operations
8688  * @hba: per adapter instance
8689  * @pm_op: desired low power operation type
8690  *
8691  * This function will try to put the UFS device and link into low power
8692  * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
8693  * (System PM level).
8694  *
8695  * If this function is called during shutdown, it will make sure that
8696  * both the UFS device and the UFS link are powered off.
8697  *
8698  * NOTE: UFS device & link must be active before we enter this function.
8699  *
8700  * Returns 0 for success and non-zero for failure
8701  */
8702 static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8703 {
8704         int ret = 0;
8705         enum ufs_pm_level pm_lvl;
8706         enum ufs_dev_pwr_mode req_dev_pwr_mode;
8707         enum uic_link_state req_link_state;
8708
8709         hba->pm_op_in_progress = 1;
8710         if (!ufshcd_is_shutdown_pm(pm_op)) {
8711                 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
8712                          hba->rpm_lvl : hba->spm_lvl;
8713                 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
8714                 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
8715         } else {
8716                 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
8717                 req_link_state = UIC_LINK_OFF_STATE;
8718         }
8719
8720         /*
8721          * If we can't transition into any of the low power modes
8722          * just gate the clocks.
8723          */
8724         WARN_ON(hba->hibern8_on_idle.is_enabled &&
8725                 hba->hibern8_on_idle.active_reqs);
8726         ufshcd_hold_all(hba);
8727         hba->clk_gating.is_suspended = true;
8728         hba->hibern8_on_idle.is_suspended = true;
8729
8730         if (hba->clk_scaling.is_allowed) {
8731                 cancel_work_sync(&hba->clk_scaling.suspend_work);
8732                 cancel_work_sync(&hba->clk_scaling.resume_work);
8733                 ufshcd_suspend_clkscaling(hba);
8734         }
8735
8736         if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
8737                         req_link_state == UIC_LINK_ACTIVE_STATE) {
8738                 goto disable_clks;
8739         }
8740
8741         if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
8742             (req_link_state == hba->uic_link_state))
8743                 goto enable_gating;
8744
8745         /* UFS device & link must be active before we enter in this function */
8746         if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
8747                 ret = -EINVAL;
8748                 goto enable_gating;
8749         }
8750
8751         if (ufshcd_is_runtime_pm(pm_op)) {
8752                 if (ufshcd_can_autobkops_during_suspend(hba)) {
8753                         /*
8754                          * The device is idle with no requests in the queue,
8755                          * allow background operations if bkops status shows
8756                          * that performance might be impacted.
8757                          */
8758                         ret = ufshcd_urgent_bkops(hba);
8759                         if (ret)
8760                                 goto enable_gating;
8761                 } else {
8762                         /* make sure that auto bkops is disabled */
8763                         ufshcd_disable_auto_bkops(hba);
8764                 }
8765         }
8766
8767         if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
8768              ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
8769                !ufshcd_is_runtime_pm(pm_op))) {
8770                 /* ensure that bkops is disabled */
8771                 ufshcd_disable_auto_bkops(hba);
8772                 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
8773                 if (ret)
8774                         goto enable_gating;
8775         }
8776
8777         ret = ufshcd_link_state_transition(hba, req_link_state, 1);
8778         if (ret)
8779                 goto set_dev_active;
8780
8781         if (ufshcd_is_link_hibern8(hba) &&
8782             ufshcd_is_hibern8_on_idle_allowed(hba))
8783                 hba->hibern8_on_idle.state = HIBERN8_ENTERED;
8784
8785         ufshcd_vreg_set_lpm(hba);
8786
8787 disable_clks:
8788         /*
8789          * Call vendor specific suspend callback. As these callbacks may access
8790          * vendor specific host controller register space, call them before the
8791          * host clocks are turned off.
8792          */
8793         ret = ufshcd_vops_suspend(hba, pm_op);
8794         if (ret)
8795                 goto set_link_active;
8796
8797         if (!ufshcd_is_link_active(hba))
8798                 ret = ufshcd_disable_clocks(hba, false);
8799         else
8800                 /* If link is active, device ref_clk can't be switched off */
8801                 ret = ufshcd_disable_clocks_skip_ref_clk(hba, false);
8802         if (ret)
8803                 goto set_link_active;
8804
8805         if (ufshcd_is_clkgating_allowed(hba)) {
8806                 hba->clk_gating.state = CLKS_OFF;
8807                 trace_ufshcd_clk_gating(dev_name(hba->dev),
8808                                         hba->clk_gating.state);
8809         }
8810         /*
8811          * Disable the host irq as there won't be any host controller
8812          * transaction expected till resume.
8813          */
8814         ufshcd_disable_irq(hba);
8815         /* Put the host controller in low power mode if possible */
8816         ufshcd_hba_vreg_set_lpm(hba);
8817         goto out;
8818
8819 set_link_active:
8820         if (hba->clk_scaling.is_allowed)
8821                 ufshcd_resume_clkscaling(hba);
8822         ufshcd_vreg_set_hpm(hba);
8823         if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) {
8824                 ufshcd_set_link_active(hba);
8825         } else if (ufshcd_is_link_off(hba)) {
8826                 ufshcd_update_error_stats(hba, UFS_ERR_VOPS_SUSPEND);
8827                 ufshcd_host_reset_and_restore(hba);
8828         }
8829 set_dev_active:
8830         if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
8831                 ufshcd_disable_auto_bkops(hba);
8832 enable_gating:
8833         if (hba->clk_scaling.is_allowed)
8834                 ufshcd_resume_clkscaling(hba);
8835         hba->hibern8_on_idle.is_suspended = false;
8836         hba->clk_gating.is_suspended = false;
8837         ufshcd_release_all(hba);
8838 out:
8839         hba->pm_op_in_progress = 0;
8840
8841         if (ret)
8842                 ufshcd_update_error_stats(hba, UFS_ERR_SUSPEND);
8843
8844         return ret;
8845 }
8846
8847 /**
8848  * ufshcd_resume - helper function for resume operations
8849  * @hba: per adapter instance
8850  * @pm_op: runtime PM or system PM
8851  *
8852  * This function basically brings the UFS device, UniPro link and controller
8853  * to active state.
8854  *
8855  * Returns 0 for success and non-zero for failure
8856  */
8857 static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8858 {
8859         int ret;
8860         enum uic_link_state old_link_state;
8861
8862         hba->pm_op_in_progress = 1;
8863         old_link_state = hba->uic_link_state;
8864
8865         ufshcd_hba_vreg_set_hpm(hba);
8866         /* Make sure clocks are enabled before accessing controller */
8867         ret = ufshcd_enable_clocks(hba);
8868         if (ret)
8869                 goto out;
8870
8871         /* enable the host irq as host controller would be active soon */
8872         ufshcd_enable_irq(hba);
8873
8874         ret = ufshcd_vreg_set_hpm(hba);
8875         if (ret)
8876                 goto disable_irq_and_vops_clks;
8877
8878         /*
8879          * Call vendor specific resume callback. As these callbacks may access
8880          * vendor specific host controller register space, call them when the
8881          * host clocks are ON.
8882          */
8883         ret = ufshcd_vops_resume(hba, pm_op);
8884         if (ret)
8885                 goto disable_vreg;
8886
8887         if (ufshcd_is_link_hibern8(hba)) {
8888                 ret = ufshcd_uic_hibern8_exit(hba);
8889                 if (!ret) {
8890                         ufshcd_set_link_active(hba);
8891                         if (ufshcd_is_hibern8_on_idle_allowed(hba))
8892                                 hba->hibern8_on_idle.state = HIBERN8_EXITED;
8893                 } else {
8894                         goto vendor_suspend;
8895                 }
8896         } else if (ufshcd_is_link_off(hba)) {
8897                 /*
8898                  * A full initialization of the host and the device is required
8899                  * since the link was put to off during suspend.
8900                  */
8901                 ret = ufshcd_reset_and_restore(hba);
8902                 /*
8903                  * ufshcd_reset_and_restore() should have already
8904                  * set the link state as active
8905                  */
8906                 if (ret || !ufshcd_is_link_active(hba))
8907                         goto vendor_suspend;
8908                 /* mark link state as hibern8 exited */
8909                 if (ufshcd_is_hibern8_on_idle_allowed(hba))
8910                         hba->hibern8_on_idle.state = HIBERN8_EXITED;
8911         }
8912
8913         if (!ufshcd_is_ufs_dev_active(hba)) {
8914                 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
8915                 if (ret)
8916                         goto set_old_link_state;
8917         }
8918
8919         if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
8920                 ufshcd_enable_auto_bkops(hba);
8921         else
8922                 /*
8923                  * If BKOPs operations are urgently needed at this moment then
8924                  * keep auto-bkops enabled or else disable it.
8925                  */
8926                 ufshcd_urgent_bkops(hba);
8927
8928         hba->clk_gating.is_suspended = false;
8929         hba->hibern8_on_idle.is_suspended = false;
8930
8931         if (hba->clk_scaling.is_allowed)
8932                 ufshcd_resume_clkscaling(hba);
8933
8934         /* Schedule clock gating in case of no access to UFS device yet */
8935         ufshcd_release_all(hba);
8936         goto out;
8937
8938 set_old_link_state:
8939         ufshcd_link_state_transition(hba, old_link_state, 0);
8940         if (ufshcd_is_link_hibern8(hba) &&
8941             ufshcd_is_hibern8_on_idle_allowed(hba))
8942                 hba->hibern8_on_idle.state = HIBERN8_ENTERED;
8943 vendor_suspend:
8944         ufshcd_vops_suspend(hba, pm_op);
8945 disable_vreg:
8946         ufshcd_vreg_set_lpm(hba);
8947 disable_irq_and_vops_clks:
8948         ufshcd_disable_irq(hba);
8949         if (hba->clk_scaling.is_allowed)
8950                 ufshcd_suspend_clkscaling(hba);
8951         ufshcd_disable_clocks(hba, false);
8952         if (ufshcd_is_clkgating_allowed(hba))
8953                 hba->clk_gating.state = CLKS_OFF;
8954 out:
8955         hba->pm_op_in_progress = 0;
8956
8957         if (ret)
8958                 ufshcd_update_error_stats(hba, UFS_ERR_RESUME);
8959
8960         return ret;
8961 }
8962
8963 /**
8964  * ufshcd_system_suspend - system suspend routine
8965  * @hba: per adapter instance
8967  *
8968  * Check the description of ufshcd_suspend() function for more details.
8969  *
8970  * Returns 0 for success and non-zero for failure
8971  */
8972 int ufshcd_system_suspend(struct ufs_hba *hba)
8973 {
8974         int ret = 0;
8975         ktime_t start = ktime_get();
8976
8977         if (!hba || !hba->is_powered)
8978                 return 0;
8979
8980         if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
8981              hba->curr_dev_pwr_mode) &&
8982             (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
8983              hba->uic_link_state))
8984                 goto out;
8985
8986         if (pm_runtime_suspended(hba->dev)) {
8987                 /*
8988                  * UFS device and/or UFS link low power states during runtime
8989                  * suspend seem to be different from what is expected during
8990                  * system suspend. Hence runtime resume the device & link and
8991                  * let the system suspend low power states take effect.
8992                  * TODO: If resume takes a long time, we might optimize it in
8993                  * the future by not resuming everything if possible.
8994                  */
8995                 ret = ufshcd_runtime_resume(hba);
8996                 if (ret)
8997                         goto out;
8998         }
8999
9000         ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
9001 out:
9002         trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
9003                 ktime_to_us(ktime_sub(ktime_get(), start)),
9004                 hba->curr_dev_pwr_mode, hba->uic_link_state);
9005         if (!ret)
9006                 hba->is_sys_suspended = true;
9007         return ret;
9008 }
9009 EXPORT_SYMBOL(ufshcd_system_suspend);
9010
9011 /**
9012  * ufshcd_system_resume - system resume routine
9013  * @hba: per adapter instance
9014  *
9015  * Returns 0 for success and non-zero for failure
9016  */
9017
9018 int ufshcd_system_resume(struct ufs_hba *hba)
9019 {
9020         int ret = 0;
9021         ktime_t start = ktime_get();
9022
9023         if (!hba)
9024                 return -EINVAL;
9025
9026         if (!hba->is_powered || pm_runtime_suspended(hba->dev))
9027                 /*
9028                  * Let the runtime resume take care of resuming
9029                  * if runtime suspended.
9030                  */
9031                 goto out;
9032         else
9033                 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
9034 out:
9035         trace_ufshcd_system_resume(dev_name(hba->dev), ret,
9036                 ktime_to_us(ktime_sub(ktime_get(), start)),
9037                 hba->curr_dev_pwr_mode, hba->uic_link_state);
9038         return ret;
9039 }
9040 EXPORT_SYMBOL(ufshcd_system_resume);
9041
9042 /**
9043  * ufshcd_runtime_suspend - runtime suspend routine
9044  * @hba: per adapter instance
9045  *
9046  * Check the description of ufshcd_suspend() function for more details.
9047  *
9048  * Returns 0 for success and non-zero for failure
9049  */
9050 int ufshcd_runtime_suspend(struct ufs_hba *hba)
9051 {
9052         int ret = 0;
9053         ktime_t start = ktime_get();
9054
9055         if (!hba)
9056                 return -EINVAL;
9057
9058         if (!hba->is_powered)
9059                 goto out;
9060         else
9061                 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
9062 out:
9063         trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
9064                 ktime_to_us(ktime_sub(ktime_get(), start)),
9065                 hba->curr_dev_pwr_mode,
9066                 hba->uic_link_state);
9067         return ret;
9068
9069 }
9070 EXPORT_SYMBOL(ufshcd_runtime_suspend);
9071
9072 /**
9073  * ufshcd_runtime_resume - runtime resume routine
9074  * @hba: per adapter instance
9075  *
9076  * This function basically brings the UFS device, UniPro link and controller
9077  * to active state. Following operations are done in this function:
9078  *
9079  * 1. Turn on all the controller related clocks
9080  * 2. Bring the UniPro link out of Hibernate state
9081  * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
9082  *    to active state.
9083  * 4. If auto-bkops is enabled on the device, disable it.
9084  *
9085  * So the following would be the possible power state after this function
9086  * returns successfully:
9087  *      S1: UFS device in Active state with VCC rail ON
9088  *          UniPro link in Active state
9089  *          All the UFS/UniPro controller clocks are ON
9090  *
9091  * Returns 0 for success and non-zero for failure
9092  */
9093 int ufshcd_runtime_resume(struct ufs_hba *hba)
9094 {
9095         int ret = 0;
9096         ktime_t start = ktime_get();
9097
9098         if (!hba)
9099                 return -EINVAL;
9100
9101         if (!hba->is_powered)
9102                 goto out;
9103         else
9104                 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
9105 out:
9106         trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
9107                 ktime_to_us(ktime_sub(ktime_get(), start)),
9108                 hba->curr_dev_pwr_mode,
9109                 hba->uic_link_state);
9110         return ret;
9111 }
9112 EXPORT_SYMBOL(ufshcd_runtime_resume);
9113
9114 int ufshcd_runtime_idle(struct ufs_hba *hba)
9115 {
9116         return 0;
9117 }
9118 EXPORT_SYMBOL(ufshcd_runtime_idle);
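
/*
 * Illustrative sketch only (not part of this driver): bus glue code such as
 * the platform or PCI front-ends typically wires the exported helpers above
 * into its dev_pm_ops. The wrapper names below are hypothetical and assume
 * that the struct ufs_hba pointer was stored with dev_set_drvdata() at probe
 * time and that <linux/pm.h> and <linux/pm_runtime.h> are available.
 */
#if 0	/* example only, never compiled */
static int example_ufs_system_suspend(struct device *dev)
{
        return ufshcd_system_suspend(dev_get_drvdata(dev));
}

static int example_ufs_system_resume(struct device *dev)
{
        return ufshcd_system_resume(dev_get_drvdata(dev));
}

static int example_ufs_runtime_suspend(struct device *dev)
{
        return ufshcd_runtime_suspend(dev_get_drvdata(dev));
}

static int example_ufs_runtime_resume(struct device *dev)
{
        return ufshcd_runtime_resume(dev_get_drvdata(dev));
}

static int example_ufs_runtime_idle(struct device *dev)
{
        return ufshcd_runtime_idle(dev_get_drvdata(dev));
}

static const struct dev_pm_ops example_ufs_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(example_ufs_system_suspend,
                                example_ufs_system_resume)
        SET_RUNTIME_PM_OPS(example_ufs_runtime_suspend,
                           example_ufs_runtime_resume,
                           example_ufs_runtime_idle)
};
#endif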
9119
9120 static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
9121                                            struct device_attribute *attr,
9122                                            const char *buf, size_t count,
9123                                            bool rpm)
9124 {
9125         struct ufs_hba *hba = dev_get_drvdata(dev);
9126         unsigned long flags, value;
9127
9128         if (kstrtoul(buf, 0, &value))
9129                 return -EINVAL;
9130
9131         if (value >= UFS_PM_LVL_MAX)
9132                 return -EINVAL;
9133
9134         spin_lock_irqsave(hba->host->host_lock, flags);
9135         if (rpm)
9136                 hba->rpm_lvl = value;
9137         else
9138                 hba->spm_lvl = value;
9139         ufshcd_apply_pm_quirks(hba);
9140         spin_unlock_irqrestore(hba->host->host_lock, flags);
9141         return count;
9142 }
9143
9144 static ssize_t ufshcd_rpm_lvl_show(struct device *dev,
9145                 struct device_attribute *attr, char *buf)
9146 {
9147         struct ufs_hba *hba = dev_get_drvdata(dev);
9148         int curr_len;
9149         u8 lvl;
9150
9151         curr_len = snprintf(buf, PAGE_SIZE,
9152                             "\nCurrent Runtime PM level [%d] => dev_state [%s] link_state [%s]\n",
9153                             hba->rpm_lvl,
9154                             ufschd_ufs_dev_pwr_mode_to_string(
9155                                 ufs_pm_lvl_states[hba->rpm_lvl].dev_state),
9156                             ufschd_uic_link_state_to_string(
9157                                 ufs_pm_lvl_states[hba->rpm_lvl].link_state));
9158
9159         curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
9160                              "\nAll available Runtime PM levels info:\n");
9161         for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
9162                 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
9163                                      "\tRuntime PM level [%d] => dev_state [%s] link_state [%s]\n",
9164                                     lvl,
9165                                     ufschd_ufs_dev_pwr_mode_to_string(
9166                                         ufs_pm_lvl_states[lvl].dev_state),
9167                                     ufschd_uic_link_state_to_string(
9168                                         ufs_pm_lvl_states[lvl].link_state));
9169
9170         return curr_len;
9171 }
9172
9173 static ssize_t ufshcd_rpm_lvl_store(struct device *dev,
9174                 struct device_attribute *attr, const char *buf, size_t count)
9175 {
9176         return ufshcd_pm_lvl_store(dev, attr, buf, count, true);
9177 }
9178
9179 static void ufshcd_add_rpm_lvl_sysfs_nodes(struct ufs_hba *hba)
9180 {
9181         hba->rpm_lvl_attr.show = ufshcd_rpm_lvl_show;
9182         hba->rpm_lvl_attr.store = ufshcd_rpm_lvl_store;
9183         sysfs_attr_init(&hba->rpm_lvl_attr.attr);
9184         hba->rpm_lvl_attr.attr.name = "rpm_lvl";
9185         hba->rpm_lvl_attr.attr.mode = S_IRUGO | S_IWUSR;
9186         if (device_create_file(hba->dev, &hba->rpm_lvl_attr))
9187                 dev_err(hba->dev, "Failed to create sysfs for rpm_lvl\n");
9188 }
9189
9190 static ssize_t ufshcd_spm_lvl_show(struct device *dev,
9191                 struct device_attribute *attr, char *buf)
9192 {
9193         struct ufs_hba *hba = dev_get_drvdata(dev);
9194         int curr_len;
9195         u8 lvl;
9196
9197         curr_len = snprintf(buf, PAGE_SIZE,
9198                             "\nCurrent System PM level [%d] => dev_state [%s] link_state [%s]\n",
9199                             hba->spm_lvl,
9200                             ufschd_ufs_dev_pwr_mode_to_string(
9201                                 ufs_pm_lvl_states[hba->spm_lvl].dev_state),
9202                             ufschd_uic_link_state_to_string(
9203                                 ufs_pm_lvl_states[hba->spm_lvl].link_state));
9204
9205         curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
9206                              "\nAll available System PM levels info:\n");
9207         for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
9208                 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
9209                                      "\tSystem PM level [%d] => dev_state [%s] link_state [%s]\n",
9210                                     lvl,
9211                                     ufschd_ufs_dev_pwr_mode_to_string(
9212                                         ufs_pm_lvl_states[lvl].dev_state),
9213                                     ufschd_uic_link_state_to_string(
9214                                         ufs_pm_lvl_states[lvl].link_state));
9215
9216         return curr_len;
9217 }
9218
9219 static ssize_t ufshcd_spm_lvl_store(struct device *dev,
9220                 struct device_attribute *attr, const char *buf, size_t count)
9221 {
9222         return ufshcd_pm_lvl_store(dev, attr, buf, count, false);
9223 }
9224
9225 static void ufshcd_add_spm_lvl_sysfs_nodes(struct ufs_hba *hba)
9226 {
9227         hba->spm_lvl_attr.show = ufshcd_spm_lvl_show;
9228         hba->spm_lvl_attr.store = ufshcd_spm_lvl_store;
9229         sysfs_attr_init(&hba->spm_lvl_attr.attr);
9230         hba->spm_lvl_attr.attr.name = "spm_lvl";
9231         hba->spm_lvl_attr.attr.mode = S_IRUGO | S_IWUSR;
9232         if (device_create_file(hba->dev, &hba->spm_lvl_attr))
9233                 dev_err(hba->dev, "Failed to create sysfs for spm_lvl\n");
9234 }
9235
9236 static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
9237 {
9238         ufshcd_add_rpm_lvl_sysfs_nodes(hba);
9239         ufshcd_add_spm_lvl_sysfs_nodes(hba);
9240 }
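
/*
 * Usage note (illustrative, sysfs paths assumed): the rpm_lvl and spm_lvl
 * nodes created above accept an index into the ufs_pm_lvl_states[] table
 * referenced earlier, which conventionally maps as follows (check the table
 * definition in this file for the authoritative values):
 *
 *   0: dev ACTIVE,    link ACTIVE        3: dev SLEEP,     link HIBERN8
 *   1: dev ACTIVE,    link HIBERN8       4: dev POWERDOWN, link HIBERN8
 *   2: dev SLEEP,     link ACTIVE        5: dev POWERDOWN, link OFF
 *
 * e.g. from user space:
 *   cat /sys/<ufs host device>/rpm_lvl
 *   echo 5 > /sys/<ufs host device>/spm_lvl
 */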
9241
9242 static void ufshcd_shutdown_clkscaling(struct ufs_hba *hba)
9243 {
9244         bool suspend = false;
9245         unsigned long flags;
9246
9247         spin_lock_irqsave(hba->host->host_lock, flags);
9248         if (hba->clk_scaling.is_allowed) {
9249                 hba->clk_scaling.is_allowed = false;
9250                 suspend = true;
9251         }
9252         spin_unlock_irqrestore(hba->host->host_lock, flags);
9253
9254         /*
9255          * Scaling may have been scheduled earlier, hence make sure it
9256          * doesn't race with shutdown.
9257          */
9258         if (ufshcd_is_clkscaling_supported(hba)) {
9259                 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
9260                 cancel_work_sync(&hba->clk_scaling.suspend_work);
9261                 cancel_work_sync(&hba->clk_scaling.resume_work);
9262                 if (suspend)
9263                         ufshcd_suspend_clkscaling(hba);
9264         }
9265
9266         /* Unregister so that devfreq_monitor can't race with shutdown */
9267         if (hba->devfreq)
9268                 devfreq_remove_device(hba->devfreq);
9269 }
9270
9271 /**
9272  * ufshcd_shutdown - shutdown routine
9273  * @hba: per adapter instance
9274  *
9275  * This function would power off both UFS device and UFS link.
9276  *
9277  * Returns 0 always to allow force shutdown even in case of errors.
9278  */
9279 int ufshcd_shutdown(struct ufs_hba *hba)
9280 {
9281         int ret = 0;
9282
9283         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
9284                 goto out;
9285
9286         pm_runtime_get_sync(hba->dev);
9287         ufshcd_hold_all(hba);
9288         ufshcd_mark_shutdown_ongoing(hba);
9289         ufshcd_shutdown_clkscaling(hba);
9290         /*
9291          * (1) Acquire the lock to stop any more requests
9292          * (2) Wait for all issued requests to complete
9293          */
9294         ufshcd_get_write_lock(hba);
9295         ufshcd_scsi_block_requests(hba);
9296         ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
9297         if (ret)
9298                 dev_err(hba->dev, "%s: waiting for DB clear: failed: %d\n",
9299                         __func__, ret);
9300         /* Requests may have errored out above, let it be handled */
9301         flush_work(&hba->eh_work);
9302         /* reqs issued from contexts other than shutdown will fail from now */
9303         ufshcd_scsi_unblock_requests(hba);
9304         ufshcd_release_all(hba);
9305         ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
9306 out:
9307         if (ret)
9308                 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
9309         /* allow force shutdown even in case of errors */
9310         return 0;
9311 }
9312 EXPORT_SYMBOL(ufshcd_shutdown);
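
/*
 * Illustrative sketch only: a glue driver's .shutdown handler is expected to
 * funnel into ufshcd_shutdown(). The wrapper below is hypothetical and
 * assumes the hba pointer was stored as platform drvdata at probe time and
 * that <linux/platform_device.h> is available.
 */
#if 0	/* example only, never compiled */
static void example_ufs_pltfrm_shutdown(struct platform_device *pdev)
{
        ufshcd_shutdown((struct ufs_hba *)platform_get_drvdata(pdev));
}
#endif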
9313
9314 /*
9315  * Values permitted 0, 1, 2.
9316  * 0 -> Disable IO latency histograms (default)
9317  * 1 -> Enable IO latency histograms
9318  * 2 -> Zero out IO latency histograms
9319  */
9320 static ssize_t
9321 latency_hist_store(struct device *dev, struct device_attribute *attr,
9322                    const char *buf, size_t count)
9323 {
9324         struct ufs_hba *hba = dev_get_drvdata(dev);
9325         long value;
9326
9327         if (kstrtol(buf, 0, &value))
9328                 return -EINVAL;
9329         if (value == BLK_IO_LAT_HIST_ZERO) {
9330                 memset(&hba->io_lat_read, 0, sizeof(hba->io_lat_read));
9331                 memset(&hba->io_lat_write, 0, sizeof(hba->io_lat_write));
9332         } else if (value == BLK_IO_LAT_HIST_ENABLE ||
9333                  value == BLK_IO_LAT_HIST_DISABLE)
9334                 hba->latency_hist_enabled = value;
9335         return count;
9336 }
9337
9338 ssize_t
9339 latency_hist_show(struct device *dev, struct device_attribute *attr,
9340                   char *buf)
9341 {
9342         struct ufs_hba *hba = dev_get_drvdata(dev);
9343         size_t written_bytes;
9344
9345         written_bytes = blk_latency_hist_show("Read", &hba->io_lat_read,
9346                         buf, PAGE_SIZE);
9347         written_bytes += blk_latency_hist_show("Write", &hba->io_lat_write,
9348                         buf + written_bytes, PAGE_SIZE - written_bytes);
9349
9350         return written_bytes;
9351 }
9352
9353 static DEVICE_ATTR(latency_hist, S_IRUGO | S_IWUSR,
9354                    latency_hist_show, latency_hist_store);
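
/*
 * Typical usage of the latency_hist node from user space (sysfs path
 * assumed; see the store handler above for the accepted values):
 *
 *   echo 1 > /sys/<ufs host device>/latency_hist    # enable collection
 *   cat /sys/<ufs host device>/latency_hist         # dump read/write hist
 *   echo 2 > /sys/<ufs host device>/latency_hist    # zero the histograms
 *   echo 0 > /sys/<ufs host device>/latency_hist    # disable collection
 */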
9355
9356 static void
9357 ufshcd_init_latency_hist(struct ufs_hba *hba)
9358 {
9359         if (device_create_file(hba->dev, &dev_attr_latency_hist))
9360                 dev_err(hba->dev, "Failed to create latency_hist sysfs entry\n");
9361 }
9362
9363 static void
9364 ufshcd_exit_latency_hist(struct ufs_hba *hba)
9365 {
9366         device_remove_file(hba->dev, &dev_attr_latency_hist);
9367 }
9368
9369 /**
9370  * ufshcd_remove - de-allocate SCSI host and host memory space
9371  *              data structure memory
9372  * @hba: per adapter instance
9373  */
9374 void ufshcd_remove(struct ufs_hba *hba)
9375 {
9376         scsi_remove_host(hba->host);
9377         /* disable interrupts */
9378         ufshcd_disable_intr(hba, hba->intr_mask);
9379         ufshcd_hba_stop(hba, true);
9380
9381         ufshcd_exit_clk_gating(hba);
9382         ufshcd_exit_hibern8_on_idle(hba);
9383         if (ufshcd_is_clkscaling_supported(hba)) {
9384                 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
9385                 ufshcd_exit_latency_hist(hba);
9386                 devfreq_remove_device(hba->devfreq);
9387         }
9388         ufshcd_hba_exit(hba);
9389         ufsdbg_remove_debugfs(hba);
9390 }
9391 EXPORT_SYMBOL_GPL(ufshcd_remove);
9392
9393 /**
9394  * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
9395  * @hba: pointer to Host Bus Adapter (HBA)
9396  */
9397 void ufshcd_dealloc_host(struct ufs_hba *hba)
9398 {
9399         scsi_host_put(hba->host);
9400 }
9401 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
9402
9403 /**
9404  * ufshcd_set_dma_mask - Set dma mask based on the controller
9405  *                       addressing capability
9406  * @hba: per adapter instance
9407  *
9408  * Returns 0 for success, non-zero for failure
9409  */
9410 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
9411 {
9412         if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
9413                 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
9414                         return 0;
9415         }
9416         return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
9417 }
9418
9419 /**
9420  * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
9421  * @dev: pointer to device handle
9422  * @hba_handle: driver private handle
9423  * Returns 0 on success, non-zero value on failure
9424  */
9425 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
9426 {
9427         struct Scsi_Host *host;
9428         struct ufs_hba *hba;
9429         int err = 0;
9430
9431         if (!dev) {
9432                 dev_err(dev,
9433                 "Invalid memory reference for dev is NULL\n");
9434                 err = -ENODEV;
9435                 goto out_error;
9436         }
9437
9438         host = scsi_host_alloc(&ufshcd_driver_template,
9439                                 sizeof(struct ufs_hba));
9440         if (!host) {
9441                 dev_err(dev, "scsi_host_alloc failed\n");
9442                 err = -ENOMEM;
9443                 goto out_error;
9444         }
9445         hba = shost_priv(host);
9446         hba->host = host;
9447         hba->dev = dev;
9448         *hba_handle = hba;
9449
9450 out_error:
9451         return err;
9452 }
9453 EXPORT_SYMBOL(ufshcd_alloc_host);
9454
9455 /**
9456  * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
9457  * @hba: per adapter instance
9458  * @scale_up: True if scaling up and false if scaling down
9459  *
9460  * Returns true if scaling is required, false otherwise.
9461  */
9462 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
9463                                                bool scale_up)
9464 {
9465         struct ufs_clk_info *clki;
9466         struct list_head *head = &hba->clk_list_head;
9467
9468         if (!head || list_empty(head))
9469                 return false;
9470
9471         list_for_each_entry(clki, head, list) {
9472                 if (!IS_ERR_OR_NULL(clki->clk)) {
9473                         if (scale_up && clki->max_freq) {
9474                                 if (clki->curr_freq == clki->max_freq)
9475                                         continue;
9476                                 return true;
9477                         } else if (!scale_up && clki->min_freq) {
9478                                 if (clki->curr_freq == clki->min_freq)
9479                                         continue;
9480                                 return true;
9481                         }
9482                 }
9483         }
9484
9485         return false;
9486 }
9487
9488 /**
9489  * ufshcd_scale_gear - scale up/down UFS gear
9490  * @hba: per adapter instance
9491  * @scale_up: True for scaling up gear and false for scaling down
9492  *
9493  * Returns 0 for success,
9494  * Returns -EBUSY if scaling can't happen at this time
9495  * Returns non-zero for any other errors
9496  */
9497 static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
9498 {
9499         int ret = 0;
9500         struct ufs_pa_layer_attr new_pwr_info;
9501         u32 scale_down_gear = ufshcd_vops_get_scale_down_gear(hba);
9502
9503         BUG_ON(!hba->clk_scaling.saved_pwr_info.is_valid);
9504
9505         if (scale_up) {
9506                 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
9507                        sizeof(struct ufs_pa_layer_attr));
9508                 /*
9509                  * Some UFS devices may stop responding after switching from
9510                  * HS-G1 to HS-G3. It is also found that these devices work
9511                  * fine if we do a 2-step switch: HS-G1 to HS-G2 followed by
9512                  * HS-G2 to HS-G3. If the UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH
9513                  * quirk is enabled for such devices, this 2-step gear switch
9514                  * workaround will be applied.
9515                  */
9516                 if ((hba->dev_quirks & UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH)
9517                     && (hba->pwr_info.gear_tx == UFS_HS_G1)
9518                     && (new_pwr_info.gear_tx == UFS_HS_G3)) {
9519                         /* scale up to G2 first */
9520                         new_pwr_info.gear_tx = UFS_HS_G2;
9521                         new_pwr_info.gear_rx = UFS_HS_G2;
9522                         ret = ufshcd_change_power_mode(hba, &new_pwr_info);
9523                         if (ret)
9524                                 goto out;
9525
9526                         /* scale up to G3 now */
9527                         new_pwr_info.gear_tx = UFS_HS_G3;
9528                         new_pwr_info.gear_rx = UFS_HS_G3;
9529                         /* now, fall through to set the HS-G3 */
9530                 }
9531                 ret = ufshcd_change_power_mode(hba, &new_pwr_info);
9532                 if (ret)
9533                         goto out;
9534         } else {
9535                 memcpy(&new_pwr_info, &hba->pwr_info,
9536                        sizeof(struct ufs_pa_layer_attr));
9537
9538                 if (hba->pwr_info.gear_tx > scale_down_gear
9539                     || hba->pwr_info.gear_rx > scale_down_gear) {
9540                         /* save the current power mode */
9541                         memcpy(&hba->clk_scaling.saved_pwr_info.info,
9542                                 &hba->pwr_info,
9543                                 sizeof(struct ufs_pa_layer_attr));
9544
9545                         /* scale down gear */
9546                         new_pwr_info.gear_tx = scale_down_gear;
9547                         new_pwr_info.gear_rx = scale_down_gear;
9548                         if (!(hba->dev_quirks & UFS_DEVICE_NO_FASTAUTO)) {
9549                                 new_pwr_info.pwr_tx = FASTAUTO_MODE;
9550                                 new_pwr_info.pwr_rx = FASTAUTO_MODE;
9551                         }
9552                 }
9553                 ret = ufshcd_change_power_mode(hba, &new_pwr_info);
9554         }
9555
9556 out:
9557         if (ret)
9558                 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d), scale_up = %d",
9559                         __func__, ret,
9560                         hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
9561                         new_pwr_info.gear_tx, new_pwr_info.gear_rx,
9562                         scale_up);
9563
9564         return ret;
9565 }
9566
9567 static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
9568 {
9569         #define DOORBELL_CLR_TOUT_US            (1000 * 1000) /* 1 sec */
9570         int ret = 0;
9571         /*
9572          * make sure that there are no outstanding requests when
9573          * clock scaling is in progress
9574          */
9575         ufshcd_scsi_block_requests(hba);
9576         down_write(&hba->lock);
9577         if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
9578                 ret = -EBUSY;
9579                 up_write(&hba->lock);
9580                 ufshcd_scsi_unblock_requests(hba);
9581         }
9582
9583         return ret;
9584 }
9585
9586 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
9587 {
9588         up_write(&hba->lock);
9589         ufshcd_scsi_unblock_requests(hba);
9590 }
9591
9592 /**
9593  * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
9594  * @hba: per adapter instance
9595  * @scale_up: True for scaling up and false for scaling down
9596  *
9597  * Returns 0 for success,
9598  * Returns -EBUSY if scaling can't happen at this time
9599  * Returns non-zero for any other errors
9600  */
9601 static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
9602 {
9603         int ret = 0;
9604
9605         /* let's not get into low power until clock scaling is completed */
9606         hba->ufs_stats.clk_hold.ctx = CLK_SCALE_WORK;
9607         ufshcd_hold_all(hba);
9608
9609         ret = ufshcd_clock_scaling_prepare(hba);
9610         if (ret)
9611                 goto out;
9612
9613         /* scale down the gear before scaling down clocks */
9614         if (!scale_up) {
9615                 ret = ufshcd_scale_gear(hba, false);
9616                 if (ret)
9617                         goto clk_scaling_unprepare;
9618         }
9619
9620         /*
9621          * If auto hibern8 is supported then put the link in
9622          * hibern8 manually; this is to avoid auto hibern8
9623          * racing with the clock frequency scaling sequence.
9624          */
9625         if (ufshcd_is_auto_hibern8_supported(hba)) {
9626                 ret = ufshcd_uic_hibern8_enter(hba);
9627                 if (ret)
9628                         /* link will be in a bad state, no need to scale up the gear */
9629                         return ret;
9630         }
9631
9632         ret = ufshcd_scale_clks(hba, scale_up);
9633         if (ret)
9634                 goto scale_up_gear;
9635
9636         if (ufshcd_is_auto_hibern8_supported(hba)) {
9637                 ret = ufshcd_uic_hibern8_exit(hba);
9638                 if (ret)
9639                         /* link will be in a bad state, no need to scale up the gear */
9640                         return ret;
9641         }
9642
9643         /* scale up the gear after scaling up clocks */
9644         if (scale_up) {
9645                 ret = ufshcd_scale_gear(hba, true);
9646                 if (ret) {
9647                         ufshcd_scale_clks(hba, false);
9648                         goto clk_scaling_unprepare;
9649                 }
9650         }
9651
9652         if (!ret) {
9653                 hba->clk_scaling.is_scaled_up = scale_up;
9654                 if (scale_up)
9655                         hba->clk_gating.delay_ms =
9656                                 hba->clk_gating.delay_ms_perf;
9657                 else
9658                         hba->clk_gating.delay_ms =
9659                                 hba->clk_gating.delay_ms_pwr_save;
9660         }
9661
9662         goto clk_scaling_unprepare;
9663
9664 scale_up_gear:
9665         if (!scale_up)
9666                 ufshcd_scale_gear(hba, true);
9667 clk_scaling_unprepare:
9668         ufshcd_clock_scaling_unprepare(hba);
9669 out:
9670         hba->ufs_stats.clk_rel.ctx = CLK_SCALE_WORK;
9671         ufshcd_release_all(hba);
9672         return ret;
9673 }
9674
9675 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
9676 {
9677         unsigned long flags;
9678
9679         devfreq_suspend_device(hba->devfreq);
9680         spin_lock_irqsave(hba->host->host_lock, flags);
9681         hba->clk_scaling.window_start_t = 0;
9682         spin_unlock_irqrestore(hba->host->host_lock, flags);
9683 }
9684
9685 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
9686 {
9687         unsigned long flags;
9688         bool suspend = false;
9689
9690         if (!ufshcd_is_clkscaling_supported(hba))
9691                 return;
9692
9693         spin_lock_irqsave(hba->host->host_lock, flags);
9694         if (!hba->clk_scaling.is_suspended) {
9695                 suspend = true;
9696                 hba->clk_scaling.is_suspended = true;
9697         }
9698         spin_unlock_irqrestore(hba->host->host_lock, flags);
9699
9700         if (suspend)
9701                 __ufshcd_suspend_clkscaling(hba);
9702 }
9703
9704 static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
9705 {
9706         unsigned long flags;
9707         bool resume = false;
9708
9709         if (!ufshcd_is_clkscaling_supported(hba))
9710                 return;
9711
9712         spin_lock_irqsave(hba->host->host_lock, flags);
9713         if (hba->clk_scaling.is_suspended) {
9714                 resume = true;
9715                 hba->clk_scaling.is_suspended = false;
9716         }
9717         spin_unlock_irqrestore(hba->host->host_lock, flags);
9718
9719         if (resume)
9720                 devfreq_resume_device(hba->devfreq);
9721 }
9722
9723 static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
9724                 struct device_attribute *attr, char *buf)
9725 {
9726         struct ufs_hba *hba = dev_get_drvdata(dev);
9727
9728         return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
9729 }
9730
9731 static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
9732                 struct device_attribute *attr, const char *buf, size_t count)
9733 {
9734         struct ufs_hba *hba = dev_get_drvdata(dev);
9735         u32 value;
9736         int err;
9737
9738         if (kstrtou32(buf, 0, &value))
9739                 return -EINVAL;
9740
9741         value = !!value;
9742         if (value == hba->clk_scaling.is_allowed)
9743                 goto out;
9744
9745         pm_runtime_get_sync(hba->dev);
9746         ufshcd_hold(hba, false);
9747
9748         cancel_work_sync(&hba->clk_scaling.suspend_work);
9749         cancel_work_sync(&hba->clk_scaling.resume_work);
9750
9751         hba->clk_scaling.is_allowed = value;
9752
9753         if (value) {
9754                 ufshcd_resume_clkscaling(hba);
9755         } else {
9756                 ufshcd_suspend_clkscaling(hba);
9757                 err = ufshcd_devfreq_scale(hba, true);
9758                 if (err)
9759                         dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
9760                                         __func__, err);
9761         }
9762
9763         ufshcd_release(hba, false);
9764         pm_runtime_put_sync(hba->dev);
9765 out:
9766         return count;
9767 }
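
/*
 * Usage note (illustrative, sysfs path assumed): writing to the
 * clkscale_enable node created by ufshcd_clkscaling_init_sysfs() below
 * toggles devfreq-driven scaling at run time:
 *
 *   echo 0 > /sys/<ufs host device>/clkscale_enable   # disable scaling
 *   echo 1 > /sys/<ufs host device>/clkscale_enable   # re-enable scaling
 *
 * Note that disabling deliberately scales clocks and gear back up first
 * (the ufshcd_devfreq_scale(hba, true) call above) so that performance is
 * not left pinned at the low operating point.
 */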
9768
9769 static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
9770 {
9771         struct ufs_hba *hba = container_of(work, struct ufs_hba,
9772                                            clk_scaling.suspend_work);
9773         unsigned long irq_flags;
9774
9775         spin_lock_irqsave(hba->host->host_lock, irq_flags);
9776         if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
9777                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9778                 return;
9779         }
9780         hba->clk_scaling.is_suspended = true;
9781         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9782
9783         __ufshcd_suspend_clkscaling(hba);
9784 }
9785
9786 static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
9787 {
9788         struct ufs_hba *hba = container_of(work, struct ufs_hba,
9789                                            clk_scaling.resume_work);
9790         unsigned long irq_flags;
9791
9792         spin_lock_irqsave(hba->host->host_lock, irq_flags);
9793         if (!hba->clk_scaling.is_suspended) {
9794                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9795                 return;
9796         }
9797         hba->clk_scaling.is_suspended = false;
9798         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9799
9800         devfreq_resume_device(hba->devfreq);
9801 }
9802
9803 static int ufshcd_devfreq_target(struct device *dev,
9804                                 unsigned long *freq, u32 flags)
9805 {
9806         int ret = 0;
9807         struct ufs_hba *hba = dev_get_drvdata(dev);
9808         unsigned long irq_flags;
9809         ktime_t start;
9810         bool scale_up, sched_clk_scaling_suspend_work = false;
9811
9812         if (!ufshcd_is_clkscaling_supported(hba))
9813                 return -EINVAL;
9814
9815         if ((*freq > 0) && (*freq < UINT_MAX)) {
9816                 dev_err(hba->dev, "%s: invalid freq = %lu\n", __func__, *freq);
9817                 return -EINVAL;
9818         }
9819
9820         spin_lock_irqsave(hba->host->host_lock, irq_flags);
9821         if (ufshcd_eh_in_progress(hba)) {
9822                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9823                 return 0;
9824         }
9825
9826         if (!hba->clk_scaling.active_reqs)
9827                 sched_clk_scaling_suspend_work = true;
9828
9829         scale_up = (*freq == UINT_MAX) ? true : false;
9830         if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
9831                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9832                 ret = 0;
9833                 goto out; /* no state change required */
9834         }
9835         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9836
9837         start = ktime_get();
9838         ret = ufshcd_devfreq_scale(hba, scale_up);
9839         trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
9840                 (scale_up ? "up" : "down"),
9841                 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
9842
9843 out:
9844         if (sched_clk_scaling_suspend_work)
9845                 queue_work(hba->clk_scaling.workq,
9846                            &hba->clk_scaling.suspend_work);
9847
9848         return ret;
9849 }
9850
9851 static int ufshcd_devfreq_get_dev_status(struct device *dev,
9852                 struct devfreq_dev_status *stat)
9853 {
9854         struct ufs_hba *hba = dev_get_drvdata(dev);
9855         struct ufs_clk_scaling *scaling = &hba->clk_scaling;
9856         unsigned long flags;
9857
9858         if (!ufshcd_is_clkscaling_supported(hba))
9859                 return -EINVAL;
9860
9861         memset(stat, 0, sizeof(*stat));
9862
9863         spin_lock_irqsave(hba->host->host_lock, flags);
9864         if (!scaling->window_start_t)
9865                 goto start_window;
9866
9867         if (scaling->is_busy_started)
9868                 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
9869                                         scaling->busy_start_t));
9870
9871         stat->total_time = jiffies_to_usecs((long)jiffies -
9872                                 (long)scaling->window_start_t);
9873         stat->busy_time = scaling->tot_busy_t;
9874 start_window:
9875         scaling->window_start_t = jiffies;
9876         scaling->tot_busy_t = 0;
9877
9878         if (hba->outstanding_reqs) {
9879                 scaling->busy_start_t = ktime_get();
9880                 scaling->is_busy_started = true;
9881         } else {
9882                 scaling->busy_start_t = ktime_set(0, 0);
9883                 scaling->is_busy_started = false;
9884         }
9885         spin_unlock_irqrestore(hba->host->host_lock, flags);
9886         return 0;
9887 }
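
/*
 * Illustrative sketch only: the two callbacks above are the hooks a devfreq
 * profile for this host would plug into devfreq_add_device(). The profile
 * below is a hypothetical example (the polling interval is assumed); it is
 * not the profile this driver actually registers elsewhere.
 */
#if 0	/* example only, never compiled */
static struct devfreq_dev_profile example_ufs_devfreq_profile = {
        .polling_ms     = 100,  /* assumed polling interval in ms */
        .target         = ufshcd_devfreq_target,
        .get_dev_status = ufshcd_devfreq_get_dev_status,
};

static void example_ufs_devfreq_setup(struct ufs_hba *hba)
{
        /* "simple_ondemand" scales based on busy_time vs total_time;
         * IS_ERR() check on the return value omitted for brevity.
         */
        hba->devfreq = devfreq_add_device(hba->dev,
                                          &example_ufs_devfreq_profile,
                                          "simple_ondemand", NULL);
}
#endif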
9888
9889 static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
9890 {
9891         hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
9892         hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
9893         sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
9894         hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
9895         hba->clk_scaling.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
9896         if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
9897                 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
9898 }
9899
9900 static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
9901 {
9902         struct device *dev = hba->dev;
9903         int ret;
9904
9905         ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
9906                 &hba->lanes_per_direction);
9907         if (ret) {
9908                 dev_dbg(hba->dev,
9909                         "%s: failed to read lanes-per-direction, ret=%d\n",
9910                         __func__, ret);
9911                 hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
9912         }
9913 }
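
/*
 * Illustrative device-tree fragment, not part of this file: how the optional
 * "lanes-per-direction" property read above is typically supplied. The node
 * name, unit address, compatible string and value are examples only:
 *
 *	ufshc@da4000 {
 *		compatible = "jedec,ufs-2.0";
 *		lanes-per-direction = <2>;
 *		...
 *	};
 */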
9914 /**
9915  * ufshcd_init - Driver initialization routine
9916  * @hba: per-adapter instance
9917  * @mmio_base: base register address
9918  * @irq: Interrupt line of device
9919  * Returns 0 on success, non-zero value on failure
9920  */
9921 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
9922 {
9923         int err;
9924         struct Scsi_Host *host = hba->host;
9925         struct device *dev = hba->dev;
9926
9927         if (!mmio_base) {
9928                 dev_err(hba->dev,
9929                         "Invalid memory reference: mmio_base is NULL\n");
9930                 err = -ENODEV;
9931                 goto out_error;
9932         }
9933
9934         hba->mmio_base = mmio_base;
9935         hba->irq = irq;
9936
9937         ufshcd_init_lanes_per_dir(hba);
9938
9939         err = ufshcd_hba_init(hba);
9940         if (err)
9941                 goto out_error;
9942
9943         /* Read capabilities registers */
9944         ufshcd_hba_capabilities(hba);
9945
9946         /* Get UFS version supported by the controller */
9947         hba->ufs_version = ufshcd_get_ufs_version(hba);
9948
9949         /* print error message if ufs_version is not valid */
9950         if ((hba->ufs_version != UFSHCI_VERSION_10) &&
9951             (hba->ufs_version != UFSHCI_VERSION_11) &&
9952             (hba->ufs_version != UFSHCI_VERSION_20) &&
9953             (hba->ufs_version != UFSHCI_VERSION_21))
9954                 dev_err(hba->dev, "invalid UFS version 0x%x\n",
9955                         hba->ufs_version);
9956
9957         /* Get Interrupt bit mask per version */
9958         hba->intr_mask = ufshcd_get_intr_mask(hba);
9959
9960         /* Enable debug prints */
9961         hba->ufshcd_dbg_print = DEFAULT_UFSHCD_DBG_PRINT_EN;
9962
9963         err = ufshcd_set_dma_mask(hba);
9964         if (err) {
9965                 dev_err(hba->dev, "set dma mask failed\n");
9966                 goto out_disable;
9967         }
9968
9969         /* Allocate memory for host memory space */
9970         err = ufshcd_memory_alloc(hba);
9971         if (err) {
9972                 dev_err(hba->dev, "Memory allocation failed\n");
9973                 goto out_disable;
9974         }
9975
9976         /* Configure LRB */
9977         ufshcd_host_memory_configure(hba);
9978
9979         host->can_queue = hba->nutrs;
9980         host->cmd_per_lun = hba->nutrs;
9981         host->max_id = UFSHCD_MAX_ID;
9982         host->max_lun = UFS_MAX_LUNS;
9983         host->max_channel = UFSHCD_MAX_CHANNEL;
9984         host->unique_id = host->host_no;
9985         host->max_cmd_len = MAX_CDB_SIZE;
9986         host->set_dbd_for_caching = 1;
9987
9988         hba->max_pwr_info.is_valid = false;
9989
9990         /* Initialize wait queue for task management */
9991         init_waitqueue_head(&hba->tm_wq);
9992         init_waitqueue_head(&hba->tm_tag_wq);
9993
9994         /* Initialize work queues */
9995         INIT_WORK(&hba->eh_work, ufshcd_err_handler);
9996         INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
9997         INIT_WORK(&hba->rls_work, ufshcd_rls_handler);
9998
9999         /* Initialize UIC command mutex */
10000         mutex_init(&hba->uic_cmd_mutex);
10001
10002         /* Initialize mutex for device management commands */
10003         mutex_init(&hba->dev_cmd.lock);
10004
10005         init_rwsem(&hba->lock);
10006
10007         /* Initialize device management tag acquire wait queue */
10008         init_waitqueue_head(&hba->dev_cmd.tag_wq);
10009
10010         ufshcd_init_clk_gating(hba);
10011         ufshcd_init_hibern8_on_idle(hba);
10012
10013         /*
10014          * In order to avoid any spurious interrupt immediately after
10015          * registering UFS controller interrupt handler, clear any pending UFS
10016          * interrupt status and disable all the UFS interrupts.
10017          */
10018         ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
10019                       REG_INTERRUPT_STATUS);
10020         ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
10021         /*
10022          * Make sure that UFS interrupts are disabled and any pending interrupt
10023          * status is cleared before registering UFS interrupt handler.
10024          */
10025         mb();
10026
10027         /* IRQ registration */
10028         err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
10029         if (err) {
10030                 dev_err(hba->dev, "request irq failed\n");
10031                 goto exit_gating;
10032         } else {
10033                 hba->is_irq_enabled = true;
10034         }
10035
10036         err = scsi_add_host(host, hba->dev);
10037         if (err) {
10038                 dev_err(hba->dev, "scsi_add_host failed\n");
10039                 goto exit_gating;
10040         }
10041
10042         /* Reset controller to power on reset (POR) state */
10043         ufshcd_vops_full_reset(hba);
10044
10045         /* reset connected UFS device */
10046         err = ufshcd_reset_device(hba);
10047         if (err)
10048                 dev_warn(hba->dev, "%s: device reset failed. err %d\n",
10049                          __func__, err);
10050
10051         /* Host controller enable */
10052         err = ufshcd_hba_enable(hba);
10053         if (err) {
10054                 dev_err(hba->dev, "Host controller enable failed\n");
10055                 ufshcd_print_host_regs(hba);
10056                 ufshcd_print_host_state(hba);
10057                 goto out_remove_scsi_host;
10058         }
10059
10060         if (ufshcd_is_clkscaling_supported(hba)) {
10061                 char wq_name[sizeof("ufs_clkscaling_00")];
10062
10063                 INIT_WORK(&hba->clk_scaling.suspend_work,
10064                           ufshcd_clk_scaling_suspend_work);
10065                 INIT_WORK(&hba->clk_scaling.resume_work,
10066                           ufshcd_clk_scaling_resume_work);
10067
10068                 snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clkscaling_%d",
10069                          host->host_no);
10070                 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
10071
10072                 ufshcd_clkscaling_init_sysfs(hba);
10073         }
10074
10075         /*
10076          * If rpm_lvl and spm_lvl are not already set to valid levels,
10077          * set the default power management level for UFS runtime and system
10078          * suspend. Default power saving mode selected is keeping UFS link in
10079          * Hibern8 state and UFS device in sleep.
10080          */
10081         if (!ufshcd_is_valid_pm_lvl(hba->rpm_lvl))
10082                 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
10083                                                         UFS_SLEEP_PWR_MODE,
10084                                                         UIC_LINK_HIBERN8_STATE);
10085         if (!ufshcd_is_valid_pm_lvl(hba->spm_lvl))
10086                 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
10087                                                         UFS_SLEEP_PWR_MODE,
10088                                                         UIC_LINK_HIBERN8_STATE);
10089
10090         /* Hold auto suspend until async scan completes */
10091         pm_runtime_get_sync(dev);
10092
10093         ufshcd_init_latency_hist(hba);
10094
10095         /*
10096          * We assume that the boot stage has not left the device in
10097          * sleep/power-down state.
10098          * This assumption helps avoid doing link startup twice during
10099          * ufshcd_probe_hba().
10100          */
10101         ufshcd_set_ufs_dev_active(hba);
10102
10103         ufshcd_cmd_log_init(hba);
10104
10105         async_schedule(ufshcd_async_scan, hba);
10106
10107         ufsdbg_add_debugfs(hba);
10108
10109         ufshcd_add_sysfs_nodes(hba);
10110
10111         return 0;
10112
10113 out_remove_scsi_host:
10114         scsi_remove_host(hba->host);
10115 exit_gating:
10116         ufshcd_exit_clk_gating(hba);
10117         ufshcd_exit_latency_hist(hba);
10118 out_disable:
10119         hba->is_irq_enabled = false;
10120         ufshcd_hba_exit(hba);
10121 out_error:
10122         return err;
10123 }
10124 EXPORT_SYMBOL_GPL(ufshcd_init);
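
/*
 * Illustrative sketch, not part of the original file: how a platform glue
 * driver (e.g. ufshcd-pltfrm / ufs-qcom) might hand control to ufshcd_init().
 * Error handling is simplified, the function is not wired into any build,
 * and it assumes <linux/platform_device.h>; the "example_" name is
 * hypothetical.
 */
static int example_ufs_probe(struct platform_device *pdev)
{
        struct ufs_hba *hba;
        void __iomem *mmio_base;
        struct resource *res;
        int irq, err;

        /* Map the UFS host controller register space */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        mmio_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(mmio_base))
                return PTR_ERR(mmio_base);

        /* Fetch the controller interrupt line */
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        /* Allocate the SCSI host and per-adapter instance */
        err = ufshcd_alloc_host(&pdev->dev, &hba);
        if (err)
                return err;

        /* Vendor ops, clocks and regulators would normally be set up here */
        return ufshcd_init(hba, mmio_base, irq);
}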
10125
10126 MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
10127 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
10128 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
10129 MODULE_LICENSE("GPL");
10130 MODULE_VERSION(UFSHCD_DRIVER_VERSION);