1 /*
2  * Universal Flash Storage Host controller driver Core
3  *
4  * This code is based on drivers/scsi/ufs/ufshcd.c
5  * Copyright (C) 2011-2013 Samsung India Software Operations
6  * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
7  *
8  * Authors:
9  *      Santosh Yaraganavi <santosh.sy@samsung.com>
10  *      Vinayak Holikatti <h.vinayak@samsung.com>
11  *
12  * This program is free software; you can redistribute it and/or
13  * modify it under the terms of the GNU General Public License
14  * as published by the Free Software Foundation; either version 2
15  * of the License, or (at your option) any later version.
16  * See the COPYING file in the top-level directory or visit
17  * <http://www.gnu.org/licenses/gpl-2.0.html>
18  *
19  * This program is distributed in the hope that it will be useful,
20  * but WITHOUT ANY WARRANTY; without even the implied warranty of
21  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
22  * GNU General Public License for more details.
23  *
24  * This program is provided "AS IS" and "WITH ALL FAULTS" and
25  * without warranty of any kind. You are solely responsible for
26  * determining the appropriateness of using and distributing
27  * the program and assume all risks associated with your exercise
28  * of rights with respect to the program, including but not limited
29  * to infringement of third party rights, the risks and costs of
30  * program errors, damage to or loss of data, programs or equipment,
31  * and unavailability or interruption of operations. Under no
32  * circumstances will the contributor of this Program be liable for
33  * any damages of any kind arising from your use or distribution of
34  * this program.
35  *
36  * The Linux Foundation chooses to take subject only to the GPLv2
37  * license terms, and distributes only under these terms.
38  */
39
40 #include <linux/async.h>
41 #include <scsi/ufs/ioctl.h>
42 #include <linux/devfreq.h>
43 #include <linux/nls.h>
44 #include <linux/of.h>
45 #include <linux/blkdev.h>
46 #include <asm/unaligned.h>
47
48 #include "ufshcd.h"
49 #include "ufshci.h"
50 #include "ufs_quirks.h"
51 #include "ufs-debugfs.h"
52 #include "ufs-qcom.h"
53
54 #define CREATE_TRACE_POINTS
55 #include <trace/events/ufs.h>
56
57 #ifdef CONFIG_DEBUG_FS
58
59 static int ufshcd_tag_req_type(struct request *rq)
60 {
61         int rq_type = TS_WRITE;
62
63         if (!rq || !(rq->cmd_type & REQ_TYPE_FS))
64                 rq_type = TS_NOT_SUPPORTED;
65         else if (rq->cmd_flags & REQ_FLUSH)
66                 rq_type = TS_FLUSH;
67         else if (rq_data_dir(rq) == READ)
68                 rq_type = (rq->cmd_flags & REQ_URGENT) ?
69                         TS_URGENT_READ : TS_READ;
70         else if (rq->cmd_flags & REQ_URGENT)
71                 rq_type = TS_URGENT_WRITE;
72
73         return rq_type;
74 }
75
76 static void ufshcd_update_error_stats(struct ufs_hba *hba, int type)
77 {
78         ufsdbg_set_err_state(hba);
79         if (type < UFS_ERR_MAX)
80                 hba->ufs_stats.err_stats[type]++;
81 }
82
83 static void ufshcd_update_tag_stats(struct ufs_hba *hba, int tag)
84 {
85         struct request *rq =
86                 hba->lrb[tag].cmd ? hba->lrb[tag].cmd->request : NULL;
87         u64 **tag_stats = hba->ufs_stats.tag_stats;
88         int rq_type;
89
90         if (!hba->ufs_stats.enabled)
91                 return;
92
93         tag_stats[tag][TS_TAG]++;
94         if (!rq || !(rq->cmd_type & REQ_TYPE_FS))
95                 return;
96
97         WARN_ON(hba->ufs_stats.q_depth > hba->nutrs);
98         rq_type = ufshcd_tag_req_type(rq);
99         if (!(rq_type < 0 || rq_type >= TS_NUM_STATS))
100                 tag_stats[hba->ufs_stats.q_depth++][rq_type]++;
101 }
102
103 static void ufshcd_update_tag_stats_completion(struct ufs_hba *hba,
104                 struct scsi_cmnd *cmd)
105 {
106         struct request *rq = cmd ? cmd->request : NULL;
107
108         if (rq && rq->cmd_type & REQ_TYPE_FS)
109                 hba->ufs_stats.q_depth--;
110 }
111
112 static void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
113 {
114         int rq_type;
115         struct request *rq = lrbp->cmd ? lrbp->cmd->request : NULL;
116         s64 delta = ktime_us_delta(lrbp->complete_time_stamp,
117                 lrbp->issue_time_stamp);
118
119         /* update general request statistics */
120         if (hba->ufs_stats.req_stats[TS_TAG].count == 0)
121                 hba->ufs_stats.req_stats[TS_TAG].min = delta;
122         hba->ufs_stats.req_stats[TS_TAG].count++;
123         hba->ufs_stats.req_stats[TS_TAG].sum += delta;
124         if (delta > hba->ufs_stats.req_stats[TS_TAG].max)
125                 hba->ufs_stats.req_stats[TS_TAG].max = delta;
126         if (delta < hba->ufs_stats.req_stats[TS_TAG].min)
127                 hba->ufs_stats.req_stats[TS_TAG].min = delta;
128
129         rq_type = ufshcd_tag_req_type(rq);
130         if (rq_type == TS_NOT_SUPPORTED)
131                 return;
132
133         /* update request type specific statistics */
134         if (hba->ufs_stats.req_stats[rq_type].count == 0)
135                 hba->ufs_stats.req_stats[rq_type].min = delta;
136         hba->ufs_stats.req_stats[rq_type].count++;
137         hba->ufs_stats.req_stats[rq_type].sum += delta;
138         if (delta > hba->ufs_stats.req_stats[rq_type].max)
139                 hba->ufs_stats.req_stats[rq_type].max = delta;
140         if (delta < hba->ufs_stats.req_stats[rq_type].min)
141                 hba->ufs_stats.req_stats[rq_type].min = delta;
142 }
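/*
 * Note: update_req_stats() accumulates the completion latency of each request
 * (in microseconds) twice: once in the catch-all TS_TAG bucket and once in
 * the bucket selected by ufshcd_tag_req_type(). Every bucket tracks count,
 * sum, min and max, so the average latency can later be derived as
 * sum / count when the debugfs statistics are read out.
 */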
143
144 static void
145 ufshcd_update_query_stats(struct ufs_hba *hba, enum query_opcode opcode, u8 idn)
146 {
147         if (opcode < UPIU_QUERY_OPCODE_MAX && idn < MAX_QUERY_IDN)
148                 hba->ufs_stats.query_stats_arr[opcode][idn]++;
149 }
150
151 #else
152 static inline void ufshcd_update_tag_stats(struct ufs_hba *hba, int tag)
153 {
154 }
155
156 static inline void ufshcd_update_tag_stats_completion(struct ufs_hba *hba,
157                 struct scsi_cmnd *cmd)
158 {
159 }
160
161 static inline void ufshcd_update_error_stats(struct ufs_hba *hba, int type)
162 {
163 }
164
165 static inline
166 void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
167 {
168 }
169
170 static inline
171 void ufshcd_update_query_stats(struct ufs_hba *hba,
172                                enum query_opcode opcode, u8 idn)
173 {
174 }
175 #endif
176
177 #define PWR_INFO_MASK   0xF
178 #define PWR_RX_OFFSET   4
179
180 #define UFSHCD_REQ_SENSE_SIZE   18
181
182 #define UFSHCD_ENABLE_INTRS     (UTP_TRANSFER_REQ_COMPL |\
183                                  UTP_TASK_REQ_COMPL |\
184                                  UFSHCD_ERROR_MASK)
185 /* UIC command timeout, unit: ms */
186 #define UIC_CMD_TIMEOUT 500
187
188 /* NOP OUT retries waiting for NOP IN response */
189 #define NOP_OUT_RETRIES    10
190 /* Timeout after 30 msecs if NOP OUT hangs without response */
191 #define NOP_OUT_TIMEOUT    30 /* msecs */
192
193 /* Query request retries */
194 #define QUERY_REQ_RETRIES 3
195 /* Query request timeout */
196 #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
197
198 /* Task management command timeout */
199 #define TM_CMD_TIMEOUT  100 /* msecs */
200
201 /* maximum number of retries for a general UIC command  */
202 #define UFS_UIC_COMMAND_RETRIES 3
203
204 /* maximum number of link-startup retries */
205 #define DME_LINKSTARTUP_RETRIES 3
206
207 /* Maximum retries for Hibern8 enter */
208 #define UIC_HIBERN8_ENTER_RETRIES 3
209
210 /* maximum number of reset retries before giving up */
211 #define MAX_HOST_RESET_RETRIES 5
212
213 /* Expose the flag value from utp_upiu_query.value */
214 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
215
216 /* Interrupt aggregation default timeout, unit: 40us */
217 #define INT_AGGR_DEF_TO 0x02
218
219 /* default value of auto suspend is 3 seconds */
220 #define UFSHCD_AUTO_SUSPEND_DELAY_MS 3000 /* millisecs */
221
222 #define UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE     10
223 #define UFSHCD_CLK_GATING_DELAY_MS_PERF         50
224
225 /* IOCTL opcode for command - ufs set device read only */
226 #define UFS_IOCTL_BLKROSET      BLKROSET
227
228 #define UFSHCD_DEFAULT_LANES_PER_DIRECTION              2
229
230 #define ufshcd_toggle_vreg(_dev, _vreg, _on)                            \
231         ({                                                              \
232                 int _ret;                                               \
233                 if (_on)                                                \
234                         _ret = ufshcd_enable_vreg(_dev, _vreg);         \
235                 else                                                    \
236                         _ret = ufshcd_disable_vreg(_dev, _vreg);        \
237                 _ret;                                                   \
238         })
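/*
 * Illustrative use of ufshcd_toggle_vreg() (a sketch, not a quote from this
 * driver): the macro evaluates to the return value of the enable or disable
 * helper, so a caller can simply do
 *
 *      ret = ufshcd_toggle_vreg(hba->dev, vreg, on);
 *      if (ret)
 *              goto out;
 *
 * where "vreg" stands for any struct ufs_vreg pointer handled by
 * ufshcd_enable_vreg()/ufshcd_disable_vreg().
 */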
239
240 #define ufshcd_hex_dump(prefix_str, buf, len) \
241 print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
242
243 static u32 ufs_query_desc_max_size[] = {
244         QUERY_DESC_DEVICE_MAX_SIZE,
245         QUERY_DESC_CONFIGURAION_MAX_SIZE,
246         QUERY_DESC_UNIT_MAX_SIZE,
247         QUERY_DESC_RFU_MAX_SIZE,
248         QUERY_DESC_INTERCONNECT_MAX_SIZE,
249         QUERY_DESC_STRING_MAX_SIZE,
250         QUERY_DESC_RFU_MAX_SIZE,
251         QUERY_DESC_GEOMETRY_MAZ_SIZE,
252         QUERY_DESC_POWER_MAX_SIZE,
253         QUERY_DESC_HEALTH_MAX_SIZE,
254         QUERY_DESC_RFU_MAX_SIZE,
255 };
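/*
 * Note: ufs_query_desc_max_size[] is indexed by the descriptor IDN used in
 * query requests (Device = 0, Configuration = 1, Unit = 2, and so on, in the
 * same order as the entries above) and bounds how many bytes the driver will
 * read for that descriptor type.
 */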
256
257 enum {
258         UFSHCD_MAX_CHANNEL      = 0,
259         UFSHCD_MAX_ID           = 1,
260         UFSHCD_CMD_PER_LUN      = 32,
261         UFSHCD_CAN_QUEUE        = 32,
262 };
263
264 /* UFSHCD states */
265 enum {
266         UFSHCD_STATE_RESET,
267         UFSHCD_STATE_ERROR,
268         UFSHCD_STATE_OPERATIONAL,
269 };
270
271 /* UFSHCD error handling flags */
272 enum {
273         UFSHCD_EH_IN_PROGRESS = (1 << 0),
274 };
275
276 /* UFSHCD UIC layer error flags */
277 enum {
278         UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
279         UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
280         UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
281         UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
282         UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
283         UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
284 };
285
286 /* Interrupt configuration options */
287 enum {
288         UFSHCD_INT_DISABLE,
289         UFSHCD_INT_ENABLE,
290         UFSHCD_INT_CLEAR,
291 };
292
293 #define DEFAULT_UFSHCD_DBG_PRINT_EN     UFSHCD_DBG_PRINT_ALL
294
295 #define ufshcd_set_eh_in_progress(h) \
296         (h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
297 #define ufshcd_eh_in_progress(h) \
298         (h->eh_flags & UFSHCD_EH_IN_PROGRESS)
299 #define ufshcd_clear_eh_in_progress(h) \
300         (h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
301
302 #define ufshcd_set_ufs_dev_active(h) \
303         ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
304 #define ufshcd_set_ufs_dev_sleep(h) \
305         ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
306 #define ufshcd_set_ufs_dev_poweroff(h) \
307         ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
308 #define ufshcd_is_ufs_dev_active(h) \
309         ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
310 #define ufshcd_is_ufs_dev_sleep(h) \
311         ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
312 #define ufshcd_is_ufs_dev_poweroff(h) \
313         ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
314
315 static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
316         {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
317         {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
318         {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
319         {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
320         {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
321         {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
322 };
323
324 static inline enum ufs_dev_pwr_mode
325 ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
326 {
327         return ufs_pm_lvl_states[lvl].dev_state;
328 }
329
330 static inline enum uic_link_state
331 ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
332 {
333         return ufs_pm_lvl_states[lvl].link_state;
334 }
335
336 static inline enum ufs_pm_level
337 ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
338                                         enum uic_link_state link_state)
339 {
340         enum ufs_pm_level lvl;
341
342         for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
343                 if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
344                         (ufs_pm_lvl_states[lvl].link_state == link_state))
345                         return lvl;
346         }
347
348          /* if no match is found, return level 0 */
349         return UFS_PM_LVL_0;
350 }
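/*
 * Worked example: with the ufs_pm_lvl_states[] table above,
 * ufs_get_desired_pm_lvl_for_dev_link_state(UFS_SLEEP_PWR_MODE,
 * UIC_LINK_HIBERN8_STATE) matches the fourth entry and returns UFS_PM_LVL_3,
 * while an unsupported combination such as (UFS_POWERDOWN_PWR_MODE,
 * UIC_LINK_ACTIVE_STATE) falls back to UFS_PM_LVL_0.
 */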
351
352 static inline bool ufshcd_is_valid_pm_lvl(int lvl)
353 {
354         if (lvl >= 0 && lvl < ARRAY_SIZE(ufs_pm_lvl_states))
355                 return true;
356         else
357                 return false;
358 }
359
360 static irqreturn_t ufshcd_intr(int irq, void *__hba);
361 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
362 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
363 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
364 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
365 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
366 static void ufshcd_hba_exit(struct ufs_hba *hba);
367 static int ufshcd_probe_hba(struct ufs_hba *hba);
368 static int ufshcd_enable_clocks(struct ufs_hba *hba);
369 static int ufshcd_disable_clocks(struct ufs_hba *hba,
370                                  bool is_gating_context);
371 static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
372                                               bool is_gating_context);
373 static void ufshcd_hold_all(struct ufs_hba *hba);
374 static void ufshcd_release_all(struct ufs_hba *hba);
375 static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
376 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
377 static inline void ufshcd_save_tstamp_of_last_dme_cmd(struct ufs_hba *hba);
378 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
379 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
380 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
381 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
382 static void ufshcd_release_all(struct ufs_hba *hba);
383 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
384 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
385 static int ufshcd_devfreq_target(struct device *dev,
386                                 unsigned long *freq, u32 flags);
387 static int ufshcd_devfreq_get_dev_status(struct device *dev,
388                 struct devfreq_dev_status *stat);
389
390 #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
391 static struct devfreq_simple_ondemand_data ufshcd_ondemand_data = {
392         .upthreshold = 70,
393         .downdifferential = 65,
394         .simple_scaling = 1,
395 };
396
397 static void *gov_data = &ufshcd_ondemand_data;
398 #else
399 static void *gov_data;
400 #endif
401
402 static struct devfreq_dev_profile ufs_devfreq_profile = {
403         .polling_ms     = 60,
404         .target         = ufshcd_devfreq_target,
405         .get_dev_status = ufshcd_devfreq_get_dev_status,
406 };
407
408 static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
409 {
410         return tag >= 0 && tag < hba->nutrs;
411 }
412
413 static inline void ufshcd_enable_irq(struct ufs_hba *hba)
414 {
415         if (!hba->is_irq_enabled) {
416                 enable_irq(hba->irq);
417                 hba->is_irq_enabled = true;
418         }
419 }
420
421 static inline void ufshcd_disable_irq(struct ufs_hba *hba)
422 {
423         if (hba->is_irq_enabled) {
424                 disable_irq(hba->irq);
425                 hba->is_irq_enabled = false;
426         }
427 }
428
429 void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
430 {
431         unsigned long flags;
432         bool unblock = false;
433
434         spin_lock_irqsave(hba->host->host_lock, flags);
435         hba->scsi_block_reqs_cnt--;
436         unblock = !hba->scsi_block_reqs_cnt;
437         spin_unlock_irqrestore(hba->host->host_lock, flags);
438         if (unblock)
439                 scsi_unblock_requests(hba->host);
440 }
441 EXPORT_SYMBOL(ufshcd_scsi_unblock_requests);
442
443 static inline void __ufshcd_scsi_block_requests(struct ufs_hba *hba)
444 {
445         if (!hba->scsi_block_reqs_cnt++)
446                 scsi_block_requests(hba->host);
447 }
448
449 void ufshcd_scsi_block_requests(struct ufs_hba *hba)
450 {
451         unsigned long flags;
452
453         spin_lock_irqsave(hba->host->host_lock, flags);
454         __ufshcd_scsi_block_requests(hba);
455         spin_unlock_irqrestore(hba->host->host_lock, flags);
456 }
457 EXPORT_SYMBOL(ufshcd_scsi_block_requests);
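/*
 * Note: ufshcd_scsi_block_requests() and ufshcd_scsi_unblock_requests() form
 * a nested pair around the SCSI midlayer block/unblock calls. The
 * scsi_block_reqs_cnt counter is protected by the host lock; the midlayer is
 * blocked on the first 0 -> 1 transition and unblocked only when the count
 * drops back to zero, so every block call must eventually be balanced by
 * exactly one unblock call.
 */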
458
459 static int ufshcd_device_reset_ctrl(struct ufs_hba *hba, bool ctrl)
460 {
461         int ret = 0;
462
463         if (!hba->pctrl)
464                 return 0;
465
466         /* Assert reset if ctrl == true */
467         if (ctrl)
468                 ret = pinctrl_select_state(hba->pctrl,
469                         pinctrl_lookup_state(hba->pctrl, "dev-reset-assert"));
470         else
471                 ret = pinctrl_select_state(hba->pctrl,
472                         pinctrl_lookup_state(hba->pctrl, "dev-reset-deassert"));
473
474         if (ret < 0)
475                 dev_err(hba->dev, "%s: %s failed with err %d\n",
476                         __func__, ctrl ? "Assert" : "Deassert", ret);
477
478         return ret;
479 }
480
481 static inline int ufshcd_assert_device_reset(struct ufs_hba *hba)
482 {
483         return ufshcd_device_reset_ctrl(hba, true);
484 }
485
486 static inline int ufshcd_deassert_device_reset(struct ufs_hba *hba)
487 {
488         return ufshcd_device_reset_ctrl(hba, false);
489 }
490
491 static int ufshcd_reset_device(struct ufs_hba *hba)
492 {
493         int ret;
494
495         /* reset the connected UFS device */
496         ret = ufshcd_assert_device_reset(hba);
497         if (ret)
498                 goto out;
499         /*
500          * The reset signal is active low.
501          * The UFS device shall detect a positive or negative RST_n pulse
502          * width of at least 1us.
503          * To be on the safe side, keep the reset low for at least 10us.
504          */
505         usleep_range(10, 15);
506
507         ret = ufshcd_deassert_device_reset(hba);
508         if (ret)
509                 goto out;
510          /* same as assert, wait for at least 10us after deassert */
511         usleep_range(10, 15);
512 out:
513         return ret;
514 }
515
516 /* replace a non-printable or non-ASCII character with a space */
517 static inline void ufshcd_remove_non_printable(char *val)
518 {
519         if (!val || !*val)
520                 return;
521
522         if (*val < 0x20 || *val > 0x7e)
523                 *val = ' ';
524 }
525
526 #define UFSHCD_MAX_CMD_LOGGING  200
527
528 #ifdef CONFIG_TRACEPOINTS
529 static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
530                         struct ufshcd_cmd_log_entry *entry, u8 opcode)
531 {
532         if (trace_ufshcd_command_enabled()) {
533                 u32 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
534
535                 trace_ufshcd_command(dev_name(hba->dev), entry->str, entry->tag,
536                                      entry->doorbell, entry->transfer_len, intr,
537                                      entry->lba, opcode);
538         }
539 }
540 #else
541 static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
542                         struct ufshcd_cmd_log_entry *entry, u8 opcode)
543 {
544 }
545 #endif
546
547 #ifdef CONFIG_SCSI_UFSHCD_CMD_LOGGING
548 static void ufshcd_cmd_log_init(struct ufs_hba *hba)
549 {
550         /* Allocate log entries */
551         if (!hba->cmd_log.entries) {
552                 hba->cmd_log.entries = kzalloc(UFSHCD_MAX_CMD_LOGGING *
553                         sizeof(struct ufshcd_cmd_log_entry), GFP_KERNEL);
554                 if (!hba->cmd_log.entries)
555                         return;
556                 dev_dbg(hba->dev, "%s: cmd_log.entries initialized\n",
557                                 __func__);
558         }
559 }
560
561 #ifdef CONFIG_TRACEPOINTS
562 static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
563                              unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
564                              sector_t lba, int transfer_len, u8 opcode)
565 {
566         struct ufshcd_cmd_log_entry *entry;
567
568         if (!hba->cmd_log.entries)
569                 return;
570
571         entry = &hba->cmd_log.entries[hba->cmd_log.pos];
572         entry->lun = lun;
573         entry->str = str;
574         entry->cmd_type = cmd_type;
575         entry->cmd_id = cmd_id;
576         entry->lba = lba;
577         entry->transfer_len = transfer_len;
578         entry->idn = idn;
579         entry->doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
580         entry->tag = tag;
581         entry->tstamp = ktime_get();
582         entry->outstanding_reqs = hba->outstanding_reqs;
583         entry->seq_num = hba->cmd_log.seq_num;
584         hba->cmd_log.seq_num++;
585         hba->cmd_log.pos =
586                         (hba->cmd_log.pos + 1) % UFSHCD_MAX_CMD_LOGGING;
587
588         ufshcd_add_command_trace(hba, entry, opcode);
589 }
590 #endif
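/*
 * Note: the command log above is a fixed-size ring buffer of
 * UFSHCD_MAX_CMD_LOGGING entries. cmd_log.pos always points at the slot that
 * will be overwritten next (i.e. the oldest entry), which is why
 * ufshcd_print_cmd_log() starts printing from cmd_log.pos and walks forward
 * modulo UFSHCD_MAX_CMD_LOGGING; cmd_log.seq_num provides a monotonically
 * increasing index across wrap-arounds.
 */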
591
592 static void ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
593         unsigned int tag, u8 cmd_id, u8 idn)
594 {
595         __ufshcd_cmd_log(hba, str, cmd_type, tag, cmd_id, idn,
596                          0xff, (sector_t)-1, -1, -1);
597 }
598
599 static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
600 {
601         ufshcd_cmd_log(hba, str, "dme", 0xff, cmd_id, 0xff);
602 }
603
604 static void ufshcd_print_cmd_log(struct ufs_hba *hba)
605 {
606         int i;
607         int pos;
608         struct ufshcd_cmd_log_entry *p;
609
610         if (!hba->cmd_log.entries)
611                 return;
612
613         pos = hba->cmd_log.pos;
614         for (i = 0; i < UFSHCD_MAX_CMD_LOGGING; i++) {
615                 p = &hba->cmd_log.entries[pos];
616                 pos = (pos + 1) % UFSHCD_MAX_CMD_LOGGING;
617
618                 if (ktime_to_us(p->tstamp)) {
619                         pr_err("%s: %s: seq_no=%u lun=0x%x cmd_id=0x%02x lba=0x%llx txfer_len=%d tag=%u, doorbell=0x%x outstanding=0x%x idn=%d time=%lld us\n",
620                                 p->cmd_type, p->str, p->seq_num,
621                                 p->lun, p->cmd_id, (unsigned long long)p->lba,
622                                 p->transfer_len, p->tag, p->doorbell,
623                                 p->outstanding_reqs, p->idn,
624                                 ktime_to_us(p->tstamp));
625                                 usleep_range(1000, 1100);
626                 }
627         }
628 }
629 #else
630 static void ufshcd_cmd_log_init(struct ufs_hba *hba)
631 {
632 }
633
634 #ifdef CONFIG_TRACEPOINTS
635 static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
636                              unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
637                              sector_t lba, int transfer_len, u8 opcode)
638 {
639         struct ufshcd_cmd_log_entry entry;
640
641         entry.str = str;
642         entry.lba = lba;
643         entry.cmd_id = cmd_id;
644         entry.transfer_len = transfer_len;
645         entry.doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
646         entry.tag = tag;
647
648         ufshcd_add_command_trace(hba, &entry, opcode);
649 }
650 #endif
651
652 static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
653 {
654 }
655
656 static void ufshcd_print_cmd_log(struct ufs_hba *hba)
657 {
658 }
659 #endif
660
661 #ifdef CONFIG_TRACEPOINTS
662 static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
663                                         unsigned int tag, const char *str)
664 {
665         struct ufshcd_lrb *lrbp;
666         char *cmd_type = NULL;
667         u8 opcode = 0;
668         u8 cmd_id = 0, idn = 0;
669         sector_t lba = -1;
670         int transfer_len = -1;
671
672         lrbp = &hba->lrb[tag];
673
674         if (lrbp->cmd) { /* data phase exists */
675                 opcode = (u8)(*lrbp->cmd->cmnd);
676                 if ((opcode == READ_10) || (opcode == WRITE_10)) {
677                         /*
678                          * Currently we only fully trace read(10) and write(10)
679                          * commands
680                          */
681                         if (lrbp->cmd->request && lrbp->cmd->request->bio)
682                                 lba =
683                                 lrbp->cmd->request->bio->bi_iter.bi_sector;
684                         transfer_len = be32_to_cpu(
685                                 lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
686                 }
687         }
688
689         if (lrbp->cmd && (lrbp->command_type == UTP_CMD_TYPE_SCSI)) {
690                 cmd_type = "scsi";
691                 cmd_id = (u8)(*lrbp->cmd->cmnd);
692         } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
693                 if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) {
694                         cmd_type = "nop";
695                         cmd_id = 0;
696                 } else if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) {
697                         cmd_type = "query";
698                         cmd_id = hba->dev_cmd.query.request.upiu_req.opcode;
699                         idn = hba->dev_cmd.query.request.upiu_req.idn;
700                 }
701         }
702
703         __ufshcd_cmd_log(hba, (char *) str, cmd_type, tag, cmd_id, idn,
704                          lrbp->lun, lba, transfer_len, opcode);
705 }
706 #else
707 static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
708                                         unsigned int tag, const char *str)
709 {
710 }
711 #endif
712
713 static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
714 {
715         struct ufs_clk_info *clki;
716         struct list_head *head = &hba->clk_list_head;
717
718         if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_CLK_FREQ_EN))
719                 return;
720
721         if (!head || list_empty(head))
722                 return;
723
724         list_for_each_entry(clki, head, list) {
725                 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
726                                 clki->max_freq)
727                         dev_err(hba->dev, "clk: %s, rate: %u\n",
728                                         clki->name, clki->curr_freq);
729         }
730 }
731
732 static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
733                 struct ufs_uic_err_reg_hist *err_hist, char *err_name)
734 {
735         int i;
736
737         if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_UIC_ERR_HIST_EN))
738                 return;
739
740         for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
741                 int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;
742
743                 if (err_hist->reg[p] == 0)
744                         continue;
745                 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us", err_name, i,
746                         err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
747         }
748 }
749
750 static inline void __ufshcd_print_host_regs(struct ufs_hba *hba, bool no_sleep)
751 {
752         if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_REGS_EN))
753                 return;
754
755         /*
756          * hex_dump reads its data without the readl macro. This might
757          * cause inconsistency issues on some platform, as the printed
758          * values may be from cache and not the most recent value.
759          * To know whether you are looking at an un-cached version verify
760          * that IORESOURCE_MEM flag is on when xxx_get_resource() is invoked
761          * during platform/pci probe function.
762          */
763         ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
764         dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x",
765                 hba->ufs_version, hba->capabilities);
766         dev_err(hba->dev,
767                 "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x",
768                 (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
769         dev_err(hba->dev,
770                 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d",
771                 ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
772                 hba->ufs_stats.hibern8_exit_cnt);
773
774         ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
775         ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
776         ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
777         ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
778         ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
779
780         ufshcd_print_clk_freqs(hba);
781
782         ufshcd_vops_dbg_register_dump(hba, no_sleep);
783 }
784
785 static void ufshcd_print_host_regs(struct ufs_hba *hba)
786 {
787         __ufshcd_print_host_regs(hba, false);
788 }
789
790 static
791 void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
792 {
793         struct ufshcd_lrb *lrbp;
794         int prdt_length;
795         int tag;
796
797         if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_TRS_EN))
798                 return;
799
800         for_each_set_bit(tag, &bitmap, hba->nutrs) {
801                 lrbp = &hba->lrb[tag];
802
803                 dev_err(hba->dev, "UPIU[%d] - issue time %lld us",
804                                 tag, ktime_to_us(lrbp->issue_time_stamp));
805                 dev_err(hba->dev,
806                         "UPIU[%d] - Transfer Request Descriptor phys@0x%llx",
807                         tag, (u64)lrbp->utrd_dma_addr);
808                 ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
809                                 sizeof(struct utp_transfer_req_desc));
810                 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx", tag,
811                         (u64)lrbp->ucd_req_dma_addr);
812                 ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
813                                 sizeof(struct utp_upiu_req));
814                 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx", tag,
815                         (u64)lrbp->ucd_rsp_dma_addr);
816                 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
817                                 sizeof(struct utp_upiu_rsp));
818                 prdt_length =
819                         le16_to_cpu(lrbp->utr_descriptor_ptr->prd_table_length);
820                 dev_err(hba->dev, "UPIU[%d] - PRDT - %d entries  phys@0x%llx",
821                         tag, prdt_length, (u64)lrbp->ucd_prdt_dma_addr);
822                 if (pr_prdt)
823                         ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
824                                 sizeof(struct ufshcd_sg_entry) * prdt_length);
825         }
826 }
827
828 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
829 {
830         struct utp_task_req_desc *tmrdp;
831         int tag;
832
833         if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_TMRS_EN))
834                 return;
835
836         for_each_set_bit(tag, &bitmap, hba->nutmrs) {
837                 tmrdp = &hba->utmrdl_base_addr[tag];
838                 dev_err(hba->dev, "TM[%d] - Task Management Header", tag);
839                 ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
840                                 sizeof(struct request_desc_header));
841                 dev_err(hba->dev, "TM[%d] - Task Management Request UPIU",
842                                 tag);
843                 ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
844                                 sizeof(struct utp_upiu_req));
845                 dev_err(hba->dev, "TM[%d] - Task Management Response UPIU",
846                                 tag);
847                 ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
848                                 sizeof(struct utp_task_req_desc));
849         }
850 }
851
852 static void ufshcd_print_fsm_state(struct ufs_hba *hba)
853 {
854         int err = 0, tx_fsm_val = 0, rx_fsm_val = 0;
855
856         err = ufshcd_dme_get(hba,
857                         UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
858                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
859                         &tx_fsm_val);
860         dev_err(hba->dev, "%s: TX_FSM_STATE = %u, err = %d\n", __func__,
861                         tx_fsm_val, err);
862         err = ufshcd_dme_get(hba,
863                         UIC_ARG_MIB_SEL(MPHY_RX_FSM_STATE,
864                         UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
865                         &rx_fsm_val);
866         dev_err(hba->dev, "%s: RX_FSM_STATE = %u, err = %d\n", __func__,
867                         rx_fsm_val, err);
868 }
869
870 static void ufshcd_print_host_state(struct ufs_hba *hba)
871 {
872         if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_STATE_EN))
873                 return;
874
875         dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
876         dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
877                 hba->lrb_in_use, hba->outstanding_tasks, hba->outstanding_reqs);
878         dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x, saved_ce_err=0x%x\n",
879                 hba->saved_err, hba->saved_uic_err, hba->saved_ce_err);
880         dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
881                 hba->curr_dev_pwr_mode, hba->uic_link_state);
882         dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
883                 hba->pm_op_in_progress, hba->is_sys_suspended);
884         dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
885                 hba->auto_bkops_enabled, hba->host->host_self_blocked);
886         dev_err(hba->dev, "Clk gate=%d, hibern8 on idle=%d\n",
887                 hba->clk_gating.state, hba->hibern8_on_idle.state);
888         dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
889                 hba->eh_flags, hba->req_abort_count);
890         dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
891                 hba->capabilities, hba->caps);
892         dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
893                 hba->dev_quirks);
894 }
895
896 /**
897  * ufshcd_print_pwr_info - print power params as saved in hba
898  * power info
899  * @hba: per-adapter instance
900  */
901 static void ufshcd_print_pwr_info(struct ufs_hba *hba)
902 {
903         char *names[] = {
904                 "INVALID MODE",
905                 "FAST MODE",
906                 "SLOW_MODE",
907                 "INVALID MODE",
908                 "FASTAUTO_MODE",
909                 "SLOWAUTO_MODE",
910                 "INVALID MODE",
911         };
912
913         if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_PWR_EN))
914                 return;
915
916         dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
917                  __func__,
918                  hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
919                  hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
920                  names[hba->pwr_info.pwr_rx],
921                  names[hba->pwr_info.pwr_tx],
922                  hba->pwr_info.hs_rate);
923 }
924
925 /*
926  * ufshcd_wait_for_register - wait for a register to reach the expected value
927  * @hba - per-adapter interface
928  * @reg - mmio register offset
929  * @mask - mask to apply to read register value
930  * @val - wait condition
931  * @interval_us - polling interval in microsecs
932  * @timeout_ms - timeout in millisecs
933  * @can_sleep - perform sleep or just spin
934  * Returns -ETIMEDOUT on error, zero on success
935  */
936 int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
937                                 u32 val, unsigned long interval_us,
938                                 unsigned long timeout_ms, bool can_sleep)
939 {
940         int err = 0;
941         unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
942
943         /* ignore bits that we don't intend to wait on */
944         val = val & mask;
945
946         while ((ufshcd_readl(hba, reg) & mask) != val) {
947                 if (can_sleep)
948                         usleep_range(interval_us, interval_us + 50);
949                 else
950                         udelay(interval_us);
951                 if (time_after(jiffies, timeout)) {
952                         if ((ufshcd_readl(hba, reg) & mask) != val)
953                                 err = -ETIMEDOUT;
954                         break;
955                 }
956         }
957
958         return err;
959 }
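/*
 * Illustrative call (a sketch, values are examples only): poll a doorbell bit
 * until the controller clears it, sleeping between reads and giving up after
 * roughly one second:
 *
 *      err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *                                     1U << tag, 0, 1000, 1000, true);
 *
 * Here 1000 us is the polling interval, 1000 ms the timeout, and "true"
 * selects usleep_range() over udelay().
 */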
960
961 /**
962  * ufshcd_get_intr_mask - Get the interrupt bit mask
963  * @hba - Pointer to adapter instance
964  *
965  * Returns interrupt bit mask per version
966  */
967 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
968 {
969         u32 intr_mask = 0;
970
971         switch (hba->ufs_version) {
972         case UFSHCI_VERSION_10:
973                 intr_mask = INTERRUPT_MASK_ALL_VER_10;
974                 break;
975         /* allow fall through */
976         case UFSHCI_VERSION_11:
977         case UFSHCI_VERSION_20:
978                 intr_mask = INTERRUPT_MASK_ALL_VER_11;
979                 break;
980         /* allow fall through */
981         case UFSHCI_VERSION_21:
982         default:
983                 intr_mask = INTERRUPT_MASK_ALL_VER_21;
984         }
985
986         if (!ufshcd_is_crypto_supported(hba))
987                 intr_mask &= ~CRYPTO_ENGINE_FATAL_ERROR;
988
989         return intr_mask;
990 }
991
992 /**
993  * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
994  * @hba - Pointer to adapter instance
995  *
996  * Returns UFSHCI version supported by the controller
997  */
998 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
999 {
1000         if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
1001                 return ufshcd_vops_get_ufs_hci_version(hba);
1002
1003         return ufshcd_readl(hba, REG_UFS_VERSION);
1004 }
1005
1006 /**
1007  * ufshcd_is_device_present - Check if any device is connected to
1008  *                            the host controller
1009  * @hba: pointer to adapter instance
1010  *
1011  * Returns 1 if device present, 0 if no device detected
1012  */
1013 static inline int ufshcd_is_device_present(struct ufs_hba *hba)
1014 {
1015         return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
1016                                                 DEVICE_PRESENT) ? 1 : 0;
1017 }
1018
1019 /**
1020  * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
1021  * @lrb: pointer to local command reference block
1022  *
1023  * This function is used to get the OCS field from UTRD
1024  * Returns the OCS field in the UTRD
1025  */
1026 static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
1027 {
1028         return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
1029 }
1030
1031 /**
1032  * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
1033  * @task_req_descp: pointer to utp_task_req_desc structure
1034  *
1035  * This function is used to get the OCS field from UTMRD
1036  * Returns the OCS field in the UTMRD
1037  */
1038 static inline int
1039 ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
1040 {
1041         return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
1042 }
1043
1044 /**
1045  * ufshcd_get_tm_free_slot - get a free slot for task management request
1046  * @hba: per adapter instance
1047  * @free_slot: pointer to variable with available slot value
1048  *
1049  * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
1050  * Returns false if a free slot is not available, else returns true with the
1051  * tag value in @free_slot.
1052  */
1053 static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
1054 {
1055         int tag;
1056         bool ret = false;
1057
1058         if (!free_slot)
1059                 goto out;
1060
1061         do {
1062                 tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
1063                 if (tag >= hba->nutmrs)
1064                         goto out;
1065         } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
1066
1067         *free_slot = tag;
1068         ret = true;
1069 out:
1070         return ret;
1071 }
1072
1073 static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
1074 {
1075         clear_bit_unlock(slot, &hba->tm_slots_in_use);
1076 }
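/*
 * Note: ufshcd_get_tm_free_slot() is a lock-free tag allocator over the
 * tm_slots_in_use bitmap: find_first_zero_bit() proposes a candidate and
 * test_and_set_bit_lock() claims it atomically, retrying if another context
 * wins the race. An illustrative pairing (a sketch, not a quote from this
 * driver) is:
 *
 *      if (!ufshcd_get_tm_free_slot(hba, &free_slot))
 *              return -EBUSY;
 *      ... issue the task management request using free_slot ...
 *      ufshcd_put_tm_slot(hba, free_slot);
 */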
1077
1078 /**
1079  * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
1080  * @hba: per adapter instance
1081  * @pos: position of the bit to be cleared
1082  */
1083 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
1084 {
1085         ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
1086 }
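/*
 * Note: per the UFSHCI specification, a transfer request slot is cleared by
 * writing '0' to the corresponding bit of UTRLCLR (writing '1' leaves a slot
 * untouched), which is why the mask above is inverted with ~(1 << pos)
 * instead of being written as a plain bit set.
 */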
1087
1088 /**
1089  * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
1090  * @hba: per adapter instance
1091  * @tag: position of the bit to be cleared
1092  */
1093 static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
1094 {
1095         __clear_bit(tag, &hba->outstanding_reqs);
1096 }
1097
1098 /**
1099  * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
1100  * @reg: Register value of host controller status
1101  *
1102  * Returns 0 on success and a positive value on failure
1103  */
1104 static inline int ufshcd_get_lists_status(u32 reg)
1105 {
1106         /*
1107          * The mask 0xFF is for the following HCS register bits
1108          * Bit          Description
1109          *  0           Device Present
1110          *  1           UTRLRDY
1111          *  2           UTMRLRDY
1112          *  3           UCRDY
1113          * 4-7          reserved
1114          */
1115         return ((reg & 0xFF) >> 1) ^ 0x07;
1116 }
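/*
 * Worked example: with Device Present (bit 0), UTRLRDY (bit 1), UTMRLRDY
 * (bit 2) and UCRDY (bit 3) all set, reg & 0xFF = 0x0F, shifting right by one
 * gives 0x07 and XOR-ing with 0x07 yields 0, i.e. success. If, say, UCRDY is
 * still clear, the intermediate value is 0x03 and the function returns 0x04,
 * flagging the missing ready bit.
 */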
1117
1118 /**
1119  * ufshcd_get_uic_cmd_result - Get the UIC command result
1120  * @hba: Pointer to adapter instance
1121  *
1122  * This function gets the result of UIC command completion
1123  * Returns 0 on success, non zero value on error
1124  */
1125 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
1126 {
1127         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
1128                MASK_UIC_COMMAND_RESULT;
1129 }
1130
1131 /**
1132  * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
1133  * @hba: Pointer to adapter instance
1134  *
1135  * This function reads UIC command argument3, which carries the attribute
1136  * value returned by the last UIC command.
1137  */
1138 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
1139 {
1140         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
1141 }
1142
1143 /**
1144  * ufshcd_get_req_rsp - returns the TR response transaction type
1145  * @ucd_rsp_ptr: pointer to response UPIU
1146  */
1147 static inline int
1148 ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
1149 {
1150         return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
1151 }
1152
1153 /**
1154  * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
1155  * @ucd_rsp_ptr: pointer to response UPIU
1156  *
1157  * This function gets the response status and scsi_status from response UPIU
1158  * Returns the response result code.
1159  */
1160 static inline int
1161 ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
1162 {
1163         return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
1164 }
1165
1166 /*
1167  * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
1168  *                              from response UPIU
1169  * @ucd_rsp_ptr: pointer to response UPIU
1170  *
1171  * Return the data segment length.
1172  */
1173 static inline unsigned int
1174 ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
1175 {
1176         return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
1177                 MASK_RSP_UPIU_DATA_SEG_LEN;
1178 }
1179
1180 /**
1181  * ufshcd_is_exception_event - Check if the device raised an exception event
1182  * @ucd_rsp_ptr: pointer to response UPIU
1183  *
1184  * The function checks if the device raised an exception event indicated in
1185  * the Device Information field of response UPIU.
1186  *
1187  * Returns true if exception is raised, false otherwise.
1188  */
1189 static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
1190 {
1191         return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
1192                         MASK_RSP_EXCEPTION_EVENT ? true : false;
1193 }
1194
1195 /**
1196  * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
1197  * @hba: per adapter instance
1198  */
1199 static inline void
1200 ufshcd_reset_intr_aggr(struct ufs_hba *hba)
1201 {
1202         ufshcd_writel(hba, INT_AGGR_ENABLE |
1203                       INT_AGGR_COUNTER_AND_TIMER_RESET,
1204                       REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
1205 }
1206
1207 /**
1208  * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
1209  * @hba: per adapter instance
1210  * @cnt: Interrupt aggregation counter threshold
1211  * @tmout: Interrupt aggregation timeout value
1212  */
1213 static inline void
1214 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
1215 {
1216         ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
1217                       INT_AGGR_COUNTER_THLD_VAL(cnt) |
1218                       INT_AGGR_TIMEOUT_VAL(tmout),
1219                       REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
1220 }
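/*
 * Illustrative configuration (a sketch, not a quote from this driver):
 * aggregating up to all outstanding transfers with the default timeout could
 * look like
 *
 *      ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
 *
 * where INT_AGGR_DEF_TO (0x02) is expressed in 40 us units, i.e. 80 us.
 */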
1221
1222 /**
1223  * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
1224  * @hba: per adapter instance
1225  */
1226 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
1227 {
1228         ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
1229 }
1230
1231 /**
1232  * ufshcd_enable_run_stop_reg - Enable run-stop registers,
1233  *                      When the run-stop registers are set to 1, it indicates to
1234  *                      the host controller that it can process the requests
1235  * @hba: per adapter instance
1236  */
1237 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
1238 {
1239         ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
1240                       REG_UTP_TASK_REQ_LIST_RUN_STOP);
1241         ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
1242                       REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
1243 }
1244
1245 /**
1246  * ufshcd_hba_start - Start controller initialization sequence
1247  * @hba: per adapter instance
1248  */
1249 static inline void ufshcd_hba_start(struct ufs_hba *hba)
1250 {
1251         u32 val = CONTROLLER_ENABLE;
1252
1253         if (ufshcd_is_crypto_supported(hba))
1254                 val |= CRYPTO_GENERAL_ENABLE;
1255         ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
1256 }
1257
1258 /**
1259  * ufshcd_is_hba_active - Get controller state
1260  * @hba: per adapter instance
1261  *
1262  * Returns zero if controller is active, 1 otherwise
1263  */
1264 static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
1265 {
1266         return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
1267 }
1268
1269 static const char *ufschd_uic_link_state_to_string(
1270                         enum uic_link_state state)
1271 {
1272         switch (state) {
1273         case UIC_LINK_OFF_STATE:        return "OFF";
1274         case UIC_LINK_ACTIVE_STATE:     return "ACTIVE";
1275         case UIC_LINK_HIBERN8_STATE:    return "HIBERN8";
1276         default:                        return "UNKNOWN";
1277         }
1278 }
1279
1280 static const char *ufschd_ufs_dev_pwr_mode_to_string(
1281                         enum ufs_dev_pwr_mode state)
1282 {
1283         switch (state) {
1284         case UFS_ACTIVE_PWR_MODE:       return "ACTIVE";
1285         case UFS_SLEEP_PWR_MODE:        return "SLEEP";
1286         case UFS_POWERDOWN_PWR_MODE:    return "POWERDOWN";
1287         default:                        return "UNKNOWN";
1288         }
1289 }
1290
1291 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
1292 {
1293         /* HCI version 1.0 and 1.1 support UniPro 1.41 */
1294         if ((hba->ufs_version == UFSHCI_VERSION_10) ||
1295             (hba->ufs_version == UFSHCI_VERSION_11))
1296                 return UFS_UNIPRO_VER_1_41;
1297         else
1298                 return UFS_UNIPRO_VER_1_6;
1299 }
1300 EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
1301
1302 static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
1303 {
1304         /*
1305          * If both host and device support UniPro ver1.6 or later, PA layer
1306          * parameters tuning happens during link startup itself.
1307          *
1308          * We can manually tune PA layer parameters if either host or device
1309          * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
1310          * logic simple, we will only do manual tuning if local unipro version
1311          * doesn't support ver1.6 or later.
1312          */
1313         if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
1314                 return true;
1315         else
1316                 return false;
1317 }
1318
1319 /**
1320  * ufshcd_set_clk_freq - set UFS controller clock frequencies
1321  * @hba: per adapter instance
1322  * @scale_up: If true, set the max possible frequency, otherwise set a low frequency
1323  *
1324  * Returns 0 if successful
1325  * Returns < 0 for any other errors
1326  */
1327 static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
1328 {
1329         int ret = 0;
1330         struct ufs_clk_info *clki;
1331         struct list_head *head = &hba->clk_list_head;
1332
1333         if (!head || list_empty(head))
1334                 goto out;
1335
1336         list_for_each_entry(clki, head, list) {
1337                 if (!IS_ERR_OR_NULL(clki->clk)) {
1338                         if (scale_up && clki->max_freq) {
1339                                 if (clki->curr_freq == clki->max_freq)
1340                                         continue;
1341
1342                                 ret = clk_set_rate(clki->clk, clki->max_freq);
1343                                 if (ret) {
1344                                         dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
1345                                                 __func__, clki->name,
1346                                                 clki->max_freq, ret);
1347                                         break;
1348                                 }
1349                                 trace_ufshcd_clk_scaling(dev_name(hba->dev),
1350                                                 "scaled up", clki->name,
1351                                                 clki->curr_freq,
1352                                                 clki->max_freq);
1353                                 clki->curr_freq = clki->max_freq;
1354
1355                         } else if (!scale_up && clki->min_freq) {
1356                                 if (clki->curr_freq == clki->min_freq)
1357                                         continue;
1358
1359                                 ret = clk_set_rate(clki->clk, clki->min_freq);
1360                                 if (ret) {
1361                                         dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
1362                                                 __func__, clki->name,
1363                                                 clki->min_freq, ret);
1364                                         break;
1365                                 }
1366                                 trace_ufshcd_clk_scaling(dev_name(hba->dev),
1367                                                 "scaled down", clki->name,
1368                                                 clki->curr_freq,
1369                                                 clki->min_freq);
1370                                 clki->curr_freq = clki->min_freq;
1371                         }
1372                 }
1373                 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
1374                                 clki->name, clk_get_rate(clki->clk));
1375         }
1376
1377 out:
1378         return ret;
1379 }
1380
1381 /**
1382  * ufshcd_scale_clks - scale up or scale down UFS controller clocks
1383  * @hba: per adapter instance
1384  * @scale_up: True if scaling up and false if scaling down
1385  *
1386  * Returns 0 if successful
1387  * Returns < 0 for any other errors
1388  */
1389 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
1390 {
1391         int ret = 0;
1392
1393         ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
1394         if (ret)
1395                 return ret;
1396
1397         ret = ufshcd_set_clk_freq(hba, scale_up);
1398         if (ret)
1399                 return ret;
1400
1401         ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1402         if (ret) {
1403                 ufshcd_set_clk_freq(hba, !scale_up);
1404                 return ret;
1405         }
1406
1407         return ret;
1408 }
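/*
 * Note: ufshcd_scale_clks() brackets the frequency change with vendor
 * notifications. PRE_CHANGE lets the platform glue (e.g. the ufs-qcom vops)
 * prepare for the new frequency, the clocks are then reprogrammed, and
 * POST_CHANGE lets it re-tune afterwards. If the POST_CHANGE hook fails, the
 * change is rolled back by calling ufshcd_set_clk_freq() with the opposite
 * direction.
 */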
1409
1410 static inline void ufshcd_cancel_gate_work(struct ufs_hba *hba)
1411 {
1412         hrtimer_cancel(&hba->clk_gating.gate_hrtimer);
1413         cancel_work_sync(&hba->clk_gating.gate_work);
1414 }
1415
1416 static void ufshcd_ungate_work(struct work_struct *work)
1417 {
1418         int ret;
1419         unsigned long flags;
1420         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1421                         clk_gating.ungate_work);
1422
1423         ufshcd_cancel_gate_work(hba);
1424
1425         spin_lock_irqsave(hba->host->host_lock, flags);
1426         if (hba->clk_gating.state == CLKS_ON) {
1427                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1428                 goto unblock_reqs;
1429         }
1430
1431         spin_unlock_irqrestore(hba->host->host_lock, flags);
1432         ufshcd_hba_vreg_set_hpm(hba);
1433         ufshcd_enable_clocks(hba);
1434
1435         /* Exit from hibern8 */
1436         if (ufshcd_can_hibern8_during_gating(hba)) {
1437                 /* Prevent gating in this path */
1438                 hba->clk_gating.is_suspended = true;
1439                 if (ufshcd_is_link_hibern8(hba)) {
1440                         ret = ufshcd_uic_hibern8_exit(hba);
1441                         if (ret)
1442                                 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1443                                         __func__, ret);
1444                         else
1445                                 ufshcd_set_link_active(hba);
1446                 }
1447                 hba->clk_gating.is_suspended = false;
1448         }
1449 unblock_reqs:
1450         ufshcd_scsi_unblock_requests(hba);
1451 }
1452
1453 /**
1454  * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1455  * Also, exit from hibern8 mode and set the link as active.
1456  * @hba: per adapter instance
1457  * @async: This indicates whether caller should ungate clocks asynchronously.
1458  */
1459 int ufshcd_hold(struct ufs_hba *hba, bool async)
1460 {
1461         int rc = 0;
1462         unsigned long flags;
1463
1464         if (!ufshcd_is_clkgating_allowed(hba))
1465                 goto out;
1466         spin_lock_irqsave(hba->host->host_lock, flags);
1467         hba->clk_gating.active_reqs++;
1468
1469         if (ufshcd_eh_in_progress(hba)) {
1470                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1471                 return 0;
1472         }
1473
1474 start:
1475         switch (hba->clk_gating.state) {
1476         case CLKS_ON:
1477                 /*
1478                  * Wait for the ungate work to complete if in progress.
1479                  * Though the clocks may be in the ON state, the link could
1480                  * still be in hibern8 state if hibern8 is allowed
1481                  * during clock gating.
1482                  * Make sure we also exit hibern8, in addition to the
1483                  * clocks being ON.
1484                  */
1485                 if (ufshcd_can_hibern8_during_gating(hba) &&
1486                     ufshcd_is_link_hibern8(hba)) {
1487                         spin_unlock_irqrestore(hba->host->host_lock, flags);
1488                         flush_work(&hba->clk_gating.ungate_work);
1489                         spin_lock_irqsave(hba->host->host_lock, flags);
1490                         goto start;
1491                 }
1492                 break;
1493         case REQ_CLKS_OFF:
1494                 /*
1495                  * If the timer was active but the callback was not running
1496                  * we have nothing to do, just change state and return.
1497                  */
1498                 if (hrtimer_try_to_cancel(&hba->clk_gating.gate_hrtimer) == 1) {
1499                         hba->clk_gating.state = CLKS_ON;
1500                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1501                                 hba->clk_gating.state);
1502                         break;
1503                 }
1504                 /*
1505                  * If we are here, it means gating work is either done or
1506                  * currently running. Hence, fall through to cancel gating
1507                  * work and to enable clocks.
1508                  */
1509         case CLKS_OFF:
1510                 __ufshcd_scsi_block_requests(hba);
1511                 hba->clk_gating.state = REQ_CLKS_ON;
1512                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1513                         hba->clk_gating.state);
1514                 queue_work(hba->clk_gating.clk_gating_workq,
1515                                 &hba->clk_gating.ungate_work);
1516                 /*
1517                  * fall through to check if we should wait for this
1518                  * work to be done or not.
1519                  */
1520         case REQ_CLKS_ON:
1521                 if (async) {
1522                         rc = -EAGAIN;
1523                         hba->clk_gating.active_reqs--;
1524                         break;
1525                 }
1526
1527                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1528                 flush_work(&hba->clk_gating.ungate_work);
1529                 /* Make sure state is CLKS_ON before returning */
1530                 spin_lock_irqsave(hba->host->host_lock, flags);
1531                 goto start;
1532         default:
1533                 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1534                                 __func__, hba->clk_gating.state);
1535                 break;
1536         }
1537         spin_unlock_irqrestore(hba->host->host_lock, flags);
1538 out:
1539         hba->ufs_stats.clk_hold.ts = ktime_get();
1540         return rc;
1541 }
1542 EXPORT_SYMBOL_GPL(ufshcd_hold);
1543
1544 static void ufshcd_gate_work(struct work_struct *work)
1545 {
1546         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1547                                                 clk_gating.gate_work);
1548         unsigned long flags;
1549
1550         spin_lock_irqsave(hba->host->host_lock, flags);
1551         /*
1552          * In case you are here to cancel this work, the gating state
1553          * would be marked as REQ_CLKS_ON. In this case, save time by
1554          * skipping the gating work and exiting after changing the clock
1555          * state to CLKS_ON.
1556          */
1557         if (hba->clk_gating.is_suspended ||
1558                 (hba->clk_gating.state != REQ_CLKS_OFF)) {
1559                 hba->clk_gating.state = CLKS_ON;
1560                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1561                         hba->clk_gating.state);
1562                 goto rel_lock;
1563         }
1564
1565         if (hba->clk_gating.active_reqs
1566                 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1567                 || hba->lrb_in_use || hba->outstanding_tasks
1568                 || hba->active_uic_cmd || hba->uic_async_done)
1569                 goto rel_lock;
1570
1571         spin_unlock_irqrestore(hba->host->host_lock, flags);
1572
1573         if (ufshcd_is_hibern8_on_idle_allowed(hba) &&
1574             hba->hibern8_on_idle.is_enabled)
1575                 /*
1576                  * Hibern8 enter work (on idle) needs the clocks to be ON, hence
1577                  * make sure that it is flushed before turning off the clocks.
1578                  */
1579                 flush_delayed_work(&hba->hibern8_on_idle.enter_work);
1580
1581         /* put the link into hibern8 mode before turning off clocks */
1582         if (ufshcd_can_hibern8_during_gating(hba)) {
1583                 if (ufshcd_uic_hibern8_enter(hba)) {
1584                         hba->clk_gating.state = CLKS_ON;
1585                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1586                                 hba->clk_gating.state);
1587                         goto out;
1588                 }
1589                 ufshcd_set_link_hibern8(hba);
1590         }
1591
1592         /*
1593          * If auto hibern8 is supported then the link will already
1594          * be in hibern8 state and the ref clock can be gated.
1595          */
1596         if ((ufshcd_is_auto_hibern8_supported(hba) ||
1597              !ufshcd_is_link_active(hba)) && !hba->no_ref_clk_gating)
1598                 ufshcd_disable_clocks(hba, true);
1599         else
1600                 /* If link is active, device ref_clk can't be switched off */
1601                 ufshcd_disable_clocks_skip_ref_clk(hba, true);
1602
1603         /* Put the host controller in low power mode if possible */
1604         ufshcd_hba_vreg_set_lpm(hba);
1605
1606         /*
1607          * In case you are here to cancel this work the gating state
1608          * would be marked as REQ_CLKS_ON. In this case keep the state
1609          * as REQ_CLKS_ON which would anyway imply that clocks are off
1610          * and a request to turn them on is pending. This way we keep the
1611          * state machine intact, which ultimately prevents the cancel work
1612          * from running multiple times when new requests arrive before
1613          * the current cancel work is done.
1614          */
1615         spin_lock_irqsave(hba->host->host_lock, flags);
1616         if (hba->clk_gating.state == REQ_CLKS_OFF) {
1617                 hba->clk_gating.state = CLKS_OFF;
1618                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1619                         hba->clk_gating.state);
1620         }
1621 rel_lock:
1622         spin_unlock_irqrestore(hba->host->host_lock, flags);
1623 out:
1624         return;
1625 }
1626
1627 /* host lock must be held before calling this variant */
1628 static void __ufshcd_release(struct ufs_hba *hba, bool no_sched)
1629 {
1630         if (!ufshcd_is_clkgating_allowed(hba))
1631                 return;
1632
1633         hba->clk_gating.active_reqs--;
1634
1635         if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
1636                 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1637                 || hba->lrb_in_use || hba->outstanding_tasks
1638                 || hba->active_uic_cmd || hba->uic_async_done
1639                 || ufshcd_eh_in_progress(hba) || no_sched)
1640                 return;
1641
1642         hba->clk_gating.state = REQ_CLKS_OFF;
1643         trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
1644         hba->ufs_stats.clk_rel.ts = ktime_get();
1645
1646         hrtimer_start(&hba->clk_gating.gate_hrtimer,
1647                         ms_to_ktime(hba->clk_gating.delay_ms),
1648                         HRTIMER_MODE_REL);
1649 }
1650
1651 void ufshcd_release(struct ufs_hba *hba, bool no_sched)
1652 {
1653         unsigned long flags;
1654
1655         spin_lock_irqsave(hba->host->host_lock, flags);
1656         __ufshcd_release(hba, no_sched);
1657         spin_unlock_irqrestore(hba->host->host_lock, flags);
1658 }
1659 EXPORT_SYMBOL_GPL(ufshcd_release);
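/*
 * Typical pairing of ufshcd_hold()/ufshcd_release() (an illustrative sketch
 * only, not additional driver code): an I/O issue path keeps the clocks on
 * around the register access, e.g.
 *
 *	if (ufshcd_hold(hba, true) == -EAGAIN)
 *		return SCSI_MLQUEUE_HOST_BUSY;	// ungate work pending, retry later
 *	... program the request and ring the doorbell ...
 *	ufshcd_release(hba, false);		// once the request completes
 *
 * With async == true the caller is never blocked while the ungate work runs;
 * with async == false ufshcd_hold() returns only once the clocks are ON and
 * the link has exited hibern8 (if hibern8 during gating is allowed).
 */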
1660
1661 static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1662                 struct device_attribute *attr, char *buf)
1663 {
1664         struct ufs_hba *hba = dev_get_drvdata(dev);
1665
1666         return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
1667 }
1668
1669 static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1670                 struct device_attribute *attr, const char *buf, size_t count)
1671 {
1672         struct ufs_hba *hba = dev_get_drvdata(dev);
1673         unsigned long flags, value;
1674
1675         if (kstrtoul(buf, 0, &value))
1676                 return -EINVAL;
1677
1678         spin_lock_irqsave(hba->host->host_lock, flags);
1679         hba->clk_gating.delay_ms = value;
1680         spin_unlock_irqrestore(hba->host->host_lock, flags);
1681         return count;
1682 }
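/*
 * The attribute above is exposed through sysfs; for example (illustrative
 * only, the exact parent device path is platform specific):
 *
 *	cat  .../clkgate_delay_ms        -> current gating delay in ms
 *	echo 150 > .../clkgate_delay_ms  -> gate clocks 150 ms after idle
 */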
1683
1684 static ssize_t ufshcd_clkgate_delay_pwr_save_show(struct device *dev,
1685                 struct device_attribute *attr, char *buf)
1686 {
1687         struct ufs_hba *hba = dev_get_drvdata(dev);
1688
1689         return snprintf(buf, PAGE_SIZE, "%lu\n",
1690                         hba->clk_gating.delay_ms_pwr_save);
1691 }
1692
1693 static ssize_t ufshcd_clkgate_delay_pwr_save_store(struct device *dev,
1694                 struct device_attribute *attr, const char *buf, size_t count)
1695 {
1696         struct ufs_hba *hba = dev_get_drvdata(dev);
1697         unsigned long flags, value;
1698
1699         if (kstrtoul(buf, 0, &value))
1700                 return -EINVAL;
1701
1702         spin_lock_irqsave(hba->host->host_lock, flags);
1703
1704         hba->clk_gating.delay_ms_pwr_save = value;
1705         if (ufshcd_is_clkscaling_supported(hba) &&
1706             !hba->clk_scaling.is_scaled_up)
1707                 hba->clk_gating.delay_ms = hba->clk_gating.delay_ms_pwr_save;
1708
1709         spin_unlock_irqrestore(hba->host->host_lock, flags);
1710         return count;
1711 }
1712
1713 static ssize_t ufshcd_clkgate_delay_perf_show(struct device *dev,
1714                 struct device_attribute *attr, char *buf)
1715 {
1716         struct ufs_hba *hba = dev_get_drvdata(dev);
1717
1718         return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms_perf);
1719 }
1720
1721 static ssize_t ufshcd_clkgate_delay_perf_store(struct device *dev,
1722                 struct device_attribute *attr, const char *buf, size_t count)
1723 {
1724         struct ufs_hba *hba = dev_get_drvdata(dev);
1725         unsigned long flags, value;
1726
1727         if (kstrtoul(buf, 0, &value))
1728                 return -EINVAL;
1729
1730         spin_lock_irqsave(hba->host->host_lock, flags);
1731
1732         hba->clk_gating.delay_ms_perf = value;
1733         if (ufshcd_is_clkscaling_supported(hba) &&
1734             hba->clk_scaling.is_scaled_up)
1735                 hba->clk_gating.delay_ms = hba->clk_gating.delay_ms_perf;
1736
1737         spin_unlock_irqrestore(hba->host->host_lock, flags);
1738         return count;
1739 }
1740
1741 static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1742                 struct device_attribute *attr, char *buf)
1743 {
1744         struct ufs_hba *hba = dev_get_drvdata(dev);
1745
1746         return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
1747 }
1748
1749 static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1750                 struct device_attribute *attr, const char *buf, size_t count)
1751 {
1752         struct ufs_hba *hba = dev_get_drvdata(dev);
1753         unsigned long flags;
1754         u32 value;
1755
1756         if (kstrtou32(buf, 0, &value))
1757                 return -EINVAL;
1758
1759         value = !!value;
1760         if (value == hba->clk_gating.is_enabled)
1761                 goto out;
1762
1763         if (value) {
1764                 ufshcd_release(hba, false);
1765         } else {
1766                 spin_lock_irqsave(hba->host->host_lock, flags);
1767                 hba->clk_gating.active_reqs++;
1768                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1769         }
1770
1771         hba->clk_gating.is_enabled = value;
1772 out:
1773         return count;
1774 }
1775
1776 static enum hrtimer_restart ufshcd_clkgate_hrtimer_handler(
1777                                         struct hrtimer *timer)
1778 {
1779         struct ufs_hba *hba = container_of(timer, struct ufs_hba,
1780                                            clk_gating.gate_hrtimer);
1781
1782         queue_work(hba->clk_gating.clk_gating_workq,
1783                                 &hba->clk_gating.gate_work);
1784
1785         return HRTIMER_NORESTART;
1786 }
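/*
 * The hrtimer callback above runs in atomic context and must not sleep, so
 * it only queues clk_gating.gate_work; the actual (possibly sleeping) gating
 * is done by ufshcd_gate_work() on the dedicated clk_gating workqueue.
 */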
1787
1788 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1789 {
1790         struct ufs_clk_gating *gating = &hba->clk_gating;
1791         char wq_name[sizeof("ufs_clk_gating_00")];
1792
1793         hba->clk_gating.state = CLKS_ON;
1794
1795         if (!ufshcd_is_clkgating_allowed(hba))
1796                 return;
1797
1798         /*
1799          * Disable hibern8 during clk gating if
1800          * auto hibern8 is supported
1801          */
1802         if (ufshcd_is_auto_hibern8_supported(hba))
1803                 hba->caps &= ~UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
1804
1805         INIT_WORK(&gating->gate_work, ufshcd_gate_work);
1806         INIT_WORK(&gating->ungate_work, ufshcd_ungate_work);
1807         /*
1808          * Clock gating work must be executed only after auto hibern8
1809          * timeout has expired in the hardware or after aggressive
1810          * hibern8 on idle software timeout. Jiffy based low resolution
1811          * delayed work cannot reliably guarantee this, hence use a high
1812          * resolution timer to make sure the gate work is scheduled
1813          * strictly after the hibern8 timeout.
1814          *
1815          * Always make sure gating->delay_ms > hibern8_on_idle->delay_ms
1816          */
1817         hrtimer_init(&gating->gate_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1818         gating->gate_hrtimer.function = ufshcd_clkgate_hrtimer_handler;
1819
1820         snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
1821                         hba->host->host_no);
1822         hba->clk_gating.clk_gating_workq =
1823                 create_singlethread_workqueue(wq_name);
1824
1825         gating->is_enabled = true;
1826
1827         gating->delay_ms_pwr_save = UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE;
1828         gating->delay_ms_perf = UFSHCD_CLK_GATING_DELAY_MS_PERF;
1829
1830         /* start with performance mode */
1831         gating->delay_ms = gating->delay_ms_perf;
1832
1833         if (!ufshcd_is_clkscaling_supported(hba))
1834                 goto scaling_not_supported;
1835
1836         gating->delay_pwr_save_attr.show = ufshcd_clkgate_delay_pwr_save_show;
1837         gating->delay_pwr_save_attr.store = ufshcd_clkgate_delay_pwr_save_store;
1838         sysfs_attr_init(&gating->delay_pwr_save_attr.attr);
1839         gating->delay_pwr_save_attr.attr.name = "clkgate_delay_ms_pwr_save";
1840         gating->delay_pwr_save_attr.attr.mode = S_IRUGO | S_IWUSR;
1841         if (device_create_file(hba->dev, &gating->delay_pwr_save_attr))
1842                 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay_ms_pwr_save\n");
1843
1844         gating->delay_perf_attr.show = ufshcd_clkgate_delay_perf_show;
1845         gating->delay_perf_attr.store = ufshcd_clkgate_delay_perf_store;
1846         sysfs_attr_init(&gating->delay_perf_attr.attr);
1847         gating->delay_perf_attr.attr.name = "clkgate_delay_ms_perf";
1848         gating->delay_perf_attr.attr.mode = S_IRUGO | S_IWUSR;
1849         if (device_create_file(hba->dev, &gating->delay_perf_attr))
1850                 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay_ms_perf\n");
1851
1852         goto add_clkgate_enable;
1853
1854 scaling_not_supported:
1855         hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1856         hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1857         sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1858         hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1859         hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
1860         if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1861                 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
1862
1863 add_clkgate_enable:
1864         gating->enable_attr.show = ufshcd_clkgate_enable_show;
1865         gating->enable_attr.store = ufshcd_clkgate_enable_store;
1866         sysfs_attr_init(&gating->enable_attr.attr);
1867         gating->enable_attr.attr.name = "clkgate_enable";
1868         gating->enable_attr.attr.mode = S_IRUGO | S_IWUSR;
1869         if (device_create_file(hba->dev, &gating->enable_attr))
1870                 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
1871 }
1872
1873 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1874 {
1875         if (!ufshcd_is_clkgating_allowed(hba))
1876                 return;
1877         if (ufshcd_is_clkscaling_supported(hba)) {
1878                 device_remove_file(hba->dev,
1879                                    &hba->clk_gating.delay_pwr_save_attr);
1880                 device_remove_file(hba->dev, &hba->clk_gating.delay_perf_attr);
1881         } else {
1882                 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1883         }
1884         device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1885         ufshcd_cancel_gate_work(hba);
1886         cancel_work_sync(&hba->clk_gating.ungate_work);
1887         destroy_workqueue(hba->clk_gating.clk_gating_workq);
1888 }
1889
1890 static void ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba, u32 delay)
1891 {
1892         ufshcd_rmwl(hba, AUTO_HIBERN8_TIMER_SCALE_MASK |
1893                          AUTO_HIBERN8_IDLE_TIMER_MASK,
1894                         AUTO_HIBERN8_TIMER_SCALE_1_MS | delay,
1895                         REG_AUTO_HIBERN8_IDLE_TIMER);
1896         /* Make sure the timer gets applied before further operations */
1897         mb();
1898 }
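/*
 * Example of the programming above (a sketch of the intended encoding; the
 * exact bit layout comes from the AUTO_HIBERN8_* register definitions): with
 * AUTO_HIBERN8_TIMER_SCALE_1_MS selected, calling
 *
 *	ufshcd_set_auto_hibern8_timer(hba, 5);
 *
 * asks the host controller to autonomously enter hibern8 once the link has
 * been idle for 5 ms, with no software involvement per idle period.
 */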
1899
1900 /**
1901  * ufshcd_hibern8_hold - Make sure that link is not in hibern8.
1902  *
1903  * @hba: per adapter instance
1904  * @async: This indicates whether caller wants to exit hibern8 asynchronously.
1905  *
1906  * Exit from hibern8 mode and set the link as active.
1907  *
1908  * Return 0 on success, non-zero on failure.
1909  */
1910 static int ufshcd_hibern8_hold(struct ufs_hba *hba, bool async)
1911 {
1912         int rc = 0;
1913         unsigned long flags;
1914
1915         if (!ufshcd_is_hibern8_on_idle_allowed(hba))
1916                 goto out;
1917
1918         spin_lock_irqsave(hba->host->host_lock, flags);
1919         hba->hibern8_on_idle.active_reqs++;
1920
1921         if (ufshcd_eh_in_progress(hba)) {
1922                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1923                 return 0;
1924         }
1925
1926 start:
1927         switch (hba->hibern8_on_idle.state) {
1928         case HIBERN8_EXITED:
1929                 break;
1930         case REQ_HIBERN8_ENTER:
1931                 if (cancel_delayed_work(&hba->hibern8_on_idle.enter_work)) {
1932                         hba->hibern8_on_idle.state = HIBERN8_EXITED;
1933                         trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
1934                                 hba->hibern8_on_idle.state);
1935                         break;
1936                 }
1937                 /*
1938                  * If we are here, it means hibern8 enter work is either done or
1939                  * currently running. Hence, fall through to cancel hibern8
1940                  * work and exit hibern8.
1941                  */
1942         case HIBERN8_ENTERED:
1943                 __ufshcd_scsi_block_requests(hba);
1944                 hba->hibern8_on_idle.state = REQ_HIBERN8_EXIT;
1945                 trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
1946                         hba->hibern8_on_idle.state);
1947                 schedule_work(&hba->hibern8_on_idle.exit_work);
1948                 /*
1949                  * fall through to check if we should wait for this
1950                  * work to be done or not.
1951                  */
1952         case REQ_HIBERN8_EXIT:
1953                 if (async) {
1954                         rc = -EAGAIN;
1955                         hba->hibern8_on_idle.active_reqs--;
1956                         break;
1957                 } else {
1958                         spin_unlock_irqrestore(hba->host->host_lock, flags);
1959                         flush_work(&hba->hibern8_on_idle.exit_work);
1960                         /* Make sure state is HIBERN8_EXITED before returning */
1961                         spin_lock_irqsave(hba->host->host_lock, flags);
1962                         goto start;
1963                 }
1964         default:
1965                 dev_err(hba->dev, "%s: H8 is in invalid state %d\n",
1966                                 __func__, hba->hibern8_on_idle.state);
1967                 break;
1968         }
1969         spin_unlock_irqrestore(hba->host->host_lock, flags);
1970 out:
1971         return rc;
1972 }
1973
1974 /* host lock must be held before calling this variant */
1975 static void __ufshcd_hibern8_release(struct ufs_hba *hba, bool no_sched)
1976 {
1977         unsigned long delay_in_jiffies;
1978
1979         if (!ufshcd_is_hibern8_on_idle_allowed(hba))
1980                 return;
1981
1982         hba->hibern8_on_idle.active_reqs--;
1983         BUG_ON(hba->hibern8_on_idle.active_reqs < 0);
1984
1985         if (hba->hibern8_on_idle.active_reqs
1986                 || hba->hibern8_on_idle.is_suspended
1987                 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1988                 || hba->lrb_in_use || hba->outstanding_tasks
1989                 || hba->active_uic_cmd || hba->uic_async_done
1990                 || ufshcd_eh_in_progress(hba) || no_sched)
1991                 return;
1992
1993         hba->hibern8_on_idle.state = REQ_HIBERN8_ENTER;
1994         trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
1995                 hba->hibern8_on_idle.state);
1996         /*
1997          * Scheduling the delayed work after 1 jiffy will make the work
1998          * get scheduled any time from 0 ms to 1000/HZ ms, which is not
1999          * desirable for the hibern8 enter work as it may hurt performance
2000          * if it gets scheduled almost immediately. Hence make sure that
2001          * the hibern8 enter work gets scheduled at least 2 jiffies later
2002          * (any time between 1000/HZ ms and 2000/HZ ms).
2003          */
2004         delay_in_jiffies = msecs_to_jiffies(hba->hibern8_on_idle.delay_ms);
2005         if (delay_in_jiffies == 1)
2006                 delay_in_jiffies++;
2007
2008         schedule_delayed_work(&hba->hibern8_on_idle.enter_work,
2009                               delay_in_jiffies);
2010 }
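/*
 * Worked example of the rounding above, assuming HZ == 100 (10 ms per
 * jiffy): delay_ms == 10 gives msecs_to_jiffies(10) == 1, which is bumped
 * to 2, so the hibern8 enter work runs between 10 ms and 20 ms after the
 * controller goes idle instead of potentially almost immediately.
 */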
2011
2012 static void ufshcd_hibern8_release(struct ufs_hba *hba, bool no_sched)
2013 {
2014         unsigned long flags;
2015
2016         spin_lock_irqsave(hba->host->host_lock, flags);
2017         __ufshcd_hibern8_release(hba, no_sched);
2018         spin_unlock_irqrestore(hba->host->host_lock, flags);
2019 }
2020
2021 static void ufshcd_hibern8_enter_work(struct work_struct *work)
2022 {
2023         struct ufs_hba *hba = container_of(work, struct ufs_hba,
2024                                            hibern8_on_idle.enter_work.work);
2025         unsigned long flags;
2026
2027         spin_lock_irqsave(hba->host->host_lock, flags);
2028         if (hba->hibern8_on_idle.is_suspended) {
2029                 hba->hibern8_on_idle.state = HIBERN8_EXITED;
2030                 trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
2031                         hba->hibern8_on_idle.state);
2032                 goto rel_lock;
2033         }
2034
2035         if (hba->hibern8_on_idle.active_reqs
2036                 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
2037                 || hba->lrb_in_use || hba->outstanding_tasks
2038                 || hba->active_uic_cmd || hba->uic_async_done)
2039                 goto rel_lock;
2040
2041         spin_unlock_irqrestore(hba->host->host_lock, flags);
2042
2043         if (ufshcd_is_link_active(hba) && ufshcd_uic_hibern8_enter(hba)) {
2044                 /* Enter failed */
2045                 hba->hibern8_on_idle.state = HIBERN8_EXITED;
2046                 trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
2047                         hba->hibern8_on_idle.state);
2048                 goto out;
2049         }
2050         ufshcd_set_link_hibern8(hba);
2051
2052         /*
2053          * In case you are here to cancel this work the hibern8_on_idle.state
2054          * would be marked as REQ_HIBERN8_EXIT. In this case keep the state
2055          * as REQ_HIBERN8_EXIT which would anyway imply that we are in hibern8
2056          * and a request to exit from it is pending. This way we keep the
2057          * state machine intact, which ultimately prevents the cancel work
2058          * from running multiple times when new requests arrive before
2059          * the current cancel work is done.
2060          */
2061         spin_lock_irqsave(hba->host->host_lock, flags);
2062         if (hba->hibern8_on_idle.state == REQ_HIBERN8_ENTER) {
2063                 hba->hibern8_on_idle.state = HIBERN8_ENTERED;
2064                 trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
2065                         hba->hibern8_on_idle.state);
2066         }
2067 rel_lock:
2068         spin_unlock_irqrestore(hba->host->host_lock, flags);
2069 out:
2070         return;
2071 }
2072
2073 static void __ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba,
2074                                             unsigned long delay_ms)
2075 {
2076         pm_runtime_get_sync(hba->dev);
2077         ufshcd_hold_all(hba);
2078         ufshcd_scsi_block_requests(hba);
2079         down_write(&hba->lock);
2080         /* wait for all the outstanding requests to finish */
2081         ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
2082         ufshcd_set_auto_hibern8_timer(hba, delay_ms);
2083         up_write(&hba->lock);
2084         ufshcd_scsi_unblock_requests(hba);
2085         ufshcd_release_all(hba);
2086         pm_runtime_put_sync(hba->dev);
2087 }
2088
2089 static void ufshcd_hibern8_exit_work(struct work_struct *work)
2090 {
2091         int ret;
2092         unsigned long flags;
2093         struct ufs_hba *hba = container_of(work, struct ufs_hba,
2094                                            hibern8_on_idle.exit_work);
2095
2096         cancel_delayed_work_sync(&hba->hibern8_on_idle.enter_work);
2097
2098         spin_lock_irqsave(hba->host->host_lock, flags);
2099         if ((hba->hibern8_on_idle.state == HIBERN8_EXITED)
2100              || ufshcd_is_link_active(hba)) {
2101                 hba->hibern8_on_idle.state = HIBERN8_EXITED;
2102                 spin_unlock_irqrestore(hba->host->host_lock, flags);
2103                 goto unblock_reqs;
2104         }
2105         spin_unlock_irqrestore(hba->host->host_lock, flags);
2106
2107         /* Exit from hibern8 */
2108         if (ufshcd_is_link_hibern8(hba)) {
2109                 hba->ufs_stats.clk_hold.ctx = H8_EXIT_WORK;
2110                 ufshcd_hold(hba, false);
2111                 ret = ufshcd_uic_hibern8_exit(hba);
2112                 hba->ufs_stats.clk_rel.ctx = H8_EXIT_WORK;
2113                 ufshcd_release(hba, false);
2114                 if (!ret) {
2115                         spin_lock_irqsave(hba->host->host_lock, flags);
2116                         ufshcd_set_link_active(hba);
2117                         hba->hibern8_on_idle.state = HIBERN8_EXITED;
2118                         trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
2119                                 hba->hibern8_on_idle.state);
2120                         spin_unlock_irqrestore(hba->host->host_lock, flags);
2121                 }
2122         }
2123 unblock_reqs:
2124         ufshcd_scsi_unblock_requests(hba);
2125 }
2126
2127 static ssize_t ufshcd_hibern8_on_idle_delay_show(struct device *dev,
2128                 struct device_attribute *attr, char *buf)
2129 {
2130         struct ufs_hba *hba = dev_get_drvdata(dev);
2131
2132         return snprintf(buf, PAGE_SIZE, "%lu\n", hba->hibern8_on_idle.delay_ms);
2133 }
2134
2135 static ssize_t ufshcd_hibern8_on_idle_delay_store(struct device *dev,
2136                 struct device_attribute *attr, const char *buf, size_t count)
2137 {
2138         struct ufs_hba *hba = dev_get_drvdata(dev);
2139         unsigned long flags, value;
2140         bool change = true;
2141
2142         if (kstrtoul(buf, 0, &value))
2143                 return -EINVAL;
2144
2145         spin_lock_irqsave(hba->host->host_lock, flags);
2146         if (hba->hibern8_on_idle.delay_ms == value)
2147                 change = false;
2148
2149         if (value >= hba->clk_gating.delay_ms_pwr_save ||
2150             value >= hba->clk_gating.delay_ms_perf) {
2151                 dev_err(hba->dev, "hibern8_on_idle_delay (%lu) must be less than both clkgate_delay_ms_pwr_save (%lu) and clkgate_delay_ms_perf (%lu)\n",
2152                         value, hba->clk_gating.delay_ms_pwr_save,
2153                         hba->clk_gating.delay_ms_perf);
2154                 spin_unlock_irqrestore(hba->host->host_lock, flags);
2155                 return -EINVAL;
2156         }
2157
2158         hba->hibern8_on_idle.delay_ms = value;
2159         spin_unlock_irqrestore(hba->host->host_lock, flags);
2160
2161         /* Update auto hibern8 timer value if supported */
2162         if (change && ufshcd_is_auto_hibern8_supported(hba) &&
2163             hba->hibern8_on_idle.is_enabled)
2164                 __ufshcd_set_auto_hibern8_timer(hba,
2165                                                 hba->hibern8_on_idle.delay_ms);
2166
2167         return count;
2168 }
2169
2170 static ssize_t ufshcd_hibern8_on_idle_enable_show(struct device *dev,
2171                 struct device_attribute *attr, char *buf)
2172 {
2173         struct ufs_hba *hba = dev_get_drvdata(dev);
2174
2175         return snprintf(buf, PAGE_SIZE, "%d\n",
2176                         hba->hibern8_on_idle.is_enabled);
2177 }
2178
2179 static ssize_t ufshcd_hibern8_on_idle_enable_store(struct device *dev,
2180                 struct device_attribute *attr, const char *buf, size_t count)
2181 {
2182         struct ufs_hba *hba = dev_get_drvdata(dev);
2183         unsigned long flags;
2184         u32 value;
2185
2186         if (kstrtou32(buf, 0, &value))
2187                 return -EINVAL;
2188
2189         value = !!value;
2190         if (value == hba->hibern8_on_idle.is_enabled)
2191                 goto out;
2192
2193         /* Update auto hibern8 timer value if supported */
2194         if (ufshcd_is_auto_hibern8_supported(hba)) {
2195                 __ufshcd_set_auto_hibern8_timer(hba,
2196                         value ? hba->hibern8_on_idle.delay_ms : value);
2197                 goto update;
2198         }
2199
2200         if (value) {
2201                 /*
2202                  * As clock gating work would wait for the hibern8 enter work
2203                  * to finish, clocks would remain on during hibern8 enter work.
2204                  */
2205                 ufshcd_hold(hba, false);
2206                 ufshcd_release_all(hba);
2207         } else {
2208                 spin_lock_irqsave(hba->host->host_lock, flags);
2209                 hba->hibern8_on_idle.active_reqs++;
2210                 spin_unlock_irqrestore(hba->host->host_lock, flags);
2211         }
2212
2213 update:
2214         hba->hibern8_on_idle.is_enabled = value;
2215 out:
2216         return count;
2217 }
2218
2219 static void ufshcd_init_hibern8_on_idle(struct ufs_hba *hba)
2220 {
2221         /* initialize the state variable here */
2222         hba->hibern8_on_idle.state = HIBERN8_EXITED;
2223
2224         if (!ufshcd_is_hibern8_on_idle_allowed(hba) &&
2225             !ufshcd_is_auto_hibern8_supported(hba))
2226                 return;
2227
2228         if (ufshcd_is_auto_hibern8_supported(hba)) {
2229                 hba->hibern8_on_idle.delay_ms = 1;
2230                 hba->hibern8_on_idle.state = AUTO_HIBERN8;
2231                 /*
2232                  * Disable SW hibern8 enter on idle in case
2233                  * auto hibern8 is supported
2234                  */
2235                 hba->caps &= ~UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE;
2236         } else {
2237                 hba->hibern8_on_idle.delay_ms = 10;
2238                 INIT_DELAYED_WORK(&hba->hibern8_on_idle.enter_work,
2239                                   ufshcd_hibern8_enter_work);
2240                 INIT_WORK(&hba->hibern8_on_idle.exit_work,
2241                           ufshcd_hibern8_exit_work);
2242         }
2243
2244         hba->hibern8_on_idle.is_enabled = true;
2245
2246         hba->hibern8_on_idle.delay_attr.show =
2247                                         ufshcd_hibern8_on_idle_delay_show;
2248         hba->hibern8_on_idle.delay_attr.store =
2249                                         ufshcd_hibern8_on_idle_delay_store;
2250         sysfs_attr_init(&hba->hibern8_on_idle.delay_attr.attr);
2251         hba->hibern8_on_idle.delay_attr.attr.name = "hibern8_on_idle_delay_ms";
2252         hba->hibern8_on_idle.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
2253         if (device_create_file(hba->dev, &hba->hibern8_on_idle.delay_attr))
2254                 dev_err(hba->dev, "Failed to create sysfs for hibern8_on_idle_delay\n");
2255
2256         hba->hibern8_on_idle.enable_attr.show =
2257                                         ufshcd_hibern8_on_idle_enable_show;
2258         hba->hibern8_on_idle.enable_attr.store =
2259                                         ufshcd_hibern8_on_idle_enable_store;
2260         sysfs_attr_init(&hba->hibern8_on_idle.enable_attr.attr);
2261         hba->hibern8_on_idle.enable_attr.attr.name = "hibern8_on_idle_enable";
2262         hba->hibern8_on_idle.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
2263         if (device_create_file(hba->dev, &hba->hibern8_on_idle.enable_attr))
2264                 dev_err(hba->dev, "Failed to create sysfs for hibern8_on_idle_enable\n");
2265 }
2266
2267 static void ufshcd_exit_hibern8_on_idle(struct ufs_hba *hba)
2268 {
2269         if (!ufshcd_is_hibern8_on_idle_allowed(hba) &&
2270             !ufshcd_is_auto_hibern8_supported(hba))
2271                 return;
2272         device_remove_file(hba->dev, &hba->hibern8_on_idle.delay_attr);
2273         device_remove_file(hba->dev, &hba->hibern8_on_idle.enable_attr);
2274 }
2275
2276 static void ufshcd_hold_all(struct ufs_hba *hba)
2277 {
2278         ufshcd_hold(hba, false);
2279         ufshcd_hibern8_hold(hba, false);
2280 }
2281
2282 static void ufshcd_release_all(struct ufs_hba *hba)
2283 {
2284         ufshcd_hibern8_release(hba, false);
2285         ufshcd_release(hba, false);
2286 }
2287
2288 /* Must be called with host lock acquired */
2289 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
2290 {
2291         bool queue_resume_work = false;
2292
2293         if (!ufshcd_is_clkscaling_supported(hba))
2294                 return;
2295
2296         if (!hba->clk_scaling.active_reqs++)
2297                 queue_resume_work = true;
2298
2299         if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
2300                 return;
2301
2302         if (queue_resume_work)
2303                 queue_work(hba->clk_scaling.workq,
2304                            &hba->clk_scaling.resume_work);
2305
2306         if (!hba->clk_scaling.window_start_t) {
2307                 hba->clk_scaling.window_start_t = jiffies;
2308                 hba->clk_scaling.tot_busy_t = 0;
2309                 hba->clk_scaling.is_busy_started = false;
2310         }
2311
2312         if (!hba->clk_scaling.is_busy_started) {
2313                 hba->clk_scaling.busy_start_t = ktime_get();
2314                 hba->clk_scaling.is_busy_started = true;
2315         }
2316 }
2317
2318 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
2319 {
2320         struct ufs_clk_scaling *scaling = &hba->clk_scaling;
2321
2322         if (!ufshcd_is_clkscaling_supported(hba))
2323                 return;
2324
2325         if (!hba->outstanding_reqs && scaling->is_busy_started) {
2326                 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
2327                                         scaling->busy_start_t));
2328                 scaling->busy_start_t = ktime_set(0, 0);
2329                 scaling->is_busy_started = false;
2330         }
2331 }
2332
2333 /**
2334  * ufshcd_send_command - Send SCSI or device management commands
2335  * @hba: per adapter instance
2336  * @task_tag: Task tag of the command
2337  */
2338 static inline
2339 int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
2340 {
2341         int ret = 0;
2342
2343         hba->lrb[task_tag].issue_time_stamp = ktime_get();
2344         hba->lrb[task_tag].complete_time_stamp = ktime_set(0, 0);
2345         ufshcd_clk_scaling_start_busy(hba);
2346         __set_bit(task_tag, &hba->outstanding_reqs);
2347         ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
2348         /* Make sure that doorbell is committed immediately */
2349         wmb();
2350         ufshcd_cond_add_cmd_trace(hba, task_tag, "send");
2351         ufshcd_update_tag_stats(hba, task_tag);
2352         return ret;
2353 }
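/*
 * Example of the doorbell handshake above: for task_tag == 3 the write sets
 * bit 3 of UTRLDBR (REG_UTP_TRANSFER_REQ_DOOR_BELL); the controller starts
 * executing the corresponding UTP transfer request descriptor and clears
 * that bit when the request completes. The completion path derives finished
 * requests by comparing the doorbell register with hba->outstanding_reqs.
 */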
2354
2355 /**
2356  * ufshcd_copy_sense_data - Copy sense data in case of check condition
2357  * @lrbp - pointer to local reference block
2358  */
2359 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
2360 {
2361         int len;
2362         if (lrbp->sense_buffer &&
2363             ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
2364                 int len_to_copy;
2365
2366                 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
2367                 len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
2368
2369                 memcpy(lrbp->sense_buffer,
2370                         lrbp->ucd_rsp_ptr->sr.sense_data,
2371                         min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
2372         }
2373 }
2374
2375 /**
2376  * ufshcd_copy_query_response() - Copy the Query Response and the data
2377  * descriptor
2378  * @hba: per adapter instance
2379  * @lrbp - pointer to local reference block
2380  */
2381 static
2382 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2383 {
2384         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2385
2386         memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
2387
2388         /* Get the descriptor */
2389         if (hba->dev_cmd.query.descriptor &&
2390             lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
2391                 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
2392                                 GENERAL_UPIU_REQUEST_SIZE;
2393                 u16 resp_len;
2394                 u16 buf_len;
2395
2396                 /* data segment length */
2397                 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
2398                                                 MASK_QUERY_DATA_SEG_LEN;
2399                 buf_len = be16_to_cpu(
2400                                 hba->dev_cmd.query.request.upiu_req.length);
2401                 if (likely(buf_len >= resp_len)) {
2402                         memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
2403                 } else {
2404                         dev_warn(hba->dev,
2405                                 "%s: Response size is bigger than buffer",
2406                                 __func__);
2407                         return -EINVAL;
2408                 }
2409         }
2410
2411         return 0;
2412 }
2413
2414 /**
2415  * ufshcd_hba_capabilities - Read controller capabilities
2416  * @hba: per adapter instance
2417  */
2418 static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
2419 {
2420         hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
2421
2422         /* nutrs and nutmrs are 0 based values */
2423         hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
2424         hba->nutmrs =
2425         ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
2426
2427         /* disable auto hibern8 */
2428         hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
2429 }
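/*
 * Worked example of the decode above (hypothetical register value): if the
 * transfer request slots field of REG_CONTROLLER_CAPABILITIES reads 31 and
 * the task management slots field reads 7, then nutrs = 31 + 1 = 32 and
 * nutmrs = 7 + 1 = 8, since both fields are 0 based as noted.
 */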
2430
2431 /**
2432  * ufshcd_ready_for_uic_cmd - Check if controller is ready
2433  *                            to accept UIC commands
2434  * @hba: per adapter instance
2435  * Return true on success, else false
2436  */
2437 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2438 {
2439         if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
2440                 return true;
2441         else
2442                 return false;
2443 }
2444
2445 /**
2446  * ufshcd_get_upmcrs - Get the power mode change request status
2447  * @hba: Pointer to adapter instance
2448  *
2449  * This function gets the UPMCRS field of the HCS register.
2450  * Returns the value of the UPMCRS field.
2451  */
2452 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
2453 {
2454         return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
2455 }
2456
2457 /**
2458  * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
2459  * @hba: per adapter instance
2460  * @uic_cmd: UIC command
2461  *
2462  * Mutex must be held.
2463  */
2464 static inline void
2465 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2466 {
2467         WARN_ON(hba->active_uic_cmd);
2468
2469         hba->active_uic_cmd = uic_cmd;
2470
2471         ufshcd_dme_cmd_log(hba, "send", hba->active_uic_cmd->command);
2472         /* Write Args */
2473         ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2474         ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2475         ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
2476
2477         /* Write UIC Cmd */
2478         ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
2479                       REG_UIC_COMMAND);
2480 }
2481
2482 /**
2483  * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
2484  * @hba: per adapter instance
2485  * @uic_cmd: UIC command
2486  *
2487  * Must be called with mutex held.
2488  * Returns 0 only if success.
2489  */
2490 static int
2491 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2492 {
2493         int ret;
2494         unsigned long flags;
2495
2496         if (wait_for_completion_timeout(&uic_cmd->done,
2497                                         msecs_to_jiffies(UIC_CMD_TIMEOUT)))
2498                 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2499         else
2500                 ret = -ETIMEDOUT;
2501
2502         if (ret)
2503                 ufsdbg_set_err_state(hba);
2504
2505         ufshcd_dme_cmd_log(hba, "cmp1", hba->active_uic_cmd->command);
2506
2507         spin_lock_irqsave(hba->host->host_lock, flags);
2508         hba->active_uic_cmd = NULL;
2509         spin_unlock_irqrestore(hba->host->host_lock, flags);
2510
2511         return ret;
2512 }
2513
2514 /**
2515  * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2516  * @hba: per adapter instance
2517  * @uic_cmd: UIC command
2518  * @completion: initialize the completion only if this is set to true
2519  *
2520  * Identical to ufshcd_send_uic_cmd() except that it does not take the
2521  * mutex itself. Must be called with the mutex held and host_lock locked.
2522  * Returns 0 only if success.
2523  */
2524 static int
2525 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2526                       bool completion)
2527 {
2528         if (!ufshcd_ready_for_uic_cmd(hba)) {
2529                 dev_err(hba->dev,
2530                         "Controller not ready to accept UIC commands\n");
2531                 return -EIO;
2532         }
2533
2534         if (completion)
2535                 init_completion(&uic_cmd->done);
2536
2537         ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2538
2539         return 0;
2540 }
2541
2542 /**
2543  * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2544  * @hba: per adapter instance
2545  * @uic_cmd: UIC command
2546  *
2547  * Returns 0 only if success.
2548  */
2549 static int
2550 ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2551 {
2552         int ret;
2553         unsigned long flags;
2554
2555         hba->ufs_stats.clk_hold.ctx = UIC_CMD_SEND;
2556         ufshcd_hold_all(hba);
2557         mutex_lock(&hba->uic_cmd_mutex);
2558         ufshcd_add_delay_before_dme_cmd(hba);
2559
2560         spin_lock_irqsave(hba->host->host_lock, flags);
2561         ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2562         spin_unlock_irqrestore(hba->host->host_lock, flags);
2563         if (!ret)
2564                 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2565
2566         ufshcd_save_tstamp_of_last_dme_cmd(hba);
2567         mutex_unlock(&hba->uic_cmd_mutex);
2568         ufshcd_release_all(hba);
2569         hba->ufs_stats.clk_rel.ctx = UIC_CMD_SEND;
2570
2571         ufsdbg_error_inject_dispatcher(hba,
2572                 ERR_INJECT_UIC, 0, &ret);
2573
2574         return ret;
2575 }
2576
2577 /**
2578  * ufshcd_map_sg - Map scatter-gather list to prdt
2579  * @lrbp - pointer to local reference block
2580  *
2581  * Returns 0 in case of success, non-zero value in case of failure
2582  */
2583 static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
2584 {
2585         struct ufshcd_sg_entry *prd_table;
2586         struct scatterlist *sg;
2587         struct scsi_cmnd *cmd;
2588         int sg_segments;
2589         int i;
2590
2591         cmd = lrbp->cmd;
2592         sg_segments = scsi_dma_map(cmd);
2593         if (sg_segments < 0)
2594                 return sg_segments;
2595
2596         if (sg_segments) {
2597                 lrbp->utr_descriptor_ptr->prd_table_length =
2598                                         cpu_to_le16((u16) (sg_segments));
2599
2600                 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2601
2602                 scsi_for_each_sg(cmd, sg, sg_segments, i) {
2603                         prd_table[i].size  =
2604                                 cpu_to_le32(((u32) sg_dma_len(sg))-1);
2605                         prd_table[i].base_addr =
2606                                 cpu_to_le32(lower_32_bits(sg->dma_address));
2607                         prd_table[i].upper_addr =
2608                                 cpu_to_le32(upper_32_bits(sg->dma_address));
2609                         prd_table[i].reserved = 0;
2610                 }
2611         } else {
2612                 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2613         }
2614
2615         return 0;
2616 }
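/*
 * Note on the PRDT entries filled in above: the size field is stored as
 * "length - 1" (e.g. a 4096 byte scatterlist element is written as 4095),
 * and base_addr/upper_addr hold the low and high 32 bits of the element's
 * DMA address respectively.
 */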
2617
2618 /**
2619  * ufshcd_enable_intr - enable interrupts
2620  * @hba: per adapter instance
2621  * @intrs: interrupt bits
2622  */
2623 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2624 {
2625         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2626
2627         if (hba->ufs_version == UFSHCI_VERSION_10) {
2628                 u32 rw;
2629                 rw = set & INTERRUPT_MASK_RW_VER_10;
2630                 set = rw | ((set ^ intrs) & intrs);
2631         } else {
2632                 set |= intrs;
2633         }
2634
2635         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2636 }
2637
2638 /**
2639  * ufshcd_disable_intr - disable interrupts
2640  * @hba: per adapter instance
2641  * @intrs: interrupt bits
2642  */
2643 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2644 {
2645         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2646
2647         if (hba->ufs_version == UFSHCI_VERSION_10) {
2648                 u32 rw;
2649                 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2650                         ~(intrs & INTERRUPT_MASK_RW_VER_10);
2651                 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2652
2653         } else {
2654                 set &= ~intrs;
2655         }
2656
2657         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2658 }
2659
2660 static int ufshcd_prepare_crypto_utrd(struct ufs_hba *hba,
2661                 struct ufshcd_lrb *lrbp)
2662 {
2663         struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2664         u8 cc_index = 0;
2665         bool enable = false;
2666         u64 dun = 0;
2667         int ret;
2668
2669         /*
2670          * Call vendor specific code to get crypto info for this request:
2671          * enable, crypto config. index, DUN.
2672          * If bypass is set, don't bother setting the other fields.
2673          */
2674         ret = ufshcd_vops_crypto_req_setup(hba, lrbp, &cc_index, &enable, &dun);
2675         if (ret) {
2676                 if (ret != -EAGAIN) {
2677                         dev_err(hba->dev,
2678                                 "%s: failed to setup crypto request (%d)\n",
2679                                 __func__, ret);
2680                 }
2681
2682                 return ret;
2683         }
2684
2685         if (!enable)
2686                 goto out;
2687
2688         req_desc->header.dword_0 |= cc_index | UTRD_CRYPTO_ENABLE;
2689         req_desc->header.dword_1 = (u32)(dun & 0xFFFFFFFF);
2690         req_desc->header.dword_3 = (u32)((dun >> 32) & 0xFFFFFFFF);
2691 out:
2692         return 0;
2693 }
2694
2695 /**
2696  * ufshcd_prepare_req_desc_hdr() - Fills the request's header
2697  * descriptor according to the request
2698  * @hba: per adapter instance
2699  * @lrbp: pointer to local reference block
2700  * @upiu_flags: flags required in the header
2701  * @cmd_dir: request's data direction
2702  */
2703 static int ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba,
2704         struct ufshcd_lrb *lrbp, u32 *upiu_flags,
2705         enum dma_data_direction cmd_dir)
2706 {
2707         struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2708         u32 data_direction;
2709         u32 dword_0;
2710
2711         if (cmd_dir == DMA_FROM_DEVICE) {
2712                 data_direction = UTP_DEVICE_TO_HOST;
2713                 *upiu_flags = UPIU_CMD_FLAGS_READ;
2714         } else if (cmd_dir == DMA_TO_DEVICE) {
2715                 data_direction = UTP_HOST_TO_DEVICE;
2716                 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2717         } else {
2718                 data_direction = UTP_NO_DATA_TRANSFER;
2719                 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2720         }
2721
2722         dword_0 = data_direction | (lrbp->command_type
2723                                 << UPIU_COMMAND_TYPE_OFFSET);
2724         if (lrbp->intr_cmd)
2725                 dword_0 |= UTP_REQ_DESC_INT_CMD;
2726
2727         /* Transfer request descriptor header fields */
2728         req_desc->header.dword_0 = cpu_to_le32(dword_0);
2729         /* dword_1 is reserved, hence it is set to 0 */
2730         req_desc->header.dword_1 = 0;
2731         /*
2732          * Assign an invalid value to the command status. The controller
2733          * updates OCS with the actual command status upon command
2734          * completion.
2735          */
2736         req_desc->header.dword_2 =
2737                 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2738         /* dword_3 is reserved, hence it is set to 0 */
2739         req_desc->header.dword_3 = 0;
2740
2741         req_desc->prd_table_length = 0;
2742
2743         if (ufshcd_is_crypto_supported(hba))
2744                 return ufshcd_prepare_crypto_utrd(hba, lrbp);
2745
2746         return 0;
2747 }
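/*
 * Example of the mapping above: a SCSI READ mapped with cmd_dir ==
 * DMA_FROM_DEVICE gets data_direction == UTP_DEVICE_TO_HOST and upiu_flags
 * == UPIU_CMD_FLAGS_READ, while device management commands are issued with
 * DMA_NONE and therefore take the UTP_NO_DATA_TRANSFER branch.
 */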
2748
2749 /**
2750  * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc
2751  * for scsi commands
2752  * @lrbp - local reference block pointer
2753  * @upiu_flags - flags
2754  */
2755 static
2756 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
2757 {
2758         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2759         unsigned short cdb_len;
2760
2761         /* command descriptor fields */
2762         ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2763                                 UPIU_TRANSACTION_COMMAND, upiu_flags,
2764                                 lrbp->lun, lrbp->task_tag);
2765         ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2766                                 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2767
2768         /* Total EHS length and Data segment length will be zero */
2769         ucd_req_ptr->header.dword_2 = 0;
2770
2771         ucd_req_ptr->sc.exp_data_transfer_len =
2772                 cpu_to_be32(lrbp->cmd->sdb.length);
2773
2774         cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
2775         memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
2776         if (cdb_len < MAX_CDB_SIZE)
2777                 memset(ucd_req_ptr->sc.cdb + cdb_len, 0,
2778                        (MAX_CDB_SIZE - cdb_len));
2779         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2780 }
2781
2782 /**
2783  * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc
2784  * for query requests
2785  * @hba: UFS hba
2786  * @lrbp: local reference block pointer
2787  * @upiu_flags: flags
2788  */
2789 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2790                                 struct ufshcd_lrb *lrbp, u32 upiu_flags)
2791 {
2792         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2793         struct ufs_query *query = &hba->dev_cmd.query;
2794         u16 len = be16_to_cpu(query->request.upiu_req.length);
2795         u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
2796
2797         /* Query request header */
2798         ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2799                         UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2800                         lrbp->lun, lrbp->task_tag);
2801         ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2802                         0, query->request.query_func, 0, 0);
2803
2804         /* Data segment length */
2805         ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD(
2806                         0, 0, len >> 8, (u8)len);
2807
2808         /* Copy the Query Request buffer as is */
2809         memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2810                         QUERY_OSF_SIZE);
2811
2812         /* Copy the Descriptor */
2813         if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2814                 memcpy(descp, query->descriptor, len);
2815
2816         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2817 }
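/*
 * Example of the header fields set above: the 16-bit data segment length is
 * split into a high byte (len >> 8) and a low byte ((u8)len), so a query
 * with a 259 byte (0x0103) data segment is carried as 0x01 and 0x03 in the
 * corresponding bytes of dword_2.
 */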
2818
2819 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2820 {
2821         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2822
2823         memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2824
2825         /* command descriptor fields */
2826         ucd_req_ptr->header.dword_0 =
2827                 UPIU_HEADER_DWORD(
2828                         UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
2829         /* clear rest of the fields of basic header */
2830         ucd_req_ptr->header.dword_1 = 0;
2831         ucd_req_ptr->header.dword_2 = 0;
2832
2833         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2834 }
2835
2836 /**
2837  * ufshcd_compose_upiu - form a UFS Protocol Information Unit (UPIU)
2838  * @hba - per adapter instance
2839  * @lrbp - pointer to local reference block
2840  */
2841 static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2842 {
2843         u32 upiu_flags;
2844         int ret = 0;
2845
2846         switch (lrbp->command_type) {
2847         case UTP_CMD_TYPE_SCSI:
2848                 if (likely(lrbp->cmd)) {
2849                         ret = ufshcd_prepare_req_desc_hdr(hba, lrbp,
2850                                 &upiu_flags, lrbp->cmd->sc_data_direction);
2851                         ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2852                 } else {
2853                         ret = -EINVAL;
2854                 }
2855                 break;
2856         case UTP_CMD_TYPE_DEV_MANAGE:
2857                 ret = ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags,
2858                         DMA_NONE);
2859                 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2860                         ufshcd_prepare_utp_query_req_upiu(
2861                                         hba, lrbp, upiu_flags);
2862                 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2863                         ufshcd_prepare_utp_nop_upiu(lrbp);
2864                 else
2865                         ret = -EINVAL;
2866                 break;
2867         case UTP_CMD_TYPE_UFS:
2868                 /* For UFS native command implementation */
2869                 ret = -ENOTSUPP;
2870                 dev_err(hba->dev, "%s: UFS native commands are not supported\n",
2871                         __func__);
2872                 break;
2873         default:
2874                 ret = -ENOTSUPP;
2875                 dev_err(hba->dev, "%s: unknown command type: 0x%x\n",
2876                                 __func__, lrbp->command_type);
2877                 break;
2878         } /* end of switch */
2879
2880         return ret;
2881 }
2882
2883 /*
2884  * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
2885  * @scsi_lun: scsi LUN id
2886  *
2887  * Returns UPIU LUN id
2888  */
2889 static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
2890 {
2891         if (scsi_is_wlun(scsi_lun))
2892                 return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
2893                         | UFS_UPIU_WLUN_ID;
2894         else
2895                 return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
2896 }
2897
2898 /**
2899  * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2900  * @upiu_wlun_id: UPIU W-LUN id
2901  *
2902  * Returns SCSI W-LUN id
2903  */
2904 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2905 {
2906         return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2907 }
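/*
 * Illustrative sketch, not part of the original driver: the two helpers
 * above translate between the SCSI mid-layer LUN encoding and the 8-bit
 * UPIU LUN field. The hypothetical helper below round-trips the UFS Device
 * well known LUN (UFS_UPIU_UFS_DEVICE_WLUN, from ufs.h) through both
 * conversions, mirroring how ufshcd_get_read_lock() below compares LUNs.
 */
static void __maybe_unused ufshcd_example_wlun_round_trip(void)
{
        /* SCSI-layer encoding of the UFS Device well known LU */
        u16 scsi_wlun = ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);

        /* mapping the SCSI W-LUN back yields the original UPIU W-LUN id */
        WARN_ON(ufshcd_scsi_to_upiu_lun(scsi_wlun) != UFS_UPIU_UFS_DEVICE_WLUN);
}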
2908
2909 /**
2910  * ufshcd_get_write_lock - synchronize between shutdown, scaling &
2911  * arrival of requests
2912  * @hba: ufs host
2913  *
2914  * The lock is predominantly held by the shutdown context, thus ensuring
2915  * that no requests from any other context may sneak through.
2916  */
2917 static inline void ufshcd_get_write_lock(struct ufs_hba *hba)
2918 {
2919         down_write(&hba->lock);
2920 }
2921
2922 /**
2923  * ufshcd_get_read_lock - synchronize between shutdown, scaling &
2924  * arrival of requests
2925  * @hba: ufs host
2926  *
2927  * Returns 1 if the read lock was acquired, 0 if the request may proceed
2928  * without taking the lock, and < 0 on contention.
2929  *
2930  * After shutdown is initiated, only requests directed to the well known
2931  * device LUN are allowed; scaling, request issue and shutdown stay in sync.
2932  */
2933 static int ufshcd_get_read_lock(struct ufs_hba *hba, u64 lun)
2934 {
2935         int err = 0;
2936
2937         err = down_read_trylock(&hba->lock);
2938         if (err > 0)
2939                 goto out;
2940         /* let requests for well known device lun to go through */
2941         if (ufshcd_scsi_to_upiu_lun(lun) == UFS_UPIU_UFS_DEVICE_WLUN)
2942                 return 0;
2943         else if (!ufshcd_is_shutdown_ongoing(hba))
2944                 return -EAGAIN;
2945         else
2946                 return -EPERM;
2947
2948 out:
2949         return err;
2950 }
2951
2952 /**
2953  * ufshcd_put_read_lock - synchronize between shutdown, scaling &
2954  * arrival of requests
2955  * @hba: ufs host
2956  *
2957  * Returns none
2958  */
2959 static inline void ufshcd_put_read_lock(struct ufs_hba *hba)
2960 {
2961         up_read(&hba->lock);
2962 }
2963
2964 /**
2965  * ufshcd_queuecommand - main entry point for SCSI requests
2966  * @host: SCSI host pointer
2967  * @cmd: command from SCSI Midlayer
2968  *
2969  * Returns 0 for success, non-zero in case of failure
2970  */
2971 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2972 {
2973         struct ufshcd_lrb *lrbp;
2974         struct ufs_hba *hba;
2975         unsigned long flags;
2976         int tag;
2977         int err = 0;
2978         bool has_read_lock = false;
2979
2980         hba = shost_priv(host);
2981
2982         if (!cmd || !cmd->request || !hba)
2983                 return -EINVAL;
2984
2985         tag = cmd->request->tag;
2986         if (!ufshcd_valid_tag(hba, tag)) {
2987                 dev_err(hba->dev,
2988                         "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
2989                         __func__, tag, cmd, cmd->request);
2990                 BUG();
2991         }
2992
2993         err = ufshcd_get_read_lock(hba, cmd->device->lun);
2994         if (unlikely(err < 0)) {
2995                 if (err == -EPERM) {
2996                         set_host_byte(cmd, DID_ERROR);
2997                         cmd->scsi_done(cmd);
2998                         return 0;
2999                 }
3000                 if (err == -EAGAIN)
3001                         return SCSI_MLQUEUE_HOST_BUSY;
3002         } else if (err == 1) {
3003                 has_read_lock = true;
3004         }
3005
3006         spin_lock_irqsave(hba->host->host_lock, flags);
3007
3008         /* if error handling is in progress, return host busy */
3009         if (ufshcd_eh_in_progress(hba)) {
3010                 err = SCSI_MLQUEUE_HOST_BUSY;
3011                 goto out_unlock;
3012         }
3013
3014         switch (hba->ufshcd_state) {
3015         case UFSHCD_STATE_OPERATIONAL:
3016                 break;
3017         case UFSHCD_STATE_RESET:
3018                 err = SCSI_MLQUEUE_HOST_BUSY;
3019                 goto out_unlock;
3020         case UFSHCD_STATE_ERROR:
3021                 set_host_byte(cmd, DID_ERROR);
3022                 cmd->scsi_done(cmd);
3023                 goto out_unlock;
3024         default:
3025                 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
3026                                 __func__, hba->ufshcd_state);
3027                 set_host_byte(cmd, DID_BAD_TARGET);
3028                 cmd->scsi_done(cmd);
3029                 goto out_unlock;
3030         }
3031         spin_unlock_irqrestore(hba->host->host_lock, flags);
3032
3033         hba->req_abort_count = 0;
3034
3035         /* acquire the tag to make sure device cmds don't use it */
3036         if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
3037                 /*
3038                  * Dev manage command in progress, requeue the command.
3039                  * Requeuing the command helps in cases where the request *may*
3040                  * find a different tag instead of waiting for dev manage command
3041                  * completion.
3042                  */
3043                 err = SCSI_MLQUEUE_HOST_BUSY;
3044                 goto out;
3045         }
3046
3047         hba->ufs_stats.clk_hold.ctx = QUEUE_CMD;
3048         err = ufshcd_hold(hba, true);
3049         if (err) {
3050                 err = SCSI_MLQUEUE_HOST_BUSY;
3051                 clear_bit_unlock(tag, &hba->lrb_in_use);
3052                 goto out;
3053         }
3054         if (ufshcd_is_clkgating_allowed(hba))
3055                 WARN_ON(hba->clk_gating.state != CLKS_ON);
3056
3057         err = ufshcd_hibern8_hold(hba, true);
3058         if (err) {
3059                 clear_bit_unlock(tag, &hba->lrb_in_use);
3060                 err = SCSI_MLQUEUE_HOST_BUSY;
3061                 hba->ufs_stats.clk_rel.ctx = QUEUE_CMD;
3062                 ufshcd_release(hba, true);
3063                 goto out;
3064         }
3065         if (ufshcd_is_hibern8_on_idle_allowed(hba))
3066                 WARN_ON(hba->hibern8_on_idle.state != HIBERN8_EXITED);
3067
3068         /* Vote PM QoS for the request */
3069         ufshcd_vops_pm_qos_req_start(hba, cmd->request);
3070
3071         /* IO svc time latency histogram */
3072         if (hba->latency_hist_enabled &&
3073             (cmd->request->cmd_type == REQ_TYPE_FS)) {
3074                 cmd->request->lat_hist_io_start = ktime_get();
3075                 cmd->request->lat_hist_enabled = 1;
3076         } else {
3077                 cmd->request->lat_hist_enabled = 0;
3078         }
3079
3080         WARN_ON(hba->clk_gating.state != CLKS_ON);
3081
3082         lrbp = &hba->lrb[tag];
3083
3084         WARN_ON(lrbp->cmd);
3085         lrbp->cmd = cmd;
3086         lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
3087         lrbp->sense_buffer = cmd->sense_buffer;
3088         lrbp->task_tag = tag;
3089         lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
3090         lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
3091         lrbp->command_type = UTP_CMD_TYPE_SCSI;
3092         lrbp->req_abort_skip = false;
3093
3094         /* form UPIU before issuing the command */
3095         err = ufshcd_compose_upiu(hba, lrbp);
3096         if (err) {
3097                 if (err != -EAGAIN)
3098                         dev_err(hba->dev,
3099                                 "%s: failed to compose upiu %d\n",
3100                                 __func__, err);
3101
3102                 lrbp->cmd = NULL;
3103                 clear_bit_unlock(tag, &hba->lrb_in_use);
3104                 ufshcd_release_all(hba);
3105                 ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
3106                 goto out;
3107         }
3108
3109         err = ufshcd_map_sg(lrbp);
3110         if (err) {
3111                 lrbp->cmd = NULL;
3112                 clear_bit_unlock(tag, &hba->lrb_in_use);
3113                 ufshcd_release_all(hba);
3114                 ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
3115                 goto out;
3116         }
3117
3118         err = ufshcd_vops_crypto_engine_cfg_start(hba, tag);
3119         if (err) {
3120                 if (err != -EAGAIN)
3121                         dev_err(hba->dev,
3122                                 "%s: failed to configure crypto engine %d\n",
3123                                 __func__, err);
3124
3125                 scsi_dma_unmap(lrbp->cmd);
3126                 lrbp->cmd = NULL;
3127                 clear_bit_unlock(tag, &hba->lrb_in_use);
3128                 ufshcd_release_all(hba);
3129                 ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
3130
3131                 goto out;
3132         }
3133
3134         /* Make sure descriptors are ready before ringing the doorbell */
3135         wmb();
3136         /* issue command to the controller */
3137         spin_lock_irqsave(hba->host->host_lock, flags);
3138
3139         err = ufshcd_send_command(hba, tag);
3140         if (err) {
3141                 spin_unlock_irqrestore(hba->host->host_lock, flags);
3142                 scsi_dma_unmap(lrbp->cmd);
3143                 lrbp->cmd = NULL;
3144                 clear_bit_unlock(tag, &hba->lrb_in_use);
3145                 ufshcd_release_all(hba);
3146                 ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
3147                 ufshcd_vops_crypto_engine_cfg_end(hba, lrbp, cmd->request);
3148                 dev_err(hba->dev, "%s: failed sending command, %d\n",
3149                                                         __func__, err);
3150                 err = DID_ERROR;
3151                 goto out;
3152         }
3153
3154 out_unlock:
3155         spin_unlock_irqrestore(hba->host->host_lock, flags);
3156 out:
3157         if (has_read_lock)
3158                 ufshcd_put_read_lock(hba);
3159         return err;
3160 }
3161
3162 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
3163                 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
3164 {
3165         lrbp->cmd = NULL;
3166         lrbp->sense_bufflen = 0;
3167         lrbp->sense_buffer = NULL;
3168         lrbp->task_tag = tag;
3169         lrbp->lun = 0; /* device management cmd is not specific to any LUN */
3170         lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
3171         lrbp->intr_cmd = true; /* No interrupt aggregation */
3172         hba->dev_cmd.type = cmd_type;
3173
3174         return ufshcd_compose_upiu(hba, lrbp);
3175 }
3176
3177 static int
3178 ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
3179 {
3180         int err = 0;
3181         unsigned long flags;
3182         u32 mask = 1 << tag;
3183
3184         /* clear outstanding transaction before retry */
3185         spin_lock_irqsave(hba->host->host_lock, flags);
3186         ufshcd_utrl_clear(hba, tag);
3187         spin_unlock_irqrestore(hba->host->host_lock, flags);
3188
3189         /*
3190          * wait for h/w to clear the corresponding bit in the doorbell.
3191          * max. wait is 1 sec.
3192          */
3193         err = ufshcd_wait_for_register(hba,
3194                         REG_UTP_TRANSFER_REQ_DOOR_BELL,
3195                         mask, ~mask, 1000, 1000, true);
3196
3197         return err;
3198 }
3199
3200 static int
3201 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
3202 {
3203         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
3204
3205         /* Get the UPIU response */
3206         query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
3207                                 UPIU_RSP_CODE_OFFSET;
3208         return query_res->response;
3209 }
3210
3211 /**
3212  * ufshcd_dev_cmd_completion() - handles device management command responses
3213  * @hba: per adapter instance
3214  * @lrbp: pointer to local reference block
3215  */
3216 static int
3217 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
3218 {
3219         int resp;
3220         int err = 0;
3221
3222         hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
3223         resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
3224
3225         switch (resp) {
3226         case UPIU_TRANSACTION_NOP_IN:
3227                 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
3228                         err = -EINVAL;
3229                         dev_err(hba->dev, "%s: unexpected response %x\n",
3230                                         __func__, resp);
3231                 }
3232                 break;
3233         case UPIU_TRANSACTION_QUERY_RSP:
3234                 err = ufshcd_check_query_response(hba, lrbp);
3235                 if (!err)
3236                         err = ufshcd_copy_query_response(hba, lrbp);
3237                 break;
3238         case UPIU_TRANSACTION_REJECT_UPIU:
3239                 /* TODO: handle Reject UPIU Response */
3240                 err = -EPERM;
3241                 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
3242                                 __func__);
3243                 break;
3244         default:
3245                 err = -EINVAL;
3246                 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
3247                                 __func__, resp);
3248                 break;
3249         }
3250
3251         return err;
3252 }
3253
3254 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
3255                 struct ufshcd_lrb *lrbp, int max_timeout)
3256 {
3257         int err = 0;
3258         unsigned long time_left;
3259         unsigned long flags;
3260
3261         time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
3262                         msecs_to_jiffies(max_timeout));
3263
3264         spin_lock_irqsave(hba->host->host_lock, flags);
3265         hba->dev_cmd.complete = NULL;
3266         if (likely(time_left)) {
3267                 err = ufshcd_get_tr_ocs(lrbp);
3268                 if (!err)
3269                         err = ufshcd_dev_cmd_completion(hba, lrbp);
3270         }
3271         spin_unlock_irqrestore(hba->host->host_lock, flags);
3272
3273         if (!time_left) {
3274                 err = -ETIMEDOUT;
3275                 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
3276                         __func__, lrbp->task_tag);
3277                 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
3278                         /* successfully cleared the command, retry if needed */
3279                         err = -EAGAIN;
3280                 /*
3281                  * in case of an error, after clearing the doorbell,
3282                  * we also need to clear the outstanding_request
3283                  * field in hba
3284                  */
3285                 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
3286         }
3287
3288         if (err)
3289                 ufsdbg_set_err_state(hba);
3290
3291         return err;
3292 }
3293
3294 /**
3295  * ufshcd_get_dev_cmd_tag - Get device management command tag
3296  * @hba: per-adapter instance
3297  * @tag_out: pointer to variable with available slot value
3298  *
3299  * Get a free slot and lock it until device management command
3300  * completes.
3301  *
3302  * Returns false if a free slot is unavailable for locking, else
3303  * returns true with the tag value in @tag_out.
3304  */
3305 static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
3306 {
3307         int tag;
3308         bool ret = false;
3309         unsigned long tmp;
3310
3311         if (!tag_out)
3312                 goto out;
3313
3314         do {
3315                 tmp = ~hba->lrb_in_use;
3316                 tag = find_last_bit(&tmp, hba->nutrs);
3317                 if (tag >= hba->nutrs)
3318                         goto out;
3319         } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
3320
3321         *tag_out = tag;
3322         ret = true;
3323 out:
3324         return ret;
3325 }
3326
3327 static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
3328 {
3329         clear_bit_unlock(tag, &hba->lrb_in_use);
3330 }
3331
3332 /**
3333  * ufshcd_exec_dev_cmd - API for sending device management requests
3334  * @hba - UFS hba
3335  * @cmd_type - specifies the type (NOP, Query...)
3336  * @timeout - timeout in milliseconds
3337  *
3338  * NOTE: Since there is only one available tag for device management commands,
3339  * it is expected you hold the hba->dev_cmd.lock mutex.
3340  */
3341 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
3342                 enum dev_cmd_type cmd_type, int timeout)
3343 {
3344         struct ufshcd_lrb *lrbp;
3345         int err;
3346         int tag;
3347         struct completion wait;
3348         unsigned long flags;
3349
3350         /*
3351          * May get invoked from shutdown and IOCTL contexts.
3352          * In shutdown context, it comes in with lock acquired.
3353          * In error recovery context, it may come with lock acquired.
3354          */
3355
3356         if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
3357                 down_read(&hba->lock);
3358
3359         /*
3360          * Get free slot, sleep if slots are unavailable.
3361          * Even though we use wait_event() which sleeps indefinitely,
3362          * the maximum wait time is bounded by SCSI request timeout.
3363          */
3364         wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
3365
3366         init_completion(&wait);
3367         lrbp = &hba->lrb[tag];
3368         WARN_ON(lrbp->cmd);
3369         err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
3370         if (unlikely(err))
3371                 goto out_put_tag;
3372
3373         hba->dev_cmd.complete = &wait;
3374
3375         /* Make sure descriptors are ready before ringing the doorbell */
3376         wmb();
3377         spin_lock_irqsave(hba->host->host_lock, flags);
3378         err = ufshcd_send_command(hba, tag);
3379         spin_unlock_irqrestore(hba->host->host_lock, flags);
3380         if (err) {
3381                 dev_err(hba->dev, "%s: failed sending command, %d\n",
3382                                                         __func__, err);
3383                 goto out_put_tag;
3384         }
3385         err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
3386
3387 out_put_tag:
3388         ufshcd_put_dev_cmd_tag(hba, tag);
3389         wake_up(&hba->dev_cmd.tag_wq);
3390         if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
3391                 up_read(&hba->lock);
3392         return err;
3393 }
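/*
 * Illustrative sketch, not part of the original driver: callers of
 * ufshcd_exec_dev_cmd() are expected to serialize on hba->dev_cmd.lock,
 * exactly as the query helpers below do. The helper name is hypothetical
 * and the query timeout is reused here purely for illustration.
 */
static int __maybe_unused ufshcd_example_send_nop(struct ufs_hba *hba)
{
        int err;

        ufshcd_hold_all(hba);
        mutex_lock(&hba->dev_cmd.lock);
        err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, QUERY_REQ_TIMEOUT);
        mutex_unlock(&hba->dev_cmd.lock);
        ufshcd_release_all(hba);

        return err;
}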
3394
3395 /**
3396  * ufshcd_init_query() - init the query response and request parameters
3397  * @hba: per-adapter instance
3398  * @request: address of the request pointer to be initialized
3399  * @response: address of the response pointer to be initialized
3400  * @opcode: operation to perform
3401  * @idn: flag idn to access
3402  * @index: LU number to access
3403  * @selector: query/flag/descriptor further identification
3404  */
3405 static inline void ufshcd_init_query(struct ufs_hba *hba,
3406                 struct ufs_query_req **request, struct ufs_query_res **response,
3407                 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
3408 {
3409         int idn_t = (int)idn;
3410
3411         ufsdbg_error_inject_dispatcher(hba,
3412                 ERR_INJECT_QUERY, idn_t, (int *)&idn_t);
3413         idn = idn_t;
3414
3415         *request = &hba->dev_cmd.query.request;
3416         *response = &hba->dev_cmd.query.response;
3417         memset(*request, 0, sizeof(struct ufs_query_req));
3418         memset(*response, 0, sizeof(struct ufs_query_res));
3419         (*request)->upiu_req.opcode = opcode;
3420         (*request)->upiu_req.idn = idn;
3421         (*request)->upiu_req.index = index;
3422         (*request)->upiu_req.selector = selector;
3423
3424         ufshcd_update_query_stats(hba, opcode, idn);
3425 }
3426
3427 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
3428         enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
3429 {
3430         int ret;
3431         int retries;
3432
3433         for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
3434                 ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
3435                 if (ret)
3436                         dev_dbg(hba->dev,
3437                                 "%s: failed with error %d, retries %d\n",
3438                                 __func__, ret, retries);
3439                 else
3440                         break;
3441         }
3442
3443         if (ret)
3444                 dev_err(hba->dev,
3445                         "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
3446                         __func__, opcode, idn, ret, retries);
3447         return ret;
3448 }
3449
3450 /**
3451  * ufshcd_query_flag() - API function for sending flag query requests
3452  * @hba: per-adapter instance
3453  * @opcode: flag query to perform
3454  * @idn: flag idn to access
3455  * @flag_res: the flag value after the query request completes
3456  *
3457  * Returns 0 for success, non-zero in case of failure
3458  */
3459 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
3460                         enum flag_idn idn, bool *flag_res)
3461 {
3462         struct ufs_query_req *request = NULL;
3463         struct ufs_query_res *response = NULL;
3464         int err, index = 0, selector = 0;
3465         int timeout = QUERY_REQ_TIMEOUT;
3466
3467         BUG_ON(!hba);
3468
3469         ufshcd_hold_all(hba);
3470         mutex_lock(&hba->dev_cmd.lock);
3471         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3472                         selector);
3473
3474         switch (opcode) {
3475         case UPIU_QUERY_OPCODE_SET_FLAG:
3476         case UPIU_QUERY_OPCODE_CLEAR_FLAG:
3477         case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
3478                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3479                 break;
3480         case UPIU_QUERY_OPCODE_READ_FLAG:
3481                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3482                 if (!flag_res) {
3483                         /* No dummy reads */
3484                         dev_err(hba->dev, "%s: Invalid argument for read request\n",
3485                                         __func__);
3486                         err = -EINVAL;
3487                         goto out_unlock;
3488                 }
3489                 break;
3490         default:
3491                 dev_err(hba->dev,
3492                         "%s: Expected query flag opcode but got = %d\n",
3493                         __func__, opcode);
3494                 err = -EINVAL;
3495                 goto out_unlock;
3496         }
3497
3498         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
3499
3500         if (err) {
3501                 dev_err(hba->dev,
3502                         "%s: Sending flag query for idn %d failed, err = %d\n",
3503                         __func__, request->upiu_req.idn, err);
3504                 goto out_unlock;
3505         }
3506
3507         if (flag_res)
3508                 *flag_res = (be32_to_cpu(response->upiu_res.value) &
3509                                 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
3510
3511 out_unlock:
3512         mutex_unlock(&hba->dev_cmd.lock);
3513         ufshcd_release_all(hba);
3514         return err;
3515 }
3516 EXPORT_SYMBOL(ufshcd_query_flag);
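/*
 * Illustrative sketch, not part of the original driver: reading a boolean
 * device flag through the retry wrapper above. The helper name is
 * hypothetical; the flag idn is supplied by the caller.
 */
static int __maybe_unused ufshcd_example_read_flag(struct ufs_hba *hba,
                                                   enum flag_idn idn)
{
        bool flag_res = false;
        int err;

        err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
                                      idn, &flag_res);
        if (!err)
                dev_dbg(hba->dev, "%s: flag idn %d is %d\n",
                        __func__, idn, flag_res);
        return err;
}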
3517
3518 /**
3519  * ufshcd_query_attr - API function for sending attribute requests
3520  * @hba: per-adapter instance
3521  * @opcode: attribute opcode
3522  * @idn: attribute idn to access
3523  * @index: index field
3524  * @selector: selector field
3525  * @attr_val: the attribute value after the query request completes
3526  *
3527  * Returns 0 for success, non-zero in case of failure
3528 */
3529 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
3530                         enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
3531 {
3532         struct ufs_query_req *request = NULL;
3533         struct ufs_query_res *response = NULL;
3534         int err;
3535
3536         BUG_ON(!hba);
3537
3538         ufshcd_hold_all(hba);
3539         if (!attr_val) {
3540                 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
3541                                 __func__, opcode);
3542                 err = -EINVAL;
3543                 goto out;
3544         }
3545
3546         mutex_lock(&hba->dev_cmd.lock);
3547         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3548                         selector);
3549
3550         switch (opcode) {
3551         case UPIU_QUERY_OPCODE_WRITE_ATTR:
3552                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3553                 request->upiu_req.value = cpu_to_be32(*attr_val);
3554                 break;
3555         case UPIU_QUERY_OPCODE_READ_ATTR:
3556                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3557                 break;
3558         default:
3559                 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
3560                                 __func__, opcode);
3561                 err = -EINVAL;
3562                 goto out_unlock;
3563         }
3564
3565         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3566
3567         if (err) {
3568                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3569                                 __func__, opcode,
3570                                 request->upiu_req.idn, index, err);
3571                 goto out_unlock;
3572         }
3573
3574         *attr_val = be32_to_cpu(response->upiu_res.value);
3575
3576 out_unlock:
3577         mutex_unlock(&hba->dev_cmd.lock);
3578 out:
3579         ufshcd_release_all(hba);
3580         return err;
3581 }
3582 EXPORT_SYMBOL(ufshcd_query_attr);
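/*
 * Illustrative sketch, not part of the original driver: a read-modify-write
 * of a device attribute using ufshcd_query_attr(); index and selector are
 * typically 0 for device level attributes. The helper name and the
 * "increment" modification are hypothetical.
 */
static int __maybe_unused ufshcd_example_bump_attr(struct ufs_hba *hba,
                                                   enum attr_idn idn)
{
        u32 val = 0;
        int err;

        err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, idn,
                                0, 0, &val);
        if (err)
                return err;

        val++;  /* purely illustrative modification */
        return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, idn,
                                 0, 0, &val);
}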
3583
3584 /**
3585  * ufshcd_query_attr_retry() - API function for sending query
3586  * attribute with retries
3587  * @hba: per-adapter instance
3588  * @opcode: attribute opcode
3589  * @idn: attribute idn to access
3590  * @index: index field
3591  * @selector: selector field
3592  * @attr_val: the attribute value after the query request
3593  * completes
3594  *
3595  * Returns 0 for success, non-zero in case of failure
3596 */
3597 static int ufshcd_query_attr_retry(struct ufs_hba *hba,
3598         enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
3599         u32 *attr_val)
3600 {
3601         int ret = 0;
3602         u32 retries;
3603
3604         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3605                 ret = ufshcd_query_attr(hba, opcode, idn, index,
3606                                                 selector, attr_val);
3607                 if (ret)
3608                         dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
3609                                 __func__, ret, retries);
3610                 else
3611                         break;
3612         }
3613
3614         if (ret)
3615                 dev_err(hba->dev,
3616                         "%s: query attribute, idn %d, failed with error %d after %d retries\n",
3617                         __func__, idn, ret, retries);
3618         return ret;
3619 }
3620
3621 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
3622                         enum query_opcode opcode, enum desc_idn idn, u8 index,
3623                         u8 selector, u8 *desc_buf, int *buf_len)
3624 {
3625         struct ufs_query_req *request = NULL;
3626         struct ufs_query_res *response = NULL;
3627         int err;
3628
3629         BUG_ON(!hba);
3630
3631         ufshcd_hold_all(hba);
3632         if (!desc_buf) {
3633                 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
3634                                 __func__, opcode);
3635                 err = -EINVAL;
3636                 goto out;
3637         }
3638
3639         if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
3640                 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
3641                                 __func__, *buf_len);
3642                 err = -EINVAL;
3643                 goto out;
3644         }
3645
3646         mutex_lock(&hba->dev_cmd.lock);
3647         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3648                         selector);
3649         hba->dev_cmd.query.descriptor = desc_buf;
3650         request->upiu_req.length = cpu_to_be16(*buf_len);
3651
3652         switch (opcode) {
3653         case UPIU_QUERY_OPCODE_WRITE_DESC:
3654                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3655                 break;
3656         case UPIU_QUERY_OPCODE_READ_DESC:
3657                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3658                 break;
3659         default:
3660                 dev_err(hba->dev,
3661                                 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3662                                 __func__, opcode);
3663                 err = -EINVAL;
3664                 goto out_unlock;
3665         }
3666
3667         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3668
3669         if (err) {
3670                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3671                                 __func__, opcode,
3672                                 request->upiu_req.idn, index, err);
3673                 goto out_unlock;
3674         }
3675
3676         *buf_len = be16_to_cpu(response->upiu_res.length);
3677
3678 out_unlock:
3679         hba->dev_cmd.query.descriptor = NULL;
3680         mutex_unlock(&hba->dev_cmd.lock);
3681 out:
3682         ufshcd_release_all(hba);
3683         return err;
3684 }
3685
3686 /**
3687  * ufshcd_query_descriptor - API function for sending descriptor requests
3688  * @hba: per-adapter instance
3689  * @opcode: attribute opcode
3690  * @idn: attribute idn to access
3691  * @index: index field
3692  * @selector: selector field
3693  * @desc_buf: the buffer that contains the descriptor
3694  * @buf_len: length parameter passed to the device
3695  *
3696  * Returns 0 for success, non-zero in case of failure.
3697  * The buf_len parameter will contain, on return, the length parameter
3698  * received on the response.
3699  */
3700 int ufshcd_query_descriptor(struct ufs_hba *hba,
3701                         enum query_opcode opcode, enum desc_idn idn, u8 index,
3702                         u8 selector, u8 *desc_buf, int *buf_len)
3703 {
3704         int err;
3705         int retries;
3706
3707         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3708                 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3709                                                 selector, desc_buf, buf_len);
3710                 if (!err || err == -EINVAL)
3711                         break;
3712         }
3713
3714         return err;
3715 }
3716 EXPORT_SYMBOL(ufshcd_query_descriptor);
3717
3718 /**
3719  * ufshcd_read_desc_param - read the specified descriptor parameter
3720  * @hba: Pointer to adapter instance
3721  * @desc_id: descriptor idn value
3722  * @desc_index: descriptor index
3723  * @param_offset: offset of the parameter to read
3724  * @param_read_buf: pointer to buffer where parameter would be read
3725  * @param_size: sizeof(param_read_buf)
3726  *
3727  * Return 0 in case of success, non-zero otherwise
3728  */
3729 static int ufshcd_read_desc_param(struct ufs_hba *hba,
3730                                   enum desc_idn desc_id,
3731                                   int desc_index,
3732                                   u32 param_offset,
3733                                   u8 *param_read_buf,
3734                                   u32 param_size)
3735 {
3736         int ret;
3737         u8 *desc_buf;
3738         u32 buff_len;
3739         bool is_kmalloc = true;
3740
3741         /* safety checks */
3742         if (desc_id >= QUERY_DESC_IDN_MAX)
3743                 return -EINVAL;
3744
3745         buff_len = ufs_query_desc_max_size[desc_id];
3746         if ((param_offset + param_size) > buff_len)
3747                 return -EINVAL;
3748
3749         if (!param_offset && (param_size == buff_len)) {
3750                 /* memory space already available to hold full descriptor */
3751                 desc_buf = param_read_buf;
3752                 is_kmalloc = false;
3753         } else {
3754                 /* allocate memory to hold full descriptor */
3755                 desc_buf = kmalloc(buff_len, GFP_KERNEL);
3756                 if (!desc_buf)
3757                         return -ENOMEM;
3758         }
3759
3760         ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
3761                                       desc_id, desc_index, 0, desc_buf,
3762                                       &buff_len);
3763
3764         if (ret) {
3765                 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
3766                         __func__, desc_id, desc_index, param_offset, ret);
3767
3768                 goto out;
3769         }
3770
3771         /* Sanity check */
3772         if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3773                 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
3774                         __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3775                 ret = -EINVAL;
3776                 goto out;
3777         }
3778
3779         /*
3780          * While reading variable size descriptors (like string descriptor),
3781          * some UFS devices may report the "LENGTH" (field in "Transaction
3782          * Specific fields" of Query Response UPIU) same as what was requested
3783          * in Query Request UPIU instead of reporting the actual size of the
3784          * variable size descriptor.
3785          * It is safe to ignore the "LENGTH" field for variable size
3786          * descriptors, since the length can always be derived from the
3787          * descriptor header fields. Hence the length match check is imposed
3788          * only for fixed size descriptors (for which we always request the
3789          * correct size as part of the Query Request UPIU).
3790          */
3791         if ((desc_id != QUERY_DESC_IDN_STRING) &&
3792             (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
3793                 dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
3794                         __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
3795                 ret = -EINVAL;
3796                 goto out;
3797         }
3798
3799         if (is_kmalloc)
3800                 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3801 out:
3802         if (is_kmalloc)
3803                 kfree(desc_buf);
3804         return ret;
3805 }
3806
3807 static inline int ufshcd_read_desc(struct ufs_hba *hba,
3808                                    enum desc_idn desc_id,
3809                                    int desc_index,
3810                                    u8 *buf,
3811                                    u32 size)
3812 {
3813         return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
3814 }
3815
3816 static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
3817                                          u8 *buf,
3818                                          u32 size)
3819 {
3820         return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
3821 }
3822
3823 int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
3824 {
3825         return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
3826 }
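/*
 * Illustrative sketch, not part of the original driver: pulling the whole
 * device descriptor into a buffer sized from the per-IDN maximum table used
 * elsewhere in this file. The helper name is hypothetical.
 */
static int __maybe_unused ufshcd_example_read_dev_desc(struct ufs_hba *hba)
{
        int buff_len = ufs_query_desc_max_size[QUERY_DESC_IDN_DEVICE];
        u8 *desc_buf;
        int err;

        desc_buf = kzalloc(buff_len, GFP_KERNEL);
        if (!desc_buf)
                return -ENOMEM;

        err = ufshcd_read_device_desc(hba, desc_buf, buff_len);
        if (!err)
                dev_dbg(hba->dev, "%s: device descriptor length %d\n",
                        __func__, desc_buf[QUERY_DESC_LENGTH_OFFSET]);

        kfree(desc_buf);
        return err;
}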
3827
3828 /**
3829  * ufshcd_read_string_desc - read string descriptor
3830  * @hba: pointer to adapter instance
3831  * @desc_index: descriptor index
3832  * @buf: pointer to buffer where descriptor would be read
3833  * @size: size of buf
3834  * @ascii: if true convert from unicode to ascii characters
3835  *
3836  * Return 0 in case of success, non-zero otherwise
3837  */
3838 int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
3839                                 u32 size, bool ascii)
3840 {
3841         int err = 0;
3842
3843         err = ufshcd_read_desc(hba,
3844                                 QUERY_DESC_IDN_STRING, desc_index, buf, size);
3845
3846         if (err) {
3847                 dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
3848                         __func__, QUERY_REQ_RETRIES, err);
3849                 goto out;
3850         }
3851
3852         if (ascii) {
3853                 int desc_len;
3854                 int ascii_len;
3855                 int i;
3856                 char *buff_ascii;
3857
3858                 desc_len = buf[0];
3859                 /* remove header and divide by 2 to move from UTF16 to UTF8 */
3860                 ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3861                 if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
3862                         dev_err(hba->dev, "%s: buffer allocated size is too small\n",
3863                                         __func__);
3864                         err = -ENOMEM;
3865                         goto out;
3866                 }
3867
3868                 buff_ascii = kzalloc(ascii_len, GFP_KERNEL);
3869                 if (!buff_ascii) {
3870                         dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
3871                                         __func__, ascii_len);
3872                         err = -ENOMEM;
3873                         goto out_free_buff;
3874                 }
3875
3876                 /*
3877                  * the descriptor contains a string in UTF-16 format;
3878                  * convert it to UTF-8 so it can be displayed
3879                  */
3880                 utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
3881                                 desc_len - QUERY_DESC_HDR_SIZE,
3882                                 UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
3883
3884                 /* replace non-printable or non-ASCII characters with spaces */
3885                 for (i = 0; i < ascii_len; i++)
3886                         ufshcd_remove_non_printable(&buff_ascii[i]);
3887
3888                 memset(buf + QUERY_DESC_HDR_SIZE, 0,
3889                                 size - QUERY_DESC_HDR_SIZE);
3890                 memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
3891                 buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
3892 out_free_buff:
3893                 kfree(buff_ascii);
3894         }
3895 out:
3896         return err;
3897 }
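/*
 * Illustrative sketch, not part of the original driver: reading a string
 * descriptor (e.g. the product name) and converting it to ASCII in place.
 * The helper name is hypothetical; the descriptor index is expected to be
 * taken from the device descriptor by the caller.
 */
static int __maybe_unused ufshcd_example_read_string(struct ufs_hba *hba,
                                                     int desc_index)
{
        int buff_len = ufs_query_desc_max_size[QUERY_DESC_IDN_STRING];
        u8 *str_buf;
        int err;

        str_buf = kzalloc(buff_len, GFP_KERNEL);
        if (!str_buf)
                return -ENOMEM;

        /* ascii == true converts the UTF-16 payload to printable ASCII */
        err = ufshcd_read_string_desc(hba, desc_index, str_buf, buff_len, true);
        if (!err)
                dev_dbg(hba->dev, "%s: string descriptor: %s\n",
                        __func__, str_buf + QUERY_DESC_HDR_SIZE);

        kfree(str_buf);
        return err;
}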
3898
3899 /**
3900  * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3901  * @hba: Pointer to adapter instance
3902  * @lun: lun id
3903  * @param_offset: offset of the parameter to read
3904  * @param_read_buf: pointer to buffer where parameter would be read
3905  * @param_size: sizeof(param_read_buf)
3906  *
3907  * Return 0 in case of success, non-zero otherwise
3908  */
3909 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3910                                               int lun,
3911                                               enum unit_desc_param param_offset,
3912                                               u8 *param_read_buf,
3913                                               u32 param_size)
3914 {
3915         /*
3916          * Unit descriptors are only available for general purpose LUs (LUN id
3917          * from 0 to 7) and RPMB Well known LU.
3918          */
3919         if (!ufs_is_valid_unit_desc_lun(lun))
3920                 return -EOPNOTSUPP;
3921
3922         return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3923                                       param_offset, param_read_buf, param_size);
3924 }
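/*
 * Illustrative sketch, not part of the original driver: reading a single
 * byte-sized field of a unit descriptor for one LU; the offset is one of
 * the enum unit_desc_param values. The helper name is hypothetical.
 */
static int __maybe_unused ufshcd_example_read_unit_param(struct ufs_hba *hba,
                                int lun, enum unit_desc_param offset, u8 *val)
{
        return ufshcd_read_unit_desc_param(hba, lun, offset, val, sizeof(*val));
}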
3925
3926 /**
3927  * ufshcd_memory_alloc - allocate memory for host memory space data structures
3928  * @hba: per adapter instance
3929  *
3930  * 1. Allocate DMA memory for Command Descriptor array
3931  *      Each command descriptor consist of Command UPIU, Response UPIU and PRDT
3932  * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3933  * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3934  *      (UTMRDL)
3935  * 4. Allocate memory for local reference block(lrb).
3936  *
3937  * Returns 0 for success, non-zero in case of failure
3938  */
3939 static int ufshcd_memory_alloc(struct ufs_hba *hba)
3940 {
3941         size_t utmrdl_size, utrdl_size, ucdl_size;
3942
3943         /* Allocate memory for UTP command descriptors */
3944         ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
3945         hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3946                                                   ucdl_size,
3947                                                   &hba->ucdl_dma_addr,
3948                                                   GFP_KERNEL);
3949
3950         /*
3951          * UFSHCI requires the UTP command descriptor to be 128 byte aligned.
3952          * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE;
3953          * if hba->ucdl_dma_addr is PAGE_SIZE aligned, it will automatically
3954          * be 128 byte aligned as well.
3955          */
3956         if (!hba->ucdl_base_addr ||
3957             WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3958                 dev_err(hba->dev,
3959                         "Command Descriptor Memory allocation failed\n");
3960                 goto out;
3961         }
3962
3963         /*
3964          * Allocate memory for UTP Transfer descriptors
3965          * UFSHCI requires 1024 byte alignment of UTRD
3966          */
3967         utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3968         hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3969                                                    utrdl_size,
3970                                                    &hba->utrdl_dma_addr,
3971                                                    GFP_KERNEL);
3972         if (!hba->utrdl_base_addr ||
3973             WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3974                 dev_err(hba->dev,
3975                         "Transfer Descriptor Memory allocation failed\n");
3976                 goto out;
3977         }
3978
3979         /*
3980          * Allocate memory for UTP Task Management descriptors
3981          * UFSHCI requires 1024 byte alignment of UTMRD
3982          */
3983         utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3984         hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3985                                                     utmrdl_size,
3986                                                     &hba->utmrdl_dma_addr,
3987                                                     GFP_KERNEL);
3988         if (!hba->utmrdl_base_addr ||
3989             WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3990                 dev_err(hba->dev,
3991                 "Task Management Descriptor Memory allocation failed\n");
3992                 goto out;
3993         }
3994
3995         /* Allocate memory for local reference block */
3996         hba->lrb = devm_kzalloc(hba->dev,
3997                                 hba->nutrs * sizeof(struct ufshcd_lrb),
3998                                 GFP_KERNEL);
3999         if (!hba->lrb) {
4000                 dev_err(hba->dev, "LRB Memory allocation failed\n");
4001                 goto out;
4002         }
4003         return 0;
4004 out:
4005         return -ENOMEM;
4006 }
4007
4008 /**
4009  * ufshcd_host_memory_configure - configure local reference block with
4010  *                              memory offsets
4011  * @hba: per adapter instance
4012  *
4013  * Configure Host memory space
4014  * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
4015  * address.
4016  * 2. Update each UTRD with Response UPIU offset, Response UPIU length
4017  * and PRDT offset.
4018  * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
4019  * into local reference block.
4020  */
4021 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
4022 {
4023         struct utp_transfer_cmd_desc *cmd_descp;
4024         struct utp_transfer_req_desc *utrdlp;
4025         dma_addr_t cmd_desc_dma_addr;
4026         dma_addr_t cmd_desc_element_addr;
4027         u16 response_offset;
4028         u16 prdt_offset;
4029         int cmd_desc_size;
4030         int i;
4031
4032         utrdlp = hba->utrdl_base_addr;
4033         cmd_descp = hba->ucdl_base_addr;
4034
4035         response_offset =
4036                 offsetof(struct utp_transfer_cmd_desc, response_upiu);
4037         prdt_offset =
4038                 offsetof(struct utp_transfer_cmd_desc, prd_table);
4039
4040         cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
4041         cmd_desc_dma_addr = hba->ucdl_dma_addr;
4042
4043         for (i = 0; i < hba->nutrs; i++) {
4044                 /* Configure UTRD with command descriptor base address */
4045                 cmd_desc_element_addr =
4046                                 (cmd_desc_dma_addr + (cmd_desc_size * i));
4047                 utrdlp[i].command_desc_base_addr_lo =
4048                                 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
4049                 utrdlp[i].command_desc_base_addr_hi =
4050                                 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
4051
4052                 /* Response upiu and prdt offset should be in double words */
4053                 utrdlp[i].response_upiu_offset =
4054                                 cpu_to_le16((response_offset >> 2));
4055                 utrdlp[i].prd_table_offset =
4056                                 cpu_to_le16((prdt_offset >> 2));
4057                 utrdlp[i].response_upiu_length =
4058                                 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
4059
4060                 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
4061                 hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
4062                                 (i * sizeof(struct utp_transfer_req_desc));
4063                 hba->lrb[i].ucd_req_ptr =
4064                         (struct utp_upiu_req *)(cmd_descp + i);
4065                 hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
4066                 hba->lrb[i].ucd_rsp_ptr =
4067                         (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
4068                 hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
4069                                 response_offset;
4070                 hba->lrb[i].ucd_prdt_ptr =
4071                         (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
4072                 hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
4073                                 prdt_offset;
4074         }
4075 }
4076
4077 /**
4078  * ufshcd_dme_link_startup - Notify Unipro to perform link startup
4079  * @hba: per adapter instance
4080  *
4081  * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
4082  * in order to initialize the Unipro link startup procedure.
4083  * Once the Unipro links are up, the device connected to the controller
4084  * is detected.
4085  *
4086  * Returns 0 on success, non-zero value on failure
4087  */
4088 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
4089 {
4090         struct uic_command uic_cmd = {0};
4091         int ret;
4092
4093         uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
4094
4095         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4096         if (ret)
4097                 dev_dbg(hba->dev,
4098                         "dme-link-startup: error code %d\n", ret);
4099         return ret;
4100 }
4101
4102 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
4103 {
4104         #define MIN_DELAY_BEFORE_DME_CMDS_US    1000
4105         unsigned long min_sleep_time_us;
4106
4107         if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
4108                 return;
4109
4110         /*
4111          * last_dme_cmd_tstamp will be 0 only for 1st call to
4112          * this function
4113          */
4114         if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
4115                 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
4116         } else {
4117                 unsigned long delta =
4118                         (unsigned long) ktime_to_us(
4119                                 ktime_sub(ktime_get(),
4120                                 hba->last_dme_cmd_tstamp));
4121
4122                 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
4123                         min_sleep_time_us =
4124                                 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
4125                 else
4126                         return; /* no more delay required */
4127         }
4128
4129         /* allow sleep for extra 50us if needed */
4130         usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
4131 }
4132
4133 static inline void ufshcd_save_tstamp_of_last_dme_cmd(
4134                         struct ufs_hba *hba)
4135 {
4136         if (hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)
4137                 hba->last_dme_cmd_tstamp = ktime_get();
4138 }
4139
4140 /**
4141  * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
4142  * @hba: per adapter instance
4143  * @attr_sel: uic command argument1
4144  * @attr_set: attribute set type as uic command argument2
4145  * @mib_val: setting value as uic command argument3
4146  * @peer: indicate whether peer or local
4147  *
4148  * Returns 0 on success, non-zero value on failure
4149  */
4150 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
4151                         u8 attr_set, u32 mib_val, u8 peer)
4152 {
4153         struct uic_command uic_cmd = {0};
4154         static const char *const action[] = {
4155                 "dme-set",
4156                 "dme-peer-set"
4157         };
4158         const char *set = action[!!peer];
4159         int ret;
4160         int retries = UFS_UIC_COMMAND_RETRIES;
4161
4162         ufsdbg_error_inject_dispatcher(hba,
4163                 ERR_INJECT_DME_ATTR, attr_sel, &attr_sel);
4164
4165         uic_cmd.command = peer ?
4166                 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
4167         uic_cmd.argument1 = attr_sel;
4168         uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
4169         uic_cmd.argument3 = mib_val;
4170
4171         do {
4172                 /* for peer attributes we retry upon failure */
4173                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4174                 if (ret)
4175                         dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
4176                                 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
4177         } while (ret && peer && --retries);
4178
4179         if (ret)
4180                 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
4181                         set, UIC_GET_ATTR_ID(attr_sel), mib_val,
4182                         UFS_UIC_COMMAND_RETRIES - retries);
4183
4184         return ret;
4185 }
4186 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
4187
4188 /**
4189  * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
4190  * @hba: per adapter instance
4191  * @attr_sel: uic command argument1
4192  * @mib_val: the value of the attribute as returned by the UIC command
4193  * @peer: indicate whether peer or local
4194  *
4195  * Returns 0 on success, non-zero value on failure
4196  */
4197 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
4198                         u32 *mib_val, u8 peer)
4199 {
4200         struct uic_command uic_cmd = {0};
4201         static const char *const action[] = {
4202                 "dme-get",
4203                 "dme-peer-get"
4204         };
4205         const char *get = action[!!peer];
4206         int ret;
4207         int retries = UFS_UIC_COMMAND_RETRIES;
4208         struct ufs_pa_layer_attr orig_pwr_info;
4209         struct ufs_pa_layer_attr temp_pwr_info;
4210         bool pwr_mode_change = false;
4211
4212         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
4213                 orig_pwr_info = hba->pwr_info;
4214                 temp_pwr_info = orig_pwr_info;
4215
4216                 if (orig_pwr_info.pwr_tx == FAST_MODE ||
4217                     orig_pwr_info.pwr_rx == FAST_MODE) {
4218                         temp_pwr_info.pwr_tx = FASTAUTO_MODE;
4219                         temp_pwr_info.pwr_rx = FASTAUTO_MODE;
4220                         pwr_mode_change = true;
4221                 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
4222                     orig_pwr_info.pwr_rx == SLOW_MODE) {
4223                         temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
4224                         temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
4225                         pwr_mode_change = true;
4226                 }
4227                 if (pwr_mode_change) {
4228                         ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
4229                         if (ret)
4230                                 goto out;
4231                 }
4232         }
4233
4234         uic_cmd.command = peer ?
4235                 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
4236
4237         ufsdbg_error_inject_dispatcher(hba,
4238                 ERR_INJECT_DME_ATTR, attr_sel, &attr_sel);
4239
4240         uic_cmd.argument1 = attr_sel;
4241
4242         do {
4243                 /* for peer attributes we retry upon failure */
4244                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4245                 if (ret)
4246                         dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
4247                                 get, UIC_GET_ATTR_ID(attr_sel), ret);
4248         } while (ret && peer && --retries);
4249
4250         if (ret)
4251                 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
4252                         get, UIC_GET_ATTR_ID(attr_sel),
4253                         UFS_UIC_COMMAND_RETRIES - retries);
4254
4255         if (mib_val && !ret)
4256                 *mib_val = uic_cmd.argument3;
4257
4258         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
4259             && pwr_mode_change)
4260                 ufshcd_change_power_mode(hba, &orig_pwr_info);
4261 out:
4262         return ret;
4263 }
4264 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
4265
4266 /**
4267  * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
4268  * state) and waits for it to take effect.
4269  *
4270  * @hba: per adapter instance
4271  * @cmd: UIC command to execute
4272  *
4273  * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
4274  * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
4275  * and device UniPro link, hence their final completion is indicated by
4276  * dedicated status bits in the Interrupt Status register (UPMS, UHES, UHXS) in
4277  * addition to the normal UIC command completion Status (UCCS). This function
4278  * only returns after the relevant status bits indicate completion.
4279  *
4280  * Returns 0 on success, non-zero value on failure
4281  */
4282 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
4283 {
4284         struct completion uic_async_done;
4285         unsigned long flags;
4286         u8 status;
4287         int ret;
4288         bool reenable_intr = false;
4289
4290         mutex_lock(&hba->uic_cmd_mutex);
4291         init_completion(&uic_async_done);
4292         ufshcd_add_delay_before_dme_cmd(hba);
4293
4294         spin_lock_irqsave(hba->host->host_lock, flags);
4295         hba->uic_async_done = &uic_async_done;
4296         if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
4297                 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
4298                 /*
4299                  * Make sure UIC command completion interrupt is disabled before
4300                  * issuing UIC command.
4301                  */
4302                 wmb();
4303                 reenable_intr = true;
4304         }
4305         ret = __ufshcd_send_uic_cmd(hba, cmd, false);
4306         spin_unlock_irqrestore(hba->host->host_lock, flags);
4307         if (ret) {
4308                 dev_err(hba->dev,
4309                         "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
4310                         cmd->command, cmd->argument3, ret);
4311                 goto out;
4312         }
4313
4314         if (!wait_for_completion_timeout(hba->uic_async_done,
4315                                          msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
4316                 dev_err(hba->dev,
4317                         "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
4318                         cmd->command, cmd->argument3);
4319                 ret = -ETIMEDOUT;
4320                 goto out;
4321         }
4322
4323         status = ufshcd_get_upmcrs(hba);
4324         if (status != PWR_LOCAL) {
4325                 dev_err(hba->dev,
4326                         "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
4327                         cmd->command, status);
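                /*
                 * Any UPMCRS value other than PWR_LOCAL is treated as a
                 * failure; a zero (PWR_OK) status is mapped to -1 so an error
                 * is still returned to the caller.
                 */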
4328                 ret = (status != PWR_OK) ? status : -1;
4329         }
4330         ufshcd_dme_cmd_log(hba, "cmp2", hba->active_uic_cmd->command);
4331
4332 out:
4333         if (ret) {
4334                 ufsdbg_set_err_state(hba);
4335                 ufshcd_print_host_state(hba);
4336                 ufshcd_print_pwr_info(hba);
4337                 ufshcd_print_host_regs(hba);
4338                 ufshcd_print_cmd_log(hba);
4339         }
4340
4341         ufshcd_save_tstamp_of_last_dme_cmd(hba);
4342         spin_lock_irqsave(hba->host->host_lock, flags);
4343         hba->active_uic_cmd = NULL;
4344         hba->uic_async_done = NULL;
4345         if (reenable_intr)
4346                 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
4347         spin_unlock_irqrestore(hba->host->host_lock, flags);
4348         mutex_unlock(&hba->uic_cmd_mutex);
4349         return ret;
4350 }
4351
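/**
 * ufshcd_wait_for_doorbell_clr - wait until all outstanding task management
 * and transfer requests have completed
 * @hba: per adapter instance
 * @wait_timeout_us: maximum time to wait, in microseconds
 *
 * Returns 0 once both doorbell registers read zero, -EBUSY if the host is not
 * operational or the doorbells do not clear within the timeout.
 */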
4352 int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us)
4353 {
4354         unsigned long flags;
4355         int ret = 0;
4356         u32 tm_doorbell;
4357         u32 tr_doorbell;
4358         bool timeout = false, do_last_check = false;
4359         ktime_t start;
4360
4361         ufshcd_hold_all(hba);
4362         spin_lock_irqsave(hba->host->host_lock, flags);
4363         /*
4364          * Wait for all the outstanding tasks/transfer requests.
4365          * Verify by checking the doorbell registers are clear.
4366          */
4367         start = ktime_get();
4368         do {
4369                 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
4370                         ret = -EBUSY;
4371                         goto out;
4372                 }
4373
4374                 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
4375                 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
4376                 if (!tm_doorbell && !tr_doorbell) {
4377                         timeout = false;
4378                         break;
4379                 } else if (do_last_check) {
4380                         break;
4381                 }
4382
4383                 spin_unlock_irqrestore(hba->host->host_lock, flags);
4384                 schedule();
4385                 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
4386                     wait_timeout_us) {
4387                         timeout = true;
4388                         /*
4389                          * We might have scheduled out for long time so make
4390                          * sure to check if doorbells are cleared by this time
4391                          * or not.
4392                          */
4393                         do_last_check = true;
4394                 }
4395                 spin_lock_irqsave(hba->host->host_lock, flags);
4396         } while (tm_doorbell || tr_doorbell);
4397
4398         if (timeout) {
4399                 dev_err(hba->dev,
4400                         "%s: timed out waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
4401                         __func__, tm_doorbell, tr_doorbell);
4402                 ret = -EBUSY;
4403         }
4404 out:
4405         spin_unlock_irqrestore(hba->host->host_lock, flags);
4406         ufshcd_release_all(hba);
4407         return ret;
4408 }
4409
4410 /**
4411  * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
4412  *                              using DME_SET primitives.
4413  * @hba: per adapter instance
4414  * @mode: power mode value
4415  *
4416  * Returns 0 on success, non-zero value on failure
4417  */
4418 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
4419 {
4420         struct uic_command uic_cmd = {0};
4421         int ret;
4422
4423         if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
4424                 ret = ufshcd_dme_set(hba,
4425                                 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
4426                 if (ret) {
4427                         dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
4428                                                 __func__, ret);
4429                         goto out;
4430                 }
4431         }
4432
4433         uic_cmd.command = UIC_CMD_DME_SET;
4434         uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
4435         uic_cmd.argument3 = mode;
4436         hba->ufs_stats.clk_hold.ctx = PWRCTL_CMD_SEND;
4437         ufshcd_hold_all(hba);
4438         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4439         hba->ufs_stats.clk_rel.ctx = PWRCTL_CMD_SEND;
4440         ufshcd_release_all(hba);
4441 out:
4442         return ret;
4443 }
4444
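/**
 * ufshcd_link_recovery - recover the link via a full host reset and restore
 * @hba: per adapter instance
 *
 * Schedules the error handler to force a host reset and waits for it to
 * finish. Returns 0 if the link is active afterwards, -ENOLINK otherwise.
 */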
4445 static int ufshcd_link_recovery(struct ufs_hba *hba)
4446 {
4447         int ret = 0;
4448         unsigned long flags;
4449
4450         /*
4451          * Check if there is any race with fatal error handling.
4452          * If so, wait for it to complete. Even though fatal error
4453          * handling does reset and restore in some cases, don't assume
4454          * anything out of it. We are just avoiding race here.
4455          */
4456         do {
4457                 spin_lock_irqsave(hba->host->host_lock, flags);
4458                 if (!(work_pending(&hba->eh_work) ||
4459                                 hba->ufshcd_state == UFSHCD_STATE_RESET))
4460                         break;
4461                 spin_unlock_irqrestore(hba->host->host_lock, flags);
4462                 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
4463                 flush_work(&hba->eh_work);
4464         } while (1);
4465
4466
4467         /*
4468          * We don't know whether the previous reset really reset the host
4469          * controller or not, so force a reset here to be sure.
4470          */
4471         hba->ufshcd_state = UFSHCD_STATE_ERROR;
4472         hba->force_host_reset = true;
4473         schedule_work(&hba->eh_work);
4474
4475         /* wait for the reset work to finish */
4476         do {
4477                 if (!(work_pending(&hba->eh_work) ||
4478                                 hba->ufshcd_state == UFSHCD_STATE_RESET))
4479                         break;
4480                 spin_unlock_irqrestore(hba->host->host_lock, flags);
4481                 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
4482                 flush_work(&hba->eh_work);
4483                 spin_lock_irqsave(hba->host->host_lock, flags);
4484         } while (1);
4485
4486         if (!((hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) &&
4487               ufshcd_is_link_active(hba)))
4488                 ret = -ENOLINK;
4489         spin_unlock_irqrestore(hba->host->host_lock, flags);
4490
4491         return ret;
4492 }
4493
4494 static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
4495 {
4496         int ret;
4497         struct uic_command uic_cmd = {0};
4498         ktime_t start = ktime_get();
4499
4500         uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
4501         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4502         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
4503                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4504
4505         /*
4506          * Do a full reinit if enter failed or if LINERESET was detected
4507          * during Hibern8. After LINERESET, the link moves to the default
4508          * PWM-G1 mode, hence a full reinit is required to reach HS speeds.
4509          */
4510         if (ret || hba->full_init_linereset) {
4511                 int err;
4512
4513                 hba->full_init_linereset = false;
4514                 ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_ENTER);
4515                 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d",
4516                         __func__, ret);
4517                 /*
4518                  * If link recovery fails, return the error code (-ENOLINK)
4519                  * returned by ufshcd_link_recovery().
4520                  * If link recovery succeeds, return -EAGAIN so that the
4521                  * hibern8 enter is retried.
4522                  */
4523                 err = ufshcd_link_recovery(hba);
4524                 if (err) {
4525                         dev_err(hba->dev, "%s: link recovery failed", __func__);
4526                         ret = err;
4527                 } else {
4528                         ret = -EAGAIN;
4529                 }
4530         } else {
4531                 dev_dbg(hba->dev, "%s: Hibern8 Enter at %lld us", __func__,
4532                         ktime_to_us(ktime_get()));
4533         }
4534
4535         return ret;
4536 }
4537
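/**
 * ufshcd_uic_hibern8_enter - put the link into hibern8 state
 * @hba: per adapter instance
 *
 * Retries the enter up to UIC_HIBERN8_ENTER_RETRIES times while the failure is
 * recoverable (-EAGAIN); BUG()s if the link cannot be recovered.
 *
 * Returns 0 on success, or -EAGAIN if all retries are exhausted
 */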
4538 int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
4539 {
4540         int ret = 0, retries;
4541
4542         for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
4543                 ret = __ufshcd_uic_hibern8_enter(hba);
4544                 if (!ret)
4545                         goto out;
4546                 else if (ret != -EAGAIN)
4547                         /* Unable to recover the link, so no point proceeding */
4548                         BUG();
4549         }
4550 out:
4551         return ret;
4552 }
4553
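/**
 * ufshcd_uic_hibern8_exit - bring the link out of hibern8 state
 * @hba: per adapter instance
 *
 * Returns 0 on success, non-zero value on failure
 */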
4554 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
4555 {
4556         struct uic_command uic_cmd = {0};
4557         int ret;
4558         ktime_t start = ktime_get();
4559
4560         uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
4561         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4562         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
4563                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4564
4565         /* Do full reinit if exit failed */
4566         if (ret) {
4567                 ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_EXIT);
4568                 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d",
4569                         __func__, ret);
4570                 ret = ufshcd_link_recovery(hba);
4571                 /* Unable to recover the link, so no point proceeding */
4572                 if (ret)
4573                         BUG();
4574         } else {
4575                 dev_dbg(hba->dev, "%s: Hibern8 Exit at %lld us", __func__,
4576                         ktime_to_us(ktime_get()));
4577                 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
4578                 hba->ufs_stats.hibern8_exit_cnt++;
4579         }
4580
4581         return ret;
4582 }
4583
4584 /**
4585  * ufshcd_init_pwr_info - setting the POR (power on reset)
4586  * values in hba power info
4587  * @hba: per-adapter instance
4588  */
4589 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
4590 {
4591         hba->pwr_info.gear_rx = UFS_PWM_G1;
4592         hba->pwr_info.gear_tx = UFS_PWM_G1;
4593         hba->pwr_info.lane_rx = 1;
4594         hba->pwr_info.lane_tx = 1;
4595         hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
4596         hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
4597         hba->pwr_info.hs_rate = 0;
4598 }
4599
4600 /**
4601  * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
4602  * @hba: per-adapter instance
4603  */
4604 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
4605 {
4606         struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4607
4608         if (hba->max_pwr_info.is_valid)
4609                 return 0;
4610
4611         pwr_info->pwr_tx = FAST_MODE;
4612         pwr_info->pwr_rx = FAST_MODE;
4613         pwr_info->hs_rate = PA_HS_MODE_B;
4614
4615         /* Get the connected lane count */
4616         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4617                         &pwr_info->lane_rx);
4618         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4619                         &pwr_info->lane_tx);
4620
4621         if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4622                 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4623                                 __func__,
4624                                 pwr_info->lane_rx,
4625                                 pwr_info->lane_tx);
4626                 return -EINVAL;
4627         }
4628
4629         /*
4630          * First, get the maximum gears of HS speed.
4631          * If a zero value, it means there is no HSGEAR capability.
4632          * Then, get the maximum gears of PWM speed.
4633          */
4634         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4635         if (!pwr_info->gear_rx) {
4636                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4637                                 &pwr_info->gear_rx);
4638                 if (!pwr_info->gear_rx) {
4639                         dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4640                                 __func__, pwr_info->gear_rx);
4641                         return -EINVAL;
4642                 }
4643                 pwr_info->pwr_rx = SLOW_MODE;
4644         }
4645
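        /*
         * The peer's (device's) maximum RX gear bounds the gear the host can
         * use for TX, so read the peer's PA_MAXRX*GEAR into gear_tx.
         */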
4646         ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4647                         &pwr_info->gear_tx);
4648         if (!pwr_info->gear_tx) {
4649                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4650                                 &pwr_info->gear_tx);
4651                 if (!pwr_info->gear_tx) {
4652                         dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4653                                 __func__, pwr_info->gear_tx);
4654                         return -EINVAL;
4655                 }
4656                 pwr_info->pwr_tx = SLOW_MODE;
4657         }
4658
4659         hba->max_pwr_info.is_valid = true;
4660         return 0;
4661 }
4662
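/**
 * ufshcd_change_power_mode - change the UniPro link power mode
 * @hba: per-adapter instance
 * @pwr_mode: requested power mode settings (gears, lanes, modes and HS rate)
 *
 * Programs the PA layer attributes and issues DME_SET(PA_PWRMODE).
 *
 * Returns 0 on success, non-zero value on failure
 */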
4663 int ufshcd_change_power_mode(struct ufs_hba *hba,
4664                              struct ufs_pa_layer_attr *pwr_mode)
4665 {
4666         int ret = 0;
4667
4668         /* if already configured to the requested pwr_mode */
4669         if (!hba->restore_needed &&
4670             pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4671             pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4672             pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4673             pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4674             pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4675             pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4676             pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4677                 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4678                 return 0;
4679         }
4680
4681         ufsdbg_error_inject_dispatcher(hba, ERR_INJECT_PWR_CHANGE, 0, &ret);
4682         if (ret)
4683                 return ret;
4684
4685         /*
4686          * Configure attributes for power mode change with below.
4687          * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4688          * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4689          * - PA_HSSERIES
4690          */
4691         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4692         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4693                         pwr_mode->lane_rx);
4694         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4695                         pwr_mode->pwr_rx == FAST_MODE)
4696                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
4697         else
4698                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
4699
4700         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4701         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4702                         pwr_mode->lane_tx);
4703         if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4704                         pwr_mode->pwr_tx == FAST_MODE)
4705                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
4706         else
4707                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
4708
4709         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4710             pwr_mode->pwr_tx == FASTAUTO_MODE ||
4711             pwr_mode->pwr_rx == FAST_MODE ||
4712             pwr_mode->pwr_tx == FAST_MODE)
4713                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4714                                                 pwr_mode->hs_rate);
4715
4716         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4717                         DL_FC0ProtectionTimeOutVal_Default);
4718         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4719                         DL_TC0ReplayTimeOutVal_Default);
4720         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4721                         DL_AFC0ReqTimeOutVal_Default);
4722
4723         ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4724                         DL_FC0ProtectionTimeOutVal_Default);
4725         ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4726                         DL_TC0ReplayTimeOutVal_Default);
4727         ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4728                         DL_AFC0ReqTimeOutVal_Default);
4729
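        /*
         * The value written to PA_PWRMODE carries the RX power mode in the
         * upper nibble and the TX power mode in the lower nibble.
         */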
4730         ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4731                         | pwr_mode->pwr_tx);
4732
4733         if (ret) {
4734                 ufshcd_update_error_stats(hba, UFS_ERR_POWER_MODE_CHANGE);
4735                 dev_err(hba->dev,
4736                         "%s: power mode change failed %d\n", __func__, ret);
4737         } else {
4738                 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4739                                                                 pwr_mode);
4740
4741                 memcpy(&hba->pwr_info, pwr_mode,
4742                         sizeof(struct ufs_pa_layer_attr));
4743                 hba->ufs_stats.power_mode_change_cnt++;
4744         }
4745
4746         return ret;
4747 }
4748
4749 /**
4750  * ufshcd_config_pwr_mode - configure a new power mode
4751  * @hba: per-adapter instance
4752  * @desired_pwr_mode: desired power configuration
4753  */
4754 static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4755                 struct ufs_pa_layer_attr *desired_pwr_mode)
4756 {
4757         struct ufs_pa_layer_attr final_params = { 0 };
4758         int ret;
4759
4760         ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4761                                         desired_pwr_mode, &final_params);
4762
4763         if (ret)
4764                 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4765
4766         ret = ufshcd_change_power_mode(hba, &final_params);
4767         if (!ret)
4768                 ufshcd_print_pwr_info(hba);
4769
4770         return ret;
4771 }
4772
4773 /**
4774  * ufshcd_complete_dev_init() - checks device readiness
4775  * @hba: per-adapter instance
4776  *
4777  * Set fDeviceInit flag and poll until device toggles it.
4778  */
4779 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4780 {
4781         int i;
4782         int err;
4783         bool flag_res = true;
4784
4785         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4786                 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
4787         if (err) {
4788                 dev_err(hba->dev,
4789                         "%s setting fDeviceInit flag failed with error %d\n",
4790                         __func__, err);
4791                 goto out;
4792         }
4793
4794         /* poll for max. 1000 iterations for fDeviceInit flag to clear */
4795         for (i = 0; i < 1000 && !err && flag_res; i++)
4796                 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4797                         QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
4798
4799         if (err)
4800                 dev_err(hba->dev,
4801                         "%s reading fDeviceInit flag failed with error %d\n",
4802                         __func__, err);
4803         else if (flag_res)
4804                 dev_err(hba->dev,
4805                         "%s fDeviceInit was not cleared by the device\n",
4806                         __func__);
4807
4808 out:
4809         return err;
4810 }
4811
4812 /**
4813  * ufshcd_make_hba_operational - Make UFS controller operational
4814  * @hba: per adapter instance
4815  *
4816  * To bring UFS host controller to operational state,
4817  * 1. Enable required interrupts
4818  * 2. Configure interrupt aggregation
4819  * 3. Program UTRL and UTMRL base address
4820  * 4. Configure run-stop-registers
4821  *
4822  * Returns 0 on success, non-zero value on failure
4823  */
4824 static int ufshcd_make_hba_operational(struct ufs_hba *hba)
4825 {
4826         int err = 0;
4827         u32 reg;
4828
4829         /* Enable required interrupts */
4830         ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4831
4832         /* Configure interrupt aggregation */
4833         if (ufshcd_is_intr_aggr_allowed(hba))
4834                 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4835         else
4836                 ufshcd_disable_intr_aggr(hba);
4837
4838         /* Configure UTRL and UTMRL base address registers */
4839         ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4840                         REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4841         ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4842                         REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4843         ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4844                         REG_UTP_TASK_REQ_LIST_BASE_L);
4845         ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4846                         REG_UTP_TASK_REQ_LIST_BASE_H);
4847
4848         /*
4849          * Make sure base address and interrupt setup are updated before
4850          * enabling the run/stop registers below.
4851          */
4852         wmb();
4853
4854         /*
4855          * UCRDY, UTMRLDY and UTRLRDY bits must be 1
4856          */
4857         reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4858         if (!(ufshcd_get_lists_status(reg))) {
4859                 ufshcd_enable_run_stop_reg(hba);
4860         } else {
4861                 dev_err(hba->dev,
4862                         "Host controller not ready to process requests");
4863                 err = -EIO;
4864                 goto out;
4865         }
4866
4867 out:
4868         return err;
4869 }
4870
4871 /**
4872  * ufshcd_hba_stop - Send controller to reset state
4873  * @hba: per adapter instance
4874  * @can_sleep: perform sleep or just spin
4875  */
4876 static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
4877 {
4878         int err;
4879
4880         ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
4881         err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4882                                         CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4883                                         10, 1, can_sleep);
4884         if (err)
4885                 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4886 }
4887
4888 /**
4889  * ufshcd_hba_enable - initialize the controller
4890  * @hba: per adapter instance
4891  *
4892  * The controller resets itself and the controller firmware initialization
4893  * sequence kicks off. When the controller is ready it will set
4894  * the Host Controller Enable bit to 1.
4895  *
4896  * Returns 0 on success, non-zero value on failure
4897  */
4898 static int ufshcd_hba_enable(struct ufs_hba *hba)
4899 {
4900         int retry;
4901
4902         /*
4903          * msleep of 1 and 5 used in this function might result in msleep(20),
4904          * but it was necessary to send the UFS FPGA to reset mode during
4905          * development and testing of this driver. msleep can be changed to
4906          * mdelay and retry count can be reduced based on the controller.
4907          */
4908         if (!ufshcd_is_hba_active(hba))
4909                 /* change controller state to "reset state" */
4910                 ufshcd_hba_stop(hba, true);
4911
4912         /* UniPro link is disabled at this point */
4913         ufshcd_set_link_off(hba);
4914
4915         ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4916
4917         /* start controller initialization sequence */
4918         ufshcd_hba_start(hba);
4919
4920         /*
4921          * To initialize a UFS host controller, the HCE bit must be set to 1.
4922          * During initialization the HCE bit value changes from 1->0->1.
4923          * When the host controller completes the initialization sequence,
4924          * it sets the HCE bit back to 1. The same HCE bit is read back
4925          * to check if the controller has completed the initialization sequence.
4926          * So without this delay, the HCE = 1 value written by the previous
4927          * instruction might be read back prematurely.
4928          * This delay can be changed based on the controller.
4929          */
4930         msleep(1);
4931
4932         /* wait for the host controller to complete initialization */
4933         retry = 10;
4934         while (ufshcd_is_hba_active(hba)) {
4935                 if (retry) {
4936                         retry--;
4937                 } else {
4938                         dev_err(hba->dev,
4939                                 "Controller enable failed\n");
4940                         return -EIO;
4941                 }
4942                 msleep(5);
4943         }
4944
4945         /* enable UIC related interrupts */
4946         ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4947
4948         ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4949
4950         return 0;
4951 }
4952
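/**
 * ufshcd_disable_tx_lcc - disable TX Line Control Commands (LCC)
 * @hba: per adapter instance
 * @peer: false to act on the host's TX lanes, true for the device's
 *
 * Clears the TX_LCC_ENABLE M-PHY attribute on every connected TX data lane.
 *
 * Returns 0 on success, non-zero value on failure
 */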
4953 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4954 {
4955         int tx_lanes, i, err = 0;
4956
4957         if (!peer)
4958                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4959                                &tx_lanes);
4960         else
4961                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4962                                     &tx_lanes);
4963         for (i = 0; i < tx_lanes; i++) {
4964                 if (!peer)
4965                         err = ufshcd_dme_set(hba,
4966                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4967                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4968                                         0);
4969                 else
4970                         err = ufshcd_dme_peer_set(hba,
4971                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4972                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4973                                         0);
4974                 if (err) {
4975                         dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4976                                 __func__, peer, i, err);
4977                         break;
4978                 }
4979         }
4980
4981         return err;
4982 }
4983
4984 static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
4985 {
4986         return ufshcd_disable_tx_lcc(hba, false);
4987 }
4988
4989 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4990 {
4991         return ufshcd_disable_tx_lcc(hba, true);
4992 }
4993
4994 /**
4995  * ufshcd_link_startup - Initialize unipro link startup
4996  * @hba: per adapter instance
4997  *
4998  * Returns 0 for success, non-zero in case of failure
4999  */
5000 static int ufshcd_link_startup(struct ufs_hba *hba)
5001 {
5002         int ret;
5003         int retries = DME_LINKSTARTUP_RETRIES;
5004         bool link_startup_again = false;
5005
5006         /*
5007          * If the UFS device isn't active then we have to issue link startup
5008          * twice to make sure the device state moves to active.
5009          */
5010         if (!ufshcd_is_ufs_dev_active(hba))
5011                 link_startup_again = true;
5012
5013 link_startup:
5014         do {
5015                 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
5016
5017                 ret = ufshcd_dme_link_startup(hba);
5018                 if (ret)
5019                         ufshcd_update_error_stats(hba, UFS_ERR_LINKSTARTUP);
5020
5021                 /* check if device is detected by inter-connect layer */
5022                 if (!ret && !ufshcd_is_device_present(hba)) {
5023                         ufshcd_update_error_stats(hba, UFS_ERR_LINKSTARTUP);
5024                         dev_err(hba->dev, "%s: Device not present\n", __func__);
5025                         ret = -ENXIO;
5026                         goto out;
5027                 }
5028
5029                 /*
5030                  * DME link lost indication is only received when link is up,
5031                  * but we can't be sure if the link is up until link startup
5032                  * succeeds. So reset the local UniPro and try again.
5033                  */
5034                 if (ret && ufshcd_hba_enable(hba))
5035                         goto out;
5036         } while (ret && retries--);
5037
5038         if (ret)
5039                 /* failed to get the link up... retire */
5040                 goto out;
5041
5042         if (link_startup_again) {
5043                 link_startup_again = false;
5044                 retries = DME_LINKSTARTUP_RETRIES;
5045                 goto link_startup;
5046         }
5047
5048         /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
5049         ufshcd_init_pwr_info(hba);
5050         ufshcd_print_pwr_info(hba);
5051
5052         if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
5053                 ret = ufshcd_disable_device_tx_lcc(hba);
5054                 if (ret)
5055                         goto out;
5056         }
5057
5058         if (hba->dev_quirks & UFS_DEVICE_QUIRK_BROKEN_LCC) {
5059                 ret = ufshcd_disable_host_tx_lcc(hba);
5060                 if (ret)
5061                         goto out;
5062         }
5063
5064         /* Include any host controller configuration via UIC commands */
5065         ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
5066         if (ret)
5067                 goto out;
5068
5069         ret = ufshcd_make_hba_operational(hba);
5070 out:
5071         if (ret) {
5072                 dev_err(hba->dev, "link startup failed %d\n", ret);
5073                 ufshcd_print_host_state(hba);
5074                 ufshcd_print_pwr_info(hba);
5075                 ufshcd_print_host_regs(hba);
5076         }
5077         return ret;
5078 }
5079
5080 /**
5081  * ufshcd_verify_dev_init() - Verify device initialization
5082  * @hba: per-adapter instance
5083  *
5084  * Send NOP OUT UPIU and wait for NOP IN response to check whether the
5085  * device Transport Protocol (UTP) layer is ready after a reset.
5086  * If the UTP layer at the device side is not initialized, it may
5087  * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
5088  * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
5089  */
5090 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
5091 {
5092         int err = 0;
5093         int retries;
5094
5095         ufshcd_hold_all(hba);
5096         mutex_lock(&hba->dev_cmd.lock);
5097         for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
5098                 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
5099                                                NOP_OUT_TIMEOUT);
5100
5101                 if (!err || err == -ETIMEDOUT)
5102                         break;
5103
5104                 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
5105         }
5106         mutex_unlock(&hba->dev_cmd.lock);
5107         ufshcd_release_all(hba);
5108
5109         if (err)
5110                 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
5111         return err;
5112 }
5113
5114 /**
5115  * ufshcd_set_queue_depth - set lun queue depth
5116  * @sdev: pointer to SCSI device
5117  *
5118  * Read bLUQueueDepth value and activate scsi tagged command
5119  * queueing. For WLUN, queue depth is set to 1. For best-effort
5120  * cases (bLUQueueDepth = 0) the queue depth is set to a maximum
5121  * value that host can queue.
5122  */
5123 static void ufshcd_set_queue_depth(struct scsi_device *sdev)
5124 {
5125         int ret = 0;
5126         u8 lun_qdepth;
5127         struct ufs_hba *hba;
5128
5129         hba = shost_priv(sdev->host);
5130
5131         lun_qdepth = hba->nutrs;
5132         ret = ufshcd_read_unit_desc_param(hba,
5133                           ufshcd_scsi_to_upiu_lun(sdev->lun),
5134                           UNIT_DESC_PARAM_LU_Q_DEPTH,
5135                           &lun_qdepth,
5136                           sizeof(lun_qdepth));
5137
5138         /* Some WLUNs don't support the unit descriptor */
5139         if (ret == -EOPNOTSUPP)
5140                 lun_qdepth = 1;
5141         else if (!lun_qdepth)
5142                 /* eventually, we can figure out the real queue depth */
5143                 lun_qdepth = hba->nutrs;
5144         else
5145                 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
5146
5147         dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
5148                         __func__, lun_qdepth);
5149         scsi_change_queue_depth(sdev, lun_qdepth);
5150 }
5151
5152 /**
5153  * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
5154  * @hba: per-adapter instance
5155  * @lun: UFS device lun id
5156  * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
5157  *
5158  * Returns 0 in case of success and the b_lu_write_protect status is returned
5159  * in the @b_lu_write_protect parameter.
5160  * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
5161  * Returns -EINVAL in case of invalid parameters passed to this function.
5162  */
5163 static int ufshcd_get_lu_wp(struct ufs_hba *hba,
5164                             u8 lun,
5165                             u8 *b_lu_write_protect)
5166 {
5167         int ret;
5168
5169         if (!b_lu_write_protect)
5170                 ret = -EINVAL;
5171         /*
5172          * According to UFS device spec, RPMB LU can't be write
5173          * protected so skip reading bLUWriteProtect parameter for
5174          * it. For other W-LUs, UNIT DESCRIPTOR is not available.
5175          */
5176         else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
5177                 ret = -ENOTSUPP;
5178         else
5179                 ret = ufshcd_read_unit_desc_param(hba,
5180                                           lun,
5181                                           UNIT_DESC_PARAM_LU_WR_PROTECT,
5182                                           b_lu_write_protect,
5183                                           sizeof(*b_lu_write_protect));
5184         return ret;
5185 }
5186
5187 /**
5188  * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
5189  * status
5190  * @hba: per-adapter instance
5191  * @sdev: pointer to SCSI device
5192  *
5193  */
5194 static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
5195                                                     struct scsi_device *sdev)
5196 {
5197         if (hba->dev_info.f_power_on_wp_en &&
5198             !hba->dev_info.is_lu_power_on_wp) {
5199                 u8 b_lu_write_protect;
5200
5201                 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
5202                                       &b_lu_write_protect) &&
5203                     (b_lu_write_protect == UFS_LU_POWER_ON_WP))
5204                         hba->dev_info.is_lu_power_on_wp = true;
5205         }
5206 }
5207
5208 /**
5209  * ufshcd_slave_alloc - handle initial SCSI device configurations
5210  * @sdev: pointer to SCSI device
5211  *
5212  * Returns success
5213  */
5214 static int ufshcd_slave_alloc(struct scsi_device *sdev)
5215 {
5216         struct ufs_hba *hba;
5217
5218         hba = shost_priv(sdev->host);
5219
5220         /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
5221         sdev->use_10_for_ms = 1;
5222
5223         /* allow SCSI layer to restart the device in case of errors */
5224         sdev->allow_restart = 1;
5225
5226         /* REPORT SUPPORTED OPERATION CODES is not supported */
5227         sdev->no_report_opcodes = 1;
5228
5229         /* WRITE_SAME command is not supported */
5230         sdev->no_write_same = 1;
5231
5232         ufshcd_set_queue_depth(sdev);
5233
5234         ufshcd_get_lu_power_on_wp_status(hba, sdev);
5235
5236         return 0;
5237 }
5238
5239 /**
5240  * ufshcd_change_queue_depth - change queue depth
5241  * @sdev: pointer to SCSI device
5242  * @depth: required depth to set
5243  *
5244  * Change queue depth and make sure the max. limits are not crossed.
5245  */
5246 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
5247 {
5248         struct ufs_hba *hba = shost_priv(sdev->host);
5249
5250         if (depth > hba->nutrs)
5251                 depth = hba->nutrs;
5252         return scsi_change_queue_depth(sdev, depth);
5253 }
5254
5255 /**
5256  * ufshcd_slave_configure - adjust SCSI device configurations
5257  * @sdev: pointer to SCSI device
5258  */
5259 static int ufshcd_slave_configure(struct scsi_device *sdev)
5260 {
5261         struct request_queue *q = sdev->request_queue;
5262
5263         blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
5264         blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
5265
5266         sdev->autosuspend_delay = UFSHCD_AUTO_SUSPEND_DELAY_MS;
5267         sdev->use_rpm_auto = 1;
5268
5269         return 0;
5270 }
5271
5272 /**
5273  * ufshcd_slave_destroy - remove SCSI device configurations
5274  * @sdev: pointer to SCSI device
5275  */
5276 static void ufshcd_slave_destroy(struct scsi_device *sdev)
5277 {
5278         struct ufs_hba *hba;
5279
5280         hba = shost_priv(sdev->host);
5281         /* Drop the reference as it won't be needed anymore */
5282         if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
5283                 unsigned long flags;
5284
5285                 spin_lock_irqsave(hba->host->host_lock, flags);
5286                 hba->sdev_ufs_device = NULL;
5287                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5288         }
5289 }
5290
5291 /**
5292  * ufshcd_task_req_compl - handle task management request completion
5293  * @hba: per adapter instance
5294  * @index: index of the completed request
5295  * @resp: task management service response
5296  *
5297  * Returns non-zero value on error, zero on success
5298  */
5299 static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
5300 {
5301         struct utp_task_req_desc *task_req_descp;
5302         struct utp_upiu_task_rsp *task_rsp_upiup;
5303         unsigned long flags;
5304         int ocs_value;
5305         int task_result;
5306
5307         spin_lock_irqsave(hba->host->host_lock, flags);
5308
5309         /* Clear completed tasks from outstanding_tasks */
5310         __clear_bit(index, &hba->outstanding_tasks);
5311
5312         task_req_descp = hba->utmrdl_base_addr;
5313         ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
5314
5315         if (ocs_value == OCS_SUCCESS) {
5316                 task_rsp_upiup = (struct utp_upiu_task_rsp *)
5317                                 task_req_descp[index].task_rsp_upiu;
5318                 task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
5319                 task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
5320                 if (resp)
5321                         *resp = (u8)task_result;
5322         } else {
5323                 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
5324                                 __func__, ocs_value);
5325         }
5326         spin_unlock_irqrestore(hba->host->host_lock, flags);
5327
5328         return ocs_value;
5329 }
5330
5331 /**
5332  * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
5333  * @lrbp: pointer to local reference block of completed command
5334  * @scsi_status: SCSI command status
5335  *
5336  * Returns value based on SCSI command status
5337  */
5338 static inline int
5339 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
5340 {
5341         int result = 0;
5342
5343         switch (scsi_status) {
5344         case SAM_STAT_CHECK_CONDITION:
5345                 ufshcd_copy_sense_data(lrbp);
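                /* fall through - report DID_OK along with the SCSI status */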
5346         case SAM_STAT_GOOD:
5347                 result |= DID_OK << 16 |
5348                           COMMAND_COMPLETE << 8 |
5349                           scsi_status;
5350                 break;
5351         case SAM_STAT_TASK_SET_FULL:
5352         case SAM_STAT_BUSY:
5353         case SAM_STAT_TASK_ABORTED:
5354                 ufshcd_copy_sense_data(lrbp);
5355                 result |= scsi_status;
5356                 break;
5357         default:
5358                 result |= DID_ERROR << 16;
5359                 break;
5360         } /* end of switch */
5361
5362         return result;
5363 }
5364
5365 /**
5366  * ufshcd_transfer_rsp_status - Get overall status of the response
5367  * @hba: per adapter instance
5368  * @lrbp: pointer to local reference block of completed command
5369  *
5370  * Returns result of the command to notify SCSI midlayer
5371  */
5372 static inline int
5373 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
5374 {
5375         int result = 0;
5376         int scsi_status;
5377         int ocs;
5378         bool print_prdt;
5379
5380         /* overall command status of utrd */
5381         ocs = ufshcd_get_tr_ocs(lrbp);
5382
5383         switch (ocs) {
5384         case OCS_SUCCESS:
5385                 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
5386                 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
5387                 switch (result) {
5388                 case UPIU_TRANSACTION_RESPONSE:
5389                         /*
5390                          * get the response UPIU result to extract
5391                          * the SCSI command status
5392                          */
5393                         result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
5394
5395                         /*
5396                          * get the result based on SCSI status response
5397                          * to notify the SCSI midlayer of the command status
5398                          */
5399                         scsi_status = result & MASK_SCSI_STATUS;
5400                         result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
5401
5402                         /*
5403                          * Currently we are only supporting BKOPs exception
5404                          * events hence we can ignore BKOPs exception event
5405                          * during power management callbacks. BKOPs exception
5406                          * event is not expected to be raised in runtime suspend
5407                          * callback as it allows the urgent bkops.
5408                          * During system suspend, we are anyway forcefully
5409                          * disabling the bkops and if urgent bkops is needed
5410                          * it will be enabled on system resume. Long term
5411                          * solution could be to abort the system suspend if
5412                          * UFS device needs urgent BKOPs.
5413                          */
5414                         if (!hba->pm_op_in_progress &&
5415                             ufshcd_is_exception_event(lrbp->ucd_rsp_ptr)) {
5416                                 /*
5417                                  * Prevent suspend once eeh_work is scheduled
5418                                  * to avoid deadlock between ufshcd_suspend
5419                                  * and exception event handler.
5420                                  */
5421                                 if (schedule_work(&hba->eeh_work))
5422                                         pm_runtime_get_noresume(hba->dev);
5423                         }
5424                         break;
5425                 case UPIU_TRANSACTION_REJECT_UPIU:
5426                         /* TODO: handle Reject UPIU Response */
5427                         result = DID_ERROR << 16;
5428                         dev_err(hba->dev,
5429                                 "Reject UPIU not fully implemented\n");
5430                         break;
5431                 default:
5432                         result = DID_ERROR << 16;
5433                         dev_err(hba->dev,
5434                                 "Unexpected request response code = %x\n",
5435                                 result);
5436                         break;
5437                 }
5438                 break;
5439         case OCS_ABORTED:
5440                 result |= DID_ABORT << 16;
5441                 break;
5442         case OCS_INVALID_COMMAND_STATUS:
5443                 result |= DID_REQUEUE << 16;
5444                 break;
5445         case OCS_INVALID_CMD_TABLE_ATTR:
5446         case OCS_INVALID_PRDT_ATTR:
5447         case OCS_MISMATCH_DATA_BUF_SIZE:
5448         case OCS_MISMATCH_RESP_UPIU_SIZE:
5449         case OCS_PEER_COMM_FAILURE:
5450         case OCS_FATAL_ERROR:
5451         case OCS_DEVICE_FATAL_ERROR:
5452         case OCS_INVALID_CRYPTO_CONFIG:
5453         case OCS_GENERAL_CRYPTO_ERROR:
5454         default:
5455                 result |= DID_ERROR << 16;
5456                 dev_err(hba->dev,
5457                                 "OCS error from controller = %x for tag %d\n",
5458                                 ocs, lrbp->task_tag);
5459                 /*
5460                  * This is called in interrupt context, hence avoid sleep
5461                  * while printing debug registers. Also print only the minimum
5462                  * debug registers needed to debug OCS failure.
5463                  */
5464                 __ufshcd_print_host_regs(hba, true);
5465                 ufshcd_print_host_state(hba);
5466                 break;
5467         } /* end of switch */
5468
5469         if ((host_byte(result) != DID_OK) && !hba->silence_err_logs) {
5470                 print_prdt = (ocs == OCS_INVALID_PRDT_ATTR ||
5471                         ocs == OCS_MISMATCH_DATA_BUF_SIZE);
5472                 ufshcd_print_trs(hba, 1 << lrbp->task_tag, print_prdt);
5473         }
5474
5475         if ((host_byte(result) == DID_ERROR) ||
5476             (host_byte(result) == DID_ABORT))
5477                 ufsdbg_set_err_state(hba);
5478
5479         return result;
5480 }
5481
5482 /**
5483  * ufshcd_uic_cmd_compl - handle completion of uic command
5484  * @hba: per adapter instance
5485  * @intr_status: interrupt status generated by the controller
5486  *
5487  * Returns
5488  *  IRQ_HANDLED - If interrupt is valid
5489  *  IRQ_NONE    - If invalid interrupt
5490  */
5491 static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
5492 {
5493         irqreturn_t retval = IRQ_NONE;
5494
5495         if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
5496                 hba->active_uic_cmd->argument2 |=
5497                         ufshcd_get_uic_cmd_result(hba);
5498                 hba->active_uic_cmd->argument3 =
5499                         ufshcd_get_dme_attr_val(hba);
5500                 complete(&hba->active_uic_cmd->done);
5501                 retval = IRQ_HANDLED;
5502         }
5503
5504         if (intr_status & UFSHCD_UIC_PWR_MASK) {
5505                 if (hba->uic_async_done) {
5506                         complete(hba->uic_async_done);
5507                         retval = IRQ_HANDLED;
5508                 } else if (ufshcd_is_auto_hibern8_supported(hba)) {
5509                         /*
5510                          * If uic_async_done flag is not set then this
5511                          * is an Auto hibern8 err interrupt.
5512                          * Perform a host reset followed by a full
5513                          * link recovery.
5514                          */
5515                         hba->ufshcd_state = UFSHCD_STATE_ERROR;
5516                         hba->force_host_reset = true;
5517                         dev_err(hba->dev, "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
5518                                 __func__, (intr_status & UIC_HIBERNATE_ENTER) ?
5519                                 "Enter" : "Exit",
5520                                 intr_status, ufshcd_get_upmcrs(hba));
5521                         __ufshcd_print_host_regs(hba, true);
5522                         ufshcd_print_host_state(hba);
5523                         schedule_work(&hba->eh_work);
5524                         retval = IRQ_HANDLED;
5525                 }
5526         }
5527         return retval;
5528 }
5529
5530 /**
5531  * ufshcd_abort_outstanding_transfer_requests - abort outstanding transfer requests.
5532  * @hba: per adapter instance
5533  * @result: error result to inform scsi layer about
5534  */
5535 void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
5536 {
5537         u8 index;
5538         struct ufshcd_lrb *lrbp;
5539         struct scsi_cmnd *cmd;
5540
5541         if (!hba->outstanding_reqs)
5542                 return;
5543
5544         for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
5545                 lrbp = &hba->lrb[index];
5546                 cmd = lrbp->cmd;
5547                 if (cmd) {
5548                         ufshcd_cond_add_cmd_trace(hba, index, "failed");
5549                         ufshcd_update_error_stats(hba,
5550                                         UFS_ERR_INT_FATAL_ERRORS);
5551                         scsi_dma_unmap(cmd);
5552                         cmd->result = result;
5553                         /* Clear pending transfer requests */
5554                         ufshcd_clear_cmd(hba, index);
5555                         ufshcd_outstanding_req_clear(hba, index);
5556                         clear_bit_unlock(index, &hba->lrb_in_use);
5557                         lrbp->complete_time_stamp = ktime_get();
5558                         update_req_stats(hba, lrbp);
5559                         /* Mark completed command as NULL in LRB */
5560                         lrbp->cmd = NULL;
5561                         ufshcd_release_all(hba);
5562                         if (cmd->request) {
5563                                 /*
5564                                  * As we are accessing the "request" structure,
5565                                  * this must be called before calling
5566                                  * ->scsi_done() callback.
5567                                  */
5568                                 ufshcd_vops_pm_qos_req_end(hba, cmd->request,
5569                                         true);
5570                                 ufshcd_vops_crypto_engine_cfg_end(hba,
5571                                                 lrbp, cmd->request);
5572                         }
5573                         /* Do not touch lrbp after scsi done */
5574                         cmd->scsi_done(cmd);
5575                 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
5576                         if (hba->dev_cmd.complete) {
5577                                 ufshcd_cond_add_cmd_trace(hba, index,
5578                                                         "dev_failed");
5579                                 ufshcd_outstanding_req_clear(hba, index);
5580                                 complete(hba->dev_cmd.complete);
5581                         }
5582                 }
5583                 if (ufshcd_is_clkscaling_supported(hba))
5584                         hba->clk_scaling.active_reqs--;
5585         }
5586 }
5587
5588 /**
5589  * __ufshcd_transfer_req_compl - handle SCSI and query command completion
5590  * @hba: per adapter instance
5591  * @completed_reqs: requests to complete
5592  */
5593 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
5594                                         unsigned long completed_reqs)
5595 {
5596         struct ufshcd_lrb *lrbp;
5597         struct scsi_cmnd *cmd;
5598         int result;
5599         int index;
5600         struct request *req;
5601
5602         for_each_set_bit(index, &completed_reqs, hba->nutrs) {
5603                 lrbp = &hba->lrb[index];
5604                 cmd = lrbp->cmd;
5605                 if (cmd) {
5606                         ufshcd_cond_add_cmd_trace(hba, index, "complete");
5607                         ufshcd_update_tag_stats_completion(hba, cmd);
5608                         result = ufshcd_transfer_rsp_status(hba, lrbp);
5609                         scsi_dma_unmap(cmd);
5610                         cmd->result = result;
5611                         clear_bit_unlock(index, &hba->lrb_in_use);
5612                         lrbp->complete_time_stamp = ktime_get();
5613                         update_req_stats(hba, lrbp);
5614                         /* Mark completed command as NULL in LRB */
5615                         lrbp->cmd = NULL;
5616                         hba->ufs_stats.clk_rel.ctx = XFR_REQ_COMPL;
5617                         __ufshcd_release(hba, false);
5618                         __ufshcd_hibern8_release(hba, false);
5619                         if (cmd->request) {
5620                                 /*
5621                                  * As we are accessing the "request" structure,
5622                                  * this must be called before calling
5623                                  * ->scsi_done() callback.
5624                                  */
5625                                 ufshcd_vops_pm_qos_req_end(hba, cmd->request,
5626                                         false);
5627                                 ufshcd_vops_crypto_engine_cfg_end(hba,
5628                                         lrbp, cmd->request);
5629                         }
5630
5631                         req = cmd->request;
5632                         if (req) {
5633                                 /* Update IO svc time latency histogram */
5634                                 if (req->lat_hist_enabled) {
5635                                         ktime_t completion;
5636                                         u_int64_t delta_us;
5637
5638                                         completion = ktime_get();
5639                                         delta_us = ktime_us_delta(completion,
5640                                                   req->lat_hist_io_start);
5641                                         blk_update_latency_hist(
5642                                                 (rq_data_dir(req) == READ) ?
5643                                                 &hba->io_lat_read :
5644                                                 &hba->io_lat_write, delta_us);
5645                                 }
5646                         }
5647                         /* Do not touch lrbp after scsi done */
5648                         cmd->scsi_done(cmd);
5649                 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
5650                         if (hba->dev_cmd.complete) {
5651                                 ufshcd_cond_add_cmd_trace(hba, index,
5652                                                 "dcmp");
5653                                 complete(hba->dev_cmd.complete);
5654                         }
5655                 }
5656                 if (ufshcd_is_clkscaling_supported(hba))
5657                         hba->clk_scaling.active_reqs--;
5658         }
5659
5660         /* clear corresponding bits of completed commands */
5661         hba->outstanding_reqs ^= completed_reqs;
5662
5663         ufshcd_clk_scaling_update_busy(hba);
5664
5665         /* we might have freed some tags above */
5666         wake_up(&hba->dev_cmd.tag_wq);
5667 }
5668
5669 /**
5670  * ufshcd_transfer_req_compl - handle SCSI and query command completion
5671  * @hba: per adapter instance
5672  *
5673  * Returns
5674  *  IRQ_HANDLED - If interrupt is valid
5675  *  IRQ_NONE    - If invalid interrupt
5676  */
5677 static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
5678 {
5679         unsigned long completed_reqs;
5680         u32 tr_doorbell;
5681
5682         /* Resetting the interrupt aggregation counters first and reading the
5683          * DOOR_BELL afterward allows us to handle all the completed requests.
5684          * To prevent starvation of other interrupts, the DB is read only once
5685          * after the reset. The downside of this approach is the possibility of
5686          * a false interrupt if the device completes another request after the
5687          * aggregation is reset but before the DB is read.
5688          */
5689         if (ufshcd_is_intr_aggr_allowed(hba))
5690                 ufshcd_reset_intr_aggr(hba);
5691
5692         tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5693         completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
5694
5695         if (completed_reqs) {
5696                 __ufshcd_transfer_req_compl(hba, completed_reqs);
5697                 return IRQ_HANDLED;
5698         } else {
5699                 return IRQ_NONE;
5700         }
5701 }
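
/*
 * Illustrative sketch (not part of the driver): how the XOR above yields the
 * set of completed tags. Assume tags 0, 1 and 3 were issued and only tag 1 is
 * still pending in the doorbell register:
 *
 *	unsigned long outstanding = 0xb;	// 0b1011: tags 0, 1 and 3 issued
 *	u32 doorbell = 0x2;			// 0b0010: tag 1 still pending
 *	unsigned long completed = doorbell ^ outstanding; // 0b1001: tags 0 and 3
 *
 * A bit that is set in outstanding_reqs but already cleared in the doorbell
 * marks a request the hardware has finished.
 */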
5702
5703 /**
5704  * ufshcd_disable_ee - disable exception event
5705  * @hba: per-adapter instance
5706  * @mask: exception event to disable
5707  *
5708  * Disables exception event in the device so that the EVENT_ALERT
5709  * bit is not set.
5710  *
5711  * Returns zero on success, non-zero error value on failure.
5712  */
5713 static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
5714 {
5715         int err = 0;
5716         u32 val;
5717
5718         if (!(hba->ee_ctrl_mask & mask))
5719                 goto out;
5720
5721         val = hba->ee_ctrl_mask & ~mask;
5722         val &= 0xFFFF; /* 2 bytes */
5723         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5724                         QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
5725         if (!err)
5726                 hba->ee_ctrl_mask &= ~mask;
5727 out:
5728         return err;
5729 }
5730
5731 /**
5732  * ufshcd_enable_ee - enable exception event
5733  * @hba: per-adapter instance
5734  * @mask: exception event to enable
5735  *
5736  * Enable corresponding exception event in the device to allow
5737  * device to alert host in critical scenarios.
5738  *
5739  * Returns zero on success, non-zero error value on failure.
5740  */
5741 static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
5742 {
5743         int err = 0;
5744         u32 val;
5745
5746         if (hba->ee_ctrl_mask & mask)
5747                 goto out;
5748
5749         val = hba->ee_ctrl_mask | mask;
5750         val &= 0xFFFF; /* 2 bytes */
5751         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5752                         QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
5753         if (!err)
5754                 hba->ee_ctrl_mask |= mask;
5755 out:
5756         return err;
5757 }
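
/*
 * Illustrative sketch (not part of the driver): a caller that wants the
 * device to raise the urgent BKOPS exception enables that bit in
 * wExceptionEventControl and clears it again when it is no longer needed:
 *
 *	int err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
 *	...
 *	if (!err)
 *		err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
 *
 * Both helpers cache the current mask in hba->ee_ctrl_mask, so the query to
 * the device is skipped when the bit is already in the requested state.
 */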
5758
5759 /**
5760  * ufshcd_enable_auto_bkops - Allow device managed BKOPS
5761  * @hba: per-adapter instance
5762  *
5763  * Allow the device to manage background operations on its own. Enabling
5764  * this might lead to inconsistent latencies during normal data transfers
5765  * as the device decides on its own when to run background
5766  * operations.
5767  *
5768  * Returns zero on success, non-zero on failure.
5769  */
5770 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5771 {
5772         int err = 0;
5773
5774         if (hba->auto_bkops_enabled)
5775                 goto out;
5776
5777         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5778                         QUERY_FLAG_IDN_BKOPS_EN, NULL);
5779         if (err) {
5780                 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5781                                 __func__, err);
5782                 goto out;
5783         }
5784
5785         hba->auto_bkops_enabled = true;
5786         trace_ufshcd_auto_bkops_state(dev_name(hba->dev), 1);
5787
5788         /* No need of URGENT_BKOPS exception from the device */
5789         err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5790         if (err)
5791                 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5792                                 __func__, err);
5793 out:
5794         return err;
5795 }
5796
5797 /**
5798  * ufshcd_disable_auto_bkops - block device in doing background operations
5799  * @hba: per-adapter instance
5800  *
5801  * Disabling background operations improves command response latency but
5802  * has the drawback that the device may move into a critical state where it
5803  * is not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
5804  * host is idle so that BKOPS are managed effectively without any negative
5805  * impact.
5806  *
5807  * Returns zero on success, non-zero on failure.
5808  */
5809 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5810 {
5811         int err = 0;
5812
5813         if (!hba->auto_bkops_enabled)
5814                 goto out;
5815
5816         /*
5817          * If host assisted BKOPs is to be enabled, make sure
5818          * urgent bkops exception is allowed.
5819          */
5820         err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5821         if (err) {
5822                 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5823                                 __func__, err);
5824                 goto out;
5825         }
5826
5827         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5828                         QUERY_FLAG_IDN_BKOPS_EN, NULL);
5829         if (err) {
5830                 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5831                                 __func__, err);
5832                 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5833                 goto out;
5834         }
5835
5836         hba->auto_bkops_enabled = false;
5837         trace_ufshcd_auto_bkops_state(dev_name(hba->dev), 0);
5838 out:
5839         return err;
5840 }
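
/*
 * Illustrative sketch (not part of the driver): the ordering in the helper
 * above. The urgent-BKOPS exception is (re)enabled before fBackgroundOpsEn is
 * cleared, so there is no window in which the host neither lets the device
 * run BKOPS on its own nor gets notified when BKOPS become urgent. If
 * clearing the flag fails, the exception is disabled again as a rollback:
 *
 *	ufshcd_enable_ee(URGENT_BKOPS) -> clear fBackgroundOpsEn
 *	                                  \-- on failure: ufshcd_disable_ee()
 */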
5841
5842 /**
5843  * ufshcd_force_reset_auto_bkops - force reset auto bkops state
5844  * @hba: per adapter instance
5845  *
5846  * After a device reset the device may toggle the BKOPS_EN flag
5847  * to its default value. The s/w tracking variables should be updated
5848  * as well. This function changes the auto-bkops state based on
5849  * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
5850  */
5851 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5852 {
5853         if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5854                 hba->auto_bkops_enabled = false;
5855                 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5856                 ufshcd_enable_auto_bkops(hba);
5857         } else {
5858                 hba->auto_bkops_enabled = true;
5859                 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5860                 ufshcd_disable_auto_bkops(hba);
5861         }
5862 }
5863
5864 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5865 {
5866         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5867                         QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5868 }
5869
5870 /**
5871  * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5872  * @hba: per-adapter instance
5873  * @status: bkops_status value
5874  *
5875  * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
5876  * flag in the device to permit background operations if the device
5877  * bkops_status is greater than or equal to the "status" argument passed to
5878  * this function; disable it otherwise.
5879  *
5880  * Returns 0 for success, non-zero in case of failure.
5881  *
5882  * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5883  * to know whether auto bkops is enabled or disabled after this function
5884  * returns control to it.
5885  */
5886 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5887                              enum bkops_status status)
5888 {
5889         int err;
5890         u32 curr_status = 0;
5891
5892         err = ufshcd_get_bkops_status(hba, &curr_status);
5893         if (err) {
5894                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5895                                 __func__, err);
5896                 goto out;
5897         } else if (curr_status > BKOPS_STATUS_MAX) {
5898                 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5899                                 __func__, curr_status);
5900                 err = -EINVAL;
5901                 goto out;
5902         }
5903
5904         if (curr_status >= status)
5905                 err = ufshcd_enable_auto_bkops(hba);
5906         else
5907                 err = ufshcd_disable_auto_bkops(hba);
5908 out:
5909         return err;
5910 }
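
/*
 * Illustrative sketch (not part of the driver): with a threshold of
 * BKOPS_STATUS_PERF_IMPACT the decision above works out as follows, using
 * the bBackgroundOpsStatus values defined by the UFS spec:
 *
 *	curr_status                              auto-bkops
 *	0 (no operations required)               disabled
 *	1 (operations outstanding, non-critical) disabled
 *	2 (operations required, perf impacted)   enabled
 *	3 (critical)                             enabled
 *
 * e.g. ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT) enables auto-bkops
 * only once the device reports at least a performance impact.
 */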
5911
5912 /**
5913  * ufshcd_urgent_bkops - handle urgent bkops exception event
5914  * @hba: per-adapter instance
5915  *
5916  * Enable fBackgroundOpsEn flag in the device to permit background
5917  * operations.
5918  *
5919  * Returns 0 if BKOPS is enabled, 1 if it is not enabled, and a negative
5920  * error value for any other failure.
5921  */
5922 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5923 {
5924         return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
5925 }
5926
5927 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5928 {
5929         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5930                         QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5931 }
5932
5933 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5934 {
5935         int err;
5936         u32 curr_status = 0;
5937
5938         if (hba->is_urgent_bkops_lvl_checked)
5939                 goto enable_auto_bkops;
5940
5941         err = ufshcd_get_bkops_status(hba, &curr_status);
5942         if (err) {
5943                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5944                                 __func__, err);
5945                 goto out;
5946         }
5947
5948         /*
5949          * We are seeing that some devices raise the urgent bkops
5950          * exception event even when the BKOPS status doesn't indicate a
5951          * performance-impacted or critical state. Handle these devices by
5952          * determining their urgent bkops status at runtime.
5953          */
5954         if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5955                 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5956                                 __func__, curr_status);
5957                 /* update the current status as the urgent bkops level */
5958                 hba->urgent_bkops_lvl = curr_status;
5959                 hba->is_urgent_bkops_lvl_checked = true;
5960         }
5961
5962 enable_auto_bkops:
5963         err = ufshcd_enable_auto_bkops(hba);
5964 out:
5965         if (err < 0)
5966                 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5967                                 __func__, err);
5968 }
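
/*
 * Illustrative sketch (not part of the driver): effect of the runtime
 * adjustment above. If a device raises the urgent BKOPS exception while
 * reporting bBackgroundOpsStatus == 1 (non-critical), the handler lowers
 * hba->urgent_bkops_lvl to 1, so later ufshcd_urgent_bkops() calls - which
 * invoke ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl) - keep auto-bkops
 * enabled for that device instead of turning it back off.
 */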
5969
5970 /**
5971  * ufshcd_exception_event_handler - handle exceptions raised by device
5972  * @work: pointer to work data
5973  *
5974  * Read bExceptionEventStatus attribute from the device and handle the
5975  * exception event accordingly.
5976  */
5977 static void ufshcd_exception_event_handler(struct work_struct *work)
5978 {
5979         struct ufs_hba *hba;
5980         int err;
5981         u32 status = 0;
5982         hba = container_of(work, struct ufs_hba, eeh_work);
5983
5984         pm_runtime_get_sync(hba->dev);
5985         ufshcd_scsi_block_requests(hba);
5986         err = ufshcd_get_ee_status(hba, &status);
5987         if (err) {
5988                 dev_err(hba->dev, "%s: failed to get exception status %d\n",
5989                                 __func__, err);
5990                 goto out;
5991         }
5992
5993         status &= hba->ee_ctrl_mask;
5994
5995         if (status & MASK_EE_URGENT_BKOPS)
5996                 ufshcd_bkops_exception_event_handler(hba);
5997
5998 out:
5999         ufshcd_scsi_unblock_requests(hba);
6000         /*
6001          * pm_runtime_get_noresume is called while scheduling
6002          * eeh_work to avoid suspend racing with exception work.
6003          * Hence decrement usage counter using pm_runtime_put_noidle
6004          * to allow suspend on completion of exception event handler.
6005          */
6006         pm_runtime_put_noidle(hba->dev);
6007         pm_runtime_put(hba->dev);
6008         return;
6009 }
6010
6011 /* Complete requests that have door-bell cleared */
6012 static void ufshcd_complete_requests(struct ufs_hba *hba)
6013 {
6014         ufshcd_transfer_req_compl(hba);
6015         ufshcd_tmc_handler(hba);
6016 }
6017
6018 /**
6019  * ufshcd_quirk_dl_nac_errors - check whether error handling is required
6020  *                              to recover from DL NAC errors.
6021  * @hba: per-adapter instance
6022  *
6023  * Returns true if error handling is required, false otherwise
6024  */
6025 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
6026 {
6027         unsigned long flags;
6028         bool err_handling = true;
6029
6030         spin_lock_irqsave(hba->host->host_lock, flags);
6031         /*
6032          * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the
6033          * device fatal error and/or DL NAC & REPLAY timeout errors.
6034          */
6035         if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
6036                 goto out;
6037
6038         if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
6039             ((hba->saved_err & UIC_ERROR) &&
6040              (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) {
6041                 /*
6042                  * we have to do error recovery but at least silence the error
6043                  * logs.
6044                  */
6045                 hba->silence_err_logs = true;
6046                 goto out;
6047         }
6048
6049         if ((hba->saved_err & UIC_ERROR) &&
6050             (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
6051                 int err;
6052                 /*
6053                  * wait 50ms to see whether any other errors are reported.
6054                  */
6055                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6056                 msleep(50);
6057                 spin_lock_irqsave(hba->host->host_lock, flags);
6058
6059                 /*
6060                  * now check whether we have received any severe errors other
6061                  * than the DL NAC error.
6062                  */
6063                 if ((hba->saved_err & INT_FATAL_ERRORS) ||
6064                     ((hba->saved_err & UIC_ERROR) &&
6065                     (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) {
6066                         if (((hba->saved_err & INT_FATAL_ERRORS) ==
6067                                 DEVICE_FATAL_ERROR) || (hba->saved_uic_err &
6068                                         ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))
6069                                 hba->silence_err_logs = true;
6070                         goto out;
6071                 }
6072
6073                 /*
6074                  * As DL NAC is the only error received so far, send out NOP
6075                  * command to confirm if link is still active or not.
6076                  *   - If we don't get any response then do error recovery.
6077                  *   - If we get response then clear the DL NAC error bit.
6078                  */
6079
6080                 /* silence the error logs from NOP command */
6081                 hba->silence_err_logs = true;
6082                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6083                 err = ufshcd_verify_dev_init(hba);
6084                 spin_lock_irqsave(hba->host->host_lock, flags);
6085                 hba->silence_err_logs = false;
6086
6087                 if (err) {
6088                         hba->silence_err_logs = true;
6089                         goto out;
6090                 }
6091
6092                 /* Link seems to be alive hence ignore the DL NAC errors */
6093                 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
6094                         hba->saved_err &= ~UIC_ERROR;
6095                 /* clear NAC error */
6096                 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6097                 if (!hba->saved_uic_err) {
6098                         err_handling = false;
6099                         goto out;
6100                 }
6101                 /*
6102                  * there seems to be some errors other than NAC, so do error
6103                  * recovery
6104                  */
6105                 hba->silence_err_logs = true;
6106         }
6107 out:
6108         spin_unlock_irqrestore(hba->host->host_lock, flags);
6109         return err_handling;
6110 }
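
/*
 * Illustrative sketch (not part of the driver): the quirk above boils down
 * to the following decision flow, expressed as pseudo-code:
 *
 *	if (controller/system-bus fatal error)        -> recover
 *	else if (device fatal or TCx replay timeout)  -> recover, logs silenced
 *	else if (only DL NAC received)
 *		wait 50ms, then send a NOP to the device;
 *		if (NOP ok && no other UIC error left)  -> skip recovery
 *		else                                    -> recover
 *
 * i.e. a lone NAC on an otherwise healthy link is treated as noise and does
 * not trigger a full reset.
 */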
6111
6112 /**
6113  * ufshcd_err_handler - handle UFS errors that require s/w attention
6114  * @work: pointer to work structure
6115  */
6116 static void ufshcd_err_handler(struct work_struct *work)
6117 {
6118         struct ufs_hba *hba;
6119         unsigned long flags;
6120         bool err_xfer = false, err_tm = false;
6121         int err = 0;
6122         int tag;
6123         bool needs_reset = false;
6124         bool clks_enabled = false;
6125
6126         hba = container_of(work, struct ufs_hba, eh_work);
6127
6128         spin_lock_irqsave(hba->host->host_lock, flags);
6129         ufsdbg_set_err_state(hba);
6130
6131         if (hba->ufshcd_state == UFSHCD_STATE_RESET)
6132                 goto out;
6133
6134         /*
6135          * Make sure the clocks are ON before we proceed with err
6136          * handling. In the majority of cases the err handler runs
6137          * with the clocks ON. There is a possibility that the err
6138          * handler was scheduled due to an auto hibern8 error interrupt,
6139          * in which case the clocks could be gated or be in the
6140          * process of being gated when the err handler runs.
6141          */
6142         if (unlikely((hba->clk_gating.state != CLKS_ON) &&
6143             ufshcd_is_auto_hibern8_supported(hba))) {
6144                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6145                 hba->ufs_stats.clk_hold.ctx = ERR_HNDLR_WORK;
6146                 ufshcd_hold(hba, false);
6147                 spin_lock_irqsave(hba->host->host_lock, flags);
6148                 clks_enabled = true;
6149         }
6150
6151         hba->ufshcd_state = UFSHCD_STATE_RESET;
6152         ufshcd_set_eh_in_progress(hba);
6153
6154         /* Complete requests that have door-bell cleared by h/w */
6155         ufshcd_complete_requests(hba);
6156
6157         if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6158                 bool ret;
6159
6160                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6161                 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
6162                 ret = ufshcd_quirk_dl_nac_errors(hba);
6163                 spin_lock_irqsave(hba->host->host_lock, flags);
6164                 if (!ret)
6165                         goto skip_err_handling;
6166         }
6167
6168         /*
6169          * Dump controller state before resetting. Transfer request state
6170          * will be dumped as part of the request completion.
6171          */
6172         if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
6173                 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x",
6174                         __func__, hba->saved_err, hba->saved_uic_err);
6175                 if (!hba->silence_err_logs) {
6176                         /* release lock as print host regs sleeps */
6177                         spin_unlock_irqrestore(hba->host->host_lock, flags);
6178                         ufshcd_print_host_regs(hba);
6179                         ufshcd_print_host_state(hba);
6180                         ufshcd_print_pwr_info(hba);
6181                         ufshcd_print_tmrs(hba, hba->outstanding_tasks);
6182                         ufshcd_print_cmd_log(hba);
6183                         spin_lock_irqsave(hba->host->host_lock, flags);
6184                 }
6185         }
6186
6187         if ((hba->saved_err & INT_FATAL_ERRORS)
6188             || hba->saved_ce_err || hba->force_host_reset ||
6189             ((hba->saved_err & UIC_ERROR) &&
6190             (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
6191                                    UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
6192                                    UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
6193                 needs_reset = true;
6194
6195         /*
6196          * if host reset is required then skip clearing the pending
6197          * transfers forcefully because they will automatically get
6198          * cleared after link startup.
6199          */
6200         if (needs_reset)
6201                 goto skip_pending_xfer_clear;
6202
6203         /* release lock as clear command might sleep */
6204         spin_unlock_irqrestore(hba->host->host_lock, flags);
6205         /* Clear pending transfer requests */
6206         for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
6207                 if (ufshcd_clear_cmd(hba, tag)) {
6208                         err_xfer = true;
6209                         goto lock_skip_pending_xfer_clear;
6210                 }
6211         }
6212
6213         /* Clear pending task management requests */
6214         for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
6215                 if (ufshcd_clear_tm_cmd(hba, tag)) {
6216                         err_tm = true;
6217                         goto lock_skip_pending_xfer_clear;
6218                 }
6219         }
6220
6221 lock_skip_pending_xfer_clear:
6222         spin_lock_irqsave(hba->host->host_lock, flags);
6223
6224         /* Complete the requests that are cleared by s/w */
6225         ufshcd_complete_requests(hba);
6226
6227         if (err_xfer || err_tm)
6228                 needs_reset = true;
6229
6230 skip_pending_xfer_clear:
6231         /* Fatal errors need reset */
6232         if (needs_reset) {
6233                 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
6234
6235                 if (hba->saved_err & INT_FATAL_ERRORS)
6236                         ufshcd_update_error_stats(hba,
6237                                                   UFS_ERR_INT_FATAL_ERRORS);
6238                 if (hba->saved_ce_err)
6239                         ufshcd_update_error_stats(hba, UFS_ERR_CRYPTO_ENGINE);
6240
6241                 if (hba->saved_err & UIC_ERROR)
6242                         ufshcd_update_error_stats(hba,
6243                                                   UFS_ERR_INT_UIC_ERROR);
6244
6245                 if (err_xfer || err_tm)
6246                         ufshcd_update_error_stats(hba,
6247                                                   UFS_ERR_CLEAR_PEND_XFER_TM);
6248
6249                 /*
6250                  * ufshcd_reset_and_restore() does the link reinitialization
6251                  * which will need at least one empty doorbell slot to send the
6252                  * device management commands (NOP and query commands).
6253                  * If no slot is empty at this moment then free up the last
6254                  * slot forcefully.
6255                  */
6256                 if (hba->outstanding_reqs == max_doorbells)
6257                         __ufshcd_transfer_req_compl(hba,
6258                                                     (1UL << (hba->nutrs - 1)));
6259
6260                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6261                 err = ufshcd_reset_and_restore(hba);
6262                 spin_lock_irqsave(hba->host->host_lock, flags);
6263                 if (err) {
6264                         dev_err(hba->dev, "%s: reset and restore failed\n",
6265                                         __func__);
6266                         hba->ufshcd_state = UFSHCD_STATE_ERROR;
6267                 }
6268                 /*
6269                  * Inform scsi mid-layer that we did reset and allow to handle
6270                  * Unit Attention properly.
6271                  */
6272                 scsi_report_bus_reset(hba->host, 0);
6273                 hba->saved_err = 0;
6274                 hba->saved_uic_err = 0;
6275                 hba->saved_ce_err = 0;
6276                 hba->force_host_reset = false;
6277         }
6278
6279 skip_err_handling:
6280         if (!needs_reset) {
6281                 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6282                 if (hba->saved_err || hba->saved_uic_err)
6283                         dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
6284                             __func__, hba->saved_err, hba->saved_uic_err);
6285         }
6286
6287         hba->silence_err_logs = false;
6288
6289         if (clks_enabled) {
6290                 __ufshcd_release(hba, false);
6291                 hba->ufs_stats.clk_rel.ctx = ERR_HNDLR_WORK;
6292         }
6293 out:
6294         ufshcd_clear_eh_in_progress(hba);
6295         spin_unlock_irqrestore(hba->host->host_lock, flags);
6296 }
6297
6298 static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
6299                 u32 reg)
6300 {
6301         reg_hist->reg[reg_hist->pos] = reg;
6302         reg_hist->tstamp[reg_hist->pos] = ktime_get();
6303         reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
6304 }
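
/*
 * Illustrative sketch (not part of the driver): the history above is a small
 * ring buffer. Assuming UIC_ERR_REG_HIST_LENGTH is 8, the ninth error
 * overwrites the oldest entry:
 *
 *	for (i = 0; i < 9; i++)
 *		ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, 0x1000 + i);
 *	// pa_err.reg[0] now holds 0x1008 and pa_err.pos has wrapped to 1
 *
 * Each entry also records a ktime_get() timestamp so the register dump
 * helpers can show when the last few errors occurred.
 */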
6305
6306 static void ufshcd_rls_handler(struct work_struct *work)
6307 {
6308         struct ufs_hba *hba;
6309         int ret = 0;
6310         u32 mode;
6311
6312         hba = container_of(work, struct ufs_hba, rls_work);
6313         pm_runtime_get_sync(hba->dev);
6314         ufshcd_scsi_block_requests(hba);
6315         down_write(&hba->lock);
6316         ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
6317         if (ret) {
6318                 dev_err(hba->dev,
6319                         "Timed out (%d) waiting for DB to clear\n",
6320                         ret);
6321                 goto out;
6322         }
6323
6324         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
6325         if (hba->pwr_info.pwr_rx != ((mode >> PWR_RX_OFFSET) & PWR_INFO_MASK))
6326                 hba->restore_needed = true;
6327
6328         if (hba->pwr_info.pwr_tx != (mode & PWR_INFO_MASK))
6329                 hba->restore_needed = true;
6330
6331         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_RXGEAR), &mode);
6332         if (hba->pwr_info.gear_rx != mode)
6333                 hba->restore_needed = true;
6334
6335         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TXGEAR), &mode);
6336         if (hba->pwr_info.gear_tx != mode)
6337                 hba->restore_needed = true;
6338
6339         if (hba->restore_needed)
6340                 ret = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
6341
6342         if (ret)
6343                 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
6344                         __func__, ret);
6345         else
6346                 hba->restore_needed = false;
6347
6348 out:
6349         up_write(&hba->lock);
6350         ufshcd_scsi_unblock_requests(hba);
6351         pm_runtime_put_sync(hba->dev);
6352 }
6353
6354 /**
6355  * ufshcd_update_uic_error - check and set fatal UIC error flags.
6356  * @hba: per-adapter instance
6357  *
6358  * Returns
6359  *  IRQ_HANDLED - If interrupt is valid
6360  *  IRQ_NONE    - If invalid interrupt
6361  */
6362 static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
6363 {
6364         u32 reg;
6365         irqreturn_t retval = IRQ_NONE;
6366
6367         /* PHY layer lane error */
6368         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
6369         if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
6370             (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
6371                 /*
6372                  * To know whether this error is fatal or not, DB timeout
6373                  * must be checked but this error is handled separately.
6374                  */
6375                 dev_dbg(hba->dev, "%s: UIC Lane error reported, reg 0x%x\n",
6376                                 __func__, reg);
6377                 ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
6378
6379                 /*
6380                  * Don't ignore LINERESET indication during hibern8
6381                  * enter operation.
6382                  */
6383                 if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
6384                         struct uic_command *cmd = hba->active_uic_cmd;
6385
6386                         if (cmd) {
6387                                 if (cmd->command == UIC_CMD_DME_HIBER_ENTER) {
6388                                         dev_err(hba->dev, "%s: LINERESET during hibern8 enter, reg 0x%x\n",
6389                                                 __func__, reg);
6390                                         hba->full_init_linereset = true;
6391                                 }
6392                         }
6393                         if (!hba->full_init_linereset)
6394                                 schedule_work(&hba->rls_work);
6395                 }
6396                 retval |= IRQ_HANDLED;
6397         }
6398
6399         /* PA_INIT_ERROR is fatal and needs UIC reset */
6400         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
6401         if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
6402             (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
6403                 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
6404
6405                 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) {
6406                         hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
6407                 } else if (hba->dev_quirks &
6408                            UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6409                         if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
6410                                 hba->uic_error |=
6411                                         UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6412                         else if (reg &
6413                                  UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
6414                                 hba->uic_error |=
6415                                         UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
6416                 }
6417                 retval |= IRQ_HANDLED;
6418         }
6419
6420         /* UIC NL/TL/DME errors need software retry */
6421         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
6422         if ((reg & UIC_NETWORK_LAYER_ERROR) &&
6423             (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
6424                 ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
6425                 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
6426                 retval |= IRQ_HANDLED;
6427         }
6428
6429         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
6430         if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
6431             (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
6432                 ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
6433                 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
6434                 retval |= IRQ_HANDLED;
6435         }
6436
6437         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
6438         if ((reg & UIC_DME_ERROR) &&
6439             (reg & UIC_DME_ERROR_CODE_MASK)) {
6440                 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
6441                 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
6442                 retval |= IRQ_HANDLED;
6443         }
6444
6445         dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
6446                         __func__, hba->uic_error);
6447         return retval;
6448 }
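
/*
 * Illustrative sketch (not part of the driver): how the per-layer checks
 * above accumulate into hba->uic_error. If the DL register reports PA_INIT
 * and the NL register also latched an error in the same interrupt, then on
 * return:
 *
 *	hba->uic_error == (UFSHCD_UIC_DL_PA_INIT_ERROR | UFSHCD_UIC_NL_ERROR);
 *
 * ufshcd_check_errors() then folds this into hba->saved_uic_err, which the
 * error handler work uses to decide whether a full host reset is needed.
 */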
6449
6450 /**
6451  * ufshcd_check_errors - Check for errors that need s/w attention
6452  * @hba: per-adapter instance
6453  *
6454  * Returns
6455  *  IRQ_HANDLED - If interrupt is valid
6456  *  IRQ_NONE    - If invalid interrupt
6457  */
6458 static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
6459 {
6460         bool queue_eh_work = false;
6461         irqreturn_t retval = IRQ_NONE;
6462
6463         if (hba->errors & INT_FATAL_ERRORS || hba->ce_error)
6464                 queue_eh_work = true;
6465
6466         if (hba->errors & UIC_ERROR) {
6467                 hba->uic_error = 0;
6468                 retval = ufshcd_update_uic_error(hba);
6469                 if (hba->uic_error)
6470                         queue_eh_work = true;
6471         }
6472
6473         if (queue_eh_work) {
6474                 /*
6475                  * update the transfer error masks to sticky bits, let's do this
6476                  * irrespective of current ufshcd_state.
6477                  */
6478                 hba->saved_err |= hba->errors;
6479                 hba->saved_uic_err |= hba->uic_error;
6480                 hba->saved_ce_err |= hba->ce_error;
6481
6482                 /* handle fatal errors only when link is functional */
6483                 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
6484                         /*
6485                          * Set error handling in progress flag early so that we
6486                          * don't issue new requests any more.
6487                          */
6488                         ufshcd_set_eh_in_progress(hba);
6489
6490                         hba->ufshcd_state = UFSHCD_STATE_ERROR;
6491                         schedule_work(&hba->eh_work);
6492                 }
6493                 retval |= IRQ_HANDLED;
6494         }
6495         /*
6496          * if (!queue_eh_work) -
6497          * Other errors are either non-fatal where host recovers
6498          * itself without s/w intervention or errors that will be
6499          * handled by the SCSI core layer.
6500          */
6501         return retval;
6502 }
6503
6504 /**
6505  * ufshcd_tmc_handler - handle task management function completion
6506  * @hba: per adapter instance
6507  *
6508  * Returns
6509  *  IRQ_HANDLED - If interrupt is valid
6510  *  IRQ_NONE    - If invalid interrupt
6511  */
6512 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
6513 {
6514         u32 tm_doorbell;
6515
6516         tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
6517         hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
6518         if (hba->tm_condition) {
6519                 wake_up(&hba->tm_wq);
6520                 return IRQ_HANDLED;
6521         } else {
6522                 return IRQ_NONE;
6523         }
6524 }
6525
6526 /**
6527  * ufshcd_sl_intr - Interrupt service routine
6528  * @hba: per adapter instance
6529  * @intr_status: contains interrupts generated by the controller
6530  *
6531  * Returns
6532  *  IRQ_HANDLED - If interrupt is valid
6533  *  IRQ_NONE    - If invalid interrupt
6534  */
6535 static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
6536 {
6537         irqreturn_t retval = IRQ_NONE;
6538
6539         ufsdbg_error_inject_dispatcher(hba,
6540                 ERR_INJECT_INTR, intr_status, &intr_status);
6541
6542         ufshcd_vops_crypto_engine_get_status(hba, &hba->ce_error);
6543
6544         hba->errors = UFSHCD_ERROR_MASK & intr_status;
6545         if (hba->errors || hba->ce_error)
6546                 retval |= ufshcd_check_errors(hba);
6547
6548         if (intr_status & UFSHCD_UIC_MASK)
6549                 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
6550
6551         if (intr_status & UTP_TASK_REQ_COMPL)
6552                 retval |= ufshcd_tmc_handler(hba);
6553
6554         if (intr_status & UTP_TRANSFER_REQ_COMPL)
6555                 retval |= ufshcd_transfer_req_compl(hba);
6556
6557         return retval;
6558 }
6559
6560 /**
6561  * ufshcd_intr - Main interrupt service routine
6562  * @irq: irq number
6563  * @__hba: pointer to adapter instance
6564  *
6565  * Returns
6566  *  IRQ_HANDLED - If interrupt is valid
6567  *  IRQ_NONE    - If invalid interrupt
6568  */
6569 static irqreturn_t ufshcd_intr(int irq, void *__hba)
6570 {
6571         u32 intr_status, enabled_intr_status;
6572         irqreturn_t retval = IRQ_NONE;
6573         struct ufs_hba *hba = __hba;
6574         int retries = hba->nutrs;
6575
6576         spin_lock(hba->host->host_lock);
6577         intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6578         hba->ufs_stats.last_intr_status = intr_status;
6579         hba->ufs_stats.last_intr_ts = ktime_get();
6580         /*
6581          * There can be at most hba->nutrs requests in flight. In the worst
6582          * case the requests finish one by one after the interrupt status is
6583          * read, so make sure we handle them by re-reading the interrupt status
6584          * in a loop until all of the requests are processed before returning.
6585          */
6586         do {
6587                 enabled_intr_status =
6588                         intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
6589                 if (intr_status)
6590                         ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
6591                 if (enabled_intr_status)
6592                         retval |= ufshcd_sl_intr(hba, enabled_intr_status);
6593
6594                 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6595         } while (intr_status && --retries);
6596
6597         if (retval == IRQ_NONE) {
6598                 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
6599                                         __func__, intr_status);
6600                 ufshcd_hex_dump("host regs: ", hba->mmio_base,
6601                                         UFSHCI_REG_SPACE_SIZE);
6602         }
6603
6604         spin_unlock(hba->host->host_lock);
6605         return retval;
6606 }
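
/*
 * Illustrative sketch (not part of the driver): why the ISR re-reads
 * REG_INTERRUPT_STATUS in a loop. With two requests in flight:
 *
 *	1. tag 0 completes -> status read, written back to clear it
 *	2. tag 1 completes while the handler is still running
 *	3. the do/while loop re-reads the status, sees the new completion and
 *	   handles it in the same invocation instead of waiting for a new IRQ
 *
 * The loop is bounded by hba->nutrs iterations, so a stuck status bit cannot
 * keep the CPU in the handler forever.
 */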
6607
6608 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
6609 {
6610         int err = 0;
6611         u32 mask = 1 << tag;
6612         unsigned long flags;
6613
6614         if (!test_bit(tag, &hba->outstanding_tasks))
6615                 goto out;
6616
6617         spin_lock_irqsave(hba->host->host_lock, flags);
6618         ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
6619         spin_unlock_irqrestore(hba->host->host_lock, flags);
6620
6621         /* poll for max. 1 sec to clear door bell register by h/w */
6622         err = ufshcd_wait_for_register(hba,
6623                         REG_UTP_TASK_REQ_DOOR_BELL,
6624                         mask, 0, 1000, 1000, true);
6625 out:
6626         return err;
6627 }
6628
6629 /**
6630  * ufshcd_issue_tm_cmd - issues task management commands to controller
6631  * @hba: per adapter instance
6632  * @lun_id: LUN ID to which TM command is sent
6633  * @task_id: task ID to which the TM command is applicable
6634  * @tm_function: task management function opcode
6635  * @tm_response: task management service response return value
6636  *
6637  * Returns non-zero value on error, zero on success.
6638  */
6639 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
6640                 u8 tm_function, u8 *tm_response)
6641 {
6642         struct utp_task_req_desc *task_req_descp;
6643         struct utp_upiu_task_req *task_req_upiup;
6644         struct Scsi_Host *host;
6645         unsigned long flags;
6646         int free_slot;
6647         int err;
6648         int task_tag;
6649
6650         host = hba->host;
6651
6652         /*
6653          * Get free slot, sleep if slots are unavailable.
6654          * Even though we use wait_event() which sleeps indefinitely,
6655          * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
6656          */
6657         wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
6658         hba->ufs_stats.clk_hold.ctx = TM_CMD_SEND;
6659         ufshcd_hold_all(hba);
6660
6661         spin_lock_irqsave(host->host_lock, flags);
6662         task_req_descp = hba->utmrdl_base_addr;
6663         task_req_descp += free_slot;
6664
6665         /* Configure task request descriptor */
6666         task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6667         task_req_descp->header.dword_2 =
6668                         cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6669
6670         /* Configure task request UPIU */
6671         task_req_upiup =
6672                 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
6673         task_tag = hba->nutrs + free_slot;
6674         task_req_upiup->header.dword_0 =
6675                 UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
6676                                               lun_id, task_tag);
6677         task_req_upiup->header.dword_1 =
6678                 UPIU_HEADER_DWORD(0, tm_function, 0, 0);
6679         /*
6680          * The host shall provide the same value for LUN field in the basic
6681          * header and for Input Parameter.
6682          */
6683         task_req_upiup->input_param1 = cpu_to_be32(lun_id);
6684         task_req_upiup->input_param2 = cpu_to_be32(task_id);
6685
6686         /* send command to the controller */
6687         __set_bit(free_slot, &hba->outstanding_tasks);
6688
6689         /* Make sure descriptors are ready before ringing the task doorbell */
6690         wmb();
6691
6692         ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
6693         /* Make sure that doorbell is committed immediately */
6694         wmb();
6695
6696         spin_unlock_irqrestore(host->host_lock, flags);
6697
6698         /* wait until the task management command is completed */
6699         err = wait_event_timeout(hba->tm_wq,
6700                         test_bit(free_slot, &hba->tm_condition),
6701                         msecs_to_jiffies(TM_CMD_TIMEOUT));
6702         if (!err) {
6703                 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
6704                                 __func__, tm_function);
6705                 if (ufshcd_clear_tm_cmd(hba, free_slot))
6706                         dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
6707                                         __func__, free_slot);
6708                 err = -ETIMEDOUT;
6709         } else {
6710                 err = ufshcd_task_req_compl(hba, free_slot, tm_response);
6711         }
6712
6713         clear_bit(free_slot, &hba->tm_condition);
6714         ufshcd_put_tm_slot(hba, free_slot);
6715         wake_up(&hba->tm_tag_wq);
6716         hba->ufs_stats.clk_rel.ctx = TM_CMD_SEND;
6717
6718         ufshcd_release_all(hba);
6719         return err;
6720 }
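
/*
 * Illustrative sketch (not part of the driver): the typical caller pattern,
 * as used by the SCSI error handlers below, checks both the return value and
 * the task management service response:
 *
 *	u8 resp = 0xF;
 *	int err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
 *				      UFS_QUERY_TASK, &resp);
 *	if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
 *		;	// command still pending in the device
 *	else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL)
 *		;	// command already completed or never made it
 *
 * err only reports transport-level failures (e.g. -ETIMEDOUT); the outcome
 * of the TM function itself is carried back in *tm_response.
 */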
6721
6722 /**
6723  * ufshcd_eh_device_reset_handler - device reset handler registered to
6724  *                                    scsi layer.
6725  * @cmd: SCSI command pointer
6726  *
6727  * Returns SUCCESS/FAILED
6728  */
6729 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
6730 {
6731         struct Scsi_Host *host;
6732         struct ufs_hba *hba;
6733         unsigned int tag;
6734         u32 pos;
6735         int err;
6736         u8 resp = 0xF;
6737         struct ufshcd_lrb *lrbp;
6738         unsigned long flags;
6739
6740         host = cmd->device->host;
6741         hba = shost_priv(host);
6742         tag = cmd->request->tag;
6743
6744         ufshcd_print_cmd_log(hba);
6745         lrbp = &hba->lrb[tag];
6746         err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
6747         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6748                 if (!err)
6749                         err = resp;
6750                 goto out;
6751         }
6752
6753         /* clear the commands that were pending for corresponding LUN */
6754         for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
6755                 if (hba->lrb[pos].lun == lrbp->lun) {
6756                         err = ufshcd_clear_cmd(hba, pos);
6757                         if (err)
6758                                 break;
6759                 }
6760         }
6761         spin_lock_irqsave(host->host_lock, flags);
6762         ufshcd_transfer_req_compl(hba);
6763         spin_unlock_irqrestore(host->host_lock, flags);
6764
6765 out:
6766         hba->req_abort_count = 0;
6767         if (!err) {
6768                 err = SUCCESS;
6769         } else {
6770                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6771                 err = FAILED;
6772         }
6773         return err;
6774 }
6775
6776 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
6777 {
6778         struct ufshcd_lrb *lrbp;
6779         int tag;
6780
6781         for_each_set_bit(tag, &bitmap, hba->nutrs) {
6782                 lrbp = &hba->lrb[tag];
6783                 lrbp->req_abort_skip = true;
6784         }
6785 }
6786
6787 /**
6788  * ufshcd_abort - abort a specific command
6789  * @cmd: SCSI command pointer
6790  *
6791  * Abort the pending command in the device by sending the UFS_ABORT_TASK task
6792  * management command, and in the host controller by clearing the door-bell
6793  * register. There can be a race between the controller sending the command to
6794  * the device and the abort being issued. To avoid that, first issue
6795  * UFS_QUERY_TASK to check whether the command was really issued, then abort it.
6796  *
6797  * Returns SUCCESS/FAILED
6798  */
6799 static int ufshcd_abort(struct scsi_cmnd *cmd)
6800 {
6801         struct Scsi_Host *host;
6802         struct ufs_hba *hba;
6803         unsigned long flags;
6804         unsigned int tag;
6805         int err = 0;
6806         int poll_cnt;
6807         u8 resp = 0xF;
6808         struct ufshcd_lrb *lrbp;
6809         u32 reg;
6810
6811         host = cmd->device->host;
6812         hba = shost_priv(host);
6813         tag = cmd->request->tag;
6814         if (!ufshcd_valid_tag(hba, tag)) {
6815                 dev_err(hba->dev,
6816                         "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
6817                         __func__, tag, cmd, cmd->request);
6818                 BUG();
6819         }
6820
6821         lrbp = &hba->lrb[tag];
6822
6823         ufshcd_update_error_stats(hba, UFS_ERR_TASK_ABORT);
6824
6825         /*
6826          * Task abort to the device W-LUN is illegal. When this command
6827          * fails, due to the spec violation, the next SCSI error handling
6828          * step will be to send a LU reset which, again, is a spec violation.
6829          * To avoid these unnecessary/illegal steps we skip to the last error
6830          * handling stage: reset and restore.
6831          */
6832         if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
6833                 return ufshcd_eh_host_reset_handler(cmd);
6834
6835         ufshcd_hold_all(hba);
6836         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6837         /* If command is already aborted/completed, return SUCCESS */
6838         if (!(test_bit(tag, &hba->outstanding_reqs))) {
6839                 dev_err(hba->dev,
6840                         "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
6841                         __func__, tag, hba->outstanding_reqs, reg);
6842                 goto out;
6843         }
6844
6845         if (!(reg & (1 << tag))) {
6846                 dev_err(hba->dev,
6847                 "%s: cmd was completed, but without a notifying intr, tag = %d",
6848                 __func__, tag);
6849         }
6850
6851         /* Print Transfer Request of aborted task */
6852         dev_err(hba->dev, "%s: Device abort task at tag %d", __func__, tag);
6853
6854         /*
6855          * Print detailed info about aborted request.
6856          * As more than one request might get aborted at the same time,
6857          * print full information only for the first aborted request in order
6858          * to reduce repeated printouts. For other aborted requests only print
6859          * basic details.
6860          */
6861         scsi_print_command(cmd);
6862         if (!hba->req_abort_count) {
6863                 ufshcd_print_fsm_state(hba);
6864                 ufshcd_print_host_regs(hba);
6865                 ufshcd_print_host_state(hba);
6866                 ufshcd_print_pwr_info(hba);
6867                 ufshcd_print_trs(hba, 1 << tag, true);
6868         } else {
6869                 ufshcd_print_trs(hba, 1 << tag, false);
6870         }
6871         hba->req_abort_count++;
6872
6873
6874         /* Skip task abort in case previous aborts failed and report failure */
6875         if (lrbp->req_abort_skip) {
6876                 err = -EIO;
6877                 goto out;
6878         }
6879
6880         for (poll_cnt = 100; poll_cnt; poll_cnt--) {
6881                 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6882                                 UFS_QUERY_TASK, &resp);
6883                 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
6884                         /* cmd pending in the device */
6885                         dev_err(hba->dev, "%s: cmd pending in the device. tag = %d",
6886                                 __func__, tag);
6887                         break;
6888                 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6889                         /*
6890                          * cmd not pending in the device, check if it is
6891                          * in transition.
6892                          */
6893                         dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.",
6894                                 __func__, tag);
6895                         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6896                         if (reg & (1 << tag)) {
6897                                 /* sleep for max. 200us to stabilize */
6898                                 usleep_range(100, 200);
6899                                 continue;
6900                         }
6901                         /* command completed already */
6902                         dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.",
6903                                 __func__, tag);
6904                         goto out;
6905                 } else {
6906                         dev_err(hba->dev,
6907                                 "%s: no response from device. tag = %d, err %d",
6908                                 __func__, tag, err);
6909                         if (!err)
6910                                 err = resp; /* service response error */
6911                         goto out;
6912                 }
6913         }
6914
6915         if (!poll_cnt) {
6916                 err = -EBUSY;
6917                 goto out;
6918         }
6919
6920         err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6921                         UFS_ABORT_TASK, &resp);
6922         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6923                 if (!err) {
6924                         err = resp; /* service response error */
6925                         dev_err(hba->dev, "%s: issued. tag = %d, err %d",
6926                                 __func__, tag, err);
6927                 }
6928                 goto out;
6929         }
6930
6931         err = ufshcd_clear_cmd(hba, tag);
6932         if (err) {
6933                 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d",
6934                         __func__, tag, err);
6935                 goto out;
6936         }
6937
6938         scsi_dma_unmap(cmd);
6939
6940         spin_lock_irqsave(host->host_lock, flags);
6941         ufshcd_outstanding_req_clear(hba, tag);
6942         hba->lrb[tag].cmd = NULL;
6943         spin_unlock_irqrestore(host->host_lock, flags);
6944
6945         clear_bit_unlock(tag, &hba->lrb_in_use);
6946         wake_up(&hba->dev_cmd.tag_wq);
6947
6948 out:
6949         if (!err) {
6950                 err = SUCCESS;
6951         } else {
6952                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6953                 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
6954                 err = FAILED;
6955         }
6956
6957         /*
6958          * This ufshcd_release_all() corresponds to the original scsi cmd that
6959          * got aborted here (as we won't get any IRQ for it).
6960          */
6961         ufshcd_release_all(hba);
6962         return err;
6963 }
6964
6965 /**
6966  * ufshcd_host_reset_and_restore - reset and restore host controller
6967  * @hba: per-adapter instance
6968  *
6969  * Note that the host controller reset may issue DME_RESET to the
6970  * local and remote (device) Uni-Pro stacks and the attributes
6971  * are reset to their default state.
6972  *
6973  * Returns zero on success, non-zero on failure
6974  */
6975 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
6976 {
6977         int err;
6978         unsigned long flags;
6979
6980         /* Reset the host controller */
6981         spin_lock_irqsave(hba->host->host_lock, flags);
6982         ufshcd_hba_stop(hba, false);
6983         spin_unlock_irqrestore(hba->host->host_lock, flags);
6984
6985         /* scale up clocks to max frequency before full reinitialization */
6986         ufshcd_set_clk_freq(hba, true);
6987
6988         err = ufshcd_hba_enable(hba);
6989         if (err)
6990                 goto out;
6991
6992         /* Establish the link again and restore the device */
6993         err = ufshcd_probe_hba(hba);
6994
6995         if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) {
6996                 err = -EIO;
6997                 goto out;
6998         }
6999
7000         if (!err) {
7001                 err = ufshcd_vops_crypto_engine_reset(hba);
7002                 if (err) {
7003                         dev_err(hba->dev,
7004                                 "%s: failed to reset crypto engine %d\n",
7005                                 __func__, err);
7006                         goto out;
7007                 }
7008         }
7009
7010 out:
7011         if (err)
7012                 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
7013
7014         return err;
7015 }
7016
7017 /**
7018  * ufshcd_reset_and_restore - reset and re-initialize host/device
7019  * @hba: per-adapter instance
7020  *
7021  * Reset and recover device, host and re-establish link. This
7022  * is helpful to recover the communication in fatal error conditions.
7023  *
7024  * Returns zero on success, non-zero on failure
7025  */
7026 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
7027 {
7028         int err = 0;
7029         unsigned long flags;
7030         int retries = MAX_HOST_RESET_RETRIES;
7031
7032         do {
7033                 err = ufshcd_vops_full_reset(hba);
7034                 if (err)
7035                         dev_warn(hba->dev, "%s: full reset returned %d\n",
7036                                  __func__, err);
7037
7038                 err = ufshcd_reset_device(hba);
7039                 if (err)
7040                         dev_warn(hba->dev, "%s: device reset failed. err %d\n",
7041                                  __func__, err);
7042
7043                 err = ufshcd_host_reset_and_restore(hba);
7044         } while (err && --retries);
7045
7046         /*
7047          * There is no point in proceeding if we have failed to recover
7048          * even after multiple retries.
7049          */
7050         if (err)
7051                 BUG();
7052         /*
7053          * After reset, the doorbell might be cleared; complete the
7054          * outstanding requests in s/w here.
7055          */
7056         spin_lock_irqsave(hba->host->host_lock, flags);
7057         ufshcd_transfer_req_compl(hba);
7058         ufshcd_tmc_handler(hba);
7059         spin_unlock_irqrestore(hba->host->host_lock, flags);
7060
7061         return err;
7062 }
7063
7064 /**
7065  * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
7066  * @cmd: SCSI command pointer
7067  *
7068  * Returns SUCCESS/FAILED
7069  */
7070 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
7071 {
7072         int err = SUCCESS;
7073         unsigned long flags;
7074         struct ufs_hba *hba;
7075
7076         hba = shost_priv(cmd->device->host);
7077
7078         /*
7079          * Check if there is any race with fatal error handling.
7080          * If so, wait for it to complete. Even though fatal error
7081          * handling does reset and restore in some cases, don't assume
7082          * anything out of it. We are just avoiding race here.
7083          */
7084         do {
7085                 spin_lock_irqsave(hba->host->host_lock, flags);
7086                 if (!(work_pending(&hba->eh_work) ||
7087                                 hba->ufshcd_state == UFSHCD_STATE_RESET))
7088                         break;
7089                 spin_unlock_irqrestore(hba->host->host_lock, flags);
7090                 dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
7091                 flush_work(&hba->eh_work);
7092         } while (1);
7093
7094         /*
7095          * We don't know whether the previous reset really reset the host
7096          * controller or not, so force a reset here to be sure.
7097          */
7098         hba->ufshcd_state = UFSHCD_STATE_ERROR;
7099         hba->force_host_reset = true;
7100         schedule_work(&hba->eh_work);
7101
7102         /* wait for the reset work to finish */
7103         do {
7104                 if (!(work_pending(&hba->eh_work) ||
7105                                 hba->ufshcd_state == UFSHCD_STATE_RESET))
7106                         break;
7107                 spin_unlock_irqrestore(hba->host->host_lock, flags);
7108                 dev_err(hba->dev, "%s: reset in progress - 2\n", __func__);
7109                 flush_work(&hba->eh_work);
7110                 spin_lock_irqsave(hba->host->host_lock, flags);
7111         } while (1);
7112
7113         if (!((hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) &&
7114               ufshcd_is_link_active(hba))) {
7115                 err = FAILED;
7116                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
7117         }
7118
7119         spin_unlock_irqrestore(hba->host->host_lock, flags);
7120
7121         return err;
7122 }
7123
7124 /**
7125  * ufshcd_get_max_icc_level - calculate the ICC level
7126  * @sup_curr_uA: max. current supported by the regulator
7127  * @start_scan: row at the desc table to start scan from
7128  * @buff: power descriptor buffer
7129  *
7130  * Returns calculated max ICC level for specific regulator
7131  */
7132 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
7133 {
7134         int i;
7135         int curr_uA;
7136         u16 data;
7137         u16 unit;
7138
7139         for (i = start_scan; i >= 0; i--) {
7140                 data = be16_to_cpu(*((u16 *)(buff + 2*i)));
7141                 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
7142                                                 ATTR_ICC_LVL_UNIT_OFFSET;
7143                 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
7144                 switch (unit) {
7145                 case UFSHCD_NANO_AMP:
7146                         curr_uA = curr_uA / 1000;
7147                         break;
7148                 case UFSHCD_MILI_AMP:
7149                         curr_uA = curr_uA * 1000;
7150                         break;
7151                 case UFSHCD_AMP:
7152                         curr_uA = curr_uA * 1000 * 1000;
7153                         break;
7154                 case UFSHCD_MICRO_AMP:
7155                 default:
7156                         break;
7157                 }
7158                 if (sup_curr_uA >= curr_uA)
7159                         break;
7160         }
7161         if (i < 0) {
7162                 i = 0;
7163                 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
7164         }
7165
7166         return (u32)i;
7167 }
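
/*
 * Worked example (illustrative values only): each 16-bit row of the power
 * descriptor packs a unit field and a value, and everything is normalized to
 * micro-amps before being compared against the regulator's max_uA.
 *
 *   Suppose a row decodes to unit = UFSHCD_MILI_AMP and value = 300:
 *     curr_uA = 300 * 1000 = 300000 uA
 *     if (sup_curr_uA >= 300000)  ->  this row's index is the ICC level
 *
 * If no row satisfies the regulator limit, the scan falls through to index 0.
 */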
7168
7169 /**
7170  * ufshcd_find_max_sup_active_icc_level - calculate the max active ICC level
7171  * In case the regulators are not initialized, we'll return 0
7172  * @hba: per-adapter instance
7173  * @desc_buf: power descriptor buffer to extract ICC levels from.
7174  * @len: length of desc_buf
7175  *
7176  * Returns calculated ICC level
7177  */
7178 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
7179                                                         u8 *desc_buf, int len)
7180 {
7181         u32 icc_level = 0;
7182
7183         if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
7184                                                 !hba->vreg_info.vccq2) {
7185                 dev_err(hba->dev,
7186                         "%s: Regulator capability was not set, actvIccLevel=%d",
7187                                                         __func__, icc_level);
7188                 goto out;
7189         }
7190
7191         if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
7192                 icc_level = ufshcd_get_max_icc_level(
7193                                 hba->vreg_info.vcc->max_uA,
7194                                 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
7195                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
7196
7197         if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
7198                 icc_level = ufshcd_get_max_icc_level(
7199                                 hba->vreg_info.vccq->max_uA,
7200                                 icc_level,
7201                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
7202
7203         if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
7204                 icc_level = ufshcd_get_max_icc_level(
7205                                 hba->vreg_info.vccq2->max_uA,
7206                                 icc_level,
7207                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
7208 out:
7209         return icc_level;
7210 }
7211
7212 static void ufshcd_init_icc_levels(struct ufs_hba *hba)
7213 {
7214         int ret;
7215         int buff_len = QUERY_DESC_POWER_MAX_SIZE;
7216         u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
7217
7218         ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
7219         if (ret) {
7220                 dev_err(hba->dev,
7221                         "%s: Failed reading power descriptor. len = %d ret = %d",
7222                         __func__, buff_len, ret);
7223                 return;
7224         }
7225
7226         hba->init_prefetch_data.icc_level =
7227                         ufshcd_find_max_sup_active_icc_level(hba,
7228                         desc_buf, buff_len);
7229         dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
7230                         __func__, hba->init_prefetch_data.icc_level);
7231
7232         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7233                 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
7234                 &hba->init_prefetch_data.icc_level);
7235
7236         if (ret)
7237                 dev_err(hba->dev,
7238                         "%s: Failed configuring bActiveICCLevel = %d ret = %d",
7239                         __func__, hba->init_prefetch_data.icc_level, ret);
7240
7241 }
7242
7243 /**
7244  * ufshcd_scsi_add_wlus - Adds required W-LUs
7245  * @hba: per-adapter instance
7246  *
7247  * UFS device specification requires the UFS devices to support 4 well known
7248  * logical units:
7249  *      "REPORT_LUNS" (address: 01h)
7250  *      "UFS Device" (address: 50h)
7251  *      "RPMB" (address: 44h)
7252  *      "BOOT" (address: 30h)
7253  * UFS device's power management needs to be controlled by the "POWER
7254  * CONDITION" field of the SSU (START STOP UNIT) command. But this "power
7255  * condition" field will take effect only when it is sent to the "UFS Device"
7256  * well known logical unit, hence we require a scsi_device instance to
7257  * represent this logical unit so that the UFS host driver can send it the
7258  * SSU command for power management.
7259  *
7260  * We also require the scsi_device instance for the "RPMB" (Replay Protected
7261  * Memory Block) LU so that a user space process can control this LU. User
7262  * space may also want to have access to the BOOT LU.
7263  *
7264  * This function adds a scsi device instance for each of the above well known
7264  * LUs (except the "REPORT LUNS" LU).
7265  *
7266  * Returns zero on success (all required W-LUs are added successfully),
7267  * non-zero error value on failure (if failed to add any of the required W-LU).
7268  */
7269 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
7270 {
7271         int ret = 0;
7272         struct scsi_device *sdev_rpmb;
7273         struct scsi_device *sdev_boot;
7274
7275         hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
7276                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
7277         if (IS_ERR(hba->sdev_ufs_device)) {
7278                 ret = PTR_ERR(hba->sdev_ufs_device);
7279                 hba->sdev_ufs_device = NULL;
7280                 goto out;
7281         }
7282         scsi_device_put(hba->sdev_ufs_device);
7283
7284         sdev_boot = __scsi_add_device(hba->host, 0, 0,
7285                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
7286         if (IS_ERR(sdev_boot)) {
7287                 ret = PTR_ERR(sdev_boot);
7288                 goto remove_sdev_ufs_device;
7289         }
7290         scsi_device_put(sdev_boot);
7291
7292         sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
7293                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
7294         if (IS_ERR(sdev_rpmb)) {
7295                 ret = PTR_ERR(sdev_rpmb);
7296                 goto remove_sdev_boot;
7297         }
7298         scsi_device_put(sdev_rpmb);
7299         goto out;
7300
7301 remove_sdev_boot:
7302         scsi_remove_device(sdev_boot);
7303 remove_sdev_ufs_device:
7304         scsi_remove_device(hba->sdev_ufs_device);
7305 out:
7306         return ret;
7307 }
7308
7309 /**
7310  * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
7311  * @hba: per-adapter instance
7312  *
7313  * PA_TActivate parameter can be tuned manually if UniPro version is less than
7314  * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
7315  * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
7316  * the hibern8 exit latency.
7317  *
7318  * Returns zero on success, non-zero error value on failure.
7319  */
7320 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
7321 {
7322         int ret = 0;
7323         u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
7324
7325         if (!ufshcd_is_unipro_pa_params_tuning_req(hba))
7326                 return 0;
7327
7328         ret = ufshcd_dme_peer_get(hba,
7329                                   UIC_ARG_MIB_SEL(
7330                                         RX_MIN_ACTIVATETIME_CAPABILITY,
7331                                         UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7332                                   &peer_rx_min_activatetime);
7333         if (ret)
7334                 goto out;
7335
7336         /* make sure proper unit conversion is applied */
7337         tuned_pa_tactivate =
7338                 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
7339                  / PA_TACTIVATE_TIME_UNIT_US);
7340         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7341                              tuned_pa_tactivate);
7342
7343 out:
7344         return ret;
7345 }
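
/*
 * Worked example (assumed unit values): the tuning above only rescales the
 * peer's minimum activate time into PA_TActivate units. Assuming
 * RX_MIN_ACTIVATETIME_UNIT_US is 100 us and PA_TACTIVATE_TIME_UNIT_US is
 * 10 us:
 *
 *   peer_rx_min_activatetime = 2   ->  2 * 100 us = 200 us
 *   tuned_pa_tactivate = 200 us / 10 us = 20, written to PA_TACTIVATE
 */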
7346
7347 /**
7348  * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
7349  * @hba: per-adapter instance
7350  *
7351  * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
7352  * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
7353  * TX_HIBERN8TIME_CAPABILITY and the peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
7354  * This optimal value can help reduce the hibern8 exit latency.
7355  *
7356  * Returns zero on success, non-zero error value on failure.
7357  */
7358 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
7359 {
7360         int ret = 0;
7361         u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
7362         u32 max_hibern8_time, tuned_pa_hibern8time;
7363
7364         ret = ufshcd_dme_get(hba,
7365                              UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
7366                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
7367                                   &local_tx_hibern8_time_cap);
7368         if (ret)
7369                 goto out;
7370
7371         ret = ufshcd_dme_peer_get(hba,
7372                                   UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
7373                                         UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7374                                   &peer_rx_hibern8_time_cap);
7375         if (ret)
7376                 goto out;
7377
7378         max_hibern8_time = max(local_tx_hibern8_time_cap,
7379                                peer_rx_hibern8_time_cap);
7380         /* make sure proper unit conversion is applied */
7381         tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
7382                                 / PA_HIBERN8_TIME_UNIT_US);
7383         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
7384                              tuned_pa_hibern8time);
7385 out:
7386         return ret;
7387 }
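
/*
 * Sketch (symbolic, no constant values assumed): the same rescaling pattern as
 * ufshcd_tune_pa_tactivate(), except the larger of the two capabilities is
 * taken first. E.g. with a local TX capability of 3 and a peer RX capability
 * of 5, max_hibern8_time = 5 and PA_HIBERN8TIME is set to
 * 5 * HIBERN8TIME_UNIT_US / PA_HIBERN8_TIME_UNIT_US.
 */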
7388
7389 /**
7390  * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
7391  * less than device PA_TACTIVATE time.
7392  * @hba: per-adapter instance
7393  *
7394  * Some UFS devices require host PA_TACTIVATE to be lower than device
7395  * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
7396  * for such devices.
7397  *
7398  * Returns zero on success, non-zero error value on failure.
7399  */
7400 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
7401 {
7402         int ret = 0;
7403         u32 granularity, peer_granularity;
7404         u32 pa_tactivate, peer_pa_tactivate;
7405         u32 pa_tactivate_us, peer_pa_tactivate_us;
7406         u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
7407
7408         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7409                                   &granularity);
7410         if (ret)
7411                 goto out;
7412
7413         ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7414                                   &peer_granularity);
7415         if (ret)
7416                 goto out;
7417
7418         if ((granularity < PA_GRANULARITY_MIN_VAL) ||
7419             (granularity > PA_GRANULARITY_MAX_VAL)) {
7420                 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
7421                         __func__, granularity);
7422                 return -EINVAL;
7423         }
7424
7425         if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
7426             (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
7427                 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
7428                         __func__, peer_granularity);
7429                 return -EINVAL;
7430         }
7431
7432         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
7433         if (ret)
7434                 goto out;
7435
7436         ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
7437                                   &peer_pa_tactivate);
7438         if (ret)
7439                 goto out;
7440
7441         pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
7442         peer_pa_tactivate_us = peer_pa_tactivate *
7443                              gran_to_us_table[peer_granularity - 1];
7444
7445         if (pa_tactivate_us > peer_pa_tactivate_us) {
7446                 u32 new_peer_pa_tactivate;
7447
7448                 new_peer_pa_tactivate = pa_tactivate_us /
7449                                       gran_to_us_table[peer_granularity - 1];
7450                 new_peer_pa_tactivate++;
7451                 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7452                                           new_peer_pa_tactivate);
7453         }
7454
7455 out:
7456         return ret;
7457 }
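
/*
 * Worked example (illustrative values): suppose the host reports
 * PA_GRANULARITY = 3 (8 us per unit, from gran_to_us_table) with
 * PA_TACTIVATE = 2, and the device reports PA_GRANULARITY = 2 (4 us per unit)
 * with PA_TACTIVATE = 3:
 *
 *   pa_tactivate_us      = 2 * 8 = 16 us
 *   peer_pa_tactivate_us = 3 * 4 = 12 us
 *
 * Since 16 > 12, the device value is bumped to 16 / 4 + 1 = 5 units (20 us),
 * leaving the device PA_TACTIVATE strictly greater than the host's.
 */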
7458
7459 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
7460 {
7461         if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
7462                 ufshcd_tune_pa_tactivate(hba);
7463                 ufshcd_tune_pa_hibern8time(hba);
7464         }
7465
7466         if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
7467                 /* set 1ms timeout for PA_TACTIVATE */
7468                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
7469
7470         if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
7471                 ufshcd_quirk_tune_host_pa_tactivate(hba);
7472
7473         ufshcd_vops_apply_dev_quirks(hba);
7474 }
7475
7476 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
7477 {
7478         int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
7479
7480         memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
7481         memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
7482         memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
7483         memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
7484         memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
7485
7486         hba->req_abort_count = 0;
7487 }
7488
7489 static void ufshcd_apply_pm_quirks(struct ufs_hba *hba)
7490 {
7491         if (hba->dev_quirks & UFS_DEVICE_QUIRK_NO_LINK_OFF) {
7492                 if (ufs_get_pm_lvl_to_link_pwr_state(hba->rpm_lvl) ==
7493                     UIC_LINK_OFF_STATE) {
7494                         hba->rpm_lvl =
7495                                 ufs_get_desired_pm_lvl_for_dev_link_state(
7496                                                 UFS_SLEEP_PWR_MODE,
7497                                                 UIC_LINK_HIBERN8_STATE);
7498                         dev_info(hba->dev, "UFS_DEVICE_QUIRK_NO_LINK_OFF enabled, changed rpm_lvl to %d\n",
7499                                 hba->rpm_lvl);
7500                 }
7501                 if (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
7502                     UIC_LINK_OFF_STATE) {
7503                         hba->spm_lvl =
7504                                 ufs_get_desired_pm_lvl_for_dev_link_state(
7505                                                 UFS_SLEEP_PWR_MODE,
7506                                                 UIC_LINK_HIBERN8_STATE);
7507                         dev_info(hba->dev, "UFS_DEVICE_QUIRK_NO_LINK_OFF enabled, changed spm_lvl to %d\n",
7508                                 hba->spm_lvl);
7509                 }
7510         }
7511 }
7512
7513 /**
7514  * ufshcd_probe_hba - probe hba to detect device and initialize
7515  * @hba: per-adapter instance
7516  *
7517  * Execute link-startup and verify device initialization
7518  */
7519 static int ufshcd_probe_hba(struct ufs_hba *hba)
7520 {
7521         int ret;
7522         ktime_t start = ktime_get();
7523
7524         ret = ufshcd_link_startup(hba);
7525         if (ret)
7526                 goto out;
7527
7528         /* Debug counters initialization */
7529         ufshcd_clear_dbg_ufs_stats(hba);
7530         /* set the default level for urgent bkops */
7531         hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
7532         hba->is_urgent_bkops_lvl_checked = false;
7533
7534         /* UniPro link is active now */
7535         ufshcd_set_link_active(hba);
7536
7537         ret = ufshcd_verify_dev_init(hba);
7538         if (ret)
7539                 goto out;
7540
7541         ret = ufshcd_complete_dev_init(hba);
7542         if (ret)
7543                 goto out;
7544
7545         ufs_advertise_fixup_device(hba);
7546         ufshcd_tune_unipro_params(hba);
7547
7548         ufshcd_apply_pm_quirks(hba);
7549         ret = ufshcd_set_vccq_rail_unused(hba,
7550                 (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
7551         if (ret)
7552                 goto out;
7553
7554         /* UFS device is also active now */
7555         ufshcd_set_ufs_dev_active(hba);
7556         ufshcd_force_reset_auto_bkops(hba);
7557         hba->wlun_dev_clr_ua = true;
7558
7559         if (ufshcd_get_max_pwr_mode(hba)) {
7560                 dev_err(hba->dev,
7561                         "%s: Failed getting max supported power mode\n",
7562                         __func__);
7563         } else {
7564                 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
7565                 if (ret) {
7566                         dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
7567                                         __func__, ret);
7568                         goto out;
7569                 }
7570         }
7571
7572         /* set the state as operational after switching to desired gear */
7573         hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
7574         /*
7575          * If we are in error handling context or in power management callbacks
7576          * context, no need to scan the host
7577          */
7578         if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
7579                 bool flag;
7580
7581                 /* clear any previous UFS device information */
7582                 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
7583                 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
7584                                 QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
7585                         hba->dev_info.f_power_on_wp_en = flag;
7586
7587                 if (!hba->is_init_prefetch)
7588                         ufshcd_init_icc_levels(hba);
7589
7590                 /* Add required well known logical units to scsi mid layer */
7591                 ret = ufshcd_scsi_add_wlus(hba);
7592                 if (ret)
7593                         goto out;
7594
7595                 /* Initialize devfreq after UFS device is detected */
7596                 if (ufshcd_is_clkscaling_supported(hba)) {
7597                         memcpy(&hba->clk_scaling.saved_pwr_info.info,
7598                             &hba->pwr_info, sizeof(struct ufs_pa_layer_attr));
7599                         hba->clk_scaling.saved_pwr_info.is_valid = true;
7600                         hba->clk_scaling.is_scaled_up = true;
7601                         if (!hba->devfreq) {
7602                                 hba->devfreq = devfreq_add_device(hba->dev,
7603                                                         &ufs_devfreq_profile,
7604                                                         "simple_ondemand",
7605                                                         gov_data);
7606                                 if (IS_ERR(hba->devfreq)) {
7607                                         ret = PTR_ERR(hba->devfreq);
7608                                         dev_err(hba->dev, "Unable to register with devfreq %d\n",
7609                                                 ret);
7610                                         goto out;
7611                                 }
7612                         }
7613                         hba->clk_scaling.is_allowed = true;
7614                 }
7615
7616                 scsi_scan_host(hba->host);
7617                 pm_runtime_put_sync(hba->dev);
7618         }
7619
7620         if (!hba->is_init_prefetch)
7621                 hba->is_init_prefetch = true;
7622
7623         /*
7624          * Enable auto hibern8 if supported, after full host and
7625          * device initialization.
7626          */
7627         if (ufshcd_is_auto_hibern8_supported(hba))
7628                 ufshcd_set_auto_hibern8_timer(hba,
7629                                       hba->hibern8_on_idle.delay_ms);
7630 out:
7631         /*
7632          * If we failed to initialize the device or the device is not
7633          * present, turn off the power/clocks etc.
7634          */
7635         if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
7636                 pm_runtime_put_sync(hba->dev);
7637                 ufshcd_hba_exit(hba);
7638         }
7639
7640         trace_ufshcd_init(dev_name(hba->dev), ret,
7641                 ktime_to_us(ktime_sub(ktime_get(), start)),
7642                 hba->curr_dev_pwr_mode, hba->uic_link_state);
7643         return ret;
7644 }
7645
7646 /**
7647  * ufshcd_async_scan - asynchronous execution for probing hba
7648  * @data: data pointer to pass to this function
7649  * @cookie: cookie data
7650  */
7651 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
7652 {
7653         struct ufs_hba *hba = (struct ufs_hba *)data;
7654
7655         /*
7656          * Don't allow clock gating and hibern8 enter for faster device
7657          * detection.
7658          */
7659         ufshcd_hold_all(hba);
7660         ufshcd_probe_hba(hba);
7661         ufshcd_release_all(hba);
7662 }
7663
7664 /**
7665  * ufshcd_query_ioctl - perform user read queries
7666  * @hba: per-adapter instance
7667  * @lun: used for lun specific queries
7668  * @buffer: user space buffer for reading and submitting query data and params
7669  * @return: 0 for success, negative error code otherwise
7670  *
7671  * Expected/Submitted buffer structure is struct ufs_ioctl_query_data.
7672  * It reads the opcode, idn and buf_size parameters, puts the response in the
7673  * buffer field and updates the used size in buf_size.
7674  */
7675 static int ufshcd_query_ioctl(struct ufs_hba *hba, u8 lun, void __user *buffer)
7676 {
7677         struct ufs_ioctl_query_data *ioctl_data;
7678         int err = 0;
7679         int length = 0;
7680         void *data_ptr;
7681         bool flag;
7682         u32 att;
7683         u8 index;
7684         u8 *desc = NULL;
7685
7686         ioctl_data = kzalloc(sizeof(struct ufs_ioctl_query_data), GFP_KERNEL);
7687         if (!ioctl_data) {
7688                 dev_err(hba->dev, "%s: Failed allocating %zu bytes\n", __func__,
7689                                 sizeof(struct ufs_ioctl_query_data));
7690                 err = -ENOMEM;
7691                 goto out;
7692         }
7693
7694         /* extract params from user buffer */
7695         err = copy_from_user(ioctl_data, buffer,
7696                         sizeof(struct ufs_ioctl_query_data));
7697         if (err) {
7698                 dev_err(hba->dev,
7699                         "%s: Failed copying buffer from user, err %d\n",
7700                         __func__, err);
7701                 goto out_release_mem;
7702         }
7703
7704         /* verify legal parameters & send query */
7705         switch (ioctl_data->opcode) {
7706         case UPIU_QUERY_OPCODE_READ_DESC:
7707                 switch (ioctl_data->idn) {
7708                 case QUERY_DESC_IDN_DEVICE:
7709                 case QUERY_DESC_IDN_CONFIGURAION:
7710                 case QUERY_DESC_IDN_INTERCONNECT:
7711                 case QUERY_DESC_IDN_GEOMETRY:
7712                 case QUERY_DESC_IDN_POWER:
7713                         index = 0;
7714                         break;
7715                 case QUERY_DESC_IDN_UNIT:
7716                         if (!ufs_is_valid_unit_desc_lun(lun)) {
7717                                 dev_err(hba->dev,
7718                                         "%s: No unit descriptor for lun 0x%x\n",
7719                                         __func__, lun);
7720                                 err = -EINVAL;
7721                                 goto out_release_mem;
7722                         }
7723                         index = lun;
7724                         break;
7725                 default:
7726                         goto out_einval;
7727                 }
7728                 length = min_t(int, QUERY_DESC_MAX_SIZE,
7729                                 ioctl_data->buf_size);
7730                 desc = kzalloc(length, GFP_KERNEL);
7731                 if (!desc) {
7732                         dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
7733                                         __func__, length);
7734                         err = -ENOMEM;
7735                         goto out_release_mem;
7736                 }
7737                 err = ufshcd_query_descriptor(hba, ioctl_data->opcode,
7738                                 ioctl_data->idn, index, 0, desc, &length);
7739                 break;
7740         case UPIU_QUERY_OPCODE_READ_ATTR:
7741                 switch (ioctl_data->idn) {
7742                 case QUERY_ATTR_IDN_BOOT_LU_EN:
7743                 case QUERY_ATTR_IDN_POWER_MODE:
7744                 case QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
7745                 case QUERY_ATTR_IDN_OOO_DATA_EN:
7746                 case QUERY_ATTR_IDN_BKOPS_STATUS:
7747                 case QUERY_ATTR_IDN_PURGE_STATUS:
7748                 case QUERY_ATTR_IDN_MAX_DATA_IN:
7749                 case QUERY_ATTR_IDN_MAX_DATA_OUT:
7750                 case QUERY_ATTR_IDN_REF_CLK_FREQ:
7751                 case QUERY_ATTR_IDN_CONF_DESC_LOCK:
7752                 case QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
7753                 case QUERY_ATTR_IDN_EE_CONTROL:
7754                 case QUERY_ATTR_IDN_EE_STATUS:
7755                 case QUERY_ATTR_IDN_SECONDS_PASSED:
7756                         index = 0;
7757                         break;
7758                 case QUERY_ATTR_IDN_DYN_CAP_NEEDED:
7759                 case QUERY_ATTR_IDN_CORR_PRG_BLK_NUM:
7760                         index = lun;
7761                         break;
7762                 default:
7763                         goto out_einval;
7764                 }
7765                 err = ufshcd_query_attr(hba, ioctl_data->opcode, ioctl_data->idn,
7766                                         index, 0, &att);
7767                 break;
7768
7769         case UPIU_QUERY_OPCODE_WRITE_ATTR:
7770                 err = copy_from_user(&att,
7771                                 buffer + sizeof(struct ufs_ioctl_query_data),
7772                                 sizeof(u32));
7773                 if (err) {
7774                         dev_err(hba->dev,
7775                                 "%s: Failed copying buffer from user, err %d\n",
7776                                 __func__, err);
7777                         goto out_release_mem;
7778                 }
7779
7780                 switch (ioctl_data->idn) {
7781                 case QUERY_ATTR_IDN_BOOT_LU_EN:
7782                         index = 0;
7783                         if (att > QUERY_ATTR_IDN_BOOT_LU_EN_MAX) {
7784                                 dev_err(hba->dev,
7785                                         "%s: Illegal ufs query ioctl data, opcode 0x%x, idn 0x%x, att 0x%x\n",
7786                                         __func__, ioctl_data->opcode,
7787                                         (unsigned int)ioctl_data->idn, att);
7788                                 err = -EINVAL;
7789                                 goto out_release_mem;
7790                         }
7791                         break;
7792                 default:
7793                         goto out_einval;
7794                 }
7795                 err = ufshcd_query_attr(hba, ioctl_data->opcode,
7796                                         ioctl_data->idn, index, 0, &att);
7797                 break;
7798
7799         case UPIU_QUERY_OPCODE_READ_FLAG:
7800                 switch (ioctl_data->idn) {
7801                 case QUERY_FLAG_IDN_FDEVICEINIT:
7802                 case QUERY_FLAG_IDN_PERMANENT_WPE:
7803                 case QUERY_FLAG_IDN_PWR_ON_WPE:
7804                 case QUERY_FLAG_IDN_BKOPS_EN:
7805                 case QUERY_FLAG_IDN_PURGE_ENABLE:
7806                 case QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL:
7807                 case QUERY_FLAG_IDN_BUSY_RTC:
7808                         break;
7809                 default:
7810                         goto out_einval;
7811                 }
7812                 err = ufshcd_query_flag_retry(hba, ioctl_data->opcode,
7813                                 ioctl_data->idn, &flag);
7814                 break;
7815         default:
7816                 goto out_einval;
7817         }
7818
7819         if (err) {
7820                 dev_err(hba->dev, "%s: Query for idn %d failed\n", __func__,
7821                                 ioctl_data->idn);
7822                 goto out_release_mem;
7823         }
7824
7825         /*
7826          * Copy the response data.
7827          * We might end up reading less data than what is specified in
7828          * "ioctl_data->buf_size", so update "ioctl_data->buf_size" to the
7829          * size that was actually read.
7830          */
7831         switch (ioctl_data->opcode) {
7832         case UPIU_QUERY_OPCODE_READ_DESC:
7833                 ioctl_data->buf_size = min_t(int, ioctl_data->buf_size, length);
7834                 data_ptr = desc;
7835                 break;
7836         case UPIU_QUERY_OPCODE_READ_ATTR:
7837                 ioctl_data->buf_size = sizeof(u32);
7838                 data_ptr = &att;
7839                 break;
7840         case UPIU_QUERY_OPCODE_READ_FLAG:
7841                 ioctl_data->buf_size = 1;
7842                 data_ptr = &flag;
7843                 break;
7844         case UPIU_QUERY_OPCODE_WRITE_ATTR:
7845                 goto out_release_mem;
7846         default:
7847                 goto out_einval;
7848         }
7849
7850         /* copy to user */
7851         err = copy_to_user(buffer, ioctl_data,
7852                         sizeof(struct ufs_ioctl_query_data));
7853         if (err)
7854                 dev_err(hba->dev, "%s: Failed copying back to user.\n",
7855                         __func__);
7856         err = copy_to_user(buffer + sizeof(struct ufs_ioctl_query_data),
7857                         data_ptr, ioctl_data->buf_size);
7858         if (err)
7859                 dev_err(hba->dev, "%s: err %d copying back to user.\n",
7860                                 __func__, err);
7861         goto out_release_mem;
7862
7863 out_einval:
7864         dev_err(hba->dev,
7865                 "%s: illegal ufs query ioctl data, opcode 0x%x, idn 0x%x\n",
7866                 __func__, ioctl_data->opcode, (unsigned int)ioctl_data->idn);
7867         err = -EINVAL;
7868 out_release_mem:
7869         kfree(ioctl_data);
7870         kfree(desc);
7871 out:
7872         return err;
7873 }
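
/*
 * Minimal user space sketch of the query path above (the device node path and
 * the uapi header that provides UFS_IOCTL_QUERY and struct
 * ufs_ioctl_query_data are assumptions, not taken from this file). The
 * response payload is copied back right after the request header, mirroring
 * the two copy_to_user() calls at the end of ufshcd_query_ioctl():
 *
 *   char buf[sizeof(struct ufs_ioctl_query_data) + sizeof(__u32)];
 *   struct ufs_ioctl_query_data *q = (struct ufs_ioctl_query_data *)buf;
 *   __u32 icc_level;
 *
 *   q->opcode   = UPIU_QUERY_OPCODE_READ_ATTR;
 *   q->idn      = QUERY_ATTR_IDN_ACTIVE_ICC_LVL;
 *   q->buf_size = sizeof(__u32);
 *
 *   int fd = open("/dev/block/sda", O_RDONLY);  // any LU of the UFS device
 *   if (fd >= 0 && ioctl(fd, UFS_IOCTL_QUERY, buf) == 0) {
 *           memcpy(&icc_level, buf + sizeof(*q), q->buf_size);
 *           printf("bActiveICCLevel = %u\n", icc_level);
 *   }
 */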
7874
7875 /**
7876  * ufshcd_ioctl - ufs ioctl callback registered in scsi_host
7877  * @dev: scsi device required for per LUN queries
7878  * @cmd: command opcode
7879  * @buffer: user space buffer for transferring data
7880  *
7881  * Supported commands:
7882  * UFS_IOCTL_QUERY
7883  */
7884 static int ufshcd_ioctl(struct scsi_device *dev, int cmd, void __user *buffer)
7885 {
7886         struct ufs_hba *hba = shost_priv(dev->host);
7887         int err = 0;
7888
7889         BUG_ON(!hba);
7890         if (!buffer) {
7891                 dev_err(hba->dev, "%s: User buffer is NULL!\n", __func__);
7892                 return -EINVAL;
7893         }
7894
7895         switch (cmd) {
7896         case UFS_IOCTL_QUERY:
7897                 pm_runtime_get_sync(hba->dev);
7898                 err = ufshcd_query_ioctl(hba, ufshcd_scsi_to_upiu_lun(dev->lun),
7899                                 buffer);
7900                 pm_runtime_put_sync(hba->dev);
7901                 break;
7902         default:
7903                 err = -ENOIOCTLCMD;
7904                 dev_dbg(hba->dev, "%s: Unsupported ioctl cmd %d\n", __func__,
7905                         cmd);
7906                 break;
7907         }
7908
7909         return err;
7910 }
7911
7912 static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
7913 {
7914         unsigned long flags;
7915         struct Scsi_Host *host;
7916         struct ufs_hba *hba;
7917         int index;
7918         bool found = false;
7919
7920         if (!scmd || !scmd->device || !scmd->device->host)
7921                 return BLK_EH_NOT_HANDLED;
7922
7923         host = scmd->device->host;
7924         hba = shost_priv(host);
7925         if (!hba)
7926                 return BLK_EH_NOT_HANDLED;
7927
7928         spin_lock_irqsave(host->host_lock, flags);
7929
7930         for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
7931                 if (hba->lrb[index].cmd == scmd) {
7932                         found = true;
7933                         break;
7934                 }
7935         }
7936
7937         spin_unlock_irqrestore(host->host_lock, flags);
7938
7939         /*
7940          * Bypass SCSI error handling and reset the block layer timer if this
7941          * SCSI command was not actually dispatched to UFS driver, otherwise
7942          * let SCSI layer handle the error as usual.
7943          */
7944         return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
7945 }
7946
7947 static struct scsi_host_template ufshcd_driver_template = {
7948         .module                 = THIS_MODULE,
7949         .name                   = UFSHCD,
7950         .proc_name              = UFSHCD,
7951         .queuecommand           = ufshcd_queuecommand,
7952         .slave_alloc            = ufshcd_slave_alloc,
7953         .slave_configure        = ufshcd_slave_configure,
7954         .slave_destroy          = ufshcd_slave_destroy,
7955         .change_queue_depth     = ufshcd_change_queue_depth,
7956         .eh_abort_handler       = ufshcd_abort,
7957         .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
7958         .eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
7959         .eh_timed_out           = ufshcd_eh_timed_out,
7960         .ioctl                  = ufshcd_ioctl,
7961 #ifdef CONFIG_COMPAT
7962         .compat_ioctl           = ufshcd_ioctl,
7963 #endif
7964         .this_id                = -1,
7965         .sg_tablesize           = SG_ALL,
7966         .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
7967         .can_queue              = UFSHCD_CAN_QUEUE,
7968         .max_host_blocked       = 1,
7969         .track_queue_depth      = 1,
7970 };
7971
7972 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
7973                                    int ua)
7974 {
7975         int ret;
7976
7977         if (!vreg)
7978                 return 0;
7979
7980         /*
7981          * The "set_load" operation is only required for regulators that
7982          * have a specifically configured current limit. Otherwise a zero
7983          * max_uA may cause unexpected behavior when the regulator is
7984          * enabled or set to high power mode.
7985          */
7986         if (!vreg->max_uA)
7987                 return 0;
7988
7989         ret = regulator_set_load(vreg->reg, ua);
7990         if (ret < 0) {
7991                 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
7992                                 __func__, vreg->name, ua, ret);
7993         }
7994
7995         return ret;
7996 }
7997
7998 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
7999                                          struct ufs_vreg *vreg)
8000 {
8001         if (!vreg)
8002                 return 0;
8003         else if (vreg->unused)
8004                 return 0;
8005         else
8006                 return ufshcd_config_vreg_load(hba->dev, vreg,
8007                                                UFS_VREG_LPM_LOAD_UA);
8008 }
8009
8010 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
8011                                          struct ufs_vreg *vreg)
8012 {
8013         if (!vreg)
8014                 return 0;
8015         else if (vreg->unused)
8016                 return 0;
8017         else
8018                 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
8019 }
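
/*
 * The two helpers above differ only in the load they request from the
 * regulator framework, e.g. for a rail whose max_uA is 800000 (a made-up
 * figure, not a value from this driver):
 *
 *   ufshcd_config_vreg_lpm()  ->  regulator_set_load(reg, UFS_VREG_LPM_LOAD_UA)
 *   ufshcd_config_vreg_hpm()  ->  regulator_set_load(reg, 800000)
 */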
8020
8021 static int ufshcd_config_vreg(struct device *dev,
8022                 struct ufs_vreg *vreg, bool on)
8023 {
8024         int ret = 0;
8025         struct regulator *reg;
8026         const char *name;
8027         int min_uV, uA_load;
8028
8029         BUG_ON(!vreg);
8030
8031         reg = vreg->reg;
8032         name = vreg->name;
8033
8034         if (regulator_count_voltages(reg) > 0) {
8035                 uA_load = on ? vreg->max_uA : 0;
8036                 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
8037                 if (ret)
8038                         goto out;
8039
8040                 if (vreg->min_uV && vreg->max_uV) {
8041                         min_uV = on ? vreg->min_uV : 0;
8042                         ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
8043                         if (ret) {
8044                                 dev_err(dev,
8045                                         "%s: %s set voltage failed, err=%d\n",
8046                                         __func__, name, ret);
8047                                 goto out;
8048                         }
8049                 }
8050         }
8051 out:
8052         return ret;
8053 }
8054
8055 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
8056 {
8057         int ret = 0;
8058
8059         if (!vreg)
8060                 goto out;
8061         else if (vreg->enabled || vreg->unused)
8062                 goto out;
8063
8064         ret = ufshcd_config_vreg(dev, vreg, true);
8065         if (!ret)
8066                 ret = regulator_enable(vreg->reg);
8067
8068         if (!ret)
8069                 vreg->enabled = true;
8070         else
8071                 dev_err(dev, "%s: %s enable failed, err=%d\n",
8072                                 __func__, vreg->name, ret);
8073 out:
8074         return ret;
8075 }
8076
8077 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
8078 {
8079         int ret = 0;
8080
8081         if (!vreg)
8082                 goto out;
8083         else if (!vreg->enabled || vreg->unused)
8084                 goto out;
8085
8086         ret = regulator_disable(vreg->reg);
8087
8088         if (!ret) {
8089                 /* ignore errors on applying disable config */
8090                 ufshcd_config_vreg(dev, vreg, false);
8091                 vreg->enabled = false;
8092         } else {
8093                 dev_err(dev, "%s: %s disable failed, err=%d\n",
8094                                 __func__, vreg->name, ret);
8095         }
8096 out:
8097         return ret;
8098 }
8099
8100 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
8101 {
8102         int ret = 0;
8103         struct device *dev = hba->dev;
8104         struct ufs_vreg_info *info = &hba->vreg_info;
8105
8106         if (!info)
8107                 goto out;
8108
8109         ret = ufshcd_toggle_vreg(dev, info->vcc, on);
8110         if (ret)
8111                 goto out;
8112
8113         ret = ufshcd_toggle_vreg(dev, info->vccq, on);
8114         if (ret)
8115                 goto out;
8116
8117         ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
8118         if (ret)
8119                 goto out;
8120
8121 out:
8122         if (ret) {
8123                 ufshcd_toggle_vreg(dev, info->vccq2, false);
8124                 ufshcd_toggle_vreg(dev, info->vccq, false);
8125                 ufshcd_toggle_vreg(dev, info->vcc, false);
8126         }
8127         return ret;
8128 }
8129
8130 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
8131 {
8132         struct ufs_vreg_info *info = &hba->vreg_info;
8133         int ret = 0;
8134
8135         if (info->vdd_hba) {
8136                 ret = ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
8137
8138                 if (!ret)
8139                         ufshcd_vops_update_sec_cfg(hba, on);
8140         }
8141
8142         return ret;
8143 }
8144
8145 static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
8146 {
8147         int ret = 0;
8148
8149         if (!vreg)
8150                 goto out;
8151
8152         vreg->reg = devm_regulator_get(dev, vreg->name);
8153         if (IS_ERR(vreg->reg)) {
8154                 ret = PTR_ERR(vreg->reg);
8155                 dev_err(dev, "%s: %s get failed, err=%d\n",
8156                                 __func__, vreg->name, ret);
8157         }
8158 out:
8159         return ret;
8160 }
8161
8162 static int ufshcd_init_vreg(struct ufs_hba *hba)
8163 {
8164         int ret = 0;
8165         struct device *dev = hba->dev;
8166         struct ufs_vreg_info *info = &hba->vreg_info;
8167
8168         if (!info)
8169                 goto out;
8170
8171         ret = ufshcd_get_vreg(dev, info->vcc);
8172         if (ret)
8173                 goto out;
8174
8175         ret = ufshcd_get_vreg(dev, info->vccq);
8176         if (ret)
8177                 goto out;
8178
8179         ret = ufshcd_get_vreg(dev, info->vccq2);
8180 out:
8181         return ret;
8182 }
8183
8184 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
8185 {
8186         struct ufs_vreg_info *info = &hba->vreg_info;
8187
8188         if (info)
8189                 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
8190
8191         return 0;
8192 }
8193
8194 static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
8195 {
8196         int ret = 0;
8197         struct ufs_vreg_info *info = &hba->vreg_info;
8198
8199         if (!info)
8200                 goto out;
8201         else if (!info->vccq)
8202                 goto out;
8203
8204         if (unused) {
8205                 /* shut off the rail here */
8206                 ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
8207                 /*
8208                  * Mark this rail as no longer used, so it doesn't get enabled
8209                  * later by mistake
8210                  */
8211                 if (!ret)
8212                         info->vccq->unused = true;
8213         } else {
8214                 /*
8215                  * The rail should already be enabled here, so just make sure
8216                  * that the unused flag is cleared.
8217                  */
8218                 info->vccq->unused = false;
8219         }
8220 out:
8221         return ret;
8222 }
8223
8224 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
8225                                bool skip_ref_clk, bool is_gating_context)
8226 {
8227         int ret = 0;
8228         struct ufs_clk_info *clki;
8229         struct list_head *head = &hba->clk_list_head;
8230         unsigned long flags;
8231         ktime_t start = ktime_get();
8232         bool clk_state_changed = false;
8233
8234         if (!head || list_empty(head))
8235                 goto out;
8236
8237         /* call vendor specific bus vote before enabling the clocks */
8238         if (on) {
8239                 ret = ufshcd_vops_set_bus_vote(hba, on);
8240                 if (ret)
8241                         return ret;
8242         }
8243
8244         /*
8245          * vendor specific setup_clocks ops may depend on clocks managed by
8246          * this standard driver hence call the vendor specific setup_clocks
8247          * before disabling the clocks managed here.
8248          */
8249         if (!on) {
8250                 ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context);
8251                 if (ret)
8252                         return ret;
8253         }
8254
8255         list_for_each_entry(clki, head, list) {
8256                 if (!IS_ERR_OR_NULL(clki->clk)) {
8257                         if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
8258                                 continue;
8259
8260                         clk_state_changed = on ^ clki->enabled;
8261                         if (on && !clki->enabled) {
8262                                 ret = clk_prepare_enable(clki->clk);
8263                                 if (ret) {
8264                                         dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
8265                                                 __func__, clki->name, ret);
8266                                         goto out;
8267                                 }
8268                         } else if (!on && clki->enabled) {
8269                                 clk_disable_unprepare(clki->clk);
8270                         }
8271                         clki->enabled = on;
8272                         dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
8273                                         clki->name, on ? "en" : "dis");
8274                 }
8275         }
8276
8277         /*
8278          * vendor specific setup_clocks ops may depend on clocks managed by
8279          * this standard driver hence call the vendor specific setup_clocks
8280          * after enabling the clocks managed here.
8281          */
8282         if (on) {
8283                 ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context);
8284                 if (ret)
8285                         goto out;
8286         }
8287
8288         /*
8289          * call vendor specific bus vote to remove the vote after
8290          * disabling the clocks.
8291          */
8292         if (!on)
8293                 ret = ufshcd_vops_set_bus_vote(hba, on);
8294
8295 out:
8296         if (ret) {
8297                 if (on)
8298                         /* Can't do much if this fails */
8299                         (void) ufshcd_vops_set_bus_vote(hba, false);
8300                 list_for_each_entry(clki, head, list) {
8301                         if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
8302                                 clk_disable_unprepare(clki->clk);
8303                 }
8304         } else if (!ret && on) {
8305                 spin_lock_irqsave(hba->host->host_lock, flags);
8306                 hba->clk_gating.state = CLKS_ON;
8307                 trace_ufshcd_clk_gating(dev_name(hba->dev),
8308                         hba->clk_gating.state);
8309                 spin_unlock_irqrestore(hba->host->host_lock, flags);
8310                 /* restore the secure configuration as clocks are enabled */
8311                 ufshcd_vops_update_sec_cfg(hba, true);
8312         }
8313
8314         if (clk_state_changed)
8315                 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
8316                         (on ? "on" : "off"),
8317                         ktime_to_us(ktime_sub(ktime_get(), start)), ret);
8318         return ret;
8319 }
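
/*
 * A summary of the ordering in ufshcd_setup_clocks() above:
 *
 *   on:  vendor bus vote -> clk_prepare_enable() loop -> vendor setup_clocks(on)
 *   off: vendor setup_clocks(off) -> clk_disable_unprepare() loop -> remove bus vote
 */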
8320
8321 static int ufshcd_enable_clocks(struct ufs_hba *hba)
8322 {
8323         return  ufshcd_setup_clocks(hba, true, false, false);
8324 }
8325
8326 static int ufshcd_disable_clocks(struct ufs_hba *hba,
8327                                  bool is_gating_context)
8328 {
8329         return  ufshcd_setup_clocks(hba, false, false, is_gating_context);
8330 }
8331
8332 static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
8333                                               bool is_gating_context)
8334 {
8335         return  ufshcd_setup_clocks(hba, false, true, is_gating_context);
8336 }
8337
8338 static int ufshcd_init_clocks(struct ufs_hba *hba)
8339 {
8340         int ret = 0;
8341         struct ufs_clk_info *clki;
8342         struct device *dev = hba->dev;
8343         struct list_head *head = &hba->clk_list_head;
8344
8345         if (!head || list_empty(head))
8346                 goto out;
8347
8348         list_for_each_entry(clki, head, list) {
8349                 if (!clki->name)
8350                         continue;
8351
8352                 clki->clk = devm_clk_get(dev, clki->name);
8353                 if (IS_ERR(clki->clk)) {
8354                         ret = PTR_ERR(clki->clk);
8355                         dev_err(dev, "%s: %s clk get failed, %d\n",
8356                                         __func__, clki->name, ret);
8357                         goto out;
8358                 }
8359
8360                 if (clki->max_freq) {
8361                         ret = clk_set_rate(clki->clk, clki->max_freq);
8362                         if (ret) {
8363                                 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
8364                                         __func__, clki->name,
8365                                         clki->max_freq, ret);
8366                                 goto out;
8367                         }
8368                         clki->curr_freq = clki->max_freq;
8369                 }
8370                 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
8371                                 clki->name, clk_get_rate(clki->clk));
8372         }
8373 out:
8374         return ret;
8375 }
8376
8377 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
8378 {
8379         int err = 0;
8380
8381         if (!hba->var || !hba->var->vops)
8382                 goto out;
8383
8384         err = ufshcd_vops_init(hba);
8385         if (err)
8386                 goto out;
8387
8388         err = ufshcd_vops_setup_regulators(hba, true);
8389         if (err)
8390                 goto out_exit;
8391
8392         goto out;
8393
8394 out_exit:
8395         ufshcd_vops_exit(hba);
8396 out:
8397         if (err)
8398                 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
8399                         __func__, ufshcd_get_var_name(hba), err);
8400         return err;
8401 }
8402
8403 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
8404 {
8405         if (!hba->var || !hba->var->vops)
8406                 return;
8407
8408         ufshcd_vops_setup_regulators(hba, false);
8409
8410         ufshcd_vops_exit(hba);
8411 }
8412
8413 static int ufshcd_hba_init(struct ufs_hba *hba)
8414 {
8415         int err;
8416
8417         /*
8418          * Handle host controller power separately from the UFS device power
8419          * rails, as it helps control UFS host controller power collapse,
8420          * which is different from UFS device power collapse.
8421          * Also, enable the host controller power before going ahead with the
8422          * rest of the initialization here.
8423          */
8424         err = ufshcd_init_hba_vreg(hba);
8425         if (err)
8426                 goto out;
8427
8428         err = ufshcd_setup_hba_vreg(hba, true);
8429         if (err)
8430                 goto out;
8431
8432         err = ufshcd_init_clocks(hba);
8433         if (err)
8434                 goto out_disable_hba_vreg;
8435
8436         err = ufshcd_enable_clocks(hba);
8437         if (err)
8438                 goto out_disable_hba_vreg;
8439
8440         err = ufshcd_init_vreg(hba);
8441         if (err)
8442                 goto out_disable_clks;
8443
8444         err = ufshcd_setup_vreg(hba, true);
8445         if (err)
8446                 goto out_disable_clks;
8447
8448         err = ufshcd_variant_hba_init(hba);
8449         if (err)
8450                 goto out_disable_vreg;
8451
8452         hba->is_powered = true;
8453         goto out;
8454
8455 out_disable_vreg:
8456         ufshcd_setup_vreg(hba, false);
8457 out_disable_clks:
8458         ufshcd_disable_clocks(hba, false);
8459 out_disable_hba_vreg:
8460         ufshcd_setup_hba_vreg(hba, false);
8461 out:
8462         return err;
8463 }
8464
8465 static void ufshcd_hba_exit(struct ufs_hba *hba)
8466 {
8467         if (hba->is_powered) {
8468                 ufshcd_variant_hba_exit(hba);
8469                 ufshcd_setup_vreg(hba, false);
8470                 if (ufshcd_is_clkscaling_supported(hba)) {
8471                         if (hba->devfreq)
8472                                 ufshcd_suspend_clkscaling(hba);
8473                         if (hba->clk_scaling.workq)
8474                                 destroy_workqueue(hba->clk_scaling.workq);
8475                 }
8476                 ufshcd_disable_clocks(hba, false);
8477                 ufshcd_setup_hba_vreg(hba, false);
8478                 hba->is_powered = false;
8479         }
8480 }
8481
8482 static int
8483 ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
8484 {
8485         unsigned char cmd[6] = {REQUEST_SENSE,
8486                                 0,
8487                                 0,
8488                                 0,
8489                                 UFSHCD_REQ_SENSE_SIZE,
8490                                 0};
8491         char *buffer;
8492         int ret;
8493
8494         buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
8495         if (!buffer) {
8496                 ret = -ENOMEM;
8497                 goto out;
8498         }
8499
8500         ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
8501                                 UFSHCD_REQ_SENSE_SIZE, NULL,
8502                                 msecs_to_jiffies(1000), 3, NULL, REQ_PM);
8503         if (ret)
8504                 pr_err("%s: failed with err %d\n", __func__, ret);
8505
8506         kfree(buffer);
8507 out:
8508         return ret;
8509 }
8510
8511 /**
8512  * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
8513  *                           power mode
8514  * @hba: per adapter instance
8515  * @pwr_mode: device power mode to set
8516  *
8517  * Returns 0 if requested power mode is set successfully
8518  * Returns non-zero if failed to set the requested power mode
8519  */
8520 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
8521                                      enum ufs_dev_pwr_mode pwr_mode)
8522 {
8523         unsigned char cmd[6] = { START_STOP };
8524         struct scsi_sense_hdr sshdr;
8525         struct scsi_device *sdp;
8526         unsigned long flags;
8527         int ret;
8528
8529         spin_lock_irqsave(hba->host->host_lock, flags);
8530         sdp = hba->sdev_ufs_device;
8531         if (sdp) {
8532                 ret = scsi_device_get(sdp);
8533                 if (!ret && !scsi_device_online(sdp)) {
8534                         ret = -ENODEV;
8535                         scsi_device_put(sdp);
8536                 }
8537         } else {
8538                 ret = -ENODEV;
8539         }
8540         spin_unlock_irqrestore(hba->host->host_lock, flags);
8541
8542         if (ret)
8543                 return ret;
8544
8545         /*
8546          * If scsi commands fail, the scsi mid-layer schedules scsi error-
8547          * handling, which would wait for host to be resumed. Since we know
8548          * we are functional while we are here, skip host resume in error
8549          * handling context.
8550          */
8551         hba->host->eh_noresume = 1;
8552         if (hba->wlun_dev_clr_ua) {
8553                 ret = ufshcd_send_request_sense(hba, sdp);
8554                 if (ret)
8555                         goto out;
8556                 /* Unit attention condition is cleared now */
8557                 hba->wlun_dev_clr_ua = false;
8558         }
8559
8560         cmd[4] = pwr_mode << 4;
8561
8562         /*
8563          * This function is generally called from the power management
8564          * callbacks, hence set the REQ_PM flag so that it doesn't resume
8565          * already suspended children.
8566          */
8567         ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
8568                                      START_STOP_TIMEOUT, 0, NULL, REQ_PM);
8569         if (ret) {
8570                 sdev_printk(KERN_WARNING, sdp,
8571                             "START_STOP failed for power mode: %d, result %x\n",
8572                             pwr_mode, ret);
8573                 if (driver_byte(ret) & DRIVER_SENSE)
8574                         scsi_print_sense_hdr(sdp, NULL, &sshdr);
8575         }
8576
8577         if (!ret)
8578                 hba->curr_dev_pwr_mode = pwr_mode;
8579 out:
8580         scsi_device_put(sdp);
8581         hba->host->eh_noresume = 0;
8582         return ret;
8583 }
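
/*
 * Illustrative sketch (not part of the driver's code path): START STOP UNIT
 * carries the requested power condition in the upper nibble of CDB byte 4,
 * which is what "cmd[4] = pwr_mode << 4" above encodes. Assuming the usual
 * enum ufs_dev_pwr_mode values (UFS_ACTIVE_PWR_MODE = 1, UFS_SLEEP_PWR_MODE
 * = 2, UFS_POWERDOWN_PWR_MODE = 3), a request to enter UFS-Sleep would be
 * built roughly as:
 *
 *	unsigned char cmd[6] = { START_STOP };
 *
 *	cmd[4] = UFS_SLEEP_PWR_MODE << 4;	(value 0x20, power condition 2)
 */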
8584
8585 static int ufshcd_link_state_transition(struct ufs_hba *hba,
8586                                         enum uic_link_state req_link_state,
8587                                         int check_for_bkops)
8588 {
8589         int ret = 0;
8590
8591         if (req_link_state == hba->uic_link_state)
8592                 return 0;
8593
8594         if (req_link_state == UIC_LINK_HIBERN8_STATE) {
8595                 ret = ufshcd_uic_hibern8_enter(hba);
8596                 if (!ret)
8597                         ufshcd_set_link_hibern8(hba);
8598                 else
8599                         goto out;
8600         }
8601         /*
8602          * If autobkops is enabled, link can't be turned off because
8603          * turning off the link would also turn off the device.
8604          */
8605         else if ((req_link_state == UIC_LINK_OFF_STATE) &&
8606                    (!check_for_bkops ||
8607                     !hba->auto_bkops_enabled)) {
8608                 /*
8609                  * Make sure the link is in low power mode; currently we do this
8610                  * by putting the link in Hibern8. Another way to put the link in
8611                  * low power mode is to send the DME end point reset command to
8612                  * the device and then send the DME reset command to the local
8613                  * UniPro. But putting the link in Hibern8 is much faster.
8614                  */
8615                 ret = ufshcd_uic_hibern8_enter(hba);
8616                 if (ret)
8617                         goto out;
8618                 /*
8619                  * Change controller state to "reset state" which
8620                  * should also put the link in off/reset state
8621                  */
8622                 ufshcd_hba_stop(hba, true);
8623                 /*
8624                  * TODO: Check if we need any delay to make sure that
8625                  * controller is reset
8626                  */
8627                 ufshcd_set_link_off(hba);
8628         }
8629
8630 out:
8631         return ret;
8632 }
8633
8634 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
8635 {
8636         /*
8637          * Some UFS devices may keep drawing more than sleep current
8638          * (at least for 500us) from the UFS rails (especially from the VCCQ
8639          * rail). To avoid this situation, add a 2ms delay before putting
8640          * these UFS rails in LPM mode.
8641          */
8642         if (!ufshcd_is_link_active(hba))
8643                 usleep_range(2000, 2100);
8644
8645         /*
8646          * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
8647          * save some power.
8648          *
8649          * If the UFS device and link are in the OFF state, all power supplies
8650          * (VCC, VCCQ, VCCQ2) can be turned off if power on write protect is
8651          * not required. If the UFS link is inactive (Hibern8 or OFF state) and
8652          * the device is in sleep state, put the VCCQ & VCCQ2 rails in LPM mode.
8653          *
8654          * Ignore the error returned by ufshcd_toggle_vreg() as the device is
8655          * anyway in a low power state, which saves some power.
8656          */
8657         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8658             !hba->dev_info.is_lu_power_on_wp) {
8659                 ufshcd_setup_vreg(hba, false);
8660         } else if (!ufshcd_is_ufs_dev_active(hba)) {
8661                 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8662                 if (!ufshcd_is_link_active(hba)) {
8663                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8664                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
8665                 }
8666         }
8667 }
8668
8669 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
8670 {
8671         int ret = 0;
8672
8673         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8674             !hba->dev_info.is_lu_power_on_wp) {
8675                 ret = ufshcd_setup_vreg(hba, true);
8676         } else if (!ufshcd_is_ufs_dev_active(hba)) {
8677                 if (!ret && !ufshcd_is_link_active(hba)) {
8678                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
8679                         if (ret)
8680                                 goto vcc_disable;
8681                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
8682                         if (ret)
8683                                 goto vccq_lpm;
8684                 }
8685                 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
8686         }
8687         goto out;
8688
8689 vccq_lpm:
8690         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8691 vcc_disable:
8692         ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8693 out:
8694         return ret;
8695 }
8696
8697 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
8698 {
8699         if (ufshcd_is_link_off(hba) ||
8700             (ufshcd_is_link_hibern8(hba)
8701              && ufshcd_is_power_collapse_during_hibern8_allowed(hba)))
8702                 ufshcd_setup_hba_vreg(hba, false);
8703 }
8704
8705 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
8706 {
8707         if (ufshcd_is_link_off(hba) ||
8708             (ufshcd_is_link_hibern8(hba)
8709              && ufshcd_is_power_collapse_during_hibern8_allowed(hba)))
8710                 ufshcd_setup_hba_vreg(hba, true);
8711 }
8712
8713 /**
8714  * ufshcd_suspend - helper function for suspend operations
8715  * @hba: per adapter instance
8716  * @pm_op: desired low power operation type
8717  *
8718  * This function will try to put the UFS device and link into low power
8719  * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
8720  * (System PM level).
8721  *
8722  * If this function is called during shutdown, it will make sure that
8723  * both the UFS device and the UFS link are powered off.
8724  *
8725  * NOTE: UFS device & link must be active before entering this function.
8726  *
8727  * Returns 0 for success and non-zero for failure
8728  */
8729 static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8730 {
8731         int ret = 0;
8732         enum ufs_pm_level pm_lvl;
8733         enum ufs_dev_pwr_mode req_dev_pwr_mode;
8734         enum uic_link_state req_link_state;
8735
8736         hba->pm_op_in_progress = 1;
8737         if (!ufshcd_is_shutdown_pm(pm_op)) {
8738                 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
8739                          hba->rpm_lvl : hba->spm_lvl;
8740                 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
8741                 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
8742         } else {
8743                 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
8744                 req_link_state = UIC_LINK_OFF_STATE;
8745         }
8746
8747         /*
8748          * If we can't transition into any of the low power modes
8749          * just gate the clocks.
8750          */
8751         WARN_ON(hba->hibern8_on_idle.is_enabled &&
8752                 hba->hibern8_on_idle.active_reqs);
8753         ufshcd_hold_all(hba);
8754         hba->clk_gating.is_suspended = true;
8755         hba->hibern8_on_idle.is_suspended = true;
8756
8757         if (hba->clk_scaling.is_allowed) {
8758                 cancel_work_sync(&hba->clk_scaling.suspend_work);
8759                 cancel_work_sync(&hba->clk_scaling.resume_work);
8760                 ufshcd_suspend_clkscaling(hba);
8761         }
8762
8763         if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
8764                         req_link_state == UIC_LINK_ACTIVE_STATE) {
8765                 goto disable_clks;
8766         }
8767
8768         if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
8769             (req_link_state == hba->uic_link_state))
8770                 goto enable_gating;
8771
8772         /* UFS device & link must be active before we enter in this function */
8773         if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
8774                 ret = -EINVAL;
8775                 goto enable_gating;
8776         }
8777
8778         if (ufshcd_is_runtime_pm(pm_op)) {
8779                 if (ufshcd_can_autobkops_during_suspend(hba)) {
8780                         /*
8781                          * The device is idle with no requests in the queue,
8782                          * allow background operations if bkops status shows
8783                          * that performance might be impacted.
8784                          */
8785                         ret = ufshcd_urgent_bkops(hba);
8786                         if (ret)
8787                                 goto enable_gating;
8788                 } else {
8789                         /* make sure that auto bkops is disabled */
8790                         ufshcd_disable_auto_bkops(hba);
8791                 }
8792         }
8793
8794         if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
8795              ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
8796                !ufshcd_is_runtime_pm(pm_op))) {
8797                 /* ensure that bkops is disabled */
8798                 ufshcd_disable_auto_bkops(hba);
8799                 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
8800                 if (ret)
8801                         goto enable_gating;
8802         }
8803
8804         ret = ufshcd_link_state_transition(hba, req_link_state, 1);
8805         if (ret)
8806                 goto set_dev_active;
8807
8808         if (ufshcd_is_link_hibern8(hba) &&
8809             ufshcd_is_hibern8_on_idle_allowed(hba))
8810                 hba->hibern8_on_idle.state = HIBERN8_ENTERED;
8811
8812         ufshcd_vreg_set_lpm(hba);
8813
8814 disable_clks:
8815         /*
8816          * Call vendor specific suspend callback. As these callbacks may access
8817          * vendor specific host controller register space call them before the
8818          * host clocks are ON.
8819          */
8820         ret = ufshcd_vops_suspend(hba, pm_op);
8821         if (ret)
8822                 goto set_link_active;
8823
8824         if (!ufshcd_is_link_active(hba))
8825                 ret = ufshcd_disable_clocks(hba, false);
8826         else
8827                 /* If link is active, device ref_clk can't be switched off */
8828                 ret = ufshcd_disable_clocks_skip_ref_clk(hba, false);
8829         if (ret)
8830                 goto set_link_active;
8831
8832         if (ufshcd_is_clkgating_allowed(hba)) {
8833                 hba->clk_gating.state = CLKS_OFF;
8834                 trace_ufshcd_clk_gating(dev_name(hba->dev),
8835                                         hba->clk_gating.state);
8836         }
8837         /*
8838          * Disable the host irq as no host controller transactions are
8839          * expected till resume.
8840          */
8841         ufshcd_disable_irq(hba);
8842         /* Put the host controller in low power mode if possible */
8843         ufshcd_hba_vreg_set_lpm(hba);
8844         goto out;
8845
8846 set_link_active:
8847         if (hba->clk_scaling.is_allowed)
8848                 ufshcd_resume_clkscaling(hba);
8849         ufshcd_vreg_set_hpm(hba);
8850         if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) {
8851                 ufshcd_set_link_active(hba);
8852         } else if (ufshcd_is_link_off(hba)) {
8853                 ufshcd_update_error_stats(hba, UFS_ERR_VOPS_SUSPEND);
8854                 ufshcd_host_reset_and_restore(hba);
8855         }
8856 set_dev_active:
8857         if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
8858                 ufshcd_disable_auto_bkops(hba);
8859 enable_gating:
8860         if (hba->clk_scaling.is_allowed)
8861                 ufshcd_resume_clkscaling(hba);
8862         hba->hibern8_on_idle.is_suspended = false;
8863         hba->clk_gating.is_suspended = false;
8864         ufshcd_release_all(hba);
8865 out:
8866         hba->pm_op_in_progress = 0;
8867
8868         if (ret)
8869                 ufshcd_update_error_stats(hba, UFS_ERR_SUSPEND);
8870
8871         return ret;
8872 }
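
/*
 * Sketch of how pm_lvl is consumed above (illustrative; the authoritative
 * mapping is the ufs_pm_lvl_states[] table defined earlier in this file).
 * A PM level conventionally pairs a device power mode with a link state:
 *
 *	UFS_PM_LVL_0: UFS_ACTIVE_PWR_MODE    + UIC_LINK_ACTIVE_STATE
 *	UFS_PM_LVL_1: UFS_ACTIVE_PWR_MODE    + UIC_LINK_HIBERN8_STATE
 *	UFS_PM_LVL_2: UFS_SLEEP_PWR_MODE     + UIC_LINK_ACTIVE_STATE
 *	UFS_PM_LVL_3: UFS_SLEEP_PWR_MODE     + UIC_LINK_HIBERN8_STATE
 *	UFS_PM_LVL_4: UFS_POWERDOWN_PWR_MODE + UIC_LINK_HIBERN8_STATE
 *	UFS_PM_LVL_5: UFS_POWERDOWN_PWR_MODE + UIC_LINK_OFF_STATE
 *
 * ufs_get_pm_lvl_to_dev_pwr_mode() and ufs_get_pm_lvl_to_link_pwr_state()
 * simply index that table with the level picked from rpm_lvl/spm_lvl.
 */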
8873
8874 /**
8875  * ufshcd_resume - helper function for resume operations
8876  * @hba: per adapter instance
8877  * @pm_op: runtime PM or system PM
8878  *
8879  * This function basically brings the UFS device, UniPro link and controller
8880  * to active state.
8881  *
8882  * Returns 0 for success and non-zero for failure
8883  */
8884 static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8885 {
8886         int ret;
8887         enum uic_link_state old_link_state;
8888
8889         hba->pm_op_in_progress = 1;
8890         old_link_state = hba->uic_link_state;
8891
8892         ufshcd_hba_vreg_set_hpm(hba);
8893         /* Make sure clocks are enabled before accessing controller */
8894         ret = ufshcd_enable_clocks(hba);
8895         if (ret)
8896                 goto out;
8897
8898         /* enable the host irq as host controller would be active soon */
8899         ufshcd_enable_irq(hba);
8900
8901         ret = ufshcd_vreg_set_hpm(hba);
8902         if (ret)
8903                 goto disable_irq_and_vops_clks;
8904
8905         /*
8906          * Call vendor specific resume callback. As these callbacks may access
8907          * vendor specific host controller register space call them when the
8908          * host clocks are ON.
8909          */
8910         ret = ufshcd_vops_resume(hba, pm_op);
8911         if (ret)
8912                 goto disable_vreg;
8913
8914         if (ufshcd_is_link_hibern8(hba)) {
8915                 ret = ufshcd_uic_hibern8_exit(hba);
8916                 if (!ret) {
8917                         ufshcd_set_link_active(hba);
8918                         if (ufshcd_is_hibern8_on_idle_allowed(hba))
8919                                 hba->hibern8_on_idle.state = HIBERN8_EXITED;
8920                 } else {
8921                         goto vendor_suspend;
8922                 }
8923         } else if (ufshcd_is_link_off(hba)) {
8924                 /*
8925                  * A full initialization of the host and the device is required
8926                  * since the link was put to off during suspend.
8927                  */
8928                 ret = ufshcd_reset_and_restore(hba);
8929                 /*
8930                  * ufshcd_reset_and_restore() should have already
8931                  * set the link state as active
8932                  */
8933                 if (ret || !ufshcd_is_link_active(hba))
8934                         goto vendor_suspend;
8935                 /* mark link state as hibern8 exited */
8936                 if (ufshcd_is_hibern8_on_idle_allowed(hba))
8937                         hba->hibern8_on_idle.state = HIBERN8_EXITED;
8938         }
8939
8940         if (!ufshcd_is_ufs_dev_active(hba)) {
8941                 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
8942                 if (ret)
8943                         goto set_old_link_state;
8944         }
8945
8946         if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
8947                 ufshcd_enable_auto_bkops(hba);
8948         else
8949                 /*
8950                  * If BKOPs operations are urgently needed at this moment then
8951                  * keep auto-bkops enabled or else disable it.
8952                  */
8953                 ufshcd_urgent_bkops(hba);
8954
8955         hba->clk_gating.is_suspended = false;
8956         hba->hibern8_on_idle.is_suspended = false;
8957
8958         if (hba->clk_scaling.is_allowed)
8959                 ufshcd_resume_clkscaling(hba);
8960
8961         /* Schedule clock gating in case of no access to UFS device yet */
8962         ufshcd_release_all(hba);
8963         goto out;
8964
8965 set_old_link_state:
8966         ufshcd_link_state_transition(hba, old_link_state, 0);
8967         if (ufshcd_is_link_hibern8(hba) &&
8968             ufshcd_is_hibern8_on_idle_allowed(hba))
8969                 hba->hibern8_on_idle.state = HIBERN8_ENTERED;
8970 vendor_suspend:
8971         ufshcd_vops_suspend(hba, pm_op);
8972 disable_vreg:
8973         ufshcd_vreg_set_lpm(hba);
8974 disable_irq_and_vops_clks:
8975         ufshcd_disable_irq(hba);
8976         if (hba->clk_scaling.is_allowed)
8977                 ufshcd_suspend_clkscaling(hba);
8978         ufshcd_disable_clocks(hba, false);
8979         if (ufshcd_is_clkgating_allowed(hba))
8980                 hba->clk_gating.state = CLKS_OFF;
8981 out:
8982         hba->pm_op_in_progress = 0;
8983
8984         if (ret)
8985                 ufshcd_update_error_stats(hba, UFS_ERR_RESUME);
8986
8987         return ret;
8988 }
8989
8990 /**
8991  * ufshcd_system_suspend - system suspend routine
8992  * @hba: per adapter instance
8994  *
8995  * Check the description of ufshcd_suspend() function for more details.
8996  *
8997  * Returns 0 for success and non-zero for failure
8998  */
8999 int ufshcd_system_suspend(struct ufs_hba *hba)
9000 {
9001         int ret = 0;
9002         ktime_t start = ktime_get();
9003
9004         if (!hba || !hba->is_powered)
9005                 return 0;
9006
9007         if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
9008              hba->curr_dev_pwr_mode) &&
9009             (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
9010              hba->uic_link_state))
9011                 goto out;
9012
9013         if (pm_runtime_suspended(hba->dev)) {
9014                 /*
9015                  * The UFS device and/or UFS link low power states during
9016                  * runtime suspend seem to be different from what is expected
9017                  * during system suspend. Hence runtime resume the device &
9018                  * link and let the system suspend low power states take
9019                  * effect. TODO: If resume takes too long, we might optimize
9020                  * this in the future by not resuming everything if possible.
9021                  */
9022                 ret = ufshcd_runtime_resume(hba);
9023                 if (ret)
9024                         goto out;
9025         }
9026
9027         ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
9028 out:
9029         trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
9030                 ktime_to_us(ktime_sub(ktime_get(), start)),
9031                 hba->curr_dev_pwr_mode, hba->uic_link_state);
9032         if (!ret)
9033                 hba->is_sys_suspended = true;
9034         return ret;
9035 }
9036 EXPORT_SYMBOL(ufshcd_system_suspend);
9037
9038 /**
9039  * ufshcd_system_resume - system resume routine
9040  * @hba: per adapter instance
9041  *
9042  * Returns 0 for success and non-zero for failure
9043  */
9045 int ufshcd_system_resume(struct ufs_hba *hba)
9046 {
9047         int ret = 0;
9048         ktime_t start = ktime_get();
9049
9050         if (!hba)
9051                 return -EINVAL;
9052
9053         if (!hba->is_powered || pm_runtime_suspended(hba->dev))
9054                 /*
9055                  * Let the runtime resume take care of resuming
9056                  * if runtime suspended.
9057                  */
9058                 goto out;
9059         else
9060                 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
9061 out:
9062         trace_ufshcd_system_resume(dev_name(hba->dev), ret,
9063                 ktime_to_us(ktime_sub(ktime_get(), start)),
9064                 hba->curr_dev_pwr_mode, hba->uic_link_state);
9065         return ret;
9066 }
9067 EXPORT_SYMBOL(ufshcd_system_resume);
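
/*
 * Minimal sketch of how a glue driver is expected to route system PM
 * callbacks into ufshcd_system_suspend()/ufshcd_system_resume() (hedged
 * example; the wrapper names are hypothetical, the real glue typically
 * lives in ufshcd-pltfrm.c or ufshcd-pci.c):
 *
 *	static int example_ufs_suspend(struct device *dev)
 *	{
 *		return ufshcd_system_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static int example_ufs_resume(struct device *dev)
 *	{
 *		return ufshcd_system_resume(dev_get_drvdata(dev));
 *	}
 */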
9068
9069 /**
9070  * ufshcd_runtime_suspend - runtime suspend routine
9071  * @hba: per adapter instance
9072  *
9073  * Check the description of ufshcd_suspend() function for more details.
9074  *
9075  * Returns 0 for success and non-zero for failure
9076  */
9077 int ufshcd_runtime_suspend(struct ufs_hba *hba)
9078 {
9079         int ret = 0;
9080         ktime_t start = ktime_get();
9081
9082         if (!hba)
9083                 return -EINVAL;
9084
9085         if (!hba->is_powered)
9086                 goto out;
9087         else
9088                 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
9089 out:
9090         trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
9091                 ktime_to_us(ktime_sub(ktime_get(), start)),
9092                 hba->curr_dev_pwr_mode,
9093                 hba->uic_link_state);
9094         return ret;
9095
9096 }
9097 EXPORT_SYMBOL(ufshcd_runtime_suspend);
9098
9099 /**
9100  * ufshcd_runtime_resume - runtime resume routine
9101  * @hba: per adapter instance
9102  *
9103  * This function basically brings the UFS device, UniPro link and controller
9104  * to active state. Following operations are done in this function:
9105  *
9106  * 1. Turn on all the controller related clocks
9107  * 2. Bring the UniPro link out of Hibernate state
9108  * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
9109  *    to active state.
9110  * 4. If auto-bkops is enabled on the device, disable it.
9111  *
9112  * So the following would be the possible power state after this function
9113  * returns successfully:
9114  *      S1: UFS device in Active state with VCC rail ON
9115  *          UniPro link in Active state
9116  *          All the UFS/UniPro controller clocks are ON
9117  *
9118  * Returns 0 for success and non-zero for failure
9119  */
9120 int ufshcd_runtime_resume(struct ufs_hba *hba)
9121 {
9122         int ret = 0;
9123         ktime_t start = ktime_get();
9124
9125         if (!hba)
9126                 return -EINVAL;
9127
9128         if (!hba->is_powered)
9129                 goto out;
9130         else
9131                 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
9132 out:
9133         trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
9134                 ktime_to_us(ktime_sub(ktime_get(), start)),
9135                 hba->curr_dev_pwr_mode,
9136                 hba->uic_link_state);
9137         return ret;
9138 }
9139 EXPORT_SYMBOL(ufshcd_runtime_resume);
9140
9141 int ufshcd_runtime_idle(struct ufs_hba *hba)
9142 {
9143         return 0;
9144 }
9145 EXPORT_SYMBOL(ufshcd_runtime_idle);
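
/*
 * Hedged sketch (hypothetical names) of how the runtime PM hooks exported
 * above are usually tied into a dev_pm_ops table by the glue driver,
 * alongside the system sleep wrappers shown earlier:
 *
 *	static const struct dev_pm_ops example_ufs_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(example_ufs_suspend, example_ufs_resume)
 *		SET_RUNTIME_PM_OPS(example_ufs_runtime_suspend,
 *				   example_ufs_runtime_resume,
 *				   example_ufs_runtime_idle)
 *	};
 *
 * where each example_ufs_runtime_*() wrapper just forwards to the matching
 * ufshcd_runtime_*() call with dev_get_drvdata(dev).
 */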
9146
9147 static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
9148                                            struct device_attribute *attr,
9149                                            const char *buf, size_t count,
9150                                            bool rpm)
9151 {
9152         struct ufs_hba *hba = dev_get_drvdata(dev);
9153         unsigned long flags, value;
9154
9155         if (kstrtoul(buf, 0, &value))
9156                 return -EINVAL;
9157
9158         if (value >= UFS_PM_LVL_MAX)
9159                 return -EINVAL;
9160
9161         spin_lock_irqsave(hba->host->host_lock, flags);
9162         if (rpm)
9163                 hba->rpm_lvl = value;
9164         else
9165                 hba->spm_lvl = value;
9166         ufshcd_apply_pm_quirks(hba);
9167         spin_unlock_irqrestore(hba->host->host_lock, flags);
9168         return count;
9169 }
9170
9171 static ssize_t ufshcd_rpm_lvl_show(struct device *dev,
9172                 struct device_attribute *attr, char *buf)
9173 {
9174         struct ufs_hba *hba = dev_get_drvdata(dev);
9175         int curr_len;
9176         u8 lvl;
9177
9178         curr_len = snprintf(buf, PAGE_SIZE,
9179                             "\nCurrent Runtime PM level [%d] => dev_state [%s] link_state [%s]\n",
9180                             hba->rpm_lvl,
9181                             ufschd_ufs_dev_pwr_mode_to_string(
9182                                 ufs_pm_lvl_states[hba->rpm_lvl].dev_state),
9183                             ufschd_uic_link_state_to_string(
9184                                 ufs_pm_lvl_states[hba->rpm_lvl].link_state));
9185
9186         curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
9187                              "\nAll available Runtime PM levels info:\n");
9188         for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
9189                 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
9190                                      "\tRuntime PM level [%d] => dev_state [%s] link_state [%s]\n",
9191                                     lvl,
9192                                     ufschd_ufs_dev_pwr_mode_to_string(
9193                                         ufs_pm_lvl_states[lvl].dev_state),
9194                                     ufschd_uic_link_state_to_string(
9195                                         ufs_pm_lvl_states[lvl].link_state));
9196
9197         return curr_len;
9198 }
9199
9200 static ssize_t ufshcd_rpm_lvl_store(struct device *dev,
9201                 struct device_attribute *attr, const char *buf, size_t count)
9202 {
9203         return ufshcd_pm_lvl_store(dev, attr, buf, count, true);
9204 }
9205
9206 static void ufshcd_add_rpm_lvl_sysfs_nodes(struct ufs_hba *hba)
9207 {
9208         hba->rpm_lvl_attr.show = ufshcd_rpm_lvl_show;
9209         hba->rpm_lvl_attr.store = ufshcd_rpm_lvl_store;
9210         sysfs_attr_init(&hba->rpm_lvl_attr.attr);
9211         hba->rpm_lvl_attr.attr.name = "rpm_lvl";
9212         hba->rpm_lvl_attr.attr.mode = S_IRUGO | S_IWUSR;
9213         if (device_create_file(hba->dev, &hba->rpm_lvl_attr))
9214                 dev_err(hba->dev, "Failed to create sysfs for rpm_lvl\n");
9215 }
9216
9217 static ssize_t ufshcd_spm_lvl_show(struct device *dev,
9218                 struct device_attribute *attr, char *buf)
9219 {
9220         struct ufs_hba *hba = dev_get_drvdata(dev);
9221         int curr_len;
9222         u8 lvl;
9223
9224         curr_len = snprintf(buf, PAGE_SIZE,
9225                             "\nCurrent System PM level [%d] => dev_state [%s] link_state [%s]\n",
9226                             hba->spm_lvl,
9227                             ufschd_ufs_dev_pwr_mode_to_string(
9228                                 ufs_pm_lvl_states[hba->spm_lvl].dev_state),
9229                             ufschd_uic_link_state_to_string(
9230                                 ufs_pm_lvl_states[hba->spm_lvl].link_state));
9231
9232         curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
9233                              "\nAll available System PM levels info:\n");
9234         for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
9235                 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
9236                                      "\tSystem PM level [%d] => dev_state [%s] link_state [%s]\n",
9237                                     lvl,
9238                                     ufschd_ufs_dev_pwr_mode_to_string(
9239                                         ufs_pm_lvl_states[lvl].dev_state),
9240                                     ufschd_uic_link_state_to_string(
9241                                         ufs_pm_lvl_states[lvl].link_state));
9242
9243         return curr_len;
9244 }
9245
9246 static ssize_t ufshcd_spm_lvl_store(struct device *dev,
9247                 struct device_attribute *attr, const char *buf, size_t count)
9248 {
9249         return ufshcd_pm_lvl_store(dev, attr, buf, count, false);
9250 }
9251
9252 static void ufshcd_add_spm_lvl_sysfs_nodes(struct ufs_hba *hba)
9253 {
9254         hba->spm_lvl_attr.show = ufshcd_spm_lvl_show;
9255         hba->spm_lvl_attr.store = ufshcd_spm_lvl_store;
9256         sysfs_attr_init(&hba->spm_lvl_attr.attr);
9257         hba->spm_lvl_attr.attr.name = "spm_lvl";
9258         hba->spm_lvl_attr.attr.mode = S_IRUGO | S_IWUSR;
9259         if (device_create_file(hba->dev, &hba->spm_lvl_attr))
9260                 dev_err(hba->dev, "Failed to create sysfs for spm_lvl\n");
9261 }
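
/*
 * Usage note (illustrative; the exact sysfs path is platform specific):
 * once the nodes above are created, user space can inspect and change the
 * PM levels, e.g.:
 *
 *	cat /sys/devices/platform/.../spm_lvl
 *	echo 5 > /sys/devices/platform/.../spm_lvl
 *
 * Reads go through ufshcd_rpm_lvl_show()/ufshcd_spm_lvl_show() and writes
 * through ufshcd_pm_lvl_store(), which rejects values >= UFS_PM_LVL_MAX.
 */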
9262
9263 static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
9264                                   enum desc_idn desc_id,
9265                                   u8 desc_index,
9266                                   u8 param_offset,
9267                                   u8 *sysfs_buf,
9268                                   u8 param_size)
9269 {
9270         u8 desc_buf[8] = {0};
9271         int ret;
9272
9273         if (param_size > 8)
9274                 return -EINVAL;
9275
9276         pm_runtime_get_sync(hba->dev);
9277         ret = ufshcd_read_desc_param(hba, desc_id, desc_index,
9278                                 param_offset, desc_buf, param_size);
9279         pm_runtime_put_sync(hba->dev);
9280
9281         if (ret)
9282                 return -EINVAL;
9283         switch (param_size) {
9284         case 1:
9285                 ret = snprintf(sysfs_buf, PAGE_SIZE, "0x%02X\n", *desc_buf);
9286                 break;
9287         case 2:
9288                 ret = snprintf(sysfs_buf, PAGE_SIZE, "0x%04X\n",
9289                         get_unaligned_be16(desc_buf));
9290                 break;
9291         case 4:
9292                 ret = snprintf(sysfs_buf, PAGE_SIZE, "0x%08X\n",
9293                         get_unaligned_be32(desc_buf));
9294                 break;
9295         case 8:
9296                 ret = snprintf(sysfs_buf, PAGE_SIZE, "0x%016llX\n",
9297                         get_unaligned_be64(desc_buf));
9298                 break;
9299         }
9300
9301         return ret;
9302 }
9303
9304
9305 #define UFS_DESC_PARAM(_name, _puname, _duname, _size)                  \
9306         static ssize_t _name##_show(struct device *dev,                 \
9307                 struct device_attribute *attr, char *buf)                       \
9308 {                                                                       \
9309         struct ufs_hba *hba = dev_get_drvdata(dev);             \
9310         return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \
9311                 0, _duname##_DESC_PARAM##_puname, buf, _size);          \
9312 }                                                                       \
9313 static DEVICE_ATTR_RO(_name)
9314
9315 #define UFS_HEALTH_DESC_PARAM(_name, _uname, _size)                     \
9316                 UFS_DESC_PARAM(_name, _uname, HEALTH, _size)
9317
9318 UFS_HEALTH_DESC_PARAM(eol_info, _EOL_INFO, 1);
9319 UFS_HEALTH_DESC_PARAM(life_time_estimation_a, _LIFE_TIME_EST_A, 1);
9320 UFS_HEALTH_DESC_PARAM(life_time_estimation_b, _LIFE_TIME_EST_B, 1);
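
/*
 * For reference, expanding the first invocation above by hand (following
 * the UFS_DESC_PARAM() definition) yields approximately:
 *
 *	static ssize_t eol_info_show(struct device *dev,
 *		struct device_attribute *attr, char *buf)
 *	{
 *		struct ufs_hba *hba = dev_get_drvdata(dev);
 *		return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_HEALTH,
 *			0, HEALTH_DESC_PARAM_EOL_INFO, buf, 1);
 *	}
 *	static DEVICE_ATTR_RO(eol_info);
 */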
9321
9322 static struct attribute *ufs_sysfs_health_descriptor[] = {
9323         &dev_attr_eol_info.attr,
9324         &dev_attr_life_time_estimation_a.attr,
9325         &dev_attr_life_time_estimation_b.attr,
9326         NULL,
9327 };
9328
9329 static const struct attribute_group ufs_sysfs_health_descriptor_group = {
9330         .name = "health_descriptor",
9331         .attrs = ufs_sysfs_health_descriptor,
9332 };
9333
9334 static const struct attribute_group *ufs_sysfs_groups[] = {
9335         &ufs_sysfs_health_descriptor_group,
9336         NULL,
9337 };
9338
9339
9340 static void ufshcd_add_desc_sysfs_nodes(struct device *dev)
9341 {
9342         int ret;
9343
9344         ret = sysfs_create_groups(&dev->kobj, ufs_sysfs_groups);
9345         if (ret)
9346                 dev_err(dev,
9347                         "%s: sysfs groups creation failed (err = %d)\n",
9348                         __func__, ret);
9349 }
9350
9351 static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
9352 {
9353         ufshcd_add_rpm_lvl_sysfs_nodes(hba);
9354         ufshcd_add_spm_lvl_sysfs_nodes(hba);
9355         ufshcd_add_desc_sysfs_nodes(hba->dev);
9356 }
9357
9358 static void ufshcd_shutdown_clkscaling(struct ufs_hba *hba)
9359 {
9360         bool suspend = false;
9361         unsigned long flags;
9362
9363         spin_lock_irqsave(hba->host->host_lock, flags);
9364         if (hba->clk_scaling.is_allowed) {
9365                 hba->clk_scaling.is_allowed = false;
9366                 suspend = true;
9367         }
9368         spin_unlock_irqrestore(hba->host->host_lock, flags);
9369
9370         /*
9371          * Clock scaling work may already be scheduled, hence make sure it
9372          * doesn't race with shutdown.
9373          */
9374         if (ufshcd_is_clkscaling_supported(hba)) {
9375                 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
9376                 cancel_work_sync(&hba->clk_scaling.suspend_work);
9377                 cancel_work_sync(&hba->clk_scaling.resume_work);
9378                 if (suspend)
9379                         ufshcd_suspend_clkscaling(hba);
9380         }
9381
9382         /* Unregister so that devfreq_monitor can't race with shutdown */
9383         if (hba->devfreq)
9384                 devfreq_remove_device(hba->devfreq);
9385 }
9386
9387 /**
9388  * ufshcd_shutdown - shutdown routine
9389  * @hba: per adapter instance
9390  *
9391  * This function powers off both the UFS device and the UFS link.
9392  *
9393  * Returns 0 always to allow force shutdown even in case of errors.
9394  */
9395 int ufshcd_shutdown(struct ufs_hba *hba)
9396 {
9397         int ret = 0;
9398
9399         if (!hba->is_powered)
9400                 goto out;
9401
9402         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
9403                 goto out;
9404
9405         pm_runtime_get_sync(hba->dev);
9406         ufshcd_hold_all(hba);
9407         ufshcd_mark_shutdown_ongoing(hba);
9408         ufshcd_shutdown_clkscaling(hba);
9409         /*
9410          * (1) Acquire the lock to stop any more requests
9411          * (2) Wait for all issued requests to complete
9412          */
9413         ufshcd_get_write_lock(hba);
9414         ufshcd_scsi_block_requests(hba);
9415         ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
9416         if (ret)
9417                 dev_err(hba->dev, "%s: waiting for DB clear: failed: %d\n",
9418                         __func__, ret);
9419         /* Requests may have errored out above, let it be handled */
9420         flush_work(&hba->eh_work);
9421         /* reqs issued from contexts other than shutdown will fail from now on */
9422         ufshcd_scsi_unblock_requests(hba);
9423         ufshcd_release_all(hba);
9424         ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
9425 out:
9426         if (ret)
9427                 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
9428         /* allow force shutdown even in case of errors */
9429         return 0;
9430 }
9431 EXPORT_SYMBOL(ufshcd_shutdown);
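
/*
 * Hedged sketch (hypothetical name) of how a platform glue driver would
 * typically wire ufshcd_shutdown() into its bus-level shutdown callback:
 *
 *	static void example_ufs_shutdown(struct platform_device *pdev)
 *	{
 *		ufshcd_shutdown(platform_get_drvdata(pdev));
 *	}
 *
 * The return value is intentionally ignored there, matching the "always
 * return 0 to allow force shutdown" contract documented above.
 */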
9432
9433 /*
9434  * Values permitted 0, 1, 2.
9435  * 0 -> Disable IO latency histograms (default)
9436  * 1 -> Enable IO latency histograms
9437  * 2 -> Zero out IO latency histograms
9438  */
9439 static ssize_t
9440 latency_hist_store(struct device *dev, struct device_attribute *attr,
9441                    const char *buf, size_t count)
9442 {
9443         struct ufs_hba *hba = dev_get_drvdata(dev);
9444         long value;
9445
9446         if (kstrtol(buf, 0, &value))
9447                 return -EINVAL;
9448         if (value == BLK_IO_LAT_HIST_ZERO) {
9449                 memset(&hba->io_lat_read, 0, sizeof(hba->io_lat_read));
9450                 memset(&hba->io_lat_write, 0, sizeof(hba->io_lat_write));
9451         } else if (value == BLK_IO_LAT_HIST_ENABLE ||
9452                  value == BLK_IO_LAT_HIST_DISABLE)
9453                 hba->latency_hist_enabled = value;
9454         return count;
9455 }
9456
9457 static ssize_t
9458 latency_hist_show(struct device *dev, struct device_attribute *attr,
9459                   char *buf)
9460 {
9461         struct ufs_hba *hba = dev_get_drvdata(dev);
9462         size_t written_bytes;
9463
9464         written_bytes = blk_latency_hist_show("Read", &hba->io_lat_read,
9465                         buf, PAGE_SIZE);
9466         written_bytes += blk_latency_hist_show("Write", &hba->io_lat_write,
9467                         buf + written_bytes, PAGE_SIZE - written_bytes);
9468
9469         return written_bytes;
9470 }
9471
9472 static DEVICE_ATTR(latency_hist, S_IRUGO | S_IWUSR,
9473                    latency_hist_show, latency_hist_store);
9474
9475 static void
9476 ufshcd_init_latency_hist(struct ufs_hba *hba)
9477 {
9478         if (device_create_file(hba->dev, &dev_attr_latency_hist))
9479                 dev_err(hba->dev, "Failed to create latency_hist sysfs entry\n");
9480 }
9481
9482 static void
9483 ufshcd_exit_latency_hist(struct ufs_hba *hba)
9484 {
9485         device_remove_file(hba->dev, &dev_attr_latency_hist);
9486 }
9487
9488 /**
9489  * ufshcd_remove - de-allocate the SCSI host and host memory space
9490  *              data structure memory
9491  * @hba: per adapter instance
9492  */
9493 void ufshcd_remove(struct ufs_hba *hba)
9494 {
9495         scsi_remove_host(hba->host);
9496         /* disable interrupts */
9497         ufshcd_disable_intr(hba, hba->intr_mask);
9498         ufshcd_hba_stop(hba, true);
9499
9500         ufshcd_exit_clk_gating(hba);
9501         ufshcd_exit_hibern8_on_idle(hba);
9502         if (ufshcd_is_clkscaling_supported(hba)) {
9503                 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
9504                 ufshcd_exit_latency_hist(hba);
9505                 devfreq_remove_device(hba->devfreq);
9506         }
9507         ufshcd_hba_exit(hba);
9508         ufsdbg_remove_debugfs(hba);
9509 }
9510 EXPORT_SYMBOL_GPL(ufshcd_remove);
9511
9512 /**
9513  * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
9514  * @hba: pointer to Host Bus Adapter (HBA)
9515  */
9516 void ufshcd_dealloc_host(struct ufs_hba *hba)
9517 {
9518         scsi_host_put(hba->host);
9519 }
9520 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
9521
9522 /**
9523  * ufshcd_set_dma_mask - Set dma mask based on the controller
9524  *                       addressing capability
9525  * @hba: per adapter instance
9526  *
9527  * Returns 0 for success, non-zero for failure
9528  */
9529 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
9530 {
9531         if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
9532                 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
9533                         return 0;
9534         }
9535         return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
9536 }
9537
9538 /**
9539  * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
9540  * @dev: pointer to device handle
9541  * @hba_handle: driver private handle
9542  * Returns 0 on success, non-zero value on failure
9543  */
9544 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
9545 {
9546         struct Scsi_Host *host;
9547         struct ufs_hba *hba;
9548         int err = 0;
9549
9550         if (!dev) {
9551                 pr_err("%s: invalid device handle, dev is NULL\n",
9552                        __func__);
9553                 err = -ENODEV;
9554                 goto out_error;
9555         }
9556
9557         host = scsi_host_alloc(&ufshcd_driver_template,
9558                                 sizeof(struct ufs_hba));
9559         if (!host) {
9560                 dev_err(dev, "scsi_host_alloc failed\n");
9561                 err = -ENOMEM;
9562                 goto out_error;
9563         }
9564         hba = shost_priv(host);
9565         hba->host = host;
9566         hba->dev = dev;
9567         *hba_handle = hba;
9568
9569 out_error:
9570         return err;
9571 }
9572 EXPORT_SYMBOL(ufshcd_alloc_host);
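
/*
 * Typical probe-time usage of ufshcd_alloc_host() (hedged sketch; the real
 * callers are the pci/platform glue drivers, and mmio_base/irq stand in for
 * resources obtained from the bus):
 *
 *	struct ufs_hba *hba;
 *	int err;
 *
 *	err = ufshcd_alloc_host(dev, &hba);
 *	if (err)
 *		return err;
 *
 *	err = ufshcd_init(hba, mmio_base, irq);
 *	if (err)
 *		ufshcd_dealloc_host(hba);
 *
 * ufshcd_init() (defined elsewhere in this driver) takes over the
 * controller once it succeeds.
 */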
9573
9574 /**
9575  * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
9576  * @hba: per adapter instance
9577  * @scale_up: True if scaling up and false if scaling down
9578  *
9579  * Returns true if scaling is required, false otherwise.
9580  */
9581 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
9582                                                bool scale_up)
9583 {
9584         struct ufs_clk_info *clki;
9585         struct list_head *head = &hba->clk_list_head;
9586
9587         if (!head || list_empty(head))
9588                 return false;
9589
9590         list_for_each_entry(clki, head, list) {
9591                 if (!IS_ERR_OR_NULL(clki->clk)) {
9592                         if (scale_up && clki->max_freq) {
9593                                 if (clki->curr_freq == clki->max_freq)
9594                                         continue;
9595                                 return true;
9596                         } else if (!scale_up && clki->min_freq) {
9597                                 if (clki->curr_freq == clki->min_freq)
9598                                         continue;
9599                                 return true;
9600                         }
9601                 }
9602         }
9603
9604         return false;
9605 }
9606
9607 /**
9608  * ufshcd_scale_gear - scale up/down UFS gear
9609  * @hba: per adapter instance
9610  * @scale_up: True for scaling up gear and false for scaling down
9611  *
9612  * Returns 0 for success,
9613  * Returns -EBUSY if scaling can't happen at this time
9614  * Returns non-zero for any other errors
9615  */
9616 static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
9617 {
9618         int ret = 0;
9619         struct ufs_pa_layer_attr new_pwr_info;
9620         u32 scale_down_gear = ufshcd_vops_get_scale_down_gear(hba);
9621
9622         BUG_ON(!hba->clk_scaling.saved_pwr_info.is_valid);
9623
9624         if (scale_up) {
9625                 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
9626                        sizeof(struct ufs_pa_layer_attr));
9627                 /*
9628                  * Some UFS devices may stop responding after switching from
9629                  * HS-G1 to HS-G3. Also, it is found that these devices work
9630                  * fine if we do 2 steps switch: HS-G1 to HS-G2 followed by
9631                  * HS-G2 to HS-G3. If UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH
9632                  * quirk is enabled for such devices, this 2 steps gear switch
9633                  * workaround will be applied.
9634                  */
9635                 if ((hba->dev_quirks & UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH)
9636                     && (hba->pwr_info.gear_tx == UFS_HS_G1)
9637                     && (new_pwr_info.gear_tx == UFS_HS_G3)) {
9638                         /* scale up to G2 first */
9639                         new_pwr_info.gear_tx = UFS_HS_G2;
9640                         new_pwr_info.gear_rx = UFS_HS_G2;
9641                         ret = ufshcd_change_power_mode(hba, &new_pwr_info);
9642                         if (ret)
9643                                 goto out;
9644
9645                         /* scale up to G3 now */
9646                         new_pwr_info.gear_tx = UFS_HS_G3;
9647                         new_pwr_info.gear_rx = UFS_HS_G3;
9648                         /* now, fall through to set the HS-G3 */
9649                 }
9650                 ret = ufshcd_change_power_mode(hba, &new_pwr_info);
9651                 if (ret)
9652                         goto out;
9653         } else {
9654                 memcpy(&new_pwr_info, &hba->pwr_info,
9655                        sizeof(struct ufs_pa_layer_attr));
9656
9657                 if (hba->pwr_info.gear_tx > scale_down_gear
9658                     || hba->pwr_info.gear_rx > scale_down_gear) {
9659                         /* save the current power mode */
9660                         memcpy(&hba->clk_scaling.saved_pwr_info.info,
9661                                 &hba->pwr_info,
9662                                 sizeof(struct ufs_pa_layer_attr));
9663
9664                         /* scale down gear */
9665                         new_pwr_info.gear_tx = scale_down_gear;
9666                         new_pwr_info.gear_rx = scale_down_gear;
9667                         if (!(hba->dev_quirks & UFS_DEVICE_NO_FASTAUTO)) {
9668                                 new_pwr_info.pwr_tx = FASTAUTO_MODE;
9669                                 new_pwr_info.pwr_rx = FASTAUTO_MODE;
9670                         }
9671                 }
9672                 ret = ufshcd_change_power_mode(hba, &new_pwr_info);
9673         }
9674
9675 out:
9676         if (ret)
9677                 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d), scale_up = %d\n",
9678                         __func__, ret,
9679                         hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
9680                         new_pwr_info.gear_tx, new_pwr_info.gear_rx,
9681                         scale_up);
9682
9683         return ret;
9684 }
9685
9686 static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
9687 {
9688         #define DOORBELL_CLR_TOUT_US            (1000 * 1000) /* 1 sec */
9689         int ret = 0;
9690         /*
9691          * make sure that there are no outstanding requests when
9692          * clock scaling is in progress
9693          */
9694         ufshcd_scsi_block_requests(hba);
9695         down_write(&hba->lock);
9696         if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
9697                 ret = -EBUSY;
9698                 up_write(&hba->lock);
9699                 ufshcd_scsi_unblock_requests(hba);
9700         }
9701
9702         return ret;
9703 }
9704
9705 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
9706 {
9707         up_write(&hba->lock);
9708         ufshcd_scsi_unblock_requests(hba);
9709 }
9710
9711 /**
9712  * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
9713  * @hba: per adapter instance
9714  * @scale_up: True for scaling up and false for scaling down
9715  *
9716  * Returns 0 for success,
9717  * Returns -EBUSY if scaling can't happen at this time
9718  * Returns non-zero for any other errors
9719  */
9720 static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
9721 {
9722         int ret = 0;
9723
9724         /* let's not get into low power until clock scaling is completed */
9725         hba->ufs_stats.clk_hold.ctx = CLK_SCALE_WORK;
9726         ufshcd_hold_all(hba);
9727
9728         ret = ufshcd_clock_scaling_prepare(hba);
9729         if (ret)
9730                 goto out;
9731
9732         /* scale down the gear before scaling down clocks */
9733         if (!scale_up) {
9734                 ret = ufshcd_scale_gear(hba, false);
9735                 if (ret)
9736                         goto clk_scaling_unprepare;
9737         }
9738
9739         /*
9740          * If auto hibern8 is supported then put the link in
9741          * hibern8 manually, this is to avoid auto hibern8
9742          * racing during clock frequency scaling sequence.
9743          */
9744         if (ufshcd_is_auto_hibern8_supported(hba)) {
9745                 ret = ufshcd_uic_hibern8_enter(hba);
9746                 if (ret)
9747                         /* link will be in a bad state, so no need to scale up the gear */
9748                         return ret;
9749         }
9750
9751         ret = ufshcd_scale_clks(hba, scale_up);
9752         if (ret)
9753                 goto scale_up_gear;
9754
9755         if (ufshcd_is_auto_hibern8_supported(hba)) {
9756                 ret = ufshcd_uic_hibern8_exit(hba);
9757                 if (ret)
9758                         /* link will be in a bad state, so no need to scale up the gear */
9759                         return ret;
9760         }
9761
9762         /* scale up the gear after scaling up clocks */
9763         if (scale_up) {
9764                 ret = ufshcd_scale_gear(hba, true);
9765                 if (ret) {
9766                         ufshcd_scale_clks(hba, false);
9767                         goto clk_scaling_unprepare;
9768                 }
9769         }
9770
9771         if (!ret) {
9772                 hba->clk_scaling.is_scaled_up = scale_up;
9773                 if (scale_up)
9774                         hba->clk_gating.delay_ms =
9775                                 hba->clk_gating.delay_ms_perf;
9776                 else
9777                         hba->clk_gating.delay_ms =
9778                                 hba->clk_gating.delay_ms_pwr_save;
9779         }
9780
9781         goto clk_scaling_unprepare;
9782
9783 scale_up_gear:
9784         if (!scale_up)
9785                 ufshcd_scale_gear(hba, true);
9786 clk_scaling_unprepare:
9787         ufshcd_clock_scaling_unprepare(hba);
9788 out:
9789         hba->ufs_stats.clk_rel.ctx = CLK_SCALE_WORK;
9790         ufshcd_release_all(hba);
9791         return ret;
9792 }
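
/*
 * Ordering summary of the scaling sequence implemented above (derived from
 * the code, for quick reference):
 *
 *	scale down: gear down -> [hibern8 enter] -> clocks down -> [hibern8 exit]
 *	scale up:   [hibern8 enter] -> clocks up -> [hibern8 exit] -> gear up
 *
 * i.e. the link only runs the lower gear while the lower clock rate is in
 * effect, and auto-hibern8 (when supported) is parked manually around the
 * frequency change to avoid racing with it.
 */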
9793
9794 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
9795 {
9796         unsigned long flags;
9797
9798         devfreq_suspend_device(hba->devfreq);
9799         spin_lock_irqsave(hba->host->host_lock, flags);
9800         hba->clk_scaling.window_start_t = 0;
9801         spin_unlock_irqrestore(hba->host->host_lock, flags);
9802 }
9803
9804 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
9805 {
9806         unsigned long flags;
9807         bool suspend = false;
9808
9809         if (!ufshcd_is_clkscaling_supported(hba))
9810                 return;
9811
9812         spin_lock_irqsave(hba->host->host_lock, flags);
9813         if (!hba->clk_scaling.is_suspended) {
9814                 suspend = true;
9815                 hba->clk_scaling.is_suspended = true;
9816         }
9817         spin_unlock_irqrestore(hba->host->host_lock, flags);
9818
9819         if (suspend)
9820                 __ufshcd_suspend_clkscaling(hba);
9821 }
9822
9823 static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
9824 {
9825         unsigned long flags;
9826         bool resume = false;
9827
9828         if (!ufshcd_is_clkscaling_supported(hba))
9829                 return;
9830
9831         spin_lock_irqsave(hba->host->host_lock, flags);
9832         if (hba->clk_scaling.is_suspended) {
9833                 resume = true;
9834                 hba->clk_scaling.is_suspended = false;
9835         }
9836         spin_unlock_irqrestore(hba->host->host_lock, flags);
9837
9838         if (resume)
9839                 devfreq_resume_device(hba->devfreq);
9840 }
9841
9842 static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
9843                 struct device_attribute *attr, char *buf)
9844 {
9845         struct ufs_hba *hba = dev_get_drvdata(dev);
9846
9847         return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
9848 }
9849
9850 static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
9851                 struct device_attribute *attr, const char *buf, size_t count)
9852 {
9853         struct ufs_hba *hba = dev_get_drvdata(dev);
9854         u32 value;
9855         int err;
9856
9857         if (kstrtou32(buf, 0, &value))
9858                 return -EINVAL;
9859
9860         value = !!value;
9861         if (value == hba->clk_scaling.is_allowed)
9862                 goto out;
9863
9864         pm_runtime_get_sync(hba->dev);
9865         ufshcd_hold(hba, false);
9866
9867         cancel_work_sync(&hba->clk_scaling.suspend_work);
9868         cancel_work_sync(&hba->clk_scaling.resume_work);
9869
9870         hba->clk_scaling.is_allowed = value;
9871
9872         if (value) {
9873                 ufshcd_resume_clkscaling(hba);
9874         } else {
9875                 ufshcd_suspend_clkscaling(hba);
9876                 err = ufshcd_devfreq_scale(hba, true);
9877                 if (err)
9878                         dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
9879                                         __func__, err);
9880         }
9881
9882         ufshcd_release(hba, false);
9883         pm_runtime_put_sync(hba->dev);
9884 out:
9885         return count;
9886 }
9887
9888 static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
9889 {
9890         struct ufs_hba *hba = container_of(work, struct ufs_hba,
9891                                            clk_scaling.suspend_work);
9892         unsigned long irq_flags;
9893
9894         spin_lock_irqsave(hba->host->host_lock, irq_flags);
9895         if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
9896                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9897                 return;
9898         }
9899         hba->clk_scaling.is_suspended = true;
9900         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9901
9902         __ufshcd_suspend_clkscaling(hba);
9903 }
9904
9905 static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
9906 {
9907         struct ufs_hba *hba = container_of(work, struct ufs_hba,
9908                                            clk_scaling.resume_work);
9909         unsigned long irq_flags;
9910
9911         spin_lock_irqsave(hba->host->host_lock, irq_flags);
9912         if (!hba->clk_scaling.is_suspended) {
9913                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9914                 return;
9915         }
9916         hba->clk_scaling.is_suspended = false;
9917         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9918
9919         devfreq_resume_device(hba->devfreq);
9920 }
9921
9922 static int ufshcd_devfreq_target(struct device *dev,
9923                                 unsigned long *freq, u32 flags)
9924 {
9925         int ret = 0;
9926         struct ufs_hba *hba = dev_get_drvdata(dev);
9927         unsigned long irq_flags;
9928         ktime_t start;
9929         bool scale_up, sched_clk_scaling_suspend_work = false;
9930
9931         if (!ufshcd_is_clkscaling_supported(hba))
9932                 return -EINVAL;
9933
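        /*
         * Only two operating points are supported: devfreq passes 0 to
         * request scaling down and UINT_MAX to request scaling up, so any
         * intermediate frequency is rejected as invalid below.
         */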
9934         if ((*freq > 0) && (*freq < UINT_MAX)) {
9935                 dev_err(hba->dev, "%s: invalid freq = %lu\n", __func__, *freq);
9936                 return -EINVAL;
9937         }
9938
9939         spin_lock_irqsave(hba->host->host_lock, irq_flags);
9940         if (ufshcd_eh_in_progress(hba)) {
9941                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9942                 return 0;
9943         }
9944
9945         if (!hba->clk_scaling.active_reqs)
9946                 sched_clk_scaling_suspend_work = true;
9947
9948         scale_up = (*freq == UINT_MAX);
9949         if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
9950                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9951                 ret = 0;
9952                 goto out; /* no state change required */
9953         }
9954         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9955
9956         start = ktime_get();
9957         ret = ufshcd_devfreq_scale(hba, scale_up);
9958         trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
9959                 (scale_up ? "up" : "down"),
9960                 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
9961
9962 out:
9963         if (sched_clk_scaling_suspend_work)
9964                 queue_work(hba->clk_scaling.workq,
9965                            &hba->clk_scaling.suspend_work);
9966
9967         return ret;
9968 }
9969
9970 static int ufshcd_devfreq_get_dev_status(struct device *dev,
9971                 struct devfreq_dev_status *stat)
9972 {
9973         struct ufs_hba *hba = dev_get_drvdata(dev);
9974         struct ufs_clk_scaling *scaling = &hba->clk_scaling;
9975         unsigned long flags;
9976
9977         if (!ufshcd_is_clkscaling_supported(hba))
9978                 return -EINVAL;
9979
9980         memset(stat, 0, sizeof(*stat));
9981
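        /*
         * Report the busy/total time ratio for the window that started at
         * window_start_t, then open a new window; the devfreq governor uses
         * this ratio to decide whether to request a scale up or a scale down.
         */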
9982         spin_lock_irqsave(hba->host->host_lock, flags);
9983         if (!scaling->window_start_t)
9984                 goto start_window;
9985
9986         if (scaling->is_busy_started)
9987                 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
9988                                         scaling->busy_start_t));
9989
9990         stat->total_time = jiffies_to_usecs((long)jiffies -
9991                                 (long)scaling->window_start_t);
9992         stat->busy_time = scaling->tot_busy_t;
9993 start_window:
9994         scaling->window_start_t = jiffies;
9995         scaling->tot_busy_t = 0;
9996
9997         if (hba->outstanding_reqs) {
9998                 scaling->busy_start_t = ktime_get();
9999                 scaling->is_busy_started = true;
10000         } else {
10001                 scaling->busy_start_t = ktime_set(0, 0);
10002                 scaling->is_busy_started = false;
10003         }
10004         spin_unlock_irqrestore(hba->host->host_lock, flags);
10005         return 0;
10006 }
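
/*
 * A minimal sketch of how the two devfreq callbacks above are typically
 * wired up with the "simple_ondemand" governor. This is illustrative only
 * (the profile/function names and polling interval are made up here, and the
 * block is not compiled); it is not the registration used by this driver.
 */
#if 0
static struct devfreq_dev_profile ufshcd_example_devfreq_profile = {
	.polling_ms	= 100,	/* hypothetical polling interval */
	.target		= ufshcd_devfreq_target,
	.get_dev_status	= ufshcd_devfreq_get_dev_status,
};

static int ufshcd_example_add_devfreq(struct ufs_hba *hba)
{
	struct devfreq *df;

	df = devfreq_add_device(hba->dev, &ufshcd_example_devfreq_profile,
				"simple_ondemand", NULL);
	if (IS_ERR(df))
		return PTR_ERR(df);

	hba->devfreq = df;
	return 0;
}
#endif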
10007
10008 static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
10009 {
10010         hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
10011         hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
10012         sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
10013         hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
10014         hba->clk_scaling.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
10015         if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
10016                 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
10017 }
10018
10019 static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
10020 {
10021         struct device *dev = hba->dev;
10022         int ret;
10023
10024         ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
10025                 &hba->lanes_per_direction);
10026         if (ret) {
10027                 dev_dbg(hba->dev,
10028                         "%s: failed to read lanes-per-direction, ret=%d\n",
10029                         __func__, ret);
10030                 hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
10031         }
10032 }
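
/*
 * The "lanes-per-direction" value read above comes from the host controller's
 * device tree node. A hedged example fragment (node name, unit address and
 * lane count are illustrative only):
 *
 *	ufshc@1da4000 {
 *		compatible = "qcom,ufshc";
 *		lanes-per-direction = <2>;
 *	};
 *
 * When the property is absent, the driver falls back to
 * UFSHCD_DEFAULT_LANES_PER_DIRECTION.
 */
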
10033 /**
10034  * ufshcd_init - Driver initialization routine
10035  * @hba: per-adapter instance
10036  * @mmio_base: base register address
10037  * @irq: Interrupt line of device
10038  * Returns 0 on success, non-zero value on failure
10039  */
10040 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
10041 {
10042         int err;
10043         struct Scsi_Host *host = hba->host;
10044         struct device *dev = hba->dev;
10045
10046         if (!mmio_base) {
10047                 dev_err(hba->dev,
10048                         "Invalid memory reference for mmio_base\n");
10049                 err = -ENODEV;
10050                 goto out_error;
10051         }
10052
10053         hba->mmio_base = mmio_base;
10054         hba->irq = irq;
10055
10056         ufshcd_init_lanes_per_dir(hba);
10057
10058         err = ufshcd_hba_init(hba);
10059         if (err)
10060                 goto out_error;
10061
10062         /* Read capabilities registers */
10063         ufshcd_hba_capabilities(hba);
10064
10065         /* Get UFS version supported by the controller */
10066         hba->ufs_version = ufshcd_get_ufs_version(hba);
10067
10068         /* print error message if ufs_version is not valid */
10069         if ((hba->ufs_version != UFSHCI_VERSION_10) &&
10070             (hba->ufs_version != UFSHCI_VERSION_11) &&
10071             (hba->ufs_version != UFSHCI_VERSION_20) &&
10072             (hba->ufs_version != UFSHCI_VERSION_21))
10073                 dev_err(hba->dev, "invalid UFS version 0x%x\n",
10074                         hba->ufs_version);
10075
10076         /* Get Interrupt bit mask per version */
10077         hba->intr_mask = ufshcd_get_intr_mask(hba);
10078
10079         /* Enable debug prints */
10080         hba->ufshcd_dbg_print = DEFAULT_UFSHCD_DBG_PRINT_EN;
10081
10082         err = ufshcd_set_dma_mask(hba);
10083         if (err) {
10084                 dev_err(hba->dev, "set dma mask failed\n");
10085                 goto out_disable;
10086         }
10087
10088         /* Allocate memory for host memory space */
10089         err = ufshcd_memory_alloc(hba);
10090         if (err) {
10091                 dev_err(hba->dev, "Memory allocation failed\n");
10092                 goto out_disable;
10093         }
10094
10095         /* Configure LRB */
10096         ufshcd_host_memory_configure(hba);
10097
10098         host->can_queue = hba->nutrs;
10099         host->cmd_per_lun = hba->nutrs;
10100         host->max_id = UFSHCD_MAX_ID;
10101         host->max_lun = UFS_MAX_LUNS;
10102         host->max_channel = UFSHCD_MAX_CHANNEL;
10103         host->unique_id = host->host_no;
10104         host->max_cmd_len = MAX_CDB_SIZE;
10105         host->set_dbd_for_caching = 1;
10106
10107         hba->max_pwr_info.is_valid = false;
10108
10109         /* Initialize wait queue for task management */
10110         init_waitqueue_head(&hba->tm_wq);
10111         init_waitqueue_head(&hba->tm_tag_wq);
10112
10113         /* Initialize work queues */
10114         INIT_WORK(&hba->eh_work, ufshcd_err_handler);
10115         INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
10116         INIT_WORK(&hba->rls_work, ufshcd_rls_handler);
10117
10118         /* Initialize UIC command mutex */
10119         mutex_init(&hba->uic_cmd_mutex);
10120
10121         /* Initialize mutex for device management commands */
10122         mutex_init(&hba->dev_cmd.lock);
10123
10124         init_rwsem(&hba->lock);
10125
10126         /* Initialize device management tag acquire wait queue */
10127         init_waitqueue_head(&hba->dev_cmd.tag_wq);
10128
10129         ufshcd_init_clk_gating(hba);
10130         ufshcd_init_hibern8_on_idle(hba);
10131
10132         /*
10133          * In order to avoid any spurious interrupt immediately after
10134          * registering UFS controller interrupt handler, clear any pending UFS
10135          * interrupt status and disable all the UFS interrupts.
10136          */
10137         ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
10138                       REG_INTERRUPT_STATUS);
10139         ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
10140         /*
10141          * Make sure that UFS interrupts are disabled and any pending interrupt
10142          * status is cleared before registering UFS interrupt handler.
10143          */
10144         mb();
10145
10146         /* IRQ registration */
10147         err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
10148         if (err) {
10149                 dev_err(hba->dev, "request irq failed\n");
10150                 goto exit_gating;
10151         } else {
10152                 hba->is_irq_enabled = true;
10153         }
10154
10155         err = scsi_add_host(host, hba->dev);
10156         if (err) {
10157                 dev_err(hba->dev, "scsi_add_host failed\n");
10158                 goto exit_gating;
10159         }
10160
10161         /* Reset controller to power on reset (POR) state */
10162         ufshcd_vops_full_reset(hba);
10163
10164         /* reset connected UFS device */
10165         err = ufshcd_reset_device(hba);
10166         if (err)
10167                 dev_warn(hba->dev, "%s: device reset failed. err %d\n",
10168                          __func__, err);
10169
10170         /* Host controller enable */
10171         err = ufshcd_hba_enable(hba);
10172         if (err) {
10173                 dev_err(hba->dev, "Host controller enable failed\n");
10174                 ufshcd_print_host_regs(hba);
10175                 ufshcd_print_host_state(hba);
10176                 goto out_remove_scsi_host;
10177         }
10178
10179         if (ufshcd_is_clkscaling_supported(hba)) {
10180                 char wq_name[sizeof("ufs_clkscaling_00")];
10181
10182                 INIT_WORK(&hba->clk_scaling.suspend_work,
10183                           ufshcd_clk_scaling_suspend_work);
10184                 INIT_WORK(&hba->clk_scaling.resume_work,
10185                           ufshcd_clk_scaling_resume_work);
10186
10187                 snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clkscaling_%d",
10188                          host->host_no);
10189                 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
10190
10191                 ufshcd_clkscaling_init_sysfs(hba);
10192         }
10193
10194         /*
10195          * If rpm_lvl and spm_lvl are not already set to valid levels,
10196          * set the default power management level for UFS runtime and system
10197          * suspend. Default power saving mode selected is keeping UFS link in
10198          * Hibern8 state and UFS device in sleep.
10199          */
10200         if (!ufshcd_is_valid_pm_lvl(hba->rpm_lvl))
10201                 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
10202                                                         UFS_SLEEP_PWR_MODE,
10203                                                         UIC_LINK_HIBERN8_STATE);
10204         if (!ufshcd_is_valid_pm_lvl(hba->spm_lvl))
10205                 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
10206                                                         UFS_SLEEP_PWR_MODE,
10207                                                         UIC_LINK_HIBERN8_STATE);
10208
10209         /* Hold auto suspend until async scan completes */
10210         pm_runtime_get_sync(dev);
10211
10212         ufshcd_init_latency_hist(hba);
10213
10214         /*
10215          * We assume the device was not explicitly put into a sleep or
10216          * power-down state by the boot stage that runs before the kernel.
10217          * This assumption helps avoid doing link startup twice during
10218          * ufshcd_probe_hba().
10219          */
10220         ufshcd_set_ufs_dev_active(hba);
10221
10222         ufshcd_cmd_log_init(hba);
10223
10224         async_schedule(ufshcd_async_scan, hba);
10225
10226         ufsdbg_add_debugfs(hba);
10227
10228         ufshcd_add_sysfs_nodes(hba);
10229
10230         return 0;
10231
10232 out_remove_scsi_host:
10233         scsi_remove_host(hba->host);
10234 exit_gating:
10235         ufshcd_exit_clk_gating(hba);
10236         ufshcd_exit_latency_hist(hba);
10237 out_disable:
10238         hba->is_irq_enabled = false;
10239         ufshcd_hba_exit(hba);
10240 out_error:
10241         return err;
10242 }
10243 EXPORT_SYMBOL_GPL(ufshcd_init);
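
/*
 * A minimal sketch of how a bus glue driver (for example a platform glue
 * layer such as ufshcd-pltfrm) is expected to call ufshcd_init(). Resource
 * handling is trimmed and the probe function below is illustrative only,
 * not part of this file (hence not compiled):
 */
#if 0
static int ufshcd_example_probe(struct platform_device *pdev)
{
	struct ufs_hba *hba;
	void __iomem *mmio_base;
	struct resource *res;
	int irq, err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmio_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmio_base))
		return PTR_ERR(mmio_base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* Allocates the Scsi_Host and the per-adapter ufs_hba instance */
	err = ufshcd_alloc_host(&pdev->dev, &hba);
	if (err)
		return err;

	/* Core initialization: memory, IRQ, SCSI host, controller enable */
	err = ufshcd_init(hba, mmio_base, irq);
	if (err)
		ufshcd_dealloc_host(hba);

	return err;
}
#endif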
10244
10245 MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
10246 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
10247 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
10248 MODULE_LICENSE("GPL");
10249 MODULE_VERSION(UFSHCD_DRIVER_VERSION);