sagit-ice-cold/kernel_xiaomi_msm8998.git: drivers/scsi/ufs/ufshcd.c
1 /*
2  * Universal Flash Storage Host controller driver Core
3  *
4  * This code is based on drivers/scsi/ufs/ufshcd.c
5  * Copyright (C) 2011-2013 Samsung India Software Operations
6  * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
7  *
8  * Authors:
9  *      Santosh Yaraganavi <santosh.sy@samsung.com>
10  *      Vinayak Holikatti <h.vinayak@samsung.com>
11  *
12  * This program is free software; you can redistribute it and/or
13  * modify it under the terms of the GNU General Public License
14  * as published by the Free Software Foundation; either version 2
15  * of the License, or (at your option) any later version.
16  * See the COPYING file in the top-level directory or visit
17  * <http://www.gnu.org/licenses/gpl-2.0.html>
18  *
19  * This program is distributed in the hope that it will be useful,
20  * but WITHOUT ANY WARRANTY; without even the implied warranty of
21  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
22  * GNU General Public License for more details.
23  *
24  * This program is provided "AS IS" and "WITH ALL FAULTS" and
25  * without warranty of any kind. You are solely responsible for
26  * determining the appropriateness of using and distributing
27  * the program and assume all risks associated with your exercise
28  * of rights with respect to the program, including but not limited
29  * to infringement of third party rights, the risks and costs of
30  * program errors, damage to or loss of data, programs or equipment,
31  * and unavailability or interruption of operations. Under no
32  * circumstances will the contributor of this Program be liable for
33  * any damages of any kind arising from your use or distribution of
34  * this program.
35  *
36  * The Linux Foundation chooses to take subject only to the GPLv2
37  * license terms, and distributes only under these terms.
38  */
39
40 #include <linux/async.h>
41 #include <linux/devfreq.h>
42 #include <linux/blkdev.h>
43
44 #include "ufshcd.h"
45 #include "unipro.h"
46
47 #define UFSHCD_ENABLE_INTRS     (UTP_TRANSFER_REQ_COMPL |\
48                                  UTP_TASK_REQ_COMPL |\
49                                  UFSHCD_ERROR_MASK)
50 /* UIC command timeout, unit: ms */
51 #define UIC_CMD_TIMEOUT 500
52
53 /* NOP OUT retries waiting for NOP IN response */
54 #define NOP_OUT_RETRIES    10
55 /* Timeout after 30 msecs if NOP OUT hangs without response */
56 #define NOP_OUT_TIMEOUT    30 /* msecs */
57
58 /* Query request retries */
59 #define QUERY_REQ_RETRIES 10
60 /* Query request timeout */
61 #define QUERY_REQ_TIMEOUT 30 /* msec */
62
63 /* Task management command timeout */
64 #define TM_CMD_TIMEOUT  100 /* msecs */
65
66 /* maximum number of link-startup retries */
67 #define DME_LINKSTARTUP_RETRIES 3
68
69 /* maximum number of reset retries before giving up */
70 #define MAX_HOST_RESET_RETRIES 5
71
72 /* Expose the flag value from utp_upiu_query.value */
73 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
74
75 /* Interrupt aggregation default timeout, unit: 40us */
76 #define INT_AGGR_DEF_TO 0x02
77
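/*
 * ufshcd_toggle_vreg - enable or disable a voltage regulator.
 * Evaluates to the return code of ufshcd_enable_vreg() or
 * ufshcd_disable_vreg(), depending on @_on.
 */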
78 #define ufshcd_toggle_vreg(_dev, _vreg, _on)                            \
79         ({                                                              \
80                 int _ret;                                               \
81                 if (_on)                                                \
82                         _ret = ufshcd_enable_vreg(_dev, _vreg);         \
83                 else                                                    \
84                         _ret = ufshcd_disable_vreg(_dev, _vreg);        \
85                 _ret;                                                   \
86         })
87
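/* Maximum descriptor sizes, indexed by descriptor IDN */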
88 static u32 ufs_query_desc_max_size[] = {
89         QUERY_DESC_DEVICE_MAX_SIZE,
90         QUERY_DESC_CONFIGURAION_MAX_SIZE,
91         QUERY_DESC_UNIT_MAX_SIZE,
92         QUERY_DESC_RFU_MAX_SIZE,
93         QUERY_DESC_INTERCONNECT_MAX_SIZE,
94         QUERY_DESC_STRING_MAX_SIZE,
95         QUERY_DESC_RFU_MAX_SIZE,
96         QUERY_DESC_GEOMETRY_MAZ_SIZE,
97         QUERY_DESC_POWER_MAX_SIZE,
98         QUERY_DESC_RFU_MAX_SIZE,
99 };
100
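/* Host limits reported to the SCSI midlayer (channels, target IDs, queue depths) */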
101 enum {
102         UFSHCD_MAX_CHANNEL      = 0,
103         UFSHCD_MAX_ID           = 1,
104         UFSHCD_CMD_PER_LUN      = 32,
105         UFSHCD_CAN_QUEUE        = 32,
106 };
107
108 /* UFSHCD states */
109 enum {
110         UFSHCD_STATE_RESET,
111         UFSHCD_STATE_ERROR,
112         UFSHCD_STATE_OPERATIONAL,
113 };
114
115 /* UFSHCD error handling flags */
116 enum {
117         UFSHCD_EH_IN_PROGRESS = (1 << 0),
118 };
119
120 /* UFSHCD UIC layer error flags */
121 enum {
122         UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
123         UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */
124         UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */
125         UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */
126 };
127
128 /* Interrupt configuration options */
129 enum {
130         UFSHCD_INT_DISABLE,
131         UFSHCD_INT_ENABLE,
132         UFSHCD_INT_CLEAR,
133 };
134
135 #define ufshcd_set_eh_in_progress(h) \
136         (h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
137 #define ufshcd_eh_in_progress(h) \
138         (h->eh_flags & UFSHCD_EH_IN_PROGRESS)
139 #define ufshcd_clear_eh_in_progress(h) \
140         (h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
141
142 #define ufshcd_set_ufs_dev_active(h) \
143         ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
144 #define ufshcd_set_ufs_dev_sleep(h) \
145         ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
146 #define ufshcd_set_ufs_dev_poweroff(h) \
147         ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
148 #define ufshcd_is_ufs_dev_active(h) \
149         ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
150 #define ufshcd_is_ufs_dev_sleep(h) \
151         ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
152 #define ufshcd_is_ufs_dev_poweroff(h) \
153         ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
154
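/*
 * UFS device power mode and UIC link state for each PM level,
 * indexed by enum ufs_pm_level.
 */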
155 static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
156         {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
157         {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
158         {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
159         {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
160         {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
161         {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
162 };
163
164 static inline enum ufs_dev_pwr_mode
165 ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
166 {
167         return ufs_pm_lvl_states[lvl].dev_state;
168 }
169
170 static inline enum uic_link_state
171 ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
172 {
173         return ufs_pm_lvl_states[lvl].link_state;
174 }
175
176 static void ufshcd_tmc_handler(struct ufs_hba *hba);
177 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
178 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
179 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
180 static void ufshcd_hba_exit(struct ufs_hba *hba);
181 static int ufshcd_probe_hba(struct ufs_hba *hba);
182 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
183                                  bool skip_ref_clk);
184 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
185 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
186 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
187 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
188 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
189 static irqreturn_t ufshcd_intr(int irq, void *__hba);
190 static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
191                 struct ufs_pa_layer_attr *desired_pwr_mode);
192 static int ufshcd_change_power_mode(struct ufs_hba *hba,
193                              struct ufs_pa_layer_attr *pwr_mode);
194
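/*
 * ufshcd_enable_irq - request the host controller IRQ if it has not been
 * requested yet. Returns 0 on success or the request_irq() error code.
 */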
195 static inline int ufshcd_enable_irq(struct ufs_hba *hba)
196 {
197         int ret = 0;
198
199         if (!hba->is_irq_enabled) {
200                 ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
201                                 hba);
202                 if (ret)
203                         dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
204                                 __func__, ret);
205                 hba->is_irq_enabled = true;
206         }
207
208         return ret;
209 }
210
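/* ufshcd_disable_irq - free the host controller IRQ if it was requested */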
211 static inline void ufshcd_disable_irq(struct ufs_hba *hba)
212 {
213         if (hba->is_irq_enabled) {
214                 free_irq(hba->irq, hba);
215                 hba->is_irq_enabled = false;
216         }
217 }
218
219 /*
220  * ufshcd_wait_for_register - wait for register value to change
221  * @hba - per-adapter interface
222  * @reg - mmio register offset
223  * @mask - mask to apply to read register value
224  * @val - wait condition
225  * @interval_us - polling interval in microsecs
226  * @timeout_ms - timeout in millisecs
227  *
228  * Returns -ETIMEDOUT on error, zero on success
229  */
230 static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
231                 u32 val, unsigned long interval_us, unsigned long timeout_ms)
232 {
233         int err = 0;
234         unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
235
236         /* ignore bits that we don't intend to wait on */
237         val = val & mask;
238
239         while ((ufshcd_readl(hba, reg) & mask) != val) {
240                 /* wakeup within 50us of expiry */
241                 usleep_range(interval_us, interval_us + 50);
242
243                 if (time_after(jiffies, timeout)) {
244                         if ((ufshcd_readl(hba, reg) & mask) != val)
245                                 err = -ETIMEDOUT;
246                         break;
247                 }
248         }
249
250         return err;
251 }
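/*
 * Illustrative use only (see ufshcd_clear_cmd() later in this file): wait up
 * to 1 s, polling every 1000 us, for doorbell bit @tag to clear:
 *
 *   err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *                                  1 << tag, 0, 1000, 1000);
 *
 * The @val argument is masked internally, so any value whose masked bits are
 * zero requests "wait for the bit(s) to clear".
 */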
252
253 /**
254  * ufshcd_get_intr_mask - Get the interrupt bit mask
255  * @hba - Pointer to adapter instance
256  *
257  * Returns interrupt bit mask per version
258  */
259 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
260 {
261         if (hba->ufs_version == UFSHCI_VERSION_10)
262                 return INTERRUPT_MASK_ALL_VER_10;
263         else
264                 return INTERRUPT_MASK_ALL_VER_11;
265 }
266
267 /**
268  * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
269  * @hba - Pointer to adapter instance
270  *
271  * Returns UFSHCI version supported by the controller
272  */
273 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
274 {
275         if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
276                 return ufshcd_vops_get_ufs_hci_version(hba);
277
278         return ufshcd_readl(hba, REG_UFS_VERSION);
279 }
280
281 /**
282  * ufshcd_is_device_present - Check if any device is connected to
283  *                            the host controller
284  * @hba: pointer to adapter instance
285  *
286  * Returns 1 if device present, 0 if no device detected
287  */
288 static inline int ufshcd_is_device_present(struct ufs_hba *hba)
289 {
290         return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
291                                                 DEVICE_PRESENT) ? 1 : 0;
292 }
293
294 /**
295  * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
296  * @lrb: pointer to local command reference block
297  *
298  * This function is used to get the OCS field from UTRD
299  * Returns the OCS field in the UTRD
300  */
301 static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
302 {
303         return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
304 }
305
306 /**
307  * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
308  * @task_req_descp: pointer to utp_task_req_desc structure
309  *
310  * This function is used to get the OCS field from UTMRD
311  * Returns the OCS field in the UTMRD
312  */
313 static inline int
314 ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
315 {
316         return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
317 }
318
319 /**
320  * ufshcd_get_tm_free_slot - get a free slot for task management request
321  * @hba: per adapter instance
322  * @free_slot: pointer to variable with available slot value
323  *
324  * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
325  * Returns false if a free slot is not available, else returns true with
326  * the tag value in @free_slot.
327  */
328 static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
329 {
330         int tag;
331         bool ret = false;
332
333         if (!free_slot)
334                 goto out;
335
336         do {
337                 tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
338                 if (tag >= hba->nutmrs)
339                         goto out;
340         } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
341
342         *free_slot = tag;
343         ret = true;
344 out:
345         return ret;
346 }
347
348 static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
349 {
350         clear_bit_unlock(slot, &hba->tm_slots_in_use);
351 }
352
353 /**
354  * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
355  * @hba: per adapter instance
356  * @pos: position of the bit to be cleared
357  */
358 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
359 {
360         ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
361 }
362
363 /**
364  * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
365  * @reg: Register value of host controller status
366  *
367  * Returns 0 on success and a positive value on failure
368  */
369 static inline int ufshcd_get_lists_status(u32 reg)
370 {
371         /*
372          * The mask 0xFF is for the following HCS register bits
373          * Bit          Description
374          *  0           Device Present
375          *  1           UTRLRDY
376          *  2           UTMRLRDY
377          *  3           UCRDY
378          *  4           HEI
379          *  5           DEI
380          * 6-7          reserved
381          */
382         return (((reg) & (0xFF)) >> 1) ^ (0x07);
383 }
384
385 /**
386  * ufshcd_get_uic_cmd_result - Get the UIC command result
387  * @hba: Pointer to adapter instance
388  *
389  * This function gets the result of UIC command completion
390  * Returns 0 on success, a non-zero value on error
391  */
392 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
393 {
394         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
395                MASK_UIC_COMMAND_RESULT;
396 }
397
398 /**
399  * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
400  * @hba: Pointer to adapter instance
401  *
402  * This function reads UIC command argument 3
403  * Returns the value of UIC command argument 3 (the DME attribute value)
404  */
405 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
406 {
407         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
408 }
409
410 /**
411  * ufshcd_get_req_rsp - returns the TR response transaction type
412  * @ucd_rsp_ptr: pointer to response UPIU
413  */
414 static inline int
415 ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
416 {
417         return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
418 }
419
420 /**
421  * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
422  * @ucd_rsp_ptr: pointer to response UPIU
423  *
424  * This function gets the response status and scsi_status from response UPIU
425  * Returns the response result code.
426  */
427 static inline int
428 ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
429 {
430         return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
431 }
432
433 /*
434  * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
435  *                              from response UPIU
436  * @ucd_rsp_ptr: pointer to response UPIU
437  *
438  * Return the data segment length.
439  */
440 static inline unsigned int
441 ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
442 {
443         return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
444                 MASK_RSP_UPIU_DATA_SEG_LEN;
445 }
446
447 /**
448  * ufshcd_is_exception_event - Check if the device raised an exception event
449  * @ucd_rsp_ptr: pointer to response UPIU
450  *
451  * The function checks if the device raised an exception event indicated in
452  * the Device Information field of response UPIU.
453  *
454  * Returns true if exception is raised, false otherwise.
455  */
456 static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
457 {
458         return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
459                         MASK_RSP_EXCEPTION_EVENT ? true : false;
460 }
461
462 /**
463  * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
464  * @hba: per adapter instance
465  */
466 static inline void
467 ufshcd_reset_intr_aggr(struct ufs_hba *hba)
468 {
469         ufshcd_writel(hba, INT_AGGR_ENABLE |
470                       INT_AGGR_COUNTER_AND_TIMER_RESET,
471                       REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
472 }
473
474 /**
475  * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
476  * @hba: per adapter instance
477  * @cnt: Interrupt aggregation counter threshold
478  * @tmout: Interrupt aggregation timeout value
479  */
480 static inline void
481 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
482 {
483         ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
484                       INT_AGGR_COUNTER_THLD_VAL(cnt) |
485                       INT_AGGR_TIMEOUT_VAL(tmout),
486                       REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
487 }
488
489 /**
490  * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
491  * @hba: per adapter instance
492  */
493 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
494 {
495         ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
496 }
497
498 /**
499  * ufshcd_enable_run_stop_reg - Enable run-stop registers,
500  *                      When run-stop registers are set to 1, it indicates to
501  *                      the host controller that it can process requests
502  * @hba: per adapter instance
503  */
504 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
505 {
506         ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
507                       REG_UTP_TASK_REQ_LIST_RUN_STOP);
508         ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
509                       REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
510 }
511
512 /**
513  * ufshcd_hba_start - Start controller initialization sequence
514  * @hba: per adapter instance
515  */
516 static inline void ufshcd_hba_start(struct ufs_hba *hba)
517 {
518         ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
519 }
520
521 /**
522  * ufshcd_is_hba_active - Get controller state
523  * @hba: per adapter instance
524  *
525  * Returns zero if controller is active, 1 otherwise
526  */
527 static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
528 {
529         return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
530 }
531
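/*
 * ufshcd_ungate_work - work handler that turns the clocks back on and, if
 * hibern8 was entered while gating, brings the link back to the active
 * state before unblocking SCSI requests.
 */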
532 static void ufshcd_ungate_work(struct work_struct *work)
533 {
534         int ret;
535         unsigned long flags;
536         struct ufs_hba *hba = container_of(work, struct ufs_hba,
537                         clk_gating.ungate_work);
538
539         cancel_delayed_work_sync(&hba->clk_gating.gate_work);
540
541         spin_lock_irqsave(hba->host->host_lock, flags);
542         if (hba->clk_gating.state == CLKS_ON) {
543                 spin_unlock_irqrestore(hba->host->host_lock, flags);
544                 goto unblock_reqs;
545         }
546
547         spin_unlock_irqrestore(hba->host->host_lock, flags);
548         ufshcd_setup_clocks(hba, true);
549
550         /* Exit from hibern8 */
551         if (ufshcd_can_hibern8_during_gating(hba)) {
552                 /* Prevent gating in this path */
553                 hba->clk_gating.is_suspended = true;
554                 if (ufshcd_is_link_hibern8(hba)) {
555                         ret = ufshcd_uic_hibern8_exit(hba);
556                         if (ret)
557                                 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
558                                         __func__, ret);
559                         else
560                                 ufshcd_set_link_active(hba);
561                 }
562                 hba->clk_gating.is_suspended = false;
563         }
564 unblock_reqs:
565         if (ufshcd_is_clkscaling_enabled(hba))
566                 devfreq_resume_device(hba->devfreq);
567         scsi_unblock_requests(hba->host);
568 }
569
570 /**
571  * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
572  * Also, exit from hibern8 mode and set the link as active.
573  * @hba: per adapter instance
574  * @async: This indicates whether caller should ungate clocks asynchronously.
575  */
576 int ufshcd_hold(struct ufs_hba *hba, bool async)
577 {
578         int rc = 0;
579         unsigned long flags;
580
581         if (!ufshcd_is_clkgating_allowed(hba))
582                 goto out;
583         spin_lock_irqsave(hba->host->host_lock, flags);
584         hba->clk_gating.active_reqs++;
585
586 start:
587         switch (hba->clk_gating.state) {
588         case CLKS_ON:
589                 /*
590                  * Wait for the ungate work to complete if in progress.
591                  * Though the clocks may be in ON state, the link could
592                  * still be in hibern8 state if hibern8 is allowed
593                  * during clock gating.
594                  * Make sure we also exit hibern8 state, in addition to
595                  * the clocks being ON.
596                  */
597                 if (ufshcd_can_hibern8_during_gating(hba) &&
598                     ufshcd_is_link_hibern8(hba)) {
599                         spin_unlock_irqrestore(hba->host->host_lock, flags);
600                         flush_work(&hba->clk_gating.ungate_work);
601                         spin_lock_irqsave(hba->host->host_lock, flags);
602                         goto start;
603                 }
604                 break;
605         case REQ_CLKS_OFF:
606                 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
607                         hba->clk_gating.state = CLKS_ON;
608                         break;
609                 }
610                 /*
611                  * If we are here, it means gating work is either done or
612                  * currently running. Hence, fall through to cancel gating
613                  * work and to enable clocks.
614                  */
615         case CLKS_OFF:
616                 scsi_block_requests(hba->host);
617                 hba->clk_gating.state = REQ_CLKS_ON;
618                 schedule_work(&hba->clk_gating.ungate_work);
619                 /*
620                  * fall through to check if we should wait for this
621                  * work to be done or not.
622                  */
623         case REQ_CLKS_ON:
624                 if (async) {
625                         rc = -EAGAIN;
626                         hba->clk_gating.active_reqs--;
627                         break;
628                 }
629
630                 spin_unlock_irqrestore(hba->host->host_lock, flags);
631                 flush_work(&hba->clk_gating.ungate_work);
632                 /* Make sure state is CLKS_ON before returning */
633                 spin_lock_irqsave(hba->host->host_lock, flags);
634                 goto start;
635         default:
636                 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
637                                 __func__, hba->clk_gating.state);
638                 break;
639         }
640         spin_unlock_irqrestore(hba->host->host_lock, flags);
641 out:
642         return rc;
643 }
644 EXPORT_SYMBOL_GPL(ufshcd_hold);
645
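/*
 * ufshcd_gate_work - delayed work that gates the clocks once the host has
 * been idle for clk_gating.delay_ms: it optionally puts the link into
 * hibern8, suspends devfreq clock scaling and turns the clocks off (keeping
 * the device ref_clk if the link is still active).
 */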
646 static void ufshcd_gate_work(struct work_struct *work)
647 {
648         struct ufs_hba *hba = container_of(work, struct ufs_hba,
649                         clk_gating.gate_work.work);
650         unsigned long flags;
651
652         spin_lock_irqsave(hba->host->host_lock, flags);
653         if (hba->clk_gating.is_suspended) {
654                 hba->clk_gating.state = CLKS_ON;
655                 goto rel_lock;
656         }
657
658         if (hba->clk_gating.active_reqs
659                 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
660                 || hba->lrb_in_use || hba->outstanding_tasks
661                 || hba->active_uic_cmd || hba->uic_async_done)
662                 goto rel_lock;
663
664         spin_unlock_irqrestore(hba->host->host_lock, flags);
665
666         /* put the link into hibern8 mode before turning off clocks */
667         if (ufshcd_can_hibern8_during_gating(hba)) {
668                 if (ufshcd_uic_hibern8_enter(hba)) {
669                         hba->clk_gating.state = CLKS_ON;
670                         goto out;
671                 }
672                 ufshcd_set_link_hibern8(hba);
673         }
674
675         if (ufshcd_is_clkscaling_enabled(hba)) {
676                 devfreq_suspend_device(hba->devfreq);
677                 hba->clk_scaling.window_start_t = 0;
678         }
679
680         if (!ufshcd_is_link_active(hba))
681                 ufshcd_setup_clocks(hba, false);
682         else
683                 /* If link is active, device ref_clk can't be switched off */
684                 __ufshcd_setup_clocks(hba, false, true);
685
686         /*
687          * In case you are here to cancel this work the gating state
688          * would be marked as REQ_CLKS_ON. In this case keep the state
689          * as REQ_CLKS_ON which would anyway imply that clocks are off
690          * and a request to turn them on is pending. By doing it this way,
691          * we keep the state machine intact and this would ultimately
692          * prevent us from doing cancel work multiple times when there are
693          * new requests arriving before the current cancel work is done.
694          */
695         spin_lock_irqsave(hba->host->host_lock, flags);
696         if (hba->clk_gating.state == REQ_CLKS_OFF)
697                 hba->clk_gating.state = CLKS_OFF;
698
699 rel_lock:
700         spin_unlock_irqrestore(hba->host->host_lock, flags);
701 out:
702         return;
703 }
704
705 /* host lock must be held before calling this variant */
706 static void __ufshcd_release(struct ufs_hba *hba)
707 {
708         if (!ufshcd_is_clkgating_allowed(hba))
709                 return;
710
711         hba->clk_gating.active_reqs--;
712
713         if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
714                 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
715                 || hba->lrb_in_use || hba->outstanding_tasks
716                 || hba->active_uic_cmd || hba->uic_async_done)
717                 return;
718
719         hba->clk_gating.state = REQ_CLKS_OFF;
720         schedule_delayed_work(&hba->clk_gating.gate_work,
721                         msecs_to_jiffies(hba->clk_gating.delay_ms));
722 }
723
724 void ufshcd_release(struct ufs_hba *hba)
725 {
726         unsigned long flags;
727
728         spin_lock_irqsave(hba->host->host_lock, flags);
729         __ufshcd_release(hba);
730         spin_unlock_irqrestore(hba->host->host_lock, flags);
731 }
732 EXPORT_SYMBOL_GPL(ufshcd_release);
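/*
 * Typical caller pattern (illustrative): hold the clocks across a burst of
 * controller accesses and release them afterwards, e.g. as done in
 * ufshcd_send_uic_cmd():
 *
 *   ufshcd_hold(hba, false);
 *   ... access host registers / issue the command ...
 *   ufshcd_release(hba);
 */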
733
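/* "clkgate_delay_ms" sysfs attribute: idle time (in ms) before clocks are gated */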
734 static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
735                 struct device_attribute *attr, char *buf)
736 {
737         struct ufs_hba *hba = dev_get_drvdata(dev);
738
739         return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
740 }
741
742 static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
743                 struct device_attribute *attr, const char *buf, size_t count)
744 {
745         struct ufs_hba *hba = dev_get_drvdata(dev);
746         unsigned long flags, value;
747
748         if (kstrtoul(buf, 0, &value))
749                 return -EINVAL;
750
751         spin_lock_irqsave(hba->host->host_lock, flags);
752         hba->clk_gating.delay_ms = value;
753         spin_unlock_irqrestore(hba->host->host_lock, flags);
754         return count;
755 }
756
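/*
 * ufshcd_init_clk_gating - set the default 150 ms gating delay, initialize
 * the gate/ungate work items and create the "clkgate_delay_ms" sysfs file.
 */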
757 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
758 {
759         if (!ufshcd_is_clkgating_allowed(hba))
760                 return;
761
762         hba->clk_gating.delay_ms = 150;
763         INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
764         INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
765
766         hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
767         hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
768         sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
769         hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
770         hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
771         if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
772                 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
773 }
774
775 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
776 {
777         if (!ufshcd_is_clkgating_allowed(hba))
778                 return;
779         device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
780         cancel_work_sync(&hba->clk_gating.ungate_work);
781         cancel_delayed_work_sync(&hba->clk_gating.gate_work);
782 }
783
784 /* Must be called with host lock acquired */
785 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
786 {
787         if (!ufshcd_is_clkscaling_enabled(hba))
788                 return;
789
790         if (!hba->clk_scaling.is_busy_started) {
791                 hba->clk_scaling.busy_start_t = ktime_get();
792                 hba->clk_scaling.is_busy_started = true;
793         }
794 }
795
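/*
 * ufshcd_clk_scaling_update_busy - once no requests are outstanding, add the
 * elapsed busy interval to tot_busy_t so devfreq can scale clocks based on
 * actual utilization.
 */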
796 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
797 {
798         struct ufs_clk_scaling *scaling = &hba->clk_scaling;
799
800         if (!ufshcd_is_clkscaling_enabled(hba))
801                 return;
802
803         if (!hba->outstanding_reqs && scaling->is_busy_started) {
804                 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
805                                         scaling->busy_start_t));
806                 scaling->busy_start_t = ktime_set(0, 0);
807                 scaling->is_busy_started = false;
808         }
809 }
810 /**
811  * ufshcd_send_command - Send SCSI or device management commands
812  * @hba: per adapter instance
813  * @task_tag: Task tag of the command
814  */
815 static inline
816 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
817 {
818         ufshcd_clk_scaling_start_busy(hba);
819         __set_bit(task_tag, &hba->outstanding_reqs);
820         ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
821 }
822
823 /**
824  * ufshcd_copy_sense_data - Copy sense data in case of check condition
825  * @lrb - pointer to local reference block
826  */
827 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
828 {
829         int len;
830         if (lrbp->sense_buffer &&
831             ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
832                 int len_to_copy;
833
834                 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
835                 len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
836
837                 memcpy(lrbp->sense_buffer,
838                         lrbp->ucd_rsp_ptr->sr.sense_data,
839                         min_t(int, len_to_copy, SCSI_SENSE_BUFFERSIZE));
840         }
841 }
842
843 /**
844  * ufshcd_copy_query_response() - Copy the Query Response and the data
845  * descriptor
846  * @hba: per adapter instance
847  * @lrb - pointer to local reference block
848  */
849 static
850 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
851 {
852         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
853
854         memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
855
856         /* Get the descriptor */
857         if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
858                 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
859                                 GENERAL_UPIU_REQUEST_SIZE;
860                 u16 resp_len;
861                 u16 buf_len;
862
863                 /* data segment length */
864                 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
865                                                 MASK_QUERY_DATA_SEG_LEN;
866                 buf_len = be16_to_cpu(
867                                 hba->dev_cmd.query.request.upiu_req.length);
868                 if (likely(buf_len >= resp_len)) {
869                         memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
870                 } else {
871                         dev_warn(hba->dev,
872                                 "%s: Response size is bigger than buffer",
873                                 __func__);
874                         return -EINVAL;
875                 }
876         }
877
878         return 0;
879 }
880
881 /**
882  * ufshcd_hba_capabilities - Read controller capabilities
883  * @hba: per adapter instance
884  */
885 static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
886 {
887         hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
888
889         /* nutrs and nutmrs are 0 based values */
890         hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
891         hba->nutmrs =
892         ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
893 }
894
895 /**
896  * ufshcd_ready_for_uic_cmd - Check if controller is ready
897  *                            to accept UIC commands
898  * @hba: per adapter instance
899  * Return true on success, else false
900  */
901 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
902 {
903         if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
904                 return true;
905         else
906                 return false;
907 }
908
909 /**
910  * ufshcd_get_upmcrs - Get the power mode change request status
911  * @hba: Pointer to adapter instance
912  *
913  * This function gets the UPMCRS field of HCS register
914  * Returns value of UPMCRS field
915  */
916 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
917 {
918         return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
919 }
920
921 /**
922  * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
923  * @hba: per adapter instance
924  * @uic_cmd: UIC command
925  *
926  * Mutex must be held.
927  */
928 static inline void
929 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
930 {
931         WARN_ON(hba->active_uic_cmd);
932
933         hba->active_uic_cmd = uic_cmd;
934
935         /* Write Args */
936         ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
937         ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
938         ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
939
940         /* Write UIC Cmd */
941         ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
942                       REG_UIC_COMMAND);
943 }
944
945 /**
946  * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
947  * @hba: per adapter instance
948  * @uic_cmd: UIC command
949  *
950  * Must be called with mutex held.
951  * Returns 0 only if success.
952  */
953 static int
954 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
955 {
956         int ret;
957         unsigned long flags;
958
959         if (wait_for_completion_timeout(&uic_cmd->done,
960                                         msecs_to_jiffies(UIC_CMD_TIMEOUT)))
961                 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
962         else
963                 ret = -ETIMEDOUT;
964
965         spin_lock_irqsave(hba->host->host_lock, flags);
966         hba->active_uic_cmd = NULL;
967         spin_unlock_irqrestore(hba->host->host_lock, flags);
968
969         return ret;
970 }
971
972 /**
973  * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
974  * @hba: per adapter instance
975  * @uic_cmd: UIC command
976  *
977  * Identical to ufshcd_send_uic_cmd() except that it does not take the
978  * mutex itself. Must be called with the mutex held and host_lock locked.
979  * Returns 0 only if success.
980  */
981 static int
982 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
983 {
984         if (!ufshcd_ready_for_uic_cmd(hba)) {
985                 dev_err(hba->dev,
986                         "Controller not ready to accept UIC commands\n");
987                 return -EIO;
988         }
989
990         init_completion(&uic_cmd->done);
991
992         ufshcd_dispatch_uic_cmd(hba, uic_cmd);
993
994         return 0;
995 }
996
997 /**
998  * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
999  * @hba: per adapter instance
1000  * @uic_cmd: UIC command
1001  *
1002  * Returns 0 only if success.
1003  */
1004 static int
1005 ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
1006 {
1007         int ret;
1008         unsigned long flags;
1009
1010         ufshcd_hold(hba, false);
1011         mutex_lock(&hba->uic_cmd_mutex);
1012         ufshcd_add_delay_before_dme_cmd(hba);
1013
1014         spin_lock_irqsave(hba->host->host_lock, flags);
1015         ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
1016         spin_unlock_irqrestore(hba->host->host_lock, flags);
1017         if (!ret)
1018                 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
1019
1020         mutex_unlock(&hba->uic_cmd_mutex);
1021
1022         ufshcd_release(hba);
1023         return ret;
1024 }
1025
1026 /**
1027  * ufshcd_map_sg - Map scatter-gather list to prdt
1028  * @lrbp - pointer to local reference block
1029  *
1030  * Returns 0 in case of success, non-zero value in case of failure
1031  */
1032 static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
1033 {
1034         struct ufshcd_sg_entry *prd_table;
1035         struct scatterlist *sg;
1036         struct scsi_cmnd *cmd;
1037         int sg_segments;
1038         int i;
1039
1040         cmd = lrbp->cmd;
1041         sg_segments = scsi_dma_map(cmd);
1042         if (sg_segments < 0)
1043                 return sg_segments;
1044
1045         if (sg_segments) {
1046                 lrbp->utr_descriptor_ptr->prd_table_length =
1047                                         cpu_to_le16((u16) (sg_segments));
1048
1049                 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
1050
1051                 scsi_for_each_sg(cmd, sg, sg_segments, i) {
1052                         prd_table[i].size  =
1053                                 cpu_to_le32(((u32) sg_dma_len(sg))-1);
1054                         prd_table[i].base_addr =
1055                                 cpu_to_le32(lower_32_bits(sg->dma_address));
1056                         prd_table[i].upper_addr =
1057                                 cpu_to_le32(upper_32_bits(sg->dma_address));
1058                 }
1059         } else {
1060                 lrbp->utr_descriptor_ptr->prd_table_length = 0;
1061         }
1062
1063         return 0;
1064 }
1065
1066 /**
1067  * ufshcd_enable_intr - enable interrupts
1068  * @hba: per adapter instance
1069  * @intrs: interrupt bits
1070  */
1071 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
1072 {
1073         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
1074
1075         if (hba->ufs_version == UFSHCI_VERSION_10) {
1076                 u32 rw;
1077                 rw = set & INTERRUPT_MASK_RW_VER_10;
1078                 set = rw | ((set ^ intrs) & intrs);
1079         } else {
1080                 set |= intrs;
1081         }
1082
1083         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
1084 }
1085
1086 /**
1087  * ufshcd_disable_intr - disable interrupts
1088  * @hba: per adapter instance
1089  * @intrs: interrupt bits
1090  */
1091 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
1092 {
1093         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
1094
1095         if (hba->ufs_version == UFSHCI_VERSION_10) {
1096                 u32 rw;
1097                 rw = (set & INTERRUPT_MASK_RW_VER_10) &
1098                         ~(intrs & INTERRUPT_MASK_RW_VER_10);
1099                 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
1100
1101         } else {
1102                 set &= ~intrs;
1103         }
1104
1105         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
1106 }
1107
1108 /**
1109  * ufshcd_prepare_req_desc_hdr() - Fill the request's header
1110  * descriptor according to the request
1111  * @lrbp: pointer to local reference block
1112  * @upiu_flags: flags required in the header
1113  * @cmd_dir: request's data direction
1114  */
1115 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
1116                 u32 *upiu_flags, enum dma_data_direction cmd_dir)
1117 {
1118         struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
1119         u32 data_direction;
1120         u32 dword_0;
1121
1122         if (cmd_dir == DMA_FROM_DEVICE) {
1123                 data_direction = UTP_DEVICE_TO_HOST;
1124                 *upiu_flags = UPIU_CMD_FLAGS_READ;
1125         } else if (cmd_dir == DMA_TO_DEVICE) {
1126                 data_direction = UTP_HOST_TO_DEVICE;
1127                 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
1128         } else {
1129                 data_direction = UTP_NO_DATA_TRANSFER;
1130                 *upiu_flags = UPIU_CMD_FLAGS_NONE;
1131         }
1132
1133         dword_0 = data_direction | (lrbp->command_type
1134                                 << UPIU_COMMAND_TYPE_OFFSET);
1135         if (lrbp->intr_cmd)
1136                 dword_0 |= UTP_REQ_DESC_INT_CMD;
1137
1138         /* Transfer request descriptor header fields */
1139         req_desc->header.dword_0 = cpu_to_le32(dword_0);
1140
1141         /*
1142          * Assign an invalid value to the command status. The controller
1143          * updates the OCS on command completion, with the actual command
1144          * status
1145          */
1146         req_desc->header.dword_2 =
1147                 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
1148 }
1149
1150 /**
1151  * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
1152  * for scsi commands
1153  * @lrbp - local reference block pointer
1154  * @upiu_flags - flags
1155  */
1156 static
1157 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
1158 {
1159         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
1160
1161         /* command descriptor fields */
1162         ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
1163                                 UPIU_TRANSACTION_COMMAND, upiu_flags,
1164                                 lrbp->lun, lrbp->task_tag);
1165         ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
1166                                 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
1167
1168         /* Total EHS length and Data segment length will be zero */
1169         ucd_req_ptr->header.dword_2 = 0;
1170
1171         ucd_req_ptr->sc.exp_data_transfer_len =
1172                 cpu_to_be32(lrbp->cmd->sdb.length);
1173
1174         memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd,
1175                 (min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE)));
1176 }
1177
1178 /**
1179  * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
1180  * for query requests
1181  * @hba: UFS hba
1182  * @lrbp: local reference block pointer
1183  * @upiu_flags: flags
1184  */
1185 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
1186                                 struct ufshcd_lrb *lrbp, u32 upiu_flags)
1187 {
1188         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
1189         struct ufs_query *query = &hba->dev_cmd.query;
1190         u16 len = be16_to_cpu(query->request.upiu_req.length);
1191         u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
1192
1193         /* Query request header */
1194         ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
1195                         UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
1196                         lrbp->lun, lrbp->task_tag);
1197         ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
1198                         0, query->request.query_func, 0, 0);
1199
1200         /* Data segment length */
1201         ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD(
1202                         0, 0, len >> 8, (u8)len);
1203
1204         /* Copy the Query Request buffer as is */
1205         memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
1206                         QUERY_OSF_SIZE);
1207
1208         /* Copy the Descriptor */
1209         if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
1210                 memcpy(descp, query->descriptor, len);
1211
1212 }
1213
1214 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
1215 {
1216         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
1217
1218         memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
1219
1220         /* command descriptor fields */
1221         ucd_req_ptr->header.dword_0 =
1222                 UPIU_HEADER_DWORD(
1223                         UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
1224 }
1225
1226 /**
1227  * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU)
1228  * @hba - per adapter instance
1229  * @lrb - pointer to local reference block
1230  */
1231 static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1232 {
1233         u32 upiu_flags;
1234         int ret = 0;
1235
1236         switch (lrbp->command_type) {
1237         case UTP_CMD_TYPE_SCSI:
1238                 if (likely(lrbp->cmd)) {
1239                         ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
1240                                         lrbp->cmd->sc_data_direction);
1241                         ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
1242                 } else {
1243                         ret = -EINVAL;
1244                 }
1245                 break;
1246         case UTP_CMD_TYPE_DEV_MANAGE:
1247                 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
1248                 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
1249                         ufshcd_prepare_utp_query_req_upiu(
1250                                         hba, lrbp, upiu_flags);
1251                 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
1252                         ufshcd_prepare_utp_nop_upiu(lrbp);
1253                 else
1254                         ret = -EINVAL;
1255                 break;
1256         case UTP_CMD_TYPE_UFS:
1257                 /* For UFS native command implementation */
1258                 ret = -ENOTSUPP;
1259                 dev_err(hba->dev, "%s: UFS native command are not supported\n",
1260                         __func__);
1261                 break;
1262         default:
1263                 ret = -ENOTSUPP;
1264                 dev_err(hba->dev, "%s: unknown command type: 0x%x\n",
1265                                 __func__, lrbp->command_type);
1266                 break;
1267         } /* end of switch */
1268
1269         return ret;
1270 }
1271
1272 /*
1273  * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
1274  * @scsi_lun: scsi LUN id
1275  *
1276  * Returns UPIU LUN id
1277  */
1278 static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
1279 {
1280         if (scsi_is_wlun(scsi_lun))
1281                 return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
1282                         | UFS_UPIU_WLUN_ID;
1283         else
1284                 return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
1285 }
1286
1287 /**
1288  * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
1289  * @scsi_lun: UPIU W-LUN id
1290  *
1291  * Returns SCSI W-LUN id
1292  */
1293 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
1294 {
1295         return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
1296 }
1297
1298 /**
1299  * ufshcd_queuecommand - main entry point for SCSI requests
1300  * @host: SCSI host pointer
1301  * @cmd: command from SCSI Midlayer
1302  *
1303  * Returns 0 for success, non-zero in case of failure
1304  */
1305 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
1306 {
1307         struct ufshcd_lrb *lrbp;
1308         struct ufs_hba *hba;
1309         unsigned long flags;
1310         int tag;
1311         int err = 0;
1312
1313         hba = shost_priv(host);
1314
1315         tag = cmd->request->tag;
1316
1317         spin_lock_irqsave(hba->host->host_lock, flags);
1318         switch (hba->ufshcd_state) {
1319         case UFSHCD_STATE_OPERATIONAL:
1320                 break;
1321         case UFSHCD_STATE_RESET:
1322                 err = SCSI_MLQUEUE_HOST_BUSY;
1323                 goto out_unlock;
1324         case UFSHCD_STATE_ERROR:
1325                 set_host_byte(cmd, DID_ERROR);
1326                 cmd->scsi_done(cmd);
1327                 goto out_unlock;
1328         default:
1329                 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
1330                                 __func__, hba->ufshcd_state);
1331                 set_host_byte(cmd, DID_BAD_TARGET);
1332                 cmd->scsi_done(cmd);
1333                 goto out_unlock;
1334         }
1335         spin_unlock_irqrestore(hba->host->host_lock, flags);
1336
1337         /* acquire the tag to make sure device cmds don't use it */
1338         if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
1339                 /*
1340                  * Dev manage command in progress, requeue the command.
1341                  * Requeuing the command helps in cases where the request *may*
1342                  * find a different tag instead of waiting for dev manage command
1343                  * completion.
1344                  */
1345                 err = SCSI_MLQUEUE_HOST_BUSY;
1346                 goto out;
1347         }
1348
1349         err = ufshcd_hold(hba, true);
1350         if (err) {
1351                 err = SCSI_MLQUEUE_HOST_BUSY;
1352                 clear_bit_unlock(tag, &hba->lrb_in_use);
1353                 goto out;
1354         }
1355
1356         /* IO svc time latency histogram */
1357         if (hba != NULL && cmd->request != NULL) {
1358                 if (hba->latency_hist_enabled &&
1359                     (cmd->request->cmd_type == REQ_TYPE_FS)) {
1360                         cmd->request->lat_hist_io_start = ktime_get();
1361                         cmd->request->lat_hist_enabled = 1;
1362                 } else
1363                         cmd->request->lat_hist_enabled = 0;
1364         }
1365
1366         WARN_ON(hba->clk_gating.state != CLKS_ON);
1367
1368         lrbp = &hba->lrb[tag];
1369
1370         WARN_ON(lrbp->cmd);
1371         lrbp->cmd = cmd;
1372         lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
1373         lrbp->sense_buffer = cmd->sense_buffer;
1374         lrbp->task_tag = tag;
1375         lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
1376         lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
1377         lrbp->command_type = UTP_CMD_TYPE_SCSI;
1378
1379         /* form UPIU before issuing the command */
1380         ufshcd_compose_upiu(hba, lrbp);
1381         err = ufshcd_map_sg(lrbp);
1382         if (err) {
1383                 lrbp->cmd = NULL;
1384                 clear_bit_unlock(tag, &hba->lrb_in_use);
1385                 goto out;
1386         }
1387
1388         /* issue command to the controller */
1389         spin_lock_irqsave(hba->host->host_lock, flags);
1390         ufshcd_send_command(hba, tag);
1391 out_unlock:
1392         spin_unlock_irqrestore(hba->host->host_lock, flags);
1393 out:
1394         return err;
1395 }
1396
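/*
 * ufshcd_compose_dev_cmd - set up an lrb for a device management command
 * (NOP OUT or Query) on @tag and build the corresponding UPIU.
 */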
1397 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
1398                 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
1399 {
1400         lrbp->cmd = NULL;
1401         lrbp->sense_bufflen = 0;
1402         lrbp->sense_buffer = NULL;
1403         lrbp->task_tag = tag;
1404         lrbp->lun = 0; /* device management cmd is not specific to any LUN */
1405         lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
1406         lrbp->intr_cmd = true; /* No interrupt aggregation */
1407         hba->dev_cmd.type = cmd_type;
1408
1409         return ufshcd_compose_upiu(hba, lrbp);
1410 }
1411
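/*
 * ufshcd_clear_cmd - clear the UTRL doorbell bit for @tag and wait up to
 * one second for the host controller to acknowledge the clear.
 */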
1412 static int
1413 ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
1414 {
1415         int err = 0;
1416         unsigned long flags;
1417         u32 mask = 1 << tag;
1418
1419         /* clear outstanding transaction before retry */
1420         spin_lock_irqsave(hba->host->host_lock, flags);
1421         ufshcd_utrl_clear(hba, tag);
1422         spin_unlock_irqrestore(hba->host->host_lock, flags);
1423
1424         /*
1425          * wait for the h/w to clear the corresponding bit in the door-bell.
1426          * max. wait is 1 sec.
1427          */
1428         err = ufshcd_wait_for_register(hba,
1429                         REG_UTP_TRANSFER_REQ_DOOR_BELL,
1430                         mask, ~mask, 1000, 1000);
1431
1432         return err;
1433 }
1434
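/*
 * ufshcd_check_query_response - extract the query response code from the
 * response UPIU and store it in the device command query result.
 */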
1435 static int
1436 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1437 {
1438         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
1439
1440         /* Get the UPIU response */
1441         query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
1442                                 UPIU_RSP_CODE_OFFSET;
1443         return query_res->response;
1444 }
1445
1446 /**
1447  * ufshcd_dev_cmd_completion() - handles device management command responses
1448  * @hba: per adapter instance
1449  * @lrbp: pointer to local reference block
1450  */
1451 static int
1452 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1453 {
1454         int resp;
1455         int err = 0;
1456
1457         resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
1458
1459         switch (resp) {
1460         case UPIU_TRANSACTION_NOP_IN:
1461                 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
1462                         err = -EINVAL;
1463                         dev_err(hba->dev, "%s: unexpected response %x\n",
1464                                         __func__, resp);
1465                 }
1466                 break;
1467         case UPIU_TRANSACTION_QUERY_RSP:
1468                 err = ufshcd_check_query_response(hba, lrbp);
1469                 if (!err)
1470                         err = ufshcd_copy_query_response(hba, lrbp);
1471                 break;
1472         case UPIU_TRANSACTION_REJECT_UPIU:
1473                 /* TODO: handle Reject UPIU Response */
1474                 err = -EPERM;
1475                 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
1476                                 __func__);
1477                 break;
1478         default:
1479                 err = -EINVAL;
1480                 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
1481                                 __func__, resp);
1482                 break;
1483         }
1484
1485         return err;
1486 }
1487
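/*
 * ufshcd_wait_for_dev_cmd - wait for a device management command to
 * complete. On timeout, try to clear the command in hardware; returns
 * -EAGAIN if the clear succeeded (the caller may retry) and -ETIMEDOUT
 * otherwise.
 */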
1488 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
1489                 struct ufshcd_lrb *lrbp, int max_timeout)
1490 {
1491         int err = 0;
1492         unsigned long time_left;
1493         unsigned long flags;
1494
1495         time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
1496                         msecs_to_jiffies(max_timeout));
1497
1498         spin_lock_irqsave(hba->host->host_lock, flags);
1499         hba->dev_cmd.complete = NULL;
1500         if (likely(time_left)) {
1501                 err = ufshcd_get_tr_ocs(lrbp);
1502                 if (!err)
1503                         err = ufshcd_dev_cmd_completion(hba, lrbp);
1504         }
1505         spin_unlock_irqrestore(hba->host->host_lock, flags);
1506
1507         if (!time_left) {
1508                 err = -ETIMEDOUT;
1509                 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
1510                         /* successfully cleared the command, retry if needed */
1511                         err = -EAGAIN;
1512         }
1513
1514         return err;
1515 }
1516
1517 /**
1518  * ufshcd_get_dev_cmd_tag - Get device management command tag
1519  * @hba: per-adapter instance
1520  * @tag_out: pointer to variable with available slot value
1521  *
1522  * Get a free slot and lock it until device management command
1523  * completes.
1524  *
1525  * Returns false if a free slot is unavailable for locking, else
1526  * returns true with the tag value in @tag_out.
1527  */
1528 static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
1529 {
1530         int tag;
1531         bool ret = false;
1532         unsigned long tmp;
1533
1534         if (!tag_out)
1535                 goto out;
1536
1537         do {
1538                 tmp = ~hba->lrb_in_use;
1539                 tag = find_last_bit(&tmp, hba->nutrs);
1540                 if (tag >= hba->nutrs)
1541                         goto out;
1542         } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
1543
1544         *tag_out = tag;
1545         ret = true;
1546 out:
1547         return ret;
1548 }
1549
1550 static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
1551 {
1552         clear_bit_unlock(tag, &hba->lrb_in_use);
1553 }
1554
1555 /**
1556  * ufshcd_exec_dev_cmd - API for sending device management requests
1557  * @hba - UFS hba
1558  * @cmd_type - specifies the type (NOP, Query...)
1559  * @timeout - timeout in milliseconds
1560  *
1561  * NOTE: Since there is only one available tag for device management commands,
1562  * it is expected that the caller holds the hba->dev_cmd.lock mutex.
1563  */
1564 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
1565                 enum dev_cmd_type cmd_type, int timeout)
1566 {
1567         struct ufshcd_lrb *lrbp;
1568         int err;
1569         int tag;
1570         struct completion wait;
1571         unsigned long flags;
1572
1573         /*
1574          * Get free slot, sleep if slots are unavailable.
1575          * Even though we use wait_event() which sleeps indefinitely,
1576          * the maximum wait time is bounded by SCSI request timeout.
1577          */
1578         wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
1579
1580         init_completion(&wait);
1581         lrbp = &hba->lrb[tag];
1582         WARN_ON(lrbp->cmd);
1583         err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
1584         if (unlikely(err))
1585                 goto out_put_tag;
1586
1587         hba->dev_cmd.complete = &wait;
1588
1589         spin_lock_irqsave(hba->host->host_lock, flags);
1590         ufshcd_send_command(hba, tag);
1591         spin_unlock_irqrestore(hba->host->host_lock, flags);
1592
1593         err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
1594
1595 out_put_tag:
1596         ufshcd_put_dev_cmd_tag(hba, tag);
1597         wake_up(&hba->dev_cmd.tag_wq);
1598         return err;
1599 }
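/*
 * Illustrative caller-side sketch (not part of the driver flow itself):
 * since ufshcd_exec_dev_cmd() expects hba->dev_cmd.lock to be held, its
 * in-file callers (e.g. ufshcd_verify_dev_init() below) follow this pattern:
 *
 *	ufshcd_hold(hba, false);
 *	mutex_lock(&hba->dev_cmd.lock);
 *	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, NOP_OUT_TIMEOUT);
 *	mutex_unlock(&hba->dev_cmd.lock);
 *	ufshcd_release(hba);
 */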
1600
1601 /**
1602  * ufshcd_init_query() - init the query response and request parameters
1603  * @hba: per-adapter instance
1604  * @request: address of the request pointer to be initialized
1605  * @response: address of the response pointer to be initialized
1606  * @opcode: operation to perform
1607  * @idn: flag idn to access
1608  * @index: LU number to access
1609  * @selector: query/flag/descriptor further identification
1610  */
1611 static inline void ufshcd_init_query(struct ufs_hba *hba,
1612                 struct ufs_query_req **request, struct ufs_query_res **response,
1613                 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
1614 {
1615         *request = &hba->dev_cmd.query.request;
1616         *response = &hba->dev_cmd.query.response;
1617         memset(*request, 0, sizeof(struct ufs_query_req));
1618         memset(*response, 0, sizeof(struct ufs_query_res));
1619         (*request)->upiu_req.opcode = opcode;
1620         (*request)->upiu_req.idn = idn;
1621         (*request)->upiu_req.index = index;
1622         (*request)->upiu_req.selector = selector;
1623 }
1624
1625 /**
1626  * ufshcd_query_flag() - API function for sending flag query requests
1627  * @hba: per-adapter instance
1628  * @opcode: flag query to perform
1629  * @idn: flag idn to access
1630  * @flag_res: the flag value after the query request completes
1631  *
1632  * Returns 0 for success, non-zero in case of failure
1633  */
1634 static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
1635                         enum flag_idn idn, bool *flag_res)
1636 {
1637         struct ufs_query_req *request = NULL;
1638         struct ufs_query_res *response = NULL;
1639         int err, index = 0, selector = 0;
1640
1641         BUG_ON(!hba);
1642
1643         ufshcd_hold(hba, false);
1644         mutex_lock(&hba->dev_cmd.lock);
1645         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
1646                         selector);
1647
1648         switch (opcode) {
1649         case UPIU_QUERY_OPCODE_SET_FLAG:
1650         case UPIU_QUERY_OPCODE_CLEAR_FLAG:
1651         case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
1652                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
1653                 break;
1654         case UPIU_QUERY_OPCODE_READ_FLAG:
1655                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
1656                 if (!flag_res) {
1657                         /* No dummy reads */
1658                         dev_err(hba->dev, "%s: Invalid argument for read request\n",
1659                                         __func__);
1660                         err = -EINVAL;
1661                         goto out_unlock;
1662                 }
1663                 break;
1664         default:
1665                 dev_err(hba->dev,
1666                         "%s: Expected query flag opcode but got = %d\n",
1667                         __func__, opcode);
1668                 err = -EINVAL;
1669                 goto out_unlock;
1670         }
1671
1672         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
1673
1674         if (err) {
1675                 dev_err(hba->dev,
1676                         "%s: Sending flag query for idn %d failed, err = %d\n",
1677                         __func__, idn, err);
1678                 goto out_unlock;
1679         }
1680
1681         if (flag_res)
1682                 *flag_res = (be32_to_cpu(response->upiu_res.value) &
1683                                 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
1684
1685 out_unlock:
1686         mutex_unlock(&hba->dev_cmd.lock);
1687         ufshcd_release(hba);
1688         return err;
1689 }
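/*
 * Illustrative sketch only: a typical flag query pair, mirroring the
 * fDeviceInit handling in ufshcd_complete_dev_init() below. Setting a flag
 * needs no result pointer; reading one returns its value in flag_res:
 *
 *	bool flag_res;
 *	int err;
 *
 *	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
 *				QUERY_FLAG_IDN_FDEVICEINIT, NULL);
 *	if (!err)
 *		err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *					QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 */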
1690
1691 /**
1692  * ufshcd_query_attr - API function for sending attribute requests
1693  * @hba: per-adapter instance
1694  * @opcode: attribute opcode
1695  * @idn: attribute idn to access
1696  * @index: index field
1697  * @selector: selector field
1698  * @attr_val: the attribute value after the query request completes
1699  *
1700  * Returns 0 for success, non-zero in case of failure
1701  */
1702 static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
1703                         enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
1704 {
1705         struct ufs_query_req *request = NULL;
1706         struct ufs_query_res *response = NULL;
1707         int err;
1708
1709         BUG_ON(!hba);
1710
1711         ufshcd_hold(hba, false);
1712         if (!attr_val) {
1713                 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
1714                                 __func__, opcode);
1715                 err = -EINVAL;
1716                 goto out;
1717         }
1718
1719         mutex_lock(&hba->dev_cmd.lock);
1720         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
1721                         selector);
1722
1723         switch (opcode) {
1724         case UPIU_QUERY_OPCODE_WRITE_ATTR:
1725                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
1726                 request->upiu_req.value = cpu_to_be32(*attr_val);
1727                 break;
1728         case UPIU_QUERY_OPCODE_READ_ATTR:
1729                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
1730                 break;
1731         default:
1732                 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
1733                                 __func__, opcode);
1734                 err = -EINVAL;
1735                 goto out_unlock;
1736         }
1737
1738         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
1739
1740         if (err) {
1741                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
1742                                 __func__, opcode, idn, err);
1743                 goto out_unlock;
1744         }
1745
1746         *attr_val = be32_to_cpu(response->upiu_res.value);
1747
1748 out_unlock:
1749         mutex_unlock(&hba->dev_cmd.lock);
1750 out:
1751         ufshcd_release(hba);
1752         return err;
1753 }
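/*
 * Illustrative sketch only: reading a device attribute. The attribute idn
 * used here (QUERY_ATTR_IDN_BKOPS_STATUS, assumed to be defined in ufs.h)
 * is just an example; index and selector are typically 0 for device-level
 * attributes:
 *
 *	u32 bkops_status;
 *	int err;
 *
 *	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *				QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0,
 *				&bkops_status);
 */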
1754
1755 /**
1756  * ufshcd_query_descriptor - API function for sending descriptor requests
1757  * @hba: per-adapter instance
1758  * @opcode: descriptor opcode
1759  * @idn: descriptor idn to access
1760  * @index: index field
1761  * @selector: selector field
1762  * @desc_buf: the buffer that contains the descriptor
1763  * @buf_len: length parameter passed to the device
1764  *
1765  * Returns 0 for success, non-zero in case of failure.
1766  * The buf_len parameter will contain, on return, the length parameter
1767  * received on the response.
1768  */
1769 static int ufshcd_query_descriptor(struct ufs_hba *hba,
1770                         enum query_opcode opcode, enum desc_idn idn, u8 index,
1771                         u8 selector, u8 *desc_buf, int *buf_len)
1772 {
1773         struct ufs_query_req *request = NULL;
1774         struct ufs_query_res *response = NULL;
1775         int err;
1776
1777         BUG_ON(!hba);
1778
1779         ufshcd_hold(hba, false);
1780         if (!desc_buf) {
1781                 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
1782                                 __func__, opcode);
1783                 err = -EINVAL;
1784                 goto out;
1785         }
1786
1787         if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
1788                 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
1789                                 __func__, *buf_len);
1790                 err = -EINVAL;
1791                 goto out;
1792         }
1793
1794         mutex_lock(&hba->dev_cmd.lock);
1795         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
1796                         selector);
1797         hba->dev_cmd.query.descriptor = desc_buf;
1798         request->upiu_req.length = cpu_to_be16(*buf_len);
1799
1800         switch (opcode) {
1801         case UPIU_QUERY_OPCODE_WRITE_DESC:
1802                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
1803                 break;
1804         case UPIU_QUERY_OPCODE_READ_DESC:
1805                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
1806                 break;
1807         default:
1808                 dev_err(hba->dev,
1809                                 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
1810                                 __func__, opcode);
1811                 err = -EINVAL;
1812                 goto out_unlock;
1813         }
1814
1815         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
1816
1817         if (err) {
1818                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
1819                                 __func__, opcode, idn, err);
1820                 goto out_unlock;
1821         }
1822
1823         hba->dev_cmd.query.descriptor = NULL;
1824         *buf_len = be16_to_cpu(response->upiu_res.length);
1825
1826 out_unlock:
1827         mutex_unlock(&hba->dev_cmd.lock);
1828 out:
1829         ufshcd_release(hba);
1830         return err;
1831 }
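/*
 * Illustrative sketch only: reading a descriptor. Note that buf_len is
 * in/out - it is sent as the requested length and updated with the length
 * actually returned by the device, which ufshcd_read_desc_param() below
 * relies on:
 *
 *	u8 desc_buf[QUERY_DESC_MAX_SIZE];
 *	int buf_len = QUERY_DESC_MAX_SIZE;
 *	int err;
 *
 *	err = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
 *				      QUERY_DESC_IDN_POWER, 0, 0,
 *				      desc_buf, &buf_len);
 */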
1832
1833 /**
1834  * ufshcd_read_desc_param - read the specified descriptor parameter
1835  * @hba: Pointer to adapter instance
1836  * @desc_id: descriptor idn value
1837  * @desc_index: descriptor index
1838  * @param_offset: offset of the parameter to read
1839  * @param_read_buf: pointer to buffer where parameter would be read
1840  * @param_size: sizeof(param_read_buf)
1841  *
1842  * Return 0 in case of success, non-zero otherwise
1843  */
1844 static int ufshcd_read_desc_param(struct ufs_hba *hba,
1845                                   enum desc_idn desc_id,
1846                                   int desc_index,
1847                                   u32 param_offset,
1848                                   u8 *param_read_buf,
1849                                   u32 param_size)
1850 {
1851         int ret;
1852         u8 *desc_buf;
1853         u32 buff_len;
1854         bool is_kmalloc = true;
1855
1856         /* safety checks */
1857         if (desc_id >= QUERY_DESC_IDN_MAX)
1858                 return -EINVAL;
1859
1860         buff_len = ufs_query_desc_max_size[desc_id];
1861         if ((param_offset + param_size) > buff_len)
1862                 return -EINVAL;
1863
1864         if (!param_offset && (param_size == buff_len)) {
1865                 /* memory space already available to hold full descriptor */
1866                 desc_buf = param_read_buf;
1867                 is_kmalloc = false;
1868         } else {
1869                 /* allocate memory to hold full descriptor */
1870                 desc_buf = kmalloc(buff_len, GFP_KERNEL);
1871                 if (!desc_buf)
1872                         return -ENOMEM;
1873         }
1874
1875         ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
1876                                       desc_id, desc_index, 0, desc_buf,
1877                                       &buff_len);
1878
1879         if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
1880             (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
1881              ufs_query_desc_max_size[desc_id])
1882             || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
1883                 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d\n",
1884                         __func__, desc_id, param_offset, buff_len, ret);
1885                 if (!ret)
1886                         ret = -EINVAL;
1887
1888                 goto out;
1889         }
1890
1891         if (is_kmalloc)
1892                 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
1893 out:
1894         if (is_kmalloc)
1895                 kfree(desc_buf);
1896         return ret;
1897 }
1898
1899 static inline int ufshcd_read_desc(struct ufs_hba *hba,
1900                                    enum desc_idn desc_id,
1901                                    int desc_index,
1902                                    u8 *buf,
1903                                    u32 size)
1904 {
1905         return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
1906 }
1907
1908 static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
1909                                          u8 *buf,
1910                                          u32 size)
1911 {
1912         return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
1913 }
1914
1915 /**
1916  * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
1917  * @hba: Pointer to adapter instance
1918  * @lun: lun id
1919  * @param_offset: offset of the parameter to read
1920  * @param_read_buf: pointer to buffer where parameter would be read
1921  * @param_size: sizeof(param_read_buf)
1922  *
1923  * Return 0 in case of success, non-zero otherwise
1924  */
1925 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
1926                                               int lun,
1927                                               enum unit_desc_param param_offset,
1928                                               u8 *param_read_buf,
1929                                               u32 param_size)
1930 {
1931         /*
1932          * Unit descriptors are only available for general purpose LUs (LUN id
1933          * from 0 to 7) and RPMB Well known LU.
1934          */
1935         if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
1936                 return -EOPNOTSUPP;
1937
1938         return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
1939                                       param_offset, param_read_buf, param_size);
1940 }
1941
1942 /**
1943  * ufshcd_memory_alloc - allocate memory for host memory space data structures
1944  * @hba: per adapter instance
1945  *
1946  * 1. Allocate DMA memory for Command Descriptor array
1947  *      Each command descriptor consists of Command UPIU, Response UPIU and PRDT
1948  * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
1949  * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
1950  *      (UTMRDL)
1951  * 4. Allocate memory for local reference block (lrb).
1952  *
1953  * Returns 0 for success, non-zero in case of failure
1954  */
1955 static int ufshcd_memory_alloc(struct ufs_hba *hba)
1956 {
1957         size_t utmrdl_size, utrdl_size, ucdl_size;
1958
1959         /* Allocate memory for UTP command descriptors */
1960         ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
1961         hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
1962                                                   ucdl_size,
1963                                                   &hba->ucdl_dma_addr,
1964                                                   GFP_KERNEL);
1965
1966         /*
1967          * UFSHCI requires the UTP command descriptor to be 128 byte aligned.
1968          * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE;
1969          * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
1970          * be aligned to 128 bytes as well.
1971          */
1972         if (!hba->ucdl_base_addr ||
1973             WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
1974                 dev_err(hba->dev,
1975                         "Command Descriptor Memory allocation failed\n");
1976                 goto out;
1977         }
1978
1979         /*
1980          * Allocate memory for UTP Transfer descriptors
1981          * UFSHCI requires 1024 byte alignment of UTRD
1982          */
1983         utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
1984         hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
1985                                                    utrdl_size,
1986                                                    &hba->utrdl_dma_addr,
1987                                                    GFP_KERNEL);
1988         if (!hba->utrdl_base_addr ||
1989             WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
1990                 dev_err(hba->dev,
1991                         "Transfer Descriptor Memory allocation failed\n");
1992                 goto out;
1993         }
1994
1995         /*
1996          * Allocate memory for UTP Task Management descriptors
1997          * UFSHCI requires 1024 byte alignment of UTMRD
1998          */
1999         utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
2000         hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
2001                                                     utmrdl_size,
2002                                                     &hba->utmrdl_dma_addr,
2003                                                     GFP_KERNEL);
2004         if (!hba->utmrdl_base_addr ||
2005             WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
2006                 dev_err(hba->dev,
2007                 "Task Management Descriptor Memory allocation failed\n");
2008                 goto out;
2009         }
2010
2011         /* Allocate memory for local reference block */
2012         hba->lrb = devm_kzalloc(hba->dev,
2013                                 hba->nutrs * sizeof(struct ufshcd_lrb),
2014                                 GFP_KERNEL);
2015         if (!hba->lrb) {
2016                 dev_err(hba->dev, "LRB Memory allocation failed\n");
2017                 goto out;
2018         }
2019         return 0;
2020 out:
2021         return -ENOMEM;
2022 }
2023
2024 /**
2025  * ufshcd_host_memory_configure - configure local reference block with
2026  *                              memory offsets
2027  * @hba: per adapter instance
2028  *
2029  * Configure Host memory space
2030  * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
2031  * address.
2032  * 2. Update each UTRD with Response UPIU offset, Response UPIU length
2033  * and PRDT offset.
2034  * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
2035  * into local reference block.
2036  */
2037 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
2038 {
2039         struct utp_transfer_cmd_desc *cmd_descp;
2040         struct utp_transfer_req_desc *utrdlp;
2041         dma_addr_t cmd_desc_dma_addr;
2042         dma_addr_t cmd_desc_element_addr;
2043         u16 response_offset;
2044         u16 prdt_offset;
2045         int cmd_desc_size;
2046         int i;
2047
2048         utrdlp = hba->utrdl_base_addr;
2049         cmd_descp = hba->ucdl_base_addr;
2050
2051         response_offset =
2052                 offsetof(struct utp_transfer_cmd_desc, response_upiu);
2053         prdt_offset =
2054                 offsetof(struct utp_transfer_cmd_desc, prd_table);
2055
2056         cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
2057         cmd_desc_dma_addr = hba->ucdl_dma_addr;
2058
2059         for (i = 0; i < hba->nutrs; i++) {
2060                 /* Configure UTRD with command descriptor base address */
2061                 cmd_desc_element_addr =
2062                                 (cmd_desc_dma_addr + (cmd_desc_size * i));
2063                 utrdlp[i].command_desc_base_addr_lo =
2064                                 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
2065                 utrdlp[i].command_desc_base_addr_hi =
2066                                 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
2067
2068                 /* Response upiu and prdt offset should be in double words */
2069                 utrdlp[i].response_upiu_offset =
2070                                 cpu_to_le16((response_offset >> 2));
2071                 utrdlp[i].prd_table_offset =
2072                                 cpu_to_le16((prdt_offset >> 2));
2073                 utrdlp[i].response_upiu_length =
2074                                 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
2075
2076                 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
2077                 hba->lrb[i].ucd_req_ptr =
2078                         (struct utp_upiu_req *)(cmd_descp + i);
2079                 hba->lrb[i].ucd_rsp_ptr =
2080                         (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
2081                 hba->lrb[i].ucd_prdt_ptr =
2082                         (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
2083         }
2084 }
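/*
 * Rough layout of one UTP command descriptor as configured above (field
 * placement comes from struct utp_transfer_cmd_desc; the offsets written
 * into the UTRD are in 32-bit double-word units, hence the ">> 2" above):
 *
 *	+---------------------------+  <- command_desc_base_addr_lo/hi
 *	| Command UPIU              |
 *	+---------------------------+  <- response_upiu_offset (dwords)
 *	| Response UPIU             |
 *	+---------------------------+  <- prd_table_offset (dwords)
 *	| PRDT (scatter/gather)     |
 *	+---------------------------+
 */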
2085
2086 /**
2087  * ufshcd_dme_link_startup - Notify Unipro to perform link startup
2088  * @hba: per adapter instance
2089  *
2090  * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
2091  * in order to initialize the Unipro link startup procedure.
2092  * Once the Unipro links are up, the device connected to the controller
2093  * is detected.
2094  *
2095  * Returns 0 on success, non-zero value on failure
2096  */
2097 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
2098 {
2099         struct uic_command uic_cmd = {0};
2100         int ret;
2101
2102         uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
2103
2104         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2105         if (ret)
2106                 dev_err(hba->dev,
2107                         "dme-link-startup: error code %d\n", ret);
2108         return ret;
2109 }
2110
2111 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
2112 {
2113         #define MIN_DELAY_BEFORE_DME_CMDS_US    1000
2114         unsigned long min_sleep_time_us;
2115
2116         if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
2117                 return;
2118
2119         /*
2120          * last_dme_cmd_tstamp will be 0 only for 1st call to
2121          * this function
2122          */
2123         if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
2124                 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
2125         } else {
2126                 unsigned long delta =
2127                         (unsigned long) ktime_to_us(
2128                                 ktime_sub(ktime_get(),
2129                                 hba->last_dme_cmd_tstamp));
2130
2131                 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
2132                         min_sleep_time_us =
2133                                 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
2134                 else
2135                         return; /* no more delay required */
2136         }
2137
2138         /* allow sleep for extra 50us if needed */
2139         usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
2140 }
2141
2142 /**
2143  * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
2144  * @hba: per adapter instance
2145  * @attr_sel: uic command argument1
2146  * @attr_set: attribute set type as uic command argument2
2147  * @mib_val: setting value as uic command argument3
2148  * @peer: indicate whether peer or local
2149  *
2150  * Returns 0 on success, non-zero value on failure
2151  */
2152 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
2153                         u8 attr_set, u32 mib_val, u8 peer)
2154 {
2155         struct uic_command uic_cmd = {0};
2156         static const char *const action[] = {
2157                 "dme-set",
2158                 "dme-peer-set"
2159         };
2160         const char *set = action[!!peer];
2161         int ret;
2162
2163         uic_cmd.command = peer ?
2164                 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
2165         uic_cmd.argument1 = attr_sel;
2166         uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
2167         uic_cmd.argument3 = mib_val;
2168
2169         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2170         if (ret)
2171                 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
2172                         set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
2173
2174         return ret;
2175 }
2176 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
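/*
 * Illustrative sketch only: this function is normally reached through the
 * ufshcd_dme_set()/ufshcd_dme_peer_set() wrappers from ufshcd.h, e.g. as
 * ufshcd_change_power_mode() and ufshcd_disable_tx_lcc() below do:
 *
 *	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
 *	ret = ufshcd_dme_peer_set(hba,
 *			UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
 *					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), 0);
 */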
2177
2178 /**
2179  * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
2180  * @hba: per adapter instance
2181  * @attr_sel: uic command argument1
2182  * @mib_val: the value of the attribute as returned by the UIC command
2183  * @peer: indicate whether peer or local
2184  *
2185  * Returns 0 on success, non-zero value on failure
2186  */
2187 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
2188                         u32 *mib_val, u8 peer)
2189 {
2190         struct uic_command uic_cmd = {0};
2191         static const char *const action[] = {
2192                 "dme-get",
2193                 "dme-peer-get"
2194         };
2195         const char *get = action[!!peer];
2196         int ret;
2197         struct ufs_pa_layer_attr orig_pwr_info;
2198         struct ufs_pa_layer_attr temp_pwr_info;
2199         bool pwr_mode_change = false;
2200
2201         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
2202                 orig_pwr_info = hba->pwr_info;
2203                 temp_pwr_info = orig_pwr_info;
2204
2205                 if (orig_pwr_info.pwr_tx == FAST_MODE ||
2206                     orig_pwr_info.pwr_rx == FAST_MODE) {
2207                         temp_pwr_info.pwr_tx = FASTAUTO_MODE;
2208                         temp_pwr_info.pwr_rx = FASTAUTO_MODE;
2209                         pwr_mode_change = true;
2210                 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
2211                     orig_pwr_info.pwr_rx == SLOW_MODE) {
2212                         temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
2213                         temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
2214                         pwr_mode_change = true;
2215                 }
2216                 if (pwr_mode_change) {
2217                         ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
2218                         if (ret)
2219                                 goto out;
2220                 }
2221         }
2222
2223         uic_cmd.command = peer ?
2224                 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
2225         uic_cmd.argument1 = attr_sel;
2226
2227         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2228         if (ret) {
2229                 dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
2230                         get, UIC_GET_ATTR_ID(attr_sel), ret);
2231                 goto out;
2232         }
2233
2234         if (mib_val)
2235                 *mib_val = uic_cmd.argument3;
2236
2237         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
2238             && pwr_mode_change)
2239                 ufshcd_change_power_mode(hba, &orig_pwr_info);
2240 out:
2241         return ret;
2242 }
2243 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
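/*
 * Illustrative sketch only: as with the set path, callers normally use the
 * ufshcd_dme_get()/ufshcd_dme_peer_get() wrappers, e.g. to read the
 * connected lane counts as ufshcd_get_max_pwr_mode() below does:
 *
 *	u32 lanes = 0;
 *
 *	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes);
 *	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &lanes);
 */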
2244
2245 /**
2246  * ufshcd_uic_pwr_ctrl - executes UIC commands (which affect the link power
2247  * state) and waits for them to take effect.
2248  *
2249  * @hba: per adapter instance
2250  * @cmd: UIC command to execute
2251  *
2252  * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER and
2253  * DME_HIBERNATE_EXIT take some time to take effect on both the host and
2254  * device UniPro links, so their final completion is indicated by dedicated
2255  * status bits in the Interrupt Status register (UPMS, UHES, UHXS) in
2256  * addition to the normal UIC command completion status (UCCS). This function
2257  * only returns after the relevant status bits indicate completion.
2258  *
2259  * Returns 0 on success, non-zero value on failure
2260  */
2261 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
2262 {
2263         struct completion uic_async_done;
2264         unsigned long flags;
2265         u8 status;
2266         int ret;
2267
2268         mutex_lock(&hba->uic_cmd_mutex);
2269         init_completion(&uic_async_done);
2270         ufshcd_add_delay_before_dme_cmd(hba);
2271
2272         spin_lock_irqsave(hba->host->host_lock, flags);
2273         hba->uic_async_done = &uic_async_done;
2274         ret = __ufshcd_send_uic_cmd(hba, cmd);
2275         spin_unlock_irqrestore(hba->host->host_lock, flags);
2276         if (ret) {
2277                 dev_err(hba->dev,
2278                         "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
2279                         cmd->command, cmd->argument3, ret);
2280                 goto out;
2281         }
2282         ret = ufshcd_wait_for_uic_cmd(hba, cmd);
2283         if (ret) {
2284                 dev_err(hba->dev,
2285                         "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
2286                         cmd->command, cmd->argument3, ret);
2287                 goto out;
2288         }
2289
2290         if (!wait_for_completion_timeout(hba->uic_async_done,
2291                                          msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
2292                 dev_err(hba->dev,
2293                         "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
2294                         cmd->command, cmd->argument3);
2295                 ret = -ETIMEDOUT;
2296                 goto out;
2297         }
2298
2299         status = ufshcd_get_upmcrs(hba);
2300         if (status != PWR_LOCAL) {
2301                 dev_err(hba->dev,
2302                         "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
2303                         cmd->command, status);
2304                 ret = (status != PWR_OK) ? status : -1;
2305         }
2306 out:
2307         spin_lock_irqsave(hba->host->host_lock, flags);
2308         hba->uic_async_done = NULL;
2309         spin_unlock_irqrestore(hba->host->host_lock, flags);
2310         mutex_unlock(&hba->uic_cmd_mutex);
2311
2312         return ret;
2313 }
2314
2315 /**
2316  * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
2317  *                              using DME_SET primitives.
2318  * @hba: per adapter instance
2319  * @mode: power mode value
2320  *
2321  * Returns 0 on success, non-zero value on failure
2322  */
2323 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
2324 {
2325         struct uic_command uic_cmd = {0};
2326         int ret;
2327
2328         if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
2329                 ret = ufshcd_dme_set(hba,
2330                                 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
2331                 if (ret) {
2332                         dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
2333                                                 __func__, ret);
2334                         goto out;
2335                 }
2336         }
2337
2338         uic_cmd.command = UIC_CMD_DME_SET;
2339         uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
2340         uic_cmd.argument3 = mode;
2341         ufshcd_hold(hba, false);
2342         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2343         ufshcd_release(hba);
2344
2345 out:
2346         return ret;
2347 }
2348
2349 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
2350 {
2351         struct uic_command uic_cmd = {0};
2352
2353         uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
2354
2355         return ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2356 }
2357
2358 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
2359 {
2360         struct uic_command uic_cmd = {0};
2361         int ret;
2362
2363         uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
2364         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2365         if (ret) {
2366                 ufshcd_set_link_off(hba);
2367                 ret = ufshcd_host_reset_and_restore(hba);
2368         }
2369
2370         return ret;
2371 }
2372
2373 /**
2374  * ufshcd_init_pwr_info - setting the POR (power on reset)
2375  * values in hba power info
2376  * @hba: per-adapter instance
2377  */
2378 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
2379 {
2380         hba->pwr_info.gear_rx = UFS_PWM_G1;
2381         hba->pwr_info.gear_tx = UFS_PWM_G1;
2382         hba->pwr_info.lane_rx = 1;
2383         hba->pwr_info.lane_tx = 1;
2384         hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
2385         hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
2386         hba->pwr_info.hs_rate = 0;
2387 }
2388
2389 /**
2390  * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
2391  * @hba: per-adapter instance
2392  */
2393 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
2394 {
2395         struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
2396
2397         if (hba->max_pwr_info.is_valid)
2398                 return 0;
2399
2400         pwr_info->pwr_tx = FASTAUTO_MODE;
2401         pwr_info->pwr_rx = FASTAUTO_MODE;
2402         pwr_info->hs_rate = PA_HS_MODE_B;
2403
2404         /* Get the connected lane count */
2405         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
2406                         &pwr_info->lane_rx);
2407         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
2408                         &pwr_info->lane_tx);
2409
2410         if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
2411                 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
2412                                 __func__,
2413                                 pwr_info->lane_rx,
2414                                 pwr_info->lane_tx);
2415                 return -EINVAL;
2416         }
2417
2418         /*
2419          * First, get the maximum gears of HS speed.
2420          * If it is zero, there is no HSGEAR capability.
2421          * Then, get the maximum gears of PWM speed.
2422          */
2423         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
2424         if (!pwr_info->gear_rx) {
2425                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
2426                                 &pwr_info->gear_rx);
2427                 if (!pwr_info->gear_rx) {
2428                         dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
2429                                 __func__, pwr_info->gear_rx);
2430                         return -EINVAL;
2431                 }
2432                 pwr_info->pwr_rx = SLOWAUTO_MODE;
2433         }
2434
2435         ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
2436                         &pwr_info->gear_tx);
2437         if (!pwr_info->gear_tx) {
2438                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
2439                                 &pwr_info->gear_tx);
2440                 if (!pwr_info->gear_tx) {
2441                         dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
2442                                 __func__, pwr_info->gear_tx);
2443                         return -EINVAL;
2444                 }
2445                 pwr_info->pwr_tx = SLOWAUTO_MODE;
2446         }
2447
2448         hba->max_pwr_info.is_valid = true;
2449         return 0;
2450 }
2451
2452 static int ufshcd_change_power_mode(struct ufs_hba *hba,
2453                              struct ufs_pa_layer_attr *pwr_mode)
2454 {
2455         int ret;
2456
2457         /* if already configured to the requested pwr_mode */
2458         if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
2459             pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
2460             pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
2461             pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
2462             pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
2463             pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
2464             pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
2465                 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
2466                 return 0;
2467         }
2468
2469         /*
2470          * Configure attributes for power mode change with below.
2471          * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
2472          * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
2473          * - PA_HSSERIES
2474          */
2475         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
2476         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
2477                         pwr_mode->lane_rx);
2478         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
2479                         pwr_mode->pwr_rx == FAST_MODE)
2480                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
2481         else
2482                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
2483
2484         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
2485         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
2486                         pwr_mode->lane_tx);
2487         if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
2488                         pwr_mode->pwr_tx == FAST_MODE)
2489                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
2490         else
2491                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
2492
2493         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
2494             pwr_mode->pwr_tx == FASTAUTO_MODE ||
2495             pwr_mode->pwr_rx == FAST_MODE ||
2496             pwr_mode->pwr_tx == FAST_MODE)
2497                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
2498                                                 pwr_mode->hs_rate);
2499
2500         ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
2501                         | pwr_mode->pwr_tx);
2502
2503         if (ret) {
2504                 dev_err(hba->dev,
2505                         "%s: power mode change failed %d\n", __func__, ret);
2506         } else {
2507                 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
2508                                                                 pwr_mode);
2509
2510                 memcpy(&hba->pwr_info, pwr_mode,
2511                         sizeof(struct ufs_pa_layer_attr));
2512         }
2513
2514         return ret;
2515 }
2516
2517 /**
2518  * ufshcd_config_pwr_mode - configure a new power mode
2519  * @hba: per-adapter instance
2520  * @desired_pwr_mode: desired power configuration
2521  */
2522 static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
2523                 struct ufs_pa_layer_attr *desired_pwr_mode)
2524 {
2525         struct ufs_pa_layer_attr final_params = { 0 };
2526         int ret;
2527
2528         ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
2529                                         desired_pwr_mode, &final_params);
2530
2531         if (ret)
2532                 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
2533
2534         ret = ufshcd_change_power_mode(hba, &final_params);
2535
2536         return ret;
2537 }
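/*
 * Illustrative sketch only: during device initialization the driver first
 * reads the negotiated capabilities with ufshcd_get_max_pwr_mode() and then
 * applies them through this function (the actual call site is in the probe
 * path, outside this excerpt):
 *
 *	if (!ufshcd_get_max_pwr_mode(hba))
 *		ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
 */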
2538
2539 /**
2540  * ufshcd_complete_dev_init() - checks device readiness
2541  * @hba: per-adapter instance
2542  *
2543  * Set fDeviceInit flag and poll until device toggles it.
2544  */
2545 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
2546 {
2547         int i, retries, err = 0;
2548         bool flag_res = 1;
2549
2550         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2551                 /* Set the fDeviceInit flag */
2552                 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
2553                                         QUERY_FLAG_IDN_FDEVICEINIT, NULL);
2554                 if (!err || err == -ETIMEDOUT)
2555                         break;
2556                 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
2557         }
2558         if (err) {
2559                 dev_err(hba->dev,
2560                         "%s setting fDeviceInit flag failed with error %d\n",
2561                         __func__, err);
2562                 goto out;
2563         }
2564
2565         /* poll for max. 100 iterations for fDeviceInit flag to clear */
2566         for (i = 0; i < 100 && !err && flag_res; i++) {
2567                 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2568                         err = ufshcd_query_flag(hba,
2569                                         UPIU_QUERY_OPCODE_READ_FLAG,
2570                                         QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
2571                         if (!err || err == -ETIMEDOUT)
2572                                 break;
2573                         dev_dbg(hba->dev, "%s: error %d retrying\n", __func__,
2574                                         err);
2575                 }
2576         }
2577         if (err)
2578                 dev_err(hba->dev,
2579                         "%s reading fDeviceInit flag failed with error %d\n",
2580                         __func__, err);
2581         else if (flag_res)
2582                 dev_err(hba->dev,
2583                         "%s fDeviceInit was not cleared by the device\n",
2584                         __func__);
2585
2586 out:
2587         return err;
2588 }
2589
2590 /**
2591  * ufshcd_make_hba_operational - Make UFS controller operational
2592  * @hba: per adapter instance
2593  *
2594  * To bring UFS host controller to operational state,
2595  * 1. Enable required interrupts
2596  * 2. Configure interrupt aggregation
2597  * 3. Program UTRL and UTMRL base addresses
2598  * 4. Configure run-stop-registers
2599  *
2600  * Returns 0 on success, non-zero value on failure
2601  */
2602 static int ufshcd_make_hba_operational(struct ufs_hba *hba)
2603 {
2604         int err = 0;
2605         u32 reg;
2606
2607         /* Enable required interrupts */
2608         ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
2609
2610         /* Configure interrupt aggregation */
2611         if (ufshcd_is_intr_aggr_allowed(hba))
2612                 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
2613         else
2614                 ufshcd_disable_intr_aggr(hba);
2615
2616         /* Configure UTRL and UTMRL base address registers */
2617         ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
2618                         REG_UTP_TRANSFER_REQ_LIST_BASE_L);
2619         ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
2620                         REG_UTP_TRANSFER_REQ_LIST_BASE_H);
2621         ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
2622                         REG_UTP_TASK_REQ_LIST_BASE_L);
2623         ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
2624                         REG_UTP_TASK_REQ_LIST_BASE_H);
2625
2626         /*
2627          * UCRDY, UTMRLDY and UTRLRDY bits must be 1
2628          * DEI, HEI bits must be 0
2629          */
2630         reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
2631         if (!(ufshcd_get_lists_status(reg))) {
2632                 ufshcd_enable_run_stop_reg(hba);
2633         } else {
2634                 dev_err(hba->dev,
2635                         "Host controller not ready to process requests");
2636                 err = -EIO;
2637                 goto out;
2638         }
2639
2640 out:
2641         return err;
2642 }
2643
2644 /**
2645  * ufshcd_hba_enable - initialize the controller
2646  * @hba: per adapter instance
2647  *
2648  * The controller resets itself and the controller firmware initialization
2649  * sequence kicks off. When the controller is ready it will set
2650  * the Host Controller Enable bit to 1.
2651  *
2652  * Returns 0 on success, non-zero value on failure
2653  */
2654 static int ufshcd_hba_enable(struct ufs_hba *hba)
2655 {
2656         int retry;
2657
2658         /*
2659          * msleep of 1 and 5 used in this function might result in msleep(20),
2660          * but it was necessary to send the UFS FPGA to reset mode during
2661          * development and testing of this driver. msleep can be changed to
2662          * mdelay and retry count can be reduced based on the controller.
2663          */
2664         if (!ufshcd_is_hba_active(hba)) {
2665
2666                 /* change controller state to "reset state" */
2667                 ufshcd_hba_stop(hba);
2668
2669                 /*
2670                  * This delay is based on the testing done with UFS host
2671                  * controller FPGA. The delay can be changed based on the
2672                  * host controller used.
2673                  */
2674                 msleep(5);
2675         }
2676
2677         /* UniPro link is disabled at this point */
2678         ufshcd_set_link_off(hba);
2679
2680         ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
2681
2682         /* start controller initialization sequence */
2683         ufshcd_hba_start(hba);
2684
2685         /*
2686          * To initialize a UFS host controller HCE bit must be set to 1.
2687          * During initialization the HCE bit value changes from 1->0->1.
2688          * When the host controller completes initialization sequence
2689          * it sets the value of HCE bit to 1. The same HCE bit is read back
2690          * to check if the controller has completed initialization sequence.
2691          * So without this delay the value HCE = 1, set in the previous
2692          * instruction might be read back.
2693          * This delay can be changed based on the controller.
2694          */
2695         msleep(1);
2696
2697         /* wait for the host controller to complete initialization */
2698         retry = 10;
2699         while (ufshcd_is_hba_active(hba)) {
2700                 if (retry) {
2701                         retry--;
2702                 } else {
2703                         dev_err(hba->dev,
2704                                 "Controller enable failed\n");
2705                         return -EIO;
2706                 }
2707                 msleep(5);
2708         }
2709
2710         /* enable UIC related interrupts */
2711         ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
2712
2713         ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
2714
2715         return 0;
2716 }
2717
2718 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
2719 {
2720         int tx_lanes, i, err = 0;
2721
2722         if (!peer)
2723                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
2724                                &tx_lanes);
2725         else
2726                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
2727                                     &tx_lanes);
2728         for (i = 0; i < tx_lanes; i++) {
2729                 if (!peer)
2730                         err = ufshcd_dme_set(hba,
2731                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
2732                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
2733                                         0);
2734                 else
2735                         err = ufshcd_dme_peer_set(hba,
2736                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
2737                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
2738                                         0);
2739                 if (err) {
2740                         dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d\n",
2741                                 __func__, peer, i, err);
2742                         break;
2743                 }
2744         }
2745
2746         return err;
2747 }
2748
2749 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
2750 {
2751         return ufshcd_disable_tx_lcc(hba, true);
2752 }
2753
2754 /**
2755  * ufshcd_link_startup - Initialize unipro link startup
2756  * @hba: per adapter instance
2757  *
2758  * Returns 0 for success, non-zero in case of failure
2759  */
2760 static int ufshcd_link_startup(struct ufs_hba *hba)
2761 {
2762         int ret;
2763         int retries = DME_LINKSTARTUP_RETRIES;
2764
2765         do {
2766                 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
2767
2768                 ret = ufshcd_dme_link_startup(hba);
2769
2770                 /* check if device is detected by inter-connect layer */
2771                 if (!ret && !ufshcd_is_device_present(hba)) {
2772                         dev_err(hba->dev, "%s: Device not present\n", __func__);
2773                         ret = -ENXIO;
2774                         goto out;
2775                 }
2776
2777                 /*
2778                  * DME link lost indication is only received when link is up,
2779                  * but we can't be sure if the link is up until link startup
2780                  * succeeds. So reset the local Uni-Pro and try again.
2781                  */
2782                 if (ret && ufshcd_hba_enable(hba))
2783                         goto out;
2784         } while (ret && retries--);
2785
2786         if (ret)
2787                 /* failed to get the link up... give up */
2788                 goto out;
2789
2790         if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
2791                 ret = ufshcd_disable_device_tx_lcc(hba);
2792                 if (ret)
2793                         goto out;
2794         }
2795
2796         /* Include any host controller configuration via UIC commands */
2797         ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
2798         if (ret)
2799                 goto out;
2800
2801         ret = ufshcd_make_hba_operational(hba);
2802 out:
2803         if (ret)
2804                 dev_err(hba->dev, "link startup failed %d\n", ret);
2805         return ret;
2806 }
2807
2808 /**
2809  * ufshcd_verify_dev_init() - Verify device initialization
2810  * @hba: per-adapter instance
2811  *
2812  * Send NOP OUT UPIU and wait for NOP IN response to check whether the
2813  * device Transport Protocol (UTP) layer is ready after a reset.
2814  * If the UTP layer at the device side is not initialized, it may
2815  * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
2816  * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
2817  */
2818 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
2819 {
2820         int err = 0;
2821         int retries;
2822
2823         ufshcd_hold(hba, false);
2824         mutex_lock(&hba->dev_cmd.lock);
2825         for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
2826                 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
2827                                                NOP_OUT_TIMEOUT);
2828
2829                 if (!err || err == -ETIMEDOUT)
2830                         break;
2831
2832                 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
2833         }
2834         mutex_unlock(&hba->dev_cmd.lock);
2835         ufshcd_release(hba);
2836
2837         if (err)
2838                 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
2839         return err;
2840 }
2841
2842 /**
2843  * ufshcd_set_queue_depth - set lun queue depth
2844  * @sdev: pointer to SCSI device
2845  *
2846  * Read bLUQueueDepth value and activate scsi tagged command
2847  * queueing. For WLUN, queue depth is set to 1. For best-effort
2848  * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
2849  * value that the host can queue.
2850  */
2851 static void ufshcd_set_queue_depth(struct scsi_device *sdev)
2852 {
2853         int ret = 0;
2854         u8 lun_qdepth;
2855         struct ufs_hba *hba;
2856
2857         hba = shost_priv(sdev->host);
2858
2859         lun_qdepth = hba->nutrs;
2860         ret = ufshcd_read_unit_desc_param(hba,
2861                                           ufshcd_scsi_to_upiu_lun(sdev->lun),
2862                                           UNIT_DESC_PARAM_LU_Q_DEPTH,
2863                                           &lun_qdepth,
2864                                           sizeof(lun_qdepth));
2865
2866         /* Some WLUNs don't support unit descriptors */
2867         if (ret == -EOPNOTSUPP)
2868                 lun_qdepth = 1;
2869         else if (!lun_qdepth)
2870                 /* eventually, we can figure out the real queue depth */
2871                 lun_qdepth = hba->nutrs;
2872         else
2873                 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
2874
2875         dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
2876                         __func__, lun_qdepth);
2877         scsi_change_queue_depth(sdev, lun_qdepth);
2878 }
2879
2880 /**
2881  * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
2882  * @hba: per-adapter instance
2883  * @lun: UFS device lun id
2884  * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
2885  *
2886  * Returns 0 in case of success and the b_lu_write_protect status is returned
2887  * in the @b_lu_write_protect parameter.
2888  * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
2889  * Returns -EINVAL in case of invalid parameters passed to this function.
2890  */
2891 static int ufshcd_get_lu_wp(struct ufs_hba *hba,
2892                             u8 lun,
2893                             u8 *b_lu_write_protect)
2894 {
2895         int ret;
2896
2897         if (!b_lu_write_protect)
2898                 ret = -EINVAL;
2899         /*
2900          * According to UFS device spec, RPMB LU can't be write
2901          * protected so skip reading bLUWriteProtect parameter for
2902          * it. For other W-LUs, UNIT DESCRIPTOR is not available.
2903          */
2904         else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
2905                 ret = -ENOTSUPP;
2906         else
2907                 ret = ufshcd_read_unit_desc_param(hba,
2908                                           lun,
2909                                           UNIT_DESC_PARAM_LU_WR_PROTECT,
2910                                           b_lu_write_protect,
2911                                           sizeof(*b_lu_write_protect));
2912         return ret;
2913 }
2914
2915 /**
2916  * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
2917  * status
2918  * @hba: per-adapter instance
2919  * @sdev: pointer to SCSI device
2920  *
2921  */
2922 static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
2923                                                     struct scsi_device *sdev)
2924 {
2925         if (hba->dev_info.f_power_on_wp_en &&
2926             !hba->dev_info.is_lu_power_on_wp) {
2927                 u8 b_lu_write_protect;
2928
2929                 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
2930                                       &b_lu_write_protect) &&
2931                     (b_lu_write_protect == UFS_LU_POWER_ON_WP))
2932                         hba->dev_info.is_lu_power_on_wp = true;
2933         }
2934 }
2935
2936 /**
2937  * ufshcd_slave_alloc - handle initial SCSI device configurations
2938  * @sdev: pointer to SCSI device
2939  *
2940  * Returns success
2941  */
2942 static int ufshcd_slave_alloc(struct scsi_device *sdev)
2943 {
2944         struct ufs_hba *hba;
2945
2946         hba = shost_priv(sdev->host);
2947
2948         /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
2949         sdev->use_10_for_ms = 1;
2950
2951         /* allow SCSI layer to restart the device in case of errors */
2952         sdev->allow_restart = 1;
2953
2954         /* REPORT SUPPORTED OPERATION CODES is not supported */
2955         sdev->no_report_opcodes = 1;
2956
2957         /* WRITE_SAME command is not supported */
2958         sdev->no_write_same = 1;
2959
2960         ufshcd_set_queue_depth(sdev);
2961
2962         ufshcd_get_lu_power_on_wp_status(hba, sdev);
2963
2964         return 0;
2965 }
2966
2967 /**
2968  * ufshcd_change_queue_depth - change queue depth
2969  * @sdev: pointer to SCSI device
2970  * @depth: required depth to set
2971  *
2972  * Change queue depth and make sure the max. limits are not crossed.
2973  */
2974 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
2975 {
2976         struct ufs_hba *hba = shost_priv(sdev->host);
2977
2978         if (depth > hba->nutrs)
2979                 depth = hba->nutrs;
2980         return scsi_change_queue_depth(sdev, depth);
2981 }
2982
2983 /**
2984  * ufshcd_slave_configure - adjust SCSI device configurations
2985  * @sdev: pointer to SCSI device
2986  */
2987 static int ufshcd_slave_configure(struct scsi_device *sdev)
2988 {
2989         struct request_queue *q = sdev->request_queue;
2990
2991         blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
2992         blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
2993
2994         return 0;
2995 }
2996
2997 /**
2998  * ufshcd_slave_destroy - remove SCSI device configurations
2999  * @sdev: pointer to SCSI device
3000  */
3001 static void ufshcd_slave_destroy(struct scsi_device *sdev)
3002 {
3003         struct ufs_hba *hba;
3004
3005         hba = shost_priv(sdev->host);
3006         /* Drop the reference as it won't be needed anymore */
3007         if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
3008                 unsigned long flags;
3009
3010                 spin_lock_irqsave(hba->host->host_lock, flags);
3011                 hba->sdev_ufs_device = NULL;
3012                 spin_unlock_irqrestore(hba->host->host_lock, flags);
3013         }
3014 }
3015
3016 /**
3017  * ufshcd_task_req_compl - handle task management request completion
3018  * @hba: per adapter instance
3019  * @index: index of the completed request
3020  * @resp: task management service response
3021  *
3022  * Returns non-zero value on error, zero on success
3023  */
3024 static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
3025 {
3026         struct utp_task_req_desc *task_req_descp;
3027         struct utp_upiu_task_rsp *task_rsp_upiup;
3028         unsigned long flags;
3029         int ocs_value;
3030         int task_result;
3031
3032         spin_lock_irqsave(hba->host->host_lock, flags);
3033
3034         /* Clear completed tasks from outstanding_tasks */
3035         __clear_bit(index, &hba->outstanding_tasks);
3036
3037         task_req_descp = hba->utmrdl_base_addr;
3038         ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
3039
3040         if (ocs_value == OCS_SUCCESS) {
3041                 task_rsp_upiup = (struct utp_upiu_task_rsp *)
3042                                 task_req_descp[index].task_rsp_upiu;
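                /*
                 * The TM service response lives in bits 15:8 of dword_1 of the
                 * response UPIU header, hence the mask and shift below.
                 */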
3043                 task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
3044                 task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
3045                 if (resp)
3046                         *resp = (u8)task_result;
3047         } else {
3048                 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
3049                                 __func__, ocs_value);
3050         }
3051         spin_unlock_irqrestore(hba->host->host_lock, flags);
3052
3053         return ocs_value;
3054 }
3055
3056 /**
3057  * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
3058  * @lrbp: pointer to local reference block of completed command
3059  * @scsi_status: SCSI command status
3060  *
3061  * Returns value based on SCSI command status
3062  */
3063 static inline int
3064 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
3065 {
3066         int result = 0;
3067
3068         switch (scsi_status) {
3069         case SAM_STAT_CHECK_CONDITION:
3070                 ufshcd_copy_sense_data(lrbp);
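                /* intentional fall-through: CHECK CONDITION is still reported with DID_OK */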
3071         case SAM_STAT_GOOD:
3072                 result |= DID_OK << 16 |
3073                           COMMAND_COMPLETE << 8 |
3074                           scsi_status;
3075                 break;
3076         case SAM_STAT_TASK_SET_FULL:
3077         case SAM_STAT_BUSY:
3078         case SAM_STAT_TASK_ABORTED:
3079                 ufshcd_copy_sense_data(lrbp);
3080                 result |= scsi_status;
3081                 break;
3082         default:
3083                 result |= DID_ERROR << 16;
3084                 break;
3085         } /* end of switch */
3086
3087         return result;
3088 }
3089
3090 /**
3091  * ufshcd_transfer_rsp_status - Get overall status of the response
3092  * @hba: per adapter instance
3093  * @lrbp: pointer to local reference block of completed command
3094  *
3095  * Returns result of the command to notify SCSI midlayer
3096  */
3097 static inline int
3098 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
3099 {
3100         int result = 0;
3101         int scsi_status;
3102         int ocs;
3103
3104         /* overall command status of utrd */
3105         ocs = ufshcd_get_tr_ocs(lrbp);
3106
3107         switch (ocs) {
3108         case OCS_SUCCESS:
3109                 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
3110
3111                 switch (result) {
3112                 case UPIU_TRANSACTION_RESPONSE:
3113                         /*
3114                          * get the response UPIU result to extract
3115                          * the SCSI command status
3116                          */
3117                         result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
3118
3119                         /*
3120                          * get the result based on SCSI status response
3121                          * to notify the SCSI midlayer of the command status
3122                          */
3123                         scsi_status = result & MASK_SCSI_STATUS;
3124                         result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
3125
3126                         if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
3127                                 schedule_work(&hba->eeh_work);
3128                         break;
3129                 case UPIU_TRANSACTION_REJECT_UPIU:
3130                         /* TODO: handle Reject UPIU Response */
3131                         result = DID_ERROR << 16;
3132                         dev_err(hba->dev,
3133                                 "Reject UPIU not fully implemented\n");
3134                         break;
3135                 default:
3136                         dev_err(hba->dev,
3137                                 "Unexpected request response code = %x\n",
3138                                 result);
3139                         result = DID_ERROR << 16;
3140                         break;
3141                 }
3142                 break;
3143         case OCS_ABORTED:
3144                 result |= DID_ABORT << 16;
3145                 break;
3146         case OCS_INVALID_COMMAND_STATUS:
3147                 result |= DID_REQUEUE << 16;
3148                 break;
3149         case OCS_INVALID_CMD_TABLE_ATTR:
3150         case OCS_INVALID_PRDT_ATTR:
3151         case OCS_MISMATCH_DATA_BUF_SIZE:
3152         case OCS_MISMATCH_RESP_UPIU_SIZE:
3153         case OCS_PEER_COMM_FAILURE:
3154         case OCS_FATAL_ERROR:
3155         default:
3156                 result |= DID_ERROR << 16;
3157                 dev_err(hba->dev,
3158                 "OCS error from controller = %x\n", ocs);
3159                 break;
3160         } /* end of switch */
3161
3162         return result;
3163 }
3164
3165 /**
3166  * ufshcd_uic_cmd_compl - handle completion of uic command
3167  * @hba: per adapter instance
3168  * @intr_status: interrupt status generated by the controller
3169  */
3170 static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
3171 {
3172         if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
3173                 hba->active_uic_cmd->argument2 |=
3174                         ufshcd_get_uic_cmd_result(hba);
3175                 hba->active_uic_cmd->argument3 =
3176                         ufshcd_get_dme_attr_val(hba);
3177                 complete(&hba->active_uic_cmd->done);
3178         }
3179
3180         if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
3181                 complete(hba->uic_async_done);
3182 }
3183
3184 /**
3185  * ufshcd_transfer_req_compl - handle SCSI and query command completion
3186  * @hba: per adapter instance
3187  */
3188 static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
3189 {
3190         struct ufshcd_lrb *lrbp;
3191         struct scsi_cmnd *cmd;
3192         unsigned long completed_reqs;
3193         u32 tr_doorbell;
3194         int result;
3195         int index;
3196         struct request *req;
3197
3198         /* Resetting interrupt aggregation counters first and reading the
3199          * DOOR_BELL afterward allows us to handle all the completed requests.
3200          * In order to prevent starvation of other interrupts, the DB is read once
3201          * after reset. The downside of this solution is the possibility of a
3202          * false interrupt if the device completes another request after resetting
3203          * aggregation and before reading the DB.
3204          */
3205         if (ufshcd_is_intr_aggr_allowed(hba))
3206                 ufshcd_reset_intr_aggr(hba);
3207
3208         tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
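        /*
         * Requests still marked outstanding whose doorbell bit the controller
         * has cleared are the completed ones; the XOR below extracts them.
         */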
3209         completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
3210
3211         for_each_set_bit(index, &completed_reqs, hba->nutrs) {
3212                 lrbp = &hba->lrb[index];
3213                 cmd = lrbp->cmd;
3214                 if (cmd) {
3215                         result = ufshcd_transfer_rsp_status(hba, lrbp);
3216                         scsi_dma_unmap(cmd);
3217                         cmd->result = result;
3218                         /* Mark completed command as NULL in LRB */
3219                         lrbp->cmd = NULL;
3220                         clear_bit_unlock(index, &hba->lrb_in_use);
3221                         req = cmd->request;
3222                         if (req) {
3223                                 /* Update IO svc time latency histogram */
3224                                 if (req->lat_hist_enabled) {
3225                                         ktime_t completion;
3226                                         u_int64_t delta_us;
3227
3228                                         completion = ktime_get();
3229                                         delta_us = ktime_us_delta(completion,
3230                                                   req->lat_hist_io_start);
3231                                         blk_update_latency_hist(
3232                                                 (rq_data_dir(req) == READ) ?
3233                                                 &hba->io_lat_read :
3234                                                 &hba->io_lat_write, delta_us);
3235                                 }
3236                         }
3237                         /* Do not touch lrbp after scsi done */
3238                         cmd->scsi_done(cmd);
3239                         __ufshcd_release(hba);
3240                 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
3241                         if (hba->dev_cmd.complete)
3242                                 complete(hba->dev_cmd.complete);
3243                 }
3244         }
3245
3246         /* clear corresponding bits of completed commands */
3247         hba->outstanding_reqs ^= completed_reqs;
3248
3249         ufshcd_clk_scaling_update_busy(hba);
3250
3251         /* we might have free'd some tags above */
3252         wake_up(&hba->dev_cmd.tag_wq);
3253 }
3254
3255 /**
3256  * ufshcd_disable_ee - disable exception event
3257  * @hba: per-adapter instance
3258  * @mask: exception event to disable
3259  *
3260  * Disables exception event in the device so that the EVENT_ALERT
3261  * bit is not set.
3262  *
3263  * Returns zero on success, non-zero error value on failure.
3264  */
3265 static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
3266 {
3267         int err = 0;
3268         u32 val;
3269
3270         if (!(hba->ee_ctrl_mask & mask))
3271                 goto out;
3272
3273         val = hba->ee_ctrl_mask & ~mask;
3274         val &= 0xFFFF; /* 2 bytes */
3275         err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
3276                         QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
3277         if (!err)
3278                 hba->ee_ctrl_mask &= ~mask;
3279 out:
3280         return err;
3281 }
3282
3283 /**
3284  * ufshcd_enable_ee - enable exception event
3285  * @hba: per-adapter instance
3286  * @mask: exception event to enable
3287  *
3288  * Enable corresponding exception event in the device to allow
3289  * device to alert host in critical scenarios.
3290  *
3291  * Returns zero on success, non-zero error value on failure.
3292  */
3293 static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
3294 {
3295         int err = 0;
3296         u32 val;
3297
3298         if (hba->ee_ctrl_mask & mask)
3299                 goto out;
3300
3301         val = hba->ee_ctrl_mask | mask;
3302         val &= 0xFFFF; /* 2 bytes */
3303         err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
3304                         QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
3305         if (!err)
3306                 hba->ee_ctrl_mask |= mask;
3307 out:
3308         return err;
3309 }
3310
3311 /**
3312  * ufshcd_enable_auto_bkops - Allow device managed BKOPS
3313  * @hba: per-adapter instance
3314  *
3315  * Allow device to manage background operations on its own. Enabling
3316  * this might lead to inconsistent latencies during normal data transfers
3317  * as the device is allowed to manage its own way of handling background
3318  * operations.
3319  *
3320  * Returns zero on success, non-zero on failure.
3321  */
3322 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
3323 {
3324         int err = 0;
3325
3326         if (hba->auto_bkops_enabled)
3327                 goto out;
3328
3329         err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
3330                         QUERY_FLAG_IDN_BKOPS_EN, NULL);
3331         if (err) {
3332                 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
3333                                 __func__, err);
3334                 goto out;
3335         }
3336
3337         hba->auto_bkops_enabled = true;
3338
3339         /* No need of URGENT_BKOPS exception from the device */
3340         err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
3341         if (err)
3342                 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
3343                                 __func__, err);
3344 out:
3345         return err;
3346 }
3347
3348 /**
3349  * ufshcd_disable_auto_bkops - block the device from doing background operations
3350  * @hba: per-adapter instance
3351  *
3352  * Disabling background operations improves command response latency but
3353  * has the drawback of the device moving into a critical state where it is
3354  * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
3355  * host is idle so that BKOPS are managed effectively without any negative
3356  * impacts.
3357  *
3358  * Returns zero on success, non-zero on failure.
3359  */
3360 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
3361 {
3362         int err = 0;
3363
3364         if (!hba->auto_bkops_enabled)
3365                 goto out;
3366
3367         /*
3368          * If host assisted BKOPs is to be enabled, make sure
3369          * urgent bkops exception is allowed.
3370          */
3371         err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
3372         if (err) {
3373                 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
3374                                 __func__, err);
3375                 goto out;
3376         }
3377
3378         err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
3379                         QUERY_FLAG_IDN_BKOPS_EN, NULL);
3380         if (err) {
3381                 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
3382                                 __func__, err);
3383                 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
3384                 goto out;
3385         }
3386
3387         hba->auto_bkops_enabled = false;
3388 out:
3389         return err;
3390 }
3391
3392 /**
3393  * ufshcd_force_reset_auto_bkops - force reset auto bkops state
3394  * @hba: per adapter instance
3395  *
3396  * After a device reset the device may toggle the BKOPS_EN flag
3397  * to its default value. The s/w tracking variables should be updated
3398  * as well. This function changes the auto-bkops state based on
3399  * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
3400  */
3401 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
3402 {
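        /*
         * Force the s/w tracking flag to the opposite of the desired state so
         * that the enable/disable helpers below do not early-return and the
         * query is actually sent to the device.
         */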
3403         if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
3404                 hba->auto_bkops_enabled = false;
3405                 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
3406                 ufshcd_enable_auto_bkops(hba);
3407         } else {
3408                 hba->auto_bkops_enabled = true;
3409                 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
3410                 ufshcd_disable_auto_bkops(hba);
3411         }
3412 }
3413
3414 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
3415 {
3416         return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3417                         QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
3418 }
3419
3420 /**
3421  * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
3422  * @hba: per-adapter instance
3423  * @status: bkops_status value
3424  *
3425  * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
3426  * flag in the device to permit background operations if the device
3427  * bkops_status is greater than or equal to "status" argument passed to
3428  * this function, disable otherwise.
3429  *
3430  * Returns 0 for success, non-zero in case of failure.
3431  *
3432  * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
3433  * to know whether auto bkops is enabled or disabled after this function
3434  * returns control to it.
3435  */
3436 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
3437                              enum bkops_status status)
3438 {
3439         int err;
3440         u32 curr_status = 0;
3441
3442         err = ufshcd_get_bkops_status(hba, &curr_status);
3443         if (err) {
3444                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
3445                                 __func__, err);
3446                 goto out;
3447         } else if (curr_status > BKOPS_STATUS_MAX) {
3448                 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
3449                                 __func__, curr_status);
3450                 err = -EINVAL;
3451                 goto out;
3452         }
3453
3454         if (curr_status >= status)
3455                 err = ufshcd_enable_auto_bkops(hba);
3456         else
3457                 err = ufshcd_disable_auto_bkops(hba);
3458 out:
3459         return err;
3460 }
3461
3462 /**
3463  * ufshcd_urgent_bkops - handle urgent bkops exception event
3464  * @hba: per-adapter instance
3465  *
3466  * Enable fBackgroundOpsEn flag in the device to permit background
3467  * operations.
3468  *
3469  * If BKOPS is enabled, this function returns 0; 1 if bkops is not enabled;
3470  * and a negative error value for any other failure.
3471  */
3472 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
3473 {
3474         return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
3475 }
3476
3477 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
3478 {
3479         return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3480                         QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
3481 }
3482
3483 /**
3484  * ufshcd_exception_event_handler - handle exceptions raised by device
3485  * @work: pointer to work data
3486  *
3487  * Read bExceptionEventStatus attribute from the device and handle the
3488  * exception event accordingly.
3489  */
3490 static void ufshcd_exception_event_handler(struct work_struct *work)
3491 {
3492         struct ufs_hba *hba;
3493         int err;
3494         u32 status = 0;
3495         hba = container_of(work, struct ufs_hba, eeh_work);
3496
3497         pm_runtime_get_sync(hba->dev);
3498         scsi_block_requests(hba->host);
3499         err = ufshcd_get_ee_status(hba, &status);
3500         if (err) {
3501                 dev_err(hba->dev, "%s: failed to get exception status %d\n",
3502                                 __func__, err);
3503                 goto out;
3504         }
3505
3506         status &= hba->ee_ctrl_mask;
3507         if (status & MASK_EE_URGENT_BKOPS) {
3508                 err = ufshcd_urgent_bkops(hba);
3509                 if (err < 0)
3510                         dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
3511                                         __func__, err);
3512         }
3513 out:
3514         scsi_unblock_requests(hba->host);
3515         pm_runtime_put_sync(hba->dev);
3516         return;
3517 }
3518
3519 /**
3520  * ufshcd_err_handler - handle UFS errors that require s/w attention
3521  * @work: pointer to work structure
3522  */
3523 static void ufshcd_err_handler(struct work_struct *work)
3524 {
3525         struct ufs_hba *hba;
3526         unsigned long flags;
3527         u32 err_xfer = 0;
3528         u32 err_tm = 0;
3529         int err = 0;
3530         int tag;
3531
3532         hba = container_of(work, struct ufs_hba, eh_work);
3533
3534         pm_runtime_get_sync(hba->dev);
3535         ufshcd_hold(hba, false);
3536
3537         spin_lock_irqsave(hba->host->host_lock, flags);
3538         if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
3539                 spin_unlock_irqrestore(hba->host->host_lock, flags);
3540                 goto out;
3541         }
3542
3543         hba->ufshcd_state = UFSHCD_STATE_RESET;
3544         ufshcd_set_eh_in_progress(hba);
3545
3546         /* Complete requests that have door-bell cleared by h/w */
3547         ufshcd_transfer_req_compl(hba);
3548         ufshcd_tmc_handler(hba);
3549         spin_unlock_irqrestore(hba->host->host_lock, flags);
3550
3551         /* Clear pending transfer requests */
3552         for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs)
3553                 if (ufshcd_clear_cmd(hba, tag))
3554                         err_xfer |= 1 << tag;
3555
3556         /* Clear pending task management requests */
3557         for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs)
3558                 if (ufshcd_clear_tm_cmd(hba, tag))
3559                         err_tm |= 1 << tag;
3560
3561         /* Complete the requests that are cleared by s/w */
3562         spin_lock_irqsave(hba->host->host_lock, flags);
3563         ufshcd_transfer_req_compl(hba);
3564         ufshcd_tmc_handler(hba);
3565         spin_unlock_irqrestore(hba->host->host_lock, flags);
3566
3567         /* Fatal errors need reset */
3568         if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
3569                         ((hba->saved_err & UIC_ERROR) &&
3570                          (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) {
3571                 err = ufshcd_reset_and_restore(hba);
3572                 if (err) {
3573                         dev_err(hba->dev, "%s: reset and restore failed\n",
3574                                         __func__);
3575                         hba->ufshcd_state = UFSHCD_STATE_ERROR;
3576                 }
3577                 /*
3578                  * Inform scsi mid-layer that we did reset and allow to handle
3579                  * Unit Attention properly.
3580                  */
3581                 scsi_report_bus_reset(hba->host, 0);
3582                 hba->saved_err = 0;
3583                 hba->saved_uic_err = 0;
3584         }
3585         ufshcd_clear_eh_in_progress(hba);
3586
3587 out:
3588         scsi_unblock_requests(hba->host);
3589         ufshcd_release(hba);
3590         pm_runtime_put_sync(hba->dev);
3591 }
3592
3593 /**
3594  * ufshcd_update_uic_error - check and set fatal UIC error flags.
3595  * @hba: per-adapter instance
3596  */
3597 static void ufshcd_update_uic_error(struct ufs_hba *hba)
3598 {
3599         u32 reg;
3600
3601         /* PA_INIT_ERROR is fatal and needs UIC reset */
3602         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
3603         if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
3604                 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
3605
3606         /* UIC NL/TL/DME errors need a software retry */
3607         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
3608         if (reg)
3609                 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
3610
3611         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
3612         if (reg)
3613                 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
3614
3615         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
3616         if (reg)
3617                 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
3618
3619         dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
3620                         __func__, hba->uic_error);
3621 }
3622
3623 /**
3624  * ufshcd_check_errors - Check for errors that need s/w attention
3625  * @hba: per-adapter instance
3626  */
3627 static void ufshcd_check_errors(struct ufs_hba *hba)
3628 {
3629         bool queue_eh_work = false;
3630
3631         if (hba->errors & INT_FATAL_ERRORS)
3632                 queue_eh_work = true;
3633
3634         if (hba->errors & UIC_ERROR) {
3635                 hba->uic_error = 0;
3636                 ufshcd_update_uic_error(hba);
3637                 if (hba->uic_error)
3638                         queue_eh_work = true;
3639         }
3640
3641         if (queue_eh_work) {
3642                 /* handle fatal errors only when link is functional */
3643                 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
3644                         /* block commands from scsi mid-layer */
3645                         scsi_block_requests(hba->host);
3646
3647                         /* transfer error masks to sticky bits */
3648                         hba->saved_err |= hba->errors;
3649                         hba->saved_uic_err |= hba->uic_error;
3650
3651                         hba->ufshcd_state = UFSHCD_STATE_ERROR;
3652                         schedule_work(&hba->eh_work);
3653                 }
3654         }
3655         /*
3656          * if (!queue_eh_work) -
3657          * Other errors are either non-fatal where host recovers
3658          * itself without s/w intervention or errors that will be
3659          * handled by the SCSI core layer.
3660          */
3661 }
3662
3663 /**
3664  * ufshcd_tmc_handler - handle task management function completion
3665  * @hba: per adapter instance
3666  */
3667 static void ufshcd_tmc_handler(struct ufs_hba *hba)
3668 {
3669         u32 tm_doorbell;
3670
3671         tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
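        /* Outstanding tasks whose doorbell bit has cleared have completed */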
3672         hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
3673         wake_up(&hba->tm_wq);
3674 }
3675
3676 /**
3677  * ufshcd_sl_intr - Interrupt service routine
3678  * @hba: per adapter instance
3679  * @intr_status: contains interrupts generated by the controller
3680  */
3681 static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
3682 {
3683         hba->errors = UFSHCD_ERROR_MASK & intr_status;
3684         if (hba->errors)
3685                 ufshcd_check_errors(hba);
3686
3687         if (intr_status & UFSHCD_UIC_MASK)
3688                 ufshcd_uic_cmd_compl(hba, intr_status);
3689
3690         if (intr_status & UTP_TASK_REQ_COMPL)
3691                 ufshcd_tmc_handler(hba);
3692
3693         if (intr_status & UTP_TRANSFER_REQ_COMPL)
3694                 ufshcd_transfer_req_compl(hba);
3695 }
3696
3697 /**
3698  * ufshcd_intr - Main interrupt service routine
3699  * @irq: irq number
3700  * @__hba: pointer to adapter instance
3701  *
3702  * Returns IRQ_HANDLED - If interrupt is valid
3703  *              IRQ_NONE - If invalid interrupt
3704  */
3705 static irqreturn_t ufshcd_intr(int irq, void *__hba)
3706 {
3707         u32 intr_status;
3708         irqreturn_t retval = IRQ_NONE;
3709         struct ufs_hba *hba = __hba;
3710
3711         spin_lock(hba->host->host_lock);
3712         intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
3713
3714         if (intr_status) {
3715                 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
3716                 ufshcd_sl_intr(hba, intr_status);
3717                 retval = IRQ_HANDLED;
3718         }
3719         spin_unlock(hba->host->host_lock);
3720         return retval;
3721 }
3722
3723 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
3724 {
3725         int err = 0;
3726         u32 mask = 1 << tag;
3727         unsigned long flags;
3728
3729         if (!test_bit(tag, &hba->outstanding_tasks))
3730                 goto out;
3731
3732         spin_lock_irqsave(hba->host->host_lock, flags);
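        /*
         * Per UFSHCI, writing a 0 to a bit in UTMRLCLR clears the corresponding
         * doorbell entry, hence the inverted mask below.
         */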
3733         ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
3734         spin_unlock_irqrestore(hba->host->host_lock, flags);
3735
3736         /* poll for max. 1 sec to clear door bell register by h/w */
3737         err = ufshcd_wait_for_register(hba,
3738                         REG_UTP_TASK_REQ_DOOR_BELL,
3739                         mask, 0, 1000, 1000);
3740 out:
3741         return err;
3742 }
3743
3744 /**
3745  * ufshcd_issue_tm_cmd - issues task management commands to controller
3746  * @hba: per adapter instance
3747  * @lun_id: LUN ID to which TM command is sent
3748  * @task_id: task ID to which the TM command is applicable
3749  * @tm_function: task management function opcode
3750  * @tm_response: task management service response return value
3751  *
3752  * Returns non-zero value on error, zero on success.
3753  */
3754 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
3755                 u8 tm_function, u8 *tm_response)
3756 {
3757         struct utp_task_req_desc *task_req_descp;
3758         struct utp_upiu_task_req *task_req_upiup;
3759         struct Scsi_Host *host;
3760         unsigned long flags;
3761         int free_slot;
3762         int err;
3763         int task_tag;
3764
3765         host = hba->host;
3766
3767         /*
3768          * Get free slot, sleep if slots are unavailable.
3769          * Even though we use wait_event() which sleeps indefinitely,
3770          * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
3771          */
3772         wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
3773         ufshcd_hold(hba, false);
3774
3775         spin_lock_irqsave(host->host_lock, flags);
3776         task_req_descp = hba->utmrdl_base_addr;
3777         task_req_descp += free_slot;
3778
3779         /* Configure task request descriptor */
3780         task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
3781         task_req_descp->header.dword_2 =
3782                         cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
3783
3784         /* Configure task request UPIU */
3785         task_req_upiup =
3786                 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
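        /*
         * TM request tags start right after the transfer request tag range
         * (0..nutrs-1), so they never collide with SCSI/device command tags.
         */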
3787         task_tag = hba->nutrs + free_slot;
3788         task_req_upiup->header.dword_0 =
3789                 UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
3790                                               lun_id, task_tag);
3791         task_req_upiup->header.dword_1 =
3792                 UPIU_HEADER_DWORD(0, tm_function, 0, 0);
3793         /*
3794          * The host shall provide the same value for LUN field in the basic
3795          * header and for Input Parameter.
3796          */
3797         task_req_upiup->input_param1 = cpu_to_be32(lun_id);
3798         task_req_upiup->input_param2 = cpu_to_be32(task_id);
3799
3800         /* send command to the controller */
3801         __set_bit(free_slot, &hba->outstanding_tasks);
3802         ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
3803
3804         spin_unlock_irqrestore(host->host_lock, flags);
3805
3806         /* wait until the task management command is completed */
3807         err = wait_event_timeout(hba->tm_wq,
3808                         test_bit(free_slot, &hba->tm_condition),
3809                         msecs_to_jiffies(TM_CMD_TIMEOUT));
3810         if (!err) {
3811                 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
3812                                 __func__, tm_function);
3813                 if (ufshcd_clear_tm_cmd(hba, free_slot))
3814                         dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
3815                                         __func__, free_slot);
3816                 err = -ETIMEDOUT;
3817         } else {
3818                 err = ufshcd_task_req_compl(hba, free_slot, tm_response);
3819         }
3820
3821         clear_bit(free_slot, &hba->tm_condition);
3822         ufshcd_put_tm_slot(hba, free_slot);
3823         wake_up(&hba->tm_tag_wq);
3824
3825         ufshcd_release(hba);
3826         return err;
3827 }
3828
3829 /**
3830  * ufshcd_eh_device_reset_handler - device reset handler registered to
3831  *                                    scsi layer.
3832  * @cmd: SCSI command pointer
3833  *
3834  * Returns SUCCESS/FAILED
3835  */
3836 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
3837 {
3838         struct Scsi_Host *host;
3839         struct ufs_hba *hba;
3840         unsigned int tag;
3841         u32 pos;
3842         int err;
3843         u8 resp = 0xF;
3844         struct ufshcd_lrb *lrbp;
3845         unsigned long flags;
3846
3847         host = cmd->device->host;
3848         hba = shost_priv(host);
3849         tag = cmd->request->tag;
3850
3851         lrbp = &hba->lrb[tag];
3852         err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
3853         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
3854                 if (!err)
3855                         err = resp;
3856                 goto out;
3857         }
3858
3859         /* clear the commands that were pending for corresponding LUN */
3860         for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
3861                 if (hba->lrb[pos].lun == lrbp->lun) {
3862                         err = ufshcd_clear_cmd(hba, pos);
3863                         if (err)
3864                                 break;
3865                 }
3866         }
3867         spin_lock_irqsave(host->host_lock, flags);
3868         ufshcd_transfer_req_compl(hba);
3869         spin_unlock_irqrestore(host->host_lock, flags);
3870 out:
3871         if (!err) {
3872                 err = SUCCESS;
3873         } else {
3874                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
3875                 err = FAILED;
3876         }
3877         return err;
3878 }
3879
3880 /**
3881  * ufshcd_abort - abort a specific command
3882  * @cmd: SCSI command pointer
3883  *
3884  * Abort the pending command in the device by sending the UFS_ABORT_TASK task
3885  * management command, and in the host controller by clearing the door-bell
3886  * register. There can be a race where the controller sends the command to the
3887  * device while the abort is issued. To avoid that, first issue UFS_QUERY_TASK
3888  * to check if the command was really issued and only then try to abort it.
3889  *
3890  * Returns SUCCESS/FAILED
3891  */
3892 static int ufshcd_abort(struct scsi_cmnd *cmd)
3893 {
3894         struct Scsi_Host *host;
3895         struct ufs_hba *hba;
3896         unsigned long flags;
3897         unsigned int tag;
3898         int err = 0;
3899         int poll_cnt;
3900         u8 resp = 0xF;
3901         struct ufshcd_lrb *lrbp;
3902         u32 reg;
3903
3904         host = cmd->device->host;
3905         hba = shost_priv(host);
3906         tag = cmd->request->tag;
3907
3908         ufshcd_hold(hba, false);
3909         /* If command is already aborted/completed, return SUCCESS */
3910         if (!(test_bit(tag, &hba->outstanding_reqs)))
3911                 goto out;
3912
3913         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
3914         if (!(reg & (1 << tag))) {
3915                 dev_err(hba->dev,
3916                 "%s: cmd was completed, but without a notifying intr, tag = %d",
3917                 __func__, tag);
3918         }
3919
3920         lrbp = &hba->lrb[tag];
3921         for (poll_cnt = 100; poll_cnt; poll_cnt--) {
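                /*
                 * Together with the 100-200us sleep below, 100 iterations bound
                 * this wait to roughly 10-20ms before giving up with -EBUSY.
                 */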
3922                 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
3923                                 UFS_QUERY_TASK, &resp);
3924                 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
3925                         /* cmd pending in the device */
3926                         break;
3927                 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
3928                         /*
3929                          * cmd not pending in the device, check if it is
3930                          * in transition.
3931                          */
3932                         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
3933                         if (reg & (1 << tag)) {
3934                                 /* sleep for max. 200us to stabilize */
3935                                 usleep_range(100, 200);
3936                                 continue;
3937                         }
3938                         /* command completed already */
3939                         goto out;
3940                 } else {
3941                         if (!err)
3942                                 err = resp; /* service response error */
3943                         goto out;
3944                 }
3945         }
3946
3947         if (!poll_cnt) {
3948                 err = -EBUSY;
3949                 goto out;
3950         }
3951
3952         err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
3953                         UFS_ABORT_TASK, &resp);
3954         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
3955                 if (!err)
3956                         err = resp; /* service response error */
3957                 goto out;
3958         }
3959
3960         err = ufshcd_clear_cmd(hba, tag);
3961         if (err)
3962                 goto out;
3963
3964         scsi_dma_unmap(cmd);
3965
3966         spin_lock_irqsave(host->host_lock, flags);
3967         __clear_bit(tag, &hba->outstanding_reqs);
3968         hba->lrb[tag].cmd = NULL;
3969         spin_unlock_irqrestore(host->host_lock, flags);
3970
3971         clear_bit_unlock(tag, &hba->lrb_in_use);
3972         wake_up(&hba->dev_cmd.tag_wq);
3973
3974 out:
3975         if (!err) {
3976                 err = SUCCESS;
3977         } else {
3978                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
3979                 err = FAILED;
3980         }
3981
3982         /*
3983          * This ufshcd_release() corresponds to the original scsi cmd that got
3984          * aborted here (as we won't get any IRQ for it).
3985          */
3986         ufshcd_release(hba);
3987         return err;
3988 }
3989
3990 /**
3991  * ufshcd_host_reset_and_restore - reset and restore host controller
3992  * @hba: per-adapter instance
3993  *
3994  * Note that host controller reset may issue DME_RESET to
3995  * local and remote (device) Uni-Pro stack and the attributes
3996  * are reset to default state.
3997  *
3998  * Returns zero on success, non-zero on failure
3999  */
4000 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
4001 {
4002         int err;
4003         unsigned long flags;
4004
4005         /* Reset the host controller */
4006         spin_lock_irqsave(hba->host->host_lock, flags);
4007         ufshcd_hba_stop(hba);
4008         spin_unlock_irqrestore(hba->host->host_lock, flags);
4009
4010         err = ufshcd_hba_enable(hba);
4011         if (err)
4012                 goto out;
4013
4014         /* Establish the link again and restore the device */
4015         err = ufshcd_probe_hba(hba);
4016
4017         if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
4018                 err = -EIO;
4019 out:
4020         if (err)
4021                 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
4022
4023         return err;
4024 }
4025
4026 /**
4027  * ufshcd_reset_and_restore - reset and re-initialize host/device
4028  * @hba: per-adapter instance
4029  *
4030  * Reset and recover device, host and re-establish link. This
4031  * is helpful to recover the communication in fatal error conditions.
4032  *
4033  * Returns zero on success, non-zero on failure
4034  */
4035 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
4036 {
4037         int err = 0;
4038         unsigned long flags;
4039         int retries = MAX_HOST_RESET_RETRIES;
4040
4041         do {
4042                 err = ufshcd_host_reset_and_restore(hba);
4043         } while (err && --retries);
4044
4045         /*
4046          * After reset the door-bell might be cleared, complete
4047          * outstanding requests in s/w here.
4048          */
4049         spin_lock_irqsave(hba->host->host_lock, flags);
4050         ufshcd_transfer_req_compl(hba);
4051         ufshcd_tmc_handler(hba);
4052         spin_unlock_irqrestore(hba->host->host_lock, flags);
4053
4054         return err;
4055 }
4056
4057 /**
4058  * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
4059  * @cmd: SCSI command pointer
4060  *
4061  * Returns SUCCESS/FAILED
4062  */
4063 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
4064 {
4065         int err;
4066         unsigned long flags;
4067         struct ufs_hba *hba;
4068
4069         hba = shost_priv(cmd->device->host);
4070
4071         ufshcd_hold(hba, false);
4072         /*
4073          * Check if there is any race with fatal error handling.
4074          * If so, wait for it to complete. Even though fatal error
4075          * handling does reset and restore in some cases, don't assume
4076          * anything out of it. We are just avoiding race here.
4077          */
4078         do {
4079                 spin_lock_irqsave(hba->host->host_lock, flags);
4080                 if (!(work_pending(&hba->eh_work) ||
4081                                 hba->ufshcd_state == UFSHCD_STATE_RESET))
4082                         break;
4083                 spin_unlock_irqrestore(hba->host->host_lock, flags);
4084                 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
4085                 flush_work(&hba->eh_work);
4086         } while (1);
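        /*
         * Note: the break above exits the loop with host_lock still held; it is
         * released only after the reset state is marked below.
         */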
4087
4088         hba->ufshcd_state = UFSHCD_STATE_RESET;
4089         ufshcd_set_eh_in_progress(hba);
4090         spin_unlock_irqrestore(hba->host->host_lock, flags);
4091
4092         err = ufshcd_reset_and_restore(hba);
4093
4094         spin_lock_irqsave(hba->host->host_lock, flags);
4095         if (!err) {
4096                 err = SUCCESS;
4097                 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
4098         } else {
4099                 err = FAILED;
4100                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4101         }
4102         ufshcd_clear_eh_in_progress(hba);
4103         spin_unlock_irqrestore(hba->host->host_lock, flags);
4104
4105         ufshcd_release(hba);
4106         return err;
4107 }
4108
4109 /**
4110  * ufshcd_get_max_icc_level - calculate the ICC level
4111  * @sup_curr_uA: max. current supported by the regulator
4112  * @start_scan: row in the desc table to start the scan from
4113  * @buff: power descriptor buffer
4114  *
4115  * Returns calculated max ICC level for specific regulator
4116  */
4117 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
4118 {
4119         int i;
4120         int curr_uA;
4121         u16 data;
4122         u16 unit;
4123
4124         for (i = start_scan; i >= 0; i--) {
4125                 data = be16_to_cpu(*((u16 *)(buff + 2*i)));
4126                 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
4127                                                 ATTR_ICC_LVL_UNIT_OFFSET;
4128                 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
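                /*
                 * Normalize the descriptor entry to microamps so it can be
                 * compared directly against sup_curr_uA.
                 */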
4129                 switch (unit) {
4130                 case UFSHCD_NANO_AMP:
4131                         curr_uA = curr_uA / 1000;
4132                         break;
4133                 case UFSHCD_MILI_AMP:
4134                         curr_uA = curr_uA * 1000;
4135                         break;
4136                 case UFSHCD_AMP:
4137                         curr_uA = curr_uA * 1000 * 1000;
4138                         break;
4139                 case UFSHCD_MICRO_AMP:
4140                 default:
4141                         break;
4142                 }
4143                 if (sup_curr_uA >= curr_uA)
4144                         break;
4145         }
4146         if (i < 0) {
4147                 i = 0;
4148                 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
4149         }
4150
4151         return (u32)i;
4152 }
4153
4154 /**
4155  * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
4156  * In case regulators are not initialized we'll return 0
4157  * @hba: per-adapter instance
4158  * @desc_buf: power descriptor buffer to extract ICC levels from.
4159  * @len: length of desc_buf
4160  *
4161  * Returns calculated ICC level
4162  */
4163 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
4164                                                         u8 *desc_buf, int len)
4165 {
4166         u32 icc_level = 0;
4167
4168         if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
4169                                                 !hba->vreg_info.vccq2) {
4170                 dev_err(hba->dev,
4171                         "%s: Regulator capability was not set, actvIccLevel=%d",
4172                                                         __func__, icc_level);
4173                 goto out;
4174         }
4175
4176         if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
4177                 icc_level = ufshcd_get_max_icc_level(
4178                                 hba->vreg_info.vcc->max_uA,
4179                                 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
4180                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
4181
4182         if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
4183                 icc_level = ufshcd_get_max_icc_level(
4184                                 hba->vreg_info.vccq->max_uA,
4185                                 icc_level,
4186                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
4187
4188         if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
4189                 icc_level = ufshcd_get_max_icc_level(
4190                                 hba->vreg_info.vccq2->max_uA,
4191                                 icc_level,
4192                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
4193 out:
4194         return icc_level;
4195 }
4196
4197 static void ufshcd_init_icc_levels(struct ufs_hba *hba)
4198 {
4199         int ret;
4200         int buff_len = QUERY_DESC_POWER_MAX_SIZE;
4201         u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
4202
4203         ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
4204         if (ret) {
4205                 dev_err(hba->dev,
4206                         "%s: Failed reading power descriptor.len = %d ret = %d",
4207                         __func__, buff_len, ret);
4208                 return;
4209         }
4210
4211         hba->init_prefetch_data.icc_level =
4212                         ufshcd_find_max_sup_active_icc_level(hba,
4213                         desc_buf, buff_len);
4214         dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
4215                         __func__, hba->init_prefetch_data.icc_level);
4216
4217         ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4218                         QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
4219                         &hba->init_prefetch_data.icc_level);
4220
4221         if (ret)
4222                 dev_err(hba->dev,
4223                         "%s: Failed configuring bActiveICCLevel = %d ret = %d",
4224                         __func__, hba->init_prefetch_data.icc_level, ret);
4225
4226 }
4227
4228 /**
4229  * ufshcd_scsi_add_wlus - Adds required W-LUs
4230  * @hba: per-adapter instance
4231  *
4232  * UFS device specification requires the UFS devices to support 4 well known
4233  * logical units:
4234  *      "REPORT_LUNS" (address: 01h)
4235  *      "UFS Device" (address: 50h)
4236  *      "RPMB" (address: 44h)
4237  *      "BOOT" (address: 30h)
4238  * UFS device's power management needs to be controlled by "POWER CONDITION"
4239  * field of SSU (START STOP UNIT) command. But this "power condition" field
4240  * will take effect only when it's sent to the "UFS device" well known logical
4241  * unit, hence we require the scsi_device instance to represent this logical unit
4242  * in order for the UFS host driver to send the SSU command for power management.
4243  *
4244  * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
4245  * Block) LU so a user space process can control this LU. User space may also
4246  * want to have access to the BOOT LU.
4247  *
4248  * This function adds scsi device instances for each of the well known LUs
4249  * (except the "REPORT LUNS" LU).
4250  *
4251  * Returns zero on success (all required W-LUs are added successfully),
4252  * non-zero error value on failure (if failed to add any of the required W-LU).
4253  */
4254 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
4255 {
4256         int ret = 0;
4257         struct scsi_device *sdev_rpmb;
4258         struct scsi_device *sdev_boot;
4259
4260         hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
4261                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
4262         if (IS_ERR(hba->sdev_ufs_device)) {
4263                 ret = PTR_ERR(hba->sdev_ufs_device);
4264                 hba->sdev_ufs_device = NULL;
4265                 goto out;
4266         }
4267         scsi_device_put(hba->sdev_ufs_device);
4268
4269         sdev_boot = __scsi_add_device(hba->host, 0, 0,
4270                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
4271         if (IS_ERR(sdev_boot)) {
4272                 ret = PTR_ERR(sdev_boot);
4273                 goto remove_sdev_ufs_device;
4274         }
4275         scsi_device_put(sdev_boot);
4276
4277         sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
4278                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
4279         if (IS_ERR(sdev_rpmb)) {
4280                 ret = PTR_ERR(sdev_rpmb);
4281                 goto remove_sdev_boot;
4282         }
4283         scsi_device_put(sdev_rpmb);
4284         goto out;
4285
4286 remove_sdev_boot:
4287         scsi_remove_device(sdev_boot);
4288 remove_sdev_ufs_device:
4289         scsi_remove_device(hba->sdev_ufs_device);
4290 out:
4291         return ret;
4292 }
4293
4294 /**
4295  * ufshcd_probe_hba - probe hba to detect device and initialize
4296  * @hba: per-adapter instance
4297  *
4298  * Execute link-startup and verify device initialization
4299  */
4300 static int ufshcd_probe_hba(struct ufs_hba *hba)
4301 {
4302         int ret;
4303
4304         ret = ufshcd_link_startup(hba);
4305         if (ret)
4306                 goto out;
4307
4308         ufshcd_init_pwr_info(hba);
4309
4310         /* UniPro link is active now */
4311         ufshcd_set_link_active(hba);
4312
4313         ret = ufshcd_verify_dev_init(hba);
4314         if (ret)
4315                 goto out;
4316
4317         ret = ufshcd_complete_dev_init(hba);
4318         if (ret)
4319                 goto out;
4320
4321         /* UFS device is also active now */
4322         ufshcd_set_ufs_dev_active(hba);
4323         ufshcd_force_reset_auto_bkops(hba);
4324         hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
4325         hba->wlun_dev_clr_ua = true;
4326
4327         if (ufshcd_get_max_pwr_mode(hba)) {
4328                 dev_err(hba->dev,
4329                         "%s: Failed getting max supported power mode\n",
4330                         __func__);
4331         } else {
4332                 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
4333                 if (ret)
4334                         dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
4335                                         __func__, ret);
4336         }
4337
4338         /*
4339          * If we are in error handling context or in power management callbacks
4340          * context, no need to scan the host
4341          */
4342         if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
4343                 bool flag;
4344
4345                 /* clear any previous UFS device information */
4346                 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
4347                 if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4348                                        QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
4349                         hba->dev_info.f_power_on_wp_en = flag;
4350
4351                 if (!hba->is_init_prefetch)
4352                         ufshcd_init_icc_levels(hba);
4353
4354                 /* Add required well known logical units to scsi mid layer */
4355                 if (ufshcd_scsi_add_wlus(hba))
4356                         goto out;
4357
4358                 scsi_scan_host(hba->host);
4359                 pm_runtime_put_sync(hba->dev);
4360         }
4361
4362         if (!hba->is_init_prefetch)
4363                 hba->is_init_prefetch = true;
4364
4365         /* Resume devfreq after UFS device is detected */
4366         if (ufshcd_is_clkscaling_enabled(hba))
4367                 devfreq_resume_device(hba->devfreq);
4368
4369 out:
4370         /*
4371          * If we failed to initialize the device or the device is not
4372          * present, turn off the power/clocks etc.
4373          */
4374         if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
4375                 pm_runtime_put_sync(hba->dev);
4376                 ufshcd_hba_exit(hba);
4377         }
4378
4379         return ret;
4380 }
4381
4382 /**
4383  * ufshcd_async_scan - asynchronous execution for probing hba
4384  * @data: data pointer to pass to this function
4385  * @cookie: cookie data
4386  */
4387 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
4388 {
4389         struct ufs_hba *hba = (struct ufs_hba *)data;
4390
4391         ufshcd_probe_hba(hba);
4392 }
4393
4394 static struct scsi_host_template ufshcd_driver_template = {
4395         .module                 = THIS_MODULE,
4396         .name                   = UFSHCD,
4397         .proc_name              = UFSHCD,
4398         .queuecommand           = ufshcd_queuecommand,
4399         .slave_alloc            = ufshcd_slave_alloc,
4400         .slave_configure        = ufshcd_slave_configure,
4401         .slave_destroy          = ufshcd_slave_destroy,
4402         .change_queue_depth     = ufshcd_change_queue_depth,
4403         .eh_abort_handler       = ufshcd_abort,
4404         .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
4405         .eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
4406         .this_id                = -1,
4407         .sg_tablesize           = SG_ALL,
4408         .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
4409         .can_queue              = UFSHCD_CAN_QUEUE,
4410         .max_host_blocked       = 1,
4411         .track_queue_depth      = 1,
4412 };
4413
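/**
 * ufshcd_config_vreg_load - set the expected load on a UFS regulator
 * @dev: pointer to the device handle
 * @vreg: regulator to configure
 * @ua: load to request, in micro-amps
 *
 * Regulators without a configured current limit (max_uA == 0) are skipped.
 *
 * Returns 0 on success, non-zero on failure
 */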
4414 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
4415                                    int ua)
4416 {
4417         int ret;
4418
4419         if (!vreg)
4420                 return 0;
4421
4422         /*
4423          * The "set_load" operation is only required for regulators that
4424          * have a current limit configured. Otherwise a zero max_uA may
4425          * cause unexpected behavior when the regulator is enabled or set
4426          * to high power mode.
4427          */
4428         if (!vreg->max_uA)
4429                 return 0;
4430
4431         ret = regulator_set_load(vreg->reg, ua);
4432         if (ret < 0) {
4433                 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
4434                                 __func__, vreg->name, ua, ret);
4435         }
4436
4437         return ret;
4438 }
4439
4440 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
4441                                          struct ufs_vreg *vreg)
4442 {
4443         return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
4444 }
4445
4446 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
4447                                          struct ufs_vreg *vreg)
4448 {
4449         return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
4450 }
4451
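/**
 * ufshcd_config_vreg - apply voltage and load settings to a UFS regulator
 * @dev: pointer to the device handle
 * @vreg: regulator to configure
 * @on: true to apply the active voltage/load settings, false to drop them
 *
 * Returns 0 on success, non-zero on failure
 */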
4452 static int ufshcd_config_vreg(struct device *dev,
4453                 struct ufs_vreg *vreg, bool on)
4454 {
4455         int ret = 0;
4456         struct regulator *reg;
4457         const char *name;
4458         int min_uV, uA_load;
4459
4460         BUG_ON(!vreg);
4461
4462         reg = vreg->reg;
4463         name = vreg->name;
4464
4465         if (regulator_count_voltages(reg) > 0) {
4466                 if (vreg->min_uV && vreg->max_uV) {
4467                         min_uV = on ? vreg->min_uV : 0;
4468                         ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
4469                         if (ret) {
4470                                 dev_err(dev,
4471                                         "%s: %s set voltage failed, err=%d\n",
4472                                         __func__, name, ret);
4473                                 goto out;
4474                         }
4475                 }
4476
4477                 uA_load = on ? vreg->max_uA : 0;
4478                 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
4479                 if (ret)
4480                         goto out;
4481         }
4482 out:
4483         return ret;
4484 }
4485
4486 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
4487 {
4488         int ret = 0;
4489
4490         if (!vreg || vreg->enabled)
4491                 goto out;
4492
4493         ret = ufshcd_config_vreg(dev, vreg, true);
4494         if (!ret)
4495                 ret = regulator_enable(vreg->reg);
4496
4497         if (!ret)
4498                 vreg->enabled = true;
4499         else
4500                 dev_err(dev, "%s: %s enable failed, err=%d\n",
4501                                 __func__, vreg->name, ret);
4502 out:
4503         return ret;
4504 }
4505
4506 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
4507 {
4508         int ret = 0;
4509
4510         if (!vreg || !vreg->enabled)
4511                 goto out;
4512
4513         ret = regulator_disable(vreg->reg);
4514
4515         if (!ret) {
4516                 /* ignore errors on applying disable config */
4517                 ufshcd_config_vreg(dev, vreg, false);
4518                 vreg->enabled = false;
4519         } else {
4520                 dev_err(dev, "%s: %s disable failed, err=%d\n",
4521                                 __func__, vreg->name, ret);
4522         }
4523 out:
4524         return ret;
4525 }
4526
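/**
 * ufshcd_setup_vreg - enable or disable the UFS device power rails
 * @hba: per adapter instance
 * @on: true to enable VCC, VCCQ and VCCQ2, false to disable them
 *
 * On failure all three rails are switched back off.
 *
 * Returns 0 on success, non-zero on failure
 */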
4527 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
4528 {
4529         int ret = 0;
4530         struct device *dev = hba->dev;
4531         struct ufs_vreg_info *info = &hba->vreg_info;
4532
4533         if (!info)
4534                 goto out;
4535
4536         ret = ufshcd_toggle_vreg(dev, info->vcc, on);
4537         if (ret)
4538                 goto out;
4539
4540         ret = ufshcd_toggle_vreg(dev, info->vccq, on);
4541         if (ret)
4542                 goto out;
4543
4544         ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
4545         if (ret)
4546                 goto out;
4547
4548 out:
4549         if (ret) {
4550                 ufshcd_toggle_vreg(dev, info->vccq2, false);
4551                 ufshcd_toggle_vreg(dev, info->vccq, false);
4552                 ufshcd_toggle_vreg(dev, info->vcc, false);
4553         }
4554         return ret;
4555 }
4556
4557 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
4558 {
4559         struct ufs_vreg_info *info = &hba->vreg_info;
4560
4561         if (info)
4562                 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
4563
4564         return 0;
4565 }
4566
4567 static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
4568 {
4569         int ret = 0;
4570
4571         if (!vreg)
4572                 goto out;
4573
4574         vreg->reg = devm_regulator_get(dev, vreg->name);
4575         if (IS_ERR(vreg->reg)) {
4576                 ret = PTR_ERR(vreg->reg);
4577                 dev_err(dev, "%s: %s get failed, err=%d\n",
4578                                 __func__, vreg->name, ret);
4579         }
4580 out:
4581         return ret;
4582 }
4583
4584 static int ufshcd_init_vreg(struct ufs_hba *hba)
4585 {
4586         int ret = 0;
4587         struct device *dev = hba->dev;
4588         struct ufs_vreg_info *info = &hba->vreg_info;
4589
4590         if (!info)
4591                 goto out;
4592
4593         ret = ufshcd_get_vreg(dev, info->vcc);
4594         if (ret)
4595                 goto out;
4596
4597         ret = ufshcd_get_vreg(dev, info->vccq);
4598         if (ret)
4599                 goto out;
4600
4601         ret = ufshcd_get_vreg(dev, info->vccq2);
4602 out:
4603         return ret;
4604 }
4605
4606 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
4607 {
4608         struct ufs_vreg_info *info = &hba->vreg_info;
4609
4610         if (info)
4611                 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
4612
4613         return 0;
4614 }
4615
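/**
 * __ufshcd_setup_clocks - enable or disable the host controller clocks
 * @hba: per adapter instance
 * @on: true to enable the clocks, false to disable them
 * @skip_ref_clk: if true, leave the device reference clock ("ref_clk") as is
 *
 * Also invokes the vendor specific setup_clocks callback and, on a successful
 * enable, marks the clock gating state as CLKS_ON.
 *
 * Returns 0 on success, non-zero on failure
 */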
4616 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
4617                                         bool skip_ref_clk)
4618 {
4619         int ret = 0;
4620         struct ufs_clk_info *clki;
4621         struct list_head *head = &hba->clk_list_head;
4622         unsigned long flags;
4623
4624         if (!head || list_empty(head))
4625                 goto out;
4626
4627         list_for_each_entry(clki, head, list) {
4628                 if (!IS_ERR_OR_NULL(clki->clk)) {
4629                         if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
4630                                 continue;
4631
4632                         if (on && !clki->enabled) {
4633                                 ret = clk_prepare_enable(clki->clk);
4634                                 if (ret) {
4635                                         dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
4636                                                 __func__, clki->name, ret);
4637                                         goto out;
4638                                 }
4639                         } else if (!on && clki->enabled) {
4640                                 clk_disable_unprepare(clki->clk);
4641                         }
4642                         clki->enabled = on;
4643                         dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
4644                                         clki->name, on ? "en" : "dis");
4645                 }
4646         }
4647
4648         ret = ufshcd_vops_setup_clocks(hba, on);
4649 out:
4650         if (ret) {
4651                 list_for_each_entry(clki, head, list) {
4652                         if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
4653                                 clk_disable_unprepare(clki->clk);
4654                 }
4655         } else if (on) {
4656                 spin_lock_irqsave(hba->host->host_lock, flags);
4657                 hba->clk_gating.state = CLKS_ON;
4658                 spin_unlock_irqrestore(hba->host->host_lock, flags);
4659         }
4660         return ret;
4661 }
4662
4663 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
4664 {
4665         return  __ufshcd_setup_clocks(hba, on, false);
4666 }
4667
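/**
 * ufshcd_init_clocks - acquire the host controller clocks
 * @hba: per adapter instance
 *
 * Looks up every clock listed in hba->clk_list_head via devm_clk_get() and
 * sets each one that specifies a max_freq to that frequency.
 *
 * Returns 0 on success, non-zero on failure
 */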
4668 static int ufshcd_init_clocks(struct ufs_hba *hba)
4669 {
4670         int ret = 0;
4671         struct ufs_clk_info *clki;
4672         struct device *dev = hba->dev;
4673         struct list_head *head = &hba->clk_list_head;
4674
4675         if (!head || list_empty(head))
4676                 goto out;
4677
4678         list_for_each_entry(clki, head, list) {
4679                 if (!clki->name)
4680                         continue;
4681
4682                 clki->clk = devm_clk_get(dev, clki->name);
4683                 if (IS_ERR(clki->clk)) {
4684                         ret = PTR_ERR(clki->clk);
4685                         dev_err(dev, "%s: %s clk get failed, %d\n",
4686                                         __func__, clki->name, ret);
4687                         goto out;
4688                 }
4689
4690                 if (clki->max_freq) {
4691                         ret = clk_set_rate(clki->clk, clki->max_freq);
4692                         if (ret) {
4693                                 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
4694                                         __func__, clki->name,
4695                                         clki->max_freq, ret);
4696                                 goto out;
4697                         }
4698                         clki->curr_freq = clki->max_freq;
4699                 }
4700                 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
4701                                 clki->name, clk_get_rate(clki->clk));
4702         }
4703 out:
4704         return ret;
4705 }
4706
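/**
 * ufshcd_variant_hba_init - run the vendor specific host initialization
 * @hba: per adapter instance
 *
 * Calls the variant's init and setup_regulators callbacks, if a variant
 * (hba->vops) is registered.
 *
 * Returns 0 on success, non-zero on failure
 */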
4707 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
4708 {
4709         int err = 0;
4710
4711         if (!hba->vops)
4712                 goto out;
4713
4714         err = ufshcd_vops_init(hba);
4715         if (err)
4716                 goto out;
4717
4718         err = ufshcd_vops_setup_regulators(hba, true);
4719         if (err)
4720                 goto out_exit;
4721
4722         goto out;
4723
4724 out_exit:
4725         ufshcd_vops_exit(hba);
4726 out:
4727         if (err)
4728                 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
4729                         __func__, ufshcd_get_var_name(hba), err);
4730         return err;
4731 }
4732
4733 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
4734 {
4735         if (!hba->vops)
4736                 return;
4737
4738         ufshcd_vops_setup_clocks(hba, false);
4739
4740         ufshcd_vops_setup_regulators(hba, false);
4741
4742         ufshcd_vops_exit(hba);
4743 }
4744
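/**
 * ufshcd_hba_init - power up and initialize the host controller resources
 * @hba: per adapter instance
 *
 * Brings up the host controller supply, the clocks, the UFS device supplies
 * and the vendor specific variant, undoing earlier steps on failure.
 *
 * Returns 0 on success, non-zero on failure
 */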
4745 static int ufshcd_hba_init(struct ufs_hba *hba)
4746 {
4747         int err;
4748
4749         /*
4750          * Handle host controller power separately from the UFS device power
4751          * rails, as that makes it easier to control host controller power
4752          * collapse, which is different from UFS device power collapse.
4753          * Also, enable the host controller power before we go ahead with the
4754          * rest of the initialization here.
4755          */
4756         err = ufshcd_init_hba_vreg(hba);
4757         if (err)
4758                 goto out;
4759
4760         err = ufshcd_setup_hba_vreg(hba, true);
4761         if (err)
4762                 goto out;
4763
4764         err = ufshcd_init_clocks(hba);
4765         if (err)
4766                 goto out_disable_hba_vreg;
4767
4768         err = ufshcd_setup_clocks(hba, true);
4769         if (err)
4770                 goto out_disable_hba_vreg;
4771
4772         err = ufshcd_init_vreg(hba);
4773         if (err)
4774                 goto out_disable_clks;
4775
4776         err = ufshcd_setup_vreg(hba, true);
4777         if (err)
4778                 goto out_disable_clks;
4779
4780         err = ufshcd_variant_hba_init(hba);
4781         if (err)
4782                 goto out_disable_vreg;
4783
4784         hba->is_powered = true;
4785         goto out;
4786
4787 out_disable_vreg:
4788         ufshcd_setup_vreg(hba, false);
4789 out_disable_clks:
4790         ufshcd_setup_clocks(hba, false);
4791 out_disable_hba_vreg:
4792         ufshcd_setup_hba_vreg(hba, false);
4793 out:
4794         return err;
4795 }
4796
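/**
 * ufshcd_hba_exit - release the resources acquired by ufshcd_hba_init()
 * @hba: per adapter instance
 */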
4797 static void ufshcd_hba_exit(struct ufs_hba *hba)
4798 {
4799         if (hba->is_powered) {
4800                 ufshcd_variant_hba_exit(hba);
4801                 ufshcd_setup_vreg(hba, false);
4802                 ufshcd_setup_clocks(hba, false);
4803                 ufshcd_setup_hba_vreg(hba, false);
4804                 hba->is_powered = false;
4805         }
4806 }
4807
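/**
 * ufshcd_send_request_sense - issue a REQUEST SENSE command to a W-LU
 * @hba: per adapter instance
 * @sdp: scsi device of the well known logical unit
 *
 * Used to clear a pending unit attention condition before sending
 * START STOP UNIT.
 *
 * Returns 0 on success, non-zero on failure
 */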
4808 static int
4809 ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
4810 {
4811         unsigned char cmd[6] = {REQUEST_SENSE,
4812                                 0,
4813                                 0,
4814                                 0,
4815                                 SCSI_SENSE_BUFFERSIZE,
4816                                 0};
4817         char *buffer;
4818         int ret;
4819
4820         buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
4821         if (!buffer) {
4822                 ret = -ENOMEM;
4823                 goto out;
4824         }
4825
4826         ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
4827                                 SCSI_SENSE_BUFFERSIZE, NULL,
4828                                 msecs_to_jiffies(1000), 3, NULL, REQ_PM);
4829         if (ret)
4830                 pr_err("%s: failed with err %d\n", __func__, ret);
4831
4832         kfree(buffer);
4833 out:
4834         return ret;
4835 }
4836
4837 /**
4838  * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
4839  *                           power mode
4840  * @hba: per adapter instance
4841  * @pwr_mode: device power mode to set
4842  *
4843  * Returns 0 if requested power mode is set successfully
4844  * Returns non-zero if failed to set the requested power mode
4845  */
4846 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
4847                                      enum ufs_dev_pwr_mode pwr_mode)
4848 {
4849         unsigned char cmd[6] = { START_STOP };
4850         struct scsi_sense_hdr sshdr;
4851         struct scsi_device *sdp;
4852         unsigned long flags;
4853         int ret;
4854
4855         spin_lock_irqsave(hba->host->host_lock, flags);
4856         sdp = hba->sdev_ufs_device;
4857         if (sdp) {
4858                 ret = scsi_device_get(sdp);
4859                 if (!ret && !scsi_device_online(sdp)) {
4860                         ret = -ENODEV;
4861                         scsi_device_put(sdp);
4862                 }
4863         } else {
4864                 ret = -ENODEV;
4865         }
4866         spin_unlock_irqrestore(hba->host->host_lock, flags);
4867
4868         if (ret)
4869                 return ret;
4870
4871         /*
4872          * If scsi commands fail, the scsi mid-layer schedules scsi error-
4873          * handling, which would wait for host to be resumed. Since we know
4874          * we are functional while we are here, skip host resume in error
4875          * handling context.
4876          */
4877         hba->host->eh_noresume = 1;
4878         if (hba->wlun_dev_clr_ua) {
4879                 ret = ufshcd_send_request_sense(hba, sdp);
4880                 if (ret)
4881                         goto out;
4882                 /* Unit attention condition is cleared now */
4883                 hba->wlun_dev_clr_ua = false;
4884         }
4885
4886         cmd[4] = pwr_mode << 4;
4887
4888         /*
4889          * This function is generally called from the power management
4890          * callbacks, hence set the REQ_PM flag so that it doesn't resume the
4891          * already suspended children.
4892          */
4893         ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
4894                                      START_STOP_TIMEOUT, 0, NULL, REQ_PM);
4895         if (ret) {
4896                 sdev_printk(KERN_WARNING, sdp,
4897                             "START_STOP failed for power mode: %d, result %x\n",
4898                             pwr_mode, ret);
4899                 if (driver_byte(ret) & DRIVER_SENSE)
4900                         scsi_print_sense_hdr(sdp, NULL, &sshdr);
4901         }
4902
4903         if (!ret)
4904                 hba->curr_dev_pwr_mode = pwr_mode;
4905 out:
4906         scsi_device_put(sdp);
4907         hba->host->eh_noresume = 0;
4908         return ret;
4909 }
4910
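/**
 * ufshcd_link_state_transition - put the UniPro link into the requested state
 * @hba: per adapter instance
 * @req_link_state: requested link state (active, Hibern8 or off)
 * @check_for_bkops: if set, refuse to turn the link off while auto-bkops is
 *                   enabled, since that would also power off the device
 *
 * Returns 0 on success, non-zero on failure
 */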
4911 static int ufshcd_link_state_transition(struct ufs_hba *hba,
4912                                         enum uic_link_state req_link_state,
4913                                         int check_for_bkops)
4914 {
4915         int ret = 0;
4916
4917         if (req_link_state == hba->uic_link_state)
4918                 return 0;
4919
4920         if (req_link_state == UIC_LINK_HIBERN8_STATE) {
4921                 ret = ufshcd_uic_hibern8_enter(hba);
4922                 if (!ret)
4923                         ufshcd_set_link_hibern8(hba);
4924                 else
4925                         goto out;
4926         }
4927         /*
4928          * If autobkops is enabled, link can't be turned off because
4929          * turning off the link would also turn off the device.
4930          */
4931         else if ((req_link_state == UIC_LINK_OFF_STATE) &&
4932                    (!check_for_bkops || (check_for_bkops &&
4933                     !hba->auto_bkops_enabled))) {
4934                 /*
4935                  * Change controller state to "reset state" which
4936                  * should also put the link in off/reset state
4937                  */
4938                 ufshcd_hba_stop(hba);
4939                 /*
4940                  * TODO: Check if we need any delay to make sure that
4941                  * controller is reset
4942                  */
4943                 ufshcd_set_link_off(hba);
4944         }
4945
4946 out:
4947         return ret;
4948 }
4949
4950 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
4951 {
4952         /*
4953          * If the UFS device is in UFS_Sleep power mode, turn off the VCC
4954          * rail to save some power.
4955          *
4956          * If UFS device and link are in OFF state, all power supplies (VCC,
4957          * VCCQ, VCCQ2) can be turned off if power on write protect is not
4958          * required. If UFS link is inactive (Hibern8 or OFF state) and device
4959          * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
4960          *
4961          * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
4962          * in low power state which would save some power.
4963          */
4964         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
4965             !hba->dev_info.is_lu_power_on_wp) {
4966                 ufshcd_setup_vreg(hba, false);
4967         } else if (!ufshcd_is_ufs_dev_active(hba)) {
4968                 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
4969                 if (!ufshcd_is_link_active(hba)) {
4970                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
4971                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
4972                 }
4973         }
4974 }
4975
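/**
 * ufshcd_vreg_set_hpm - bring the UFS device power rails back to high power
 * @hba: per adapter instance
 *
 * Mirror of ufshcd_vreg_set_lpm(), used on resume. Earlier steps are undone
 * if a later one fails.
 *
 * Returns 0 on success, non-zero on failure
 */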
4976 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
4977 {
4978         int ret = 0;
4979
4980         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
4981             !hba->dev_info.is_lu_power_on_wp) {
4982                 ret = ufshcd_setup_vreg(hba, true);
4983         } else if (!ufshcd_is_ufs_dev_active(hba)) {
4984                 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
4985                 if (!ret && !ufshcd_is_link_active(hba)) {
4986                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
4987                         if (ret)
4988                                 goto vcc_disable;
4989                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
4990                         if (ret)
4991                                 goto vccq_lpm;
4992                 }
4993         }
4994         goto out;
4995
4996 vccq_lpm:
4997         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
4998 vcc_disable:
4999         ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
5000 out:
5001         return ret;
5002 }
5003
5004 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
5005 {
5006         if (ufshcd_is_link_off(hba))
5007                 ufshcd_setup_hba_vreg(hba, false);
5008 }
5009
5010 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
5011 {
5012         if (ufshcd_is_link_off(hba))
5013                 ufshcd_setup_hba_vreg(hba, true);
5014 }
5015
5016 /**
5017  * ufshcd_suspend - helper function for suspend operations
5018  * @hba: per adapter instance
5019  * @pm_op: desired low power operation type
5020  *
5021  * This function will try to put the UFS device and link into low power
5022  * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
5023  * (System PM level).
5024  *
5025  * If this function is called during shutdown, it will make sure that
5026  * both UFS device and UFS link are powered off.
5027  *
5028  * NOTE: UFS device & link must be active before we enter in this function.
5029  *
5030  * Returns 0 for success and non-zero for failure
5031  */
5032 static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
5033 {
5034         int ret = 0;
5035         enum ufs_pm_level pm_lvl;
5036         enum ufs_dev_pwr_mode req_dev_pwr_mode;
5037         enum uic_link_state req_link_state;
5038
5039         hba->pm_op_in_progress = 1;
5040         if (!ufshcd_is_shutdown_pm(pm_op)) {
5041                 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
5042                          hba->rpm_lvl : hba->spm_lvl;
5043                 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
5044                 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
5045         } else {
5046                 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
5047                 req_link_state = UIC_LINK_OFF_STATE;
5048         }
5049
5050         /*
5051          * If we can't transition into any of the low power modes
5052          * just gate the clocks.
5053          */
5054         ufshcd_hold(hba, false);
5055         hba->clk_gating.is_suspended = true;
5056
5057         if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
5058                         req_link_state == UIC_LINK_ACTIVE_STATE) {
5059                 goto disable_clks;
5060         }
5061
5062         if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
5063             (req_link_state == hba->uic_link_state))
5064                 goto out;
5065
5066         /* UFS device & link must be active before we enter in this function */
5067         if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
5068                 ret = -EINVAL;
5069                 goto out;
5070         }
5071
5072         if (ufshcd_is_runtime_pm(pm_op)) {
5073                 if (ufshcd_can_autobkops_during_suspend(hba)) {
5074                         /*
5075                          * The device is idle with no requests in the queue,
5076                          * allow background operations if bkops status shows
5077                          * that performance might be impacted.
5078                          */
5079                         ret = ufshcd_urgent_bkops(hba);
5080                         if (ret)
5081                                 goto enable_gating;
5082                 } else {
5083                         /* make sure that auto bkops is disabled */
5084                         ufshcd_disable_auto_bkops(hba);
5085                 }
5086         }
5087
5088         if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
5089              ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
5090                !ufshcd_is_runtime_pm(pm_op))) {
5091                 /* ensure that bkops is disabled */
5092                 ufshcd_disable_auto_bkops(hba);
5093                 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
5094                 if (ret)
5095                         goto enable_gating;
5096         }
5097
5098         ret = ufshcd_link_state_transition(hba, req_link_state, 1);
5099         if (ret)
5100                 goto set_dev_active;
5101
5102         ufshcd_vreg_set_lpm(hba);
5103
5104 disable_clks:
5105         /*
5106          * The clock scaling needs access to controller registers. Hence, wait
5107          * for pending clock scaling work to be done before clocks are
5108          * turned off.
5109          */
5110         if (ufshcd_is_clkscaling_enabled(hba)) {
5111                 devfreq_suspend_device(hba->devfreq);
5112                 hba->clk_scaling.window_start_t = 0;
5113         }
5114         /*
5115          * Call vendor specific suspend callback. As these callbacks may access
5116          * vendor specific host controller register space, call them while the
5117          * host clocks are still ON (i.e. before they are turned off).
5118          */
5119         ret = ufshcd_vops_suspend(hba, pm_op);
5120         if (ret)
5121                 goto set_link_active;
5122
5123         ret = ufshcd_vops_setup_clocks(hba, false);
5124         if (ret)
5125                 goto vops_resume;
5126
5127         if (!ufshcd_is_link_active(hba))
5128                 ufshcd_setup_clocks(hba, false);
5129         else
5130                 /* If link is active, device ref_clk can't be switched off */
5131                 __ufshcd_setup_clocks(hba, false, true);
5132
5133         hba->clk_gating.state = CLKS_OFF;
5134         /*
5135          * Disable the host irq as there won't be any host controller
5136          * transactions expected till resume.
5137          */
5138         ufshcd_disable_irq(hba);
5139         /* Put the host controller in low power mode if possible */
5140         ufshcd_hba_vreg_set_lpm(hba);
5141         goto out;
5142
5143 vops_resume:
5144         ufshcd_vops_resume(hba, pm_op);
5145 set_link_active:
5146         ufshcd_vreg_set_hpm(hba);
5147         if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
5148                 ufshcd_set_link_active(hba);
5149         else if (ufshcd_is_link_off(hba))
5150                 ufshcd_host_reset_and_restore(hba);
5151 set_dev_active:
5152         if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
5153                 ufshcd_disable_auto_bkops(hba);
5154 enable_gating:
5155         hba->clk_gating.is_suspended = false;
5156         ufshcd_release(hba);
5157 out:
5158         hba->pm_op_in_progress = 0;
5159         return ret;
5160 }
5161
5162 /**
5163  * ufshcd_resume - helper function for resume operations
5164  * @hba: per adapter instance
5165  * @pm_op: runtime PM or system PM
5166  *
5167  * This function basically brings the UFS device, UniPro link and controller
5168  * to active state.
5169  *
5170  * Returns 0 for success and non-zero for failure
5171  */
5172 static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
5173 {
5174         int ret;
5175         enum uic_link_state old_link_state;
5176
5177         hba->pm_op_in_progress = 1;
5178         old_link_state = hba->uic_link_state;
5179
5180         ufshcd_hba_vreg_set_hpm(hba);
5181         /* Make sure clocks are enabled before accessing controller */
5182         ret = ufshcd_setup_clocks(hba, true);
5183         if (ret)
5184                 goto out;
5185
5186         /* enable the host irq as host controller would be active soon */
5187         ret = ufshcd_enable_irq(hba);
5188         if (ret)
5189                 goto disable_irq_and_vops_clks;
5190
5191         ret = ufshcd_vreg_set_hpm(hba);
5192         if (ret)
5193                 goto disable_irq_and_vops_clks;
5194
5195         /*
5196          * Call vendor specific resume callback. As these callbacks may access
5197          * vendor specific host controller register space, call them when the
5198          * host clocks are ON.
5199          */
5200         ret = ufshcd_vops_resume(hba, pm_op);
5201         if (ret)
5202                 goto disable_vreg;
5203
5204         if (ufshcd_is_link_hibern8(hba)) {
5205                 ret = ufshcd_uic_hibern8_exit(hba);
5206                 if (!ret)
5207                         ufshcd_set_link_active(hba);
5208                 else
5209                         goto vendor_suspend;
5210         } else if (ufshcd_is_link_off(hba)) {
5211                 ret = ufshcd_host_reset_and_restore(hba);
5212                 /*
5213                  * ufshcd_host_reset_and_restore() should have already
5214                  * set the link state as active
5215                  */
5216                 if (ret || !ufshcd_is_link_active(hba))
5217                         goto vendor_suspend;
5218         }
5219
5220         if (!ufshcd_is_ufs_dev_active(hba)) {
5221                 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
5222                 if (ret)
5223                         goto set_old_link_state;
5224         }
5225
5226         if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
5227                 ufshcd_enable_auto_bkops(hba);
5228         else
5229                 /*
5230                  * If BKOPs operations are urgently needed at this moment then
5231                  * keep auto-bkops enabled or else disable it.
5232                  */
5233                 ufshcd_urgent_bkops(hba);
5234
5235         hba->clk_gating.is_suspended = false;
5236
5237         if (ufshcd_is_clkscaling_enabled(hba))
5238                 devfreq_resume_device(hba->devfreq);
5239
5240         /* Schedule clock gating in case of no access to UFS device yet */
5241         ufshcd_release(hba);
5242         goto out;
5243
5244 set_old_link_state:
5245         ufshcd_link_state_transition(hba, old_link_state, 0);
5246 vendor_suspend:
5247         ufshcd_vops_suspend(hba, pm_op);
5248 disable_vreg:
5249         ufshcd_vreg_set_lpm(hba);
5250 disable_irq_and_vops_clks:
5251         ufshcd_disable_irq(hba);
5252         ufshcd_setup_clocks(hba, false);
5253 out:
5254         hba->pm_op_in_progress = 0;
5255         return ret;
5256 }
5257
5258 /**
5259  * ufshcd_system_suspend - system suspend routine
5260  * @hba: per adapter instance
5262  *
5263  * Check the description of ufshcd_suspend() function for more details.
5264  *
5265  * Returns 0 for success and non-zero for failure
5266  */
5267 int ufshcd_system_suspend(struct ufs_hba *hba)
5268 {
5269         int ret = 0;
5270
5271         if (!hba || !hba->is_powered)
5272                 return 0;
5273
5274         if (pm_runtime_suspended(hba->dev)) {
5275                 if (hba->rpm_lvl == hba->spm_lvl)
5276                         /*
5277                          * There is a possibility that the device may still
5278                          * be in active state during runtime suspend.
5279                          */
5280                         if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
5281                             hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
5282                                 goto out;
5283
5284                 /*
5285                  * UFS device and/or UFS link low power states during runtime
5286                  * suspend seem to be different from what is expected during
5287                  * system suspend. Hence runtime resume the device & link and
5288                  * let the system suspend low power states take effect.
5289                  * TODO: If resume takes a long time, we might optimize it in
5290                  * future by not resuming everything if possible.
5291                  */
5292                 ret = ufshcd_runtime_resume(hba);
5293                 if (ret)
5294                         goto out;
5295         }
5296
5297         ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
5298 out:
5299         if (!ret)
5300                 hba->is_sys_suspended = true;
5301         return ret;
5302 }
5303 EXPORT_SYMBOL(ufshcd_system_suspend);
5304
5305 /**
5306  * ufshcd_system_resume - system resume routine
5307  * @hba: per adapter instance
5308  *
5309  * Returns 0 for success and non-zero for failure
5310  */
5311
5312 int ufshcd_system_resume(struct ufs_hba *hba)
5313 {
5314         if (!hba)
5315                 return -EINVAL;
5316
5317         if (!hba->is_powered || pm_runtime_suspended(hba->dev))
5318                 /*
5319                  * Let the runtime resume take care of resuming
5320                  * if runtime suspended.
5321                  */
5322                 return 0;
5323
5324         return ufshcd_resume(hba, UFS_SYSTEM_PM);
5325 }
5326 EXPORT_SYMBOL(ufshcd_system_resume);
5327
5328 /**
5329  * ufshcd_runtime_suspend - runtime suspend routine
5330  * @hba: per adapter instance
5331  *
5332  * Check the description of ufshcd_suspend() function for more details.
5333  *
5334  * Returns 0 for success and non-zero for failure
5335  */
5336 int ufshcd_runtime_suspend(struct ufs_hba *hba)
5337 {
5338         if (!hba)
5339                 return -EINVAL;
5340
5341         if (!hba->is_powered)
5342                 return 0;
5343
5344         return ufshcd_suspend(hba, UFS_RUNTIME_PM);
5345 }
5346 EXPORT_SYMBOL(ufshcd_runtime_suspend);
5347
5348 /**
5349  * ufshcd_runtime_resume - runtime resume routine
5350  * @hba: per adapter instance
5351  *
5352  * This function basically brings the UFS device, UniPro link and controller
5353  * to active state. The following operations are done in this function:
5354  *
5355  * 1. Turn on all the controller related clocks
5356  * 2. Bring the UniPro link out of Hibernate state
5357  * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
5358  *    to active state.
5359  * 4. If auto-bkops is enabled on the device, disable it.
5360  *
5361  * So the following would be the possible power state after this function
5362  * returns successfully:
5363  *      S1: UFS device in Active state with VCC rail ON
5364  *          UniPro link in Active state
5365  *          All the UFS/UniPro controller clocks are ON
5366  *
5367  * Returns 0 for success and non-zero for failure
5368  */
5369 int ufshcd_runtime_resume(struct ufs_hba *hba)
5370 {
5371         if (!hba)
5372                 return -EINVAL;
5373
5374         if (!hba->is_powered)
5375                 return 0;
5376
5377         return ufshcd_resume(hba, UFS_RUNTIME_PM);
5378 }
5379 EXPORT_SYMBOL(ufshcd_runtime_resume);
5380
5381 int ufshcd_runtime_idle(struct ufs_hba *hba)
5382 {
5383         return 0;
5384 }
5385 EXPORT_SYMBOL(ufshcd_runtime_idle);
5386
5387 /**
5388  * ufshcd_shutdown - shutdown routine
5389  * @hba: per adapter instance
5390  *
5391  * This function would power off both UFS device and UFS link.
5392  *
5393  * Returns 0 always to allow force shutdown even in case of errors.
5394  */
5395 int ufshcd_shutdown(struct ufs_hba *hba)
5396 {
5397         int ret = 0;
5398
5399         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
5400                 goto out;
5401
5402         if (pm_runtime_suspended(hba->dev)) {
5403                 ret = ufshcd_runtime_resume(hba);
5404                 if (ret)
5405                         goto out;
5406         }
5407
5408         ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
5409 out:
5410         if (ret)
5411                 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
5412         /* allow force shutdown even in case of errors */
5413         return 0;
5414 }
5415 EXPORT_SYMBOL(ufshcd_shutdown);
5416
5417 /*
5418  * Values permitted 0, 1, 2.
5419  * 0 -> Disable IO latency histograms (default)
5420  * 1 -> Enable IO latency histograms
5421  * 2 -> Zero out IO latency histograms
5422  */
5423 static ssize_t
5424 latency_hist_store(struct device *dev, struct device_attribute *attr,
5425                    const char *buf, size_t count)
5426 {
5427         struct ufs_hba *hba = dev_get_drvdata(dev);
5428         long value;
5429
5430         if (kstrtol(buf, 0, &value))
5431                 return -EINVAL;
5432         if (value == BLK_IO_LAT_HIST_ZERO) {
5433                 memset(&hba->io_lat_read, 0, sizeof(hba->io_lat_read));
5434                 memset(&hba->io_lat_write, 0, sizeof(hba->io_lat_write));
5435         } else if (value == BLK_IO_LAT_HIST_ENABLE ||
5436                  value == BLK_IO_LAT_HIST_DISABLE)
5437                 hba->latency_hist_enabled = value;
5438         return count;
5439 }
5440
5441 ssize_t
5442 latency_hist_show(struct device *dev, struct device_attribute *attr,
5443                   char *buf)
5444 {
5445         struct ufs_hba *hba = dev_get_drvdata(dev);
5446         size_t written_bytes;
5447
5448         written_bytes = blk_latency_hist_show("Read", &hba->io_lat_read,
5449                         buf, PAGE_SIZE);
5450         written_bytes += blk_latency_hist_show("Write", &hba->io_lat_write,
5451                         buf + written_bytes, PAGE_SIZE - written_bytes);
5452
5453         return written_bytes;
5454 }
5455
5456 static DEVICE_ATTR(latency_hist, S_IRUGO | S_IWUSR,
5457                    latency_hist_show, latency_hist_store);
5458
5459 static void
5460 ufshcd_init_latency_hist(struct ufs_hba *hba)
5461 {
5462         if (device_create_file(hba->dev, &dev_attr_latency_hist))
5463                 dev_err(hba->dev, "Failed to create latency_hist sysfs entry\n");
5464 }
5465
5466 static void
5467 ufshcd_exit_latency_hist(struct ufs_hba *hba)
5468 {
5469         device_remove_file(hba->dev, &dev_attr_latency_hist);
5470 }
5471
5472 /**
5473  * ufshcd_remove - de-allocate SCSI host and host memory space
5474  *              data structure memory
5475  * @hba: per adapter instance
5476  */
5477 void ufshcd_remove(struct ufs_hba *hba)
5478 {
5479         scsi_remove_host(hba->host);
5480         /* disable interrupts */
5481         ufshcd_disable_intr(hba, hba->intr_mask);
5482         ufshcd_hba_stop(hba);
5483
5484         ufshcd_exit_clk_gating(hba);
5485         ufshcd_exit_latency_hist(hba);
5486         if (ufshcd_is_clkscaling_enabled(hba))
5487                 devfreq_remove_device(hba->devfreq);
5488         ufshcd_hba_exit(hba);
5489 }
5490 EXPORT_SYMBOL_GPL(ufshcd_remove);
5491
5492 /**
5493  * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
5494  * @hba: pointer to Host Bus Adapter (HBA)
5495  */
5496 void ufshcd_dealloc_host(struct ufs_hba *hba)
5497 {
5498         scsi_host_put(hba->host);
5499 }
5500 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
5501
5502 /**
5503  * ufshcd_set_dma_mask - Set dma mask based on the controller
5504  *                       addressing capability
5505  * @hba: per adapter instance
5506  *
5507  * Returns 0 for success, non-zero for failure
5508  */
5509 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
5510 {
5511         if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
5512                 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
5513                         return 0;
5514         }
5515         return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
5516 }
5517
5518 /**
5519  * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
5520  * @dev: pointer to device handle
5521  * @hba_handle: driver private handle
5522  * Returns 0 on success, non-zero value on failure
5523  */
5524 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
5525 {
5526         struct Scsi_Host *host;
5527         struct ufs_hba *hba;
5528         int err = 0;
5529
5530         if (!dev) {
5531                 dev_err(dev,
5532                 "Invalid device reference: dev is NULL\n");
5533                 err = -ENODEV;
5534                 goto out_error;
5535         }
5536
5537         host = scsi_host_alloc(&ufshcd_driver_template,
5538                                 sizeof(struct ufs_hba));
5539         if (!host) {
5540                 dev_err(dev, "scsi_host_alloc failed\n");
5541                 err = -ENOMEM;
5542                 goto out_error;
5543         }
5544         hba = shost_priv(host);
5545         hba->host = host;
5546         hba->dev = dev;
5547         *hba_handle = hba;
5548
5549 out_error:
5550         return err;
5551 }
5552 EXPORT_SYMBOL(ufshcd_alloc_host);
5553
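/**
 * ufshcd_scale_clks - scale the host controller clocks up or down
 * @hba: per adapter instance
 * @scale_up: true to move each clock to max_freq, false to move it to min_freq
 *
 * The vendor specific clk_scale_notify callback is invoked before and after
 * the frequency change.
 *
 * Returns 0 on success, non-zero on failure
 */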
5554 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
5555 {
5556         int ret = 0;
5557         struct ufs_clk_info *clki;
5558         struct list_head *head = &hba->clk_list_head;
5559
5560         if (!head || list_empty(head))
5561                 goto out;
5562
5563         ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
5564         if (ret)
5565                 return ret;
5566
5567         list_for_each_entry(clki, head, list) {
5568                 if (!IS_ERR_OR_NULL(clki->clk)) {
5569                         if (scale_up && clki->max_freq) {
5570                                 if (clki->curr_freq == clki->max_freq)
5571                                         continue;
5572                                 ret = clk_set_rate(clki->clk, clki->max_freq);
5573                                 if (ret) {
5574                                         dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
5575                                                 __func__, clki->name,
5576                                                 clki->max_freq, ret);
5577                                         break;
5578                                 }
5579                                 clki->curr_freq = clki->max_freq;
5580
5581                         } else if (!scale_up && clki->min_freq) {
5582                                 if (clki->curr_freq == clki->min_freq)
5583                                         continue;
5584                                 ret = clk_set_rate(clki->clk, clki->min_freq);
5585                                 if (ret) {
5586                                         dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
5587                                                 __func__, clki->name,
5588                                                 clki->min_freq, ret);
5589                                         break;
5590                                 }
5591                                 clki->curr_freq = clki->min_freq;
5592                         }
5593                 }
5594                 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
5595                                 clki->name, clk_get_rate(clki->clk));
5596         }
5597
5598         ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
5599
5600 out:
5601         return ret;
5602 }
5603
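/**
 * ufshcd_devfreq_target - devfreq .target callback
 * @dev: pointer to the device handle
 * @freq: requested frequency; UINT_MAX scales the clocks up, 0 scales them down
 * @flags: devfreq flags (unused)
 *
 * Scaling is skipped while error handling is in progress or while clock
 * gating work is running in parallel.
 *
 * Returns 0 on success, non-zero on failure
 */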
5604 static int ufshcd_devfreq_target(struct device *dev,
5605                                 unsigned long *freq, u32 flags)
5606 {
5607         int err = 0;
5608         struct ufs_hba *hba = dev_get_drvdata(dev);
5609         bool release_clk_hold = false;
5610         unsigned long irq_flags;
5611
5612         if (!ufshcd_is_clkscaling_enabled(hba))
5613                 return -EINVAL;
5614
5615         spin_lock_irqsave(hba->host->host_lock, irq_flags);
5616         if (ufshcd_eh_in_progress(hba)) {
5617                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
5618                 return 0;
5619         }
5620
5621         if (ufshcd_is_clkgating_allowed(hba) &&
5622             (hba->clk_gating.state != CLKS_ON)) {
5623                 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
5624                         /* hold the vote until the scaling work is completed */
5625                         hba->clk_gating.active_reqs++;
5626                         release_clk_hold = true;
5627                         hba->clk_gating.state = CLKS_ON;
5628                 } else {
5629                         /*
5630                          * Clock gating work seems to be running in parallel
5631                          * hence skip scaling work to avoid deadlock between
5632                          * current scaling work and gating work.
5633                          */
5634                         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
5635                         return 0;
5636                 }
5637         }
5638         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
5639
5640         if (*freq == UINT_MAX)
5641                 err = ufshcd_scale_clks(hba, true);
5642         else if (*freq == 0)
5643                 err = ufshcd_scale_clks(hba, false);
5644
5645         spin_lock_irqsave(hba->host->host_lock, irq_flags);
5646         if (release_clk_hold)
5647                 __ufshcd_release(hba);
5648         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
5649
5650         return err;
5651 }
5652
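/**
 * ufshcd_devfreq_get_dev_status - devfreq .get_dev_status callback
 * @dev: pointer to the device handle
 * @stat: devfreq status to fill in
 *
 * Reports the busy and total time of the current polling window and then
 * starts a new window.
 *
 * Returns 0 on success, non-zero on failure
 */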
5653 static int ufshcd_devfreq_get_dev_status(struct device *dev,
5654                 struct devfreq_dev_status *stat)
5655 {
5656         struct ufs_hba *hba = dev_get_drvdata(dev);
5657         struct ufs_clk_scaling *scaling = &hba->clk_scaling;
5658         unsigned long flags;
5659
5660         if (!ufshcd_is_clkscaling_enabled(hba))
5661                 return -EINVAL;
5662
5663         memset(stat, 0, sizeof(*stat));
5664
5665         spin_lock_irqsave(hba->host->host_lock, flags);
5666         if (!scaling->window_start_t)
5667                 goto start_window;
5668
5669         if (scaling->is_busy_started)
5670                 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
5671                                         scaling->busy_start_t));
5672
5673         stat->total_time = jiffies_to_usecs((long)jiffies -
5674                                 (long)scaling->window_start_t);
5675         stat->busy_time = scaling->tot_busy_t;
5676 start_window:
5677         scaling->window_start_t = jiffies;
5678         scaling->tot_busy_t = 0;
5679
5680         if (hba->outstanding_reqs) {
5681                 scaling->busy_start_t = ktime_get();
5682                 scaling->is_busy_started = true;
5683         } else {
5684                 scaling->busy_start_t = ktime_set(0, 0);
5685                 scaling->is_busy_started = false;
5686         }
5687         spin_unlock_irqrestore(hba->host->host_lock, flags);
5688         return 0;
5689 }
5690
5691 static struct devfreq_dev_profile ufs_devfreq_profile = {
5692         .polling_ms     = 100,
5693         .target         = ufshcd_devfreq_target,
5694         .get_dev_status = ufshcd_devfreq_get_dev_status,
5695 };
5696
5697 /**
5698  * ufshcd_init - Driver initialization routine
5699  * @hba: per-adapter instance
5700  * @mmio_base: base register address
5701  * @irq: Interrupt line of device
5702  * Returns 0 on success, non-zero value on failure
5703  */
5704 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
5705 {
5706         int err;
5707         struct Scsi_Host *host = hba->host;
5708         struct device *dev = hba->dev;
5709
5710         if (!mmio_base) {
5711                 dev_err(hba->dev,
5712                 "Invalid memory reference: mmio_base is NULL\n");
5713                 err = -ENODEV;
5714                 goto out_error;
5715         }
5716
5717         hba->mmio_base = mmio_base;
5718         hba->irq = irq;
5719
5720         err = ufshcd_hba_init(hba);
5721         if (err)
5722                 goto out_error;
5723
5724         /* Read capabilities registers */
5725         ufshcd_hba_capabilities(hba);
5726
5727         /* Get UFS version supported by the controller */
5728         hba->ufs_version = ufshcd_get_ufs_version(hba);
5729
5730         /* Get Interrupt bit mask per version */
5731         hba->intr_mask = ufshcd_get_intr_mask(hba);
5732
5733         err = ufshcd_set_dma_mask(hba);
5734         if (err) {
5735                 dev_err(hba->dev, "set dma mask failed\n");
5736                 goto out_disable;
5737         }
5738
5739         /* Allocate memory for host memory space */
5740         err = ufshcd_memory_alloc(hba);
5741         if (err) {
5742                 dev_err(hba->dev, "Memory allocation failed\n");
5743                 goto out_disable;
5744         }
5745
5746         /* Configure LRB */
5747         ufshcd_host_memory_configure(hba);
5748
5749         host->can_queue = hba->nutrs;
5750         host->cmd_per_lun = hba->nutrs;
5751         host->max_id = UFSHCD_MAX_ID;
5752         host->max_lun = UFS_MAX_LUNS;
5753         host->max_channel = UFSHCD_MAX_CHANNEL;
5754         host->unique_id = host->host_no;
5755         host->max_cmd_len = MAX_CDB_SIZE;
5756
5757         hba->max_pwr_info.is_valid = false;
5758
5759         /* Initialize wait queue for task management */
5760         init_waitqueue_head(&hba->tm_wq);
5761         init_waitqueue_head(&hba->tm_tag_wq);
5762
5763         /* Initialize work queues */
5764         INIT_WORK(&hba->eh_work, ufshcd_err_handler);
5765         INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
5766
5767         /* Initialize UIC command mutex */
5768         mutex_init(&hba->uic_cmd_mutex);
5769
5770         /* Initialize mutex for device management commands */
5771         mutex_init(&hba->dev_cmd.lock);
5772
5773         /* Initialize device management tag acquire wait queue */
5774         init_waitqueue_head(&hba->dev_cmd.tag_wq);
5775
5776         ufshcd_init_clk_gating(hba);
5777         /* IRQ registration */
5778         err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
5779         if (err) {
5780                 dev_err(hba->dev, "request irq failed\n");
5781                 goto exit_gating;
5782         } else {
5783                 hba->is_irq_enabled = true;
5784         }
5785
5786         err = scsi_add_host(host, hba->dev);
5787         if (err) {
5788                 dev_err(hba->dev, "scsi_add_host failed\n");
5789                 goto exit_gating;
5790         }
5791
5792         /* Host controller enable */
5793         err = ufshcd_hba_enable(hba);
5794         if (err) {
5795                 dev_err(hba->dev, "Host controller enable failed\n");
5796                 goto out_remove_scsi_host;
5797         }
5798
5799         if (ufshcd_is_clkscaling_enabled(hba)) {
5800                 hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
5801                                                    "simple_ondemand", NULL);
5802                 if (IS_ERR(hba->devfreq)) {
5803                         dev_err(hba->dev, "Unable to register with devfreq %ld\n",
5804                                         PTR_ERR(hba->devfreq));
5805                         goto out_remove_scsi_host;
5806                 }
5807                 /* Suspend devfreq until the UFS device is detected */
5808                 devfreq_suspend_device(hba->devfreq);
5809                 hba->clk_scaling.window_start_t = 0;
5810         }
5811
5812         /* Hold auto suspend until async scan completes */
5813         pm_runtime_get_sync(dev);
5814
5815         ufshcd_init_latency_hist(hba);
5816
5817         /*
5818          * The device-initialize-sequence hasn't been invoked yet.
5819          * Set the device to power-off state
5820          */
5821         ufshcd_set_ufs_dev_poweroff(hba);
5822
5823         async_schedule(ufshcd_async_scan, hba);
5824
5825         return 0;
5826
5827 out_remove_scsi_host:
5828         scsi_remove_host(hba->host);
5829 exit_gating:
5830         ufshcd_exit_clk_gating(hba);
5831         ufshcd_exit_latency_hist(hba);
5832 out_disable:
5833         hba->is_irq_enabled = false;
5834         ufshcd_hba_exit(hba);
5835 out_error:
5836         return err;
5837 }
5838 EXPORT_SYMBOL_GPL(ufshcd_init);
5839
5840 MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
5841 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
5842 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
5843 MODULE_LICENSE("GPL");
5844 MODULE_VERSION(UFSHCD_DRIVER_VERSION);