1 /* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
13 #include <linux/init.h>
14 #include <linux/module.h>
16 #include <linux/debugfs.h>
17 #include <linux/delay.h>
18 #include <linux/bitops.h>
19 #include <linux/spi/spi.h>
20 #include <linux/regmap.h>
21 #include <linux/component.h>
22 #include <linux/ratelimit.h>
23 #include <sound/wcd-dsp-mgr.h>
24 #include <sound/wcd-spi.h>
25 #include "wcd-spi-registers.h"
/* Byte manipulations */
#define SHIFT_1_BYTES (8)
#define SHIFT_2_BYTES (16)
#define SHIFT_3_BYTES (24)

/* SPI slave command opcodes (first byte of each frame) */
#define WCD_SPI_CMD_NOP (0x00)
#define WCD_SPI_CMD_WREN (0x06)
#define WCD_SPI_CMD_CLKREQ (0xDA)
#define WCD_SPI_CMD_RDSR (0x05)
#define WCD_SPI_CMD_IRR (0x81)
#define WCD_SPI_CMD_IRW (0x82)
#define WCD_SPI_CMD_MIOR (0x83)
#define WCD_SPI_CMD_FREAD (0x0B)
#define WCD_SPI_CMD_MIOW (0x02)

/* 32-bit frames carry the opcode in the most significant byte */
#define WCD_SPI_WRITE_FRAME_OPCODE \
	(WCD_SPI_CMD_MIOW << SHIFT_3_BYTES)
#define WCD_SPI_READ_FRAME_OPCODE \
	(WCD_SPI_CMD_MIOR << SHIFT_3_BYTES)
#define WCD_SPI_FREAD_FRAME_OPCODE \
	(WCD_SPI_CMD_FREAD << SHIFT_3_BYTES)

/* Total command lengths in bytes, including opcode and padding */
#define WCD_SPI_OPCODE_LEN (0x01)
#define WCD_SPI_CMD_NOP_LEN (0x01)
#define WCD_SPI_CMD_WREN_LEN (0x01)
#define WCD_SPI_CMD_CLKREQ_LEN (0x04)
#define WCD_SPI_CMD_IRR_LEN (0x04)
#define WCD_SPI_CMD_IRW_LEN (0x06)
#define WCD_SPI_WRITE_SINGLE_LEN (0x08)
#define WCD_SPI_READ_SINGLE_LEN (0x13)
#define WCD_SPI_CMD_FREAD_LEN (0x13)

/* Clock request/release timing */
#define WCD_SPI_CLKREQ_DELAY_USECS (500)
#define WCD_SPI_CLK_OFF_TIMER_MS (500)
#define WCD_SPI_RESUME_TIMEOUT_MS 100

/*
 * Mask selecting the address bytes within a command frame.
 * NOTE(review): the first operand line of this macro is missing from
 * this extract; as written it covers the two middle address bytes.
 */
#define WCD_CMD_ADDR_MASK \
	(0xFF << SHIFT_1_BYTES) | \
	(0xFF << SHIFT_2_BYTES))

/* Clock ctrl request related */
#define WCD_SPI_CLK_ENABLE true
#define WCD_SPI_CLK_DISABLE false
#define WCD_SPI_CLK_FLAG_DELAYED (1 << 0)
#define WCD_SPI_CLK_FLAG_IMMEDIATE (1 << 1)

/* Internal addresses */
#define WCD_SPI_ADDR_IPC_CTL_HOST (0x012014)

/* Word sizes and min/max lengths */
#define WCD_SPI_WORD_BYTE_CNT (4)
#define WCD_SPI_RW_MULTI_MIN_LEN (16)

/* Max size is 32 bytes less than 64Kbytes */
#define WCD_SPI_RW_MULTI_MAX_LEN ((64 * 1024) - 32)

/*
 * Max size for the pre-allocated buffers is the max
 * possible read/write length + 32 bytes for the SPI
 * read/write command header itself.
 */
#define WCD_SPI_RW_MAX_BUF_SIZE (WCD_SPI_RW_MULTI_MAX_LEN + 32)

/* Alignment requirements */
#define WCD_SPI_RW_MIN_ALIGN WCD_SPI_WORD_BYTE_CNT
#define WCD_SPI_RW_MULTI_ALIGN (16)

/* Status mask bits (for wcd_spi_priv.status_mask) */
#define WCD_SPI_CLK_STATE_ENABLED BIT(0)
#define WCD_SPI_IS_SUSPENDED BIT(1)
102 /* Locking related */
103 #define WCD_SPI_MUTEX_LOCK(spi, lock) \
105 dev_vdbg(&spi->dev, "%s: mutex_lock(%s)\n", \
106 __func__, __stringify_1(lock)); \
110 #define WCD_SPI_MUTEX_UNLOCK(spi, lock) \
112 dev_vdbg(&spi->dev, "%s: mutex_unlock(%s)\n", \
113 __func__, __stringify_1(lock)); \
114 mutex_unlock(&lock); \
/*
 * Debugfs bookkeeping: directory handle plus the user-programmable
 * address/size consumed by the "mem_read" debugfs file.
 * NOTE(review): member declarations are missing from this extract.
 */
struct wcd_spi_debug_data {

/*
 * Per-device driver state, stored as SPI drvdata.
 * NOTE(review): several members referenced elsewhere in this file
 * (clk_users, reg_bytes, val_bytes, mem_base_addr, tx_buf, rx_buf)
 * are missing from this extract.
 */
struct wcd_spi_priv {
	struct spi_device *spi;

	struct regmap *regmap;

	/* Message for single transfer */
	struct spi_message msg1;
	struct spi_transfer xfer1;

	/* Message for two transfers */
	struct spi_message msg2;
	struct spi_transfer xfer2[2];

	/* Register access related */

	/* Clock requests related */
	struct mutex clk_mutex;
	unsigned long status_mask;
	struct delayed_work clk_dwork;

	/* Transaction related */
	struct mutex xfer_mutex;

	/* WDSP manager device and its component ops */
	struct device *m_dev;
	struct wdsp_mgr_ops *m_ops;

	/* Debugfs related information */
	struct wcd_spi_debug_data debug_data;

	/* Completion object to indicate system resume completion */
	struct completion resume_comp;

	/* Buffers to hold memory used for transfers */
170 static char *wcd_spi_xfer_req_str(enum xfer_request req)
172 if (req == WCD_SPI_XFER_WRITE)
174 else if (req == WCD_SPI_XFER_READ)
177 return "xfer_invalid";
180 static void wcd_spi_reinit_xfer(struct spi_transfer *xfer)
184 xfer->delay_usecs = 0;
188 static bool wcd_spi_is_suspended(struct wcd_spi_priv *wcd_spi)
190 return test_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
193 static bool wcd_spi_can_suspend(struct wcd_spi_priv *wcd_spi)
195 struct spi_device *spi = wcd_spi->spi;
197 if (wcd_spi->clk_users > 0 ||
198 test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask)) {
199 dev_err(&spi->dev, "%s: cannot suspend, clk_users = %d\n",
200 __func__, wcd_spi->clk_users);
/*
 * wcd_spi_wait_for_resume: block until the SPI bus resumes, up to
 * WCD_SPI_RESUME_TIMEOUT_MS. clk_mutex is dropped around the wait so
 * the resume handler (which takes clk_mutex) can signal resume_comp.
 * NOTE(review): rc declaration and the return/goto paths are missing
 * from this extract.
 */
static int wcd_spi_wait_for_resume(struct wcd_spi_priv *wcd_spi)
	struct spi_device *spi = wcd_spi->spi;

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	/* If the system is already in resumed state, return right away */
	if (!wcd_spi_is_suspended(wcd_spi))

	/* If suspended then wait for resume to happen */
	reinit_completion(&wcd_spi->resume_comp);
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	rc = wait_for_completion_timeout(&wcd_spi->resume_comp,
			msecs_to_jiffies(WCD_SPI_RESUME_TIMEOUT_MS));
	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	/* rc == 0 from wait_for_completion_timeout means timeout */
	dev_err(&spi->dev, "%s: failed to resume in %u msec\n",
		__func__, WCD_SPI_RESUME_TIMEOUT_MS);

	dev_dbg(&spi->dev, "%s: resume successful\n", __func__);

	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
/*
 * wcd_spi_read_single: read one 32-bit word from @remote_addr on the
 * slave, using msg2: command frame out on xfer2[0], word in on
 * xfer2[1]. The value lands in *val untranslated from the wire.
 * NOTE(review): the frame/ret declarations, the tx_buf NULL check
 * braces and the return path are missing from this extract.
 */
static int wcd_spi_read_single(struct spi_device *spi,
			       u32 remote_addr, u32 *val)
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
	struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
	u8 *tx_buf = wcd_spi->tx_buf;

	dev_dbg(&spi->dev, "%s: remote_addr = 0x%x\n",
		__func__, remote_addr);

	/* command must go out of the pre-allocated DMA-safe buffer */
	dev_err(&spi->dev, "%s: tx_buf not allocated\n",

	/* Build frame: read opcode in MSB plus masked target address */
	frame |= WCD_SPI_READ_FRAME_OPCODE;
	frame |= remote_addr & WCD_CMD_ADDR_MASK;

	wcd_spi_reinit_xfer(tx_xfer);
	frame = cpu_to_be32(frame);
	memcpy(tx_buf, &frame, sizeof(frame));
	tx_xfer->tx_buf = tx_buf;
	tx_xfer->len = WCD_SPI_READ_SINGLE_LEN;

	wcd_spi_reinit_xfer(rx_xfer);
	rx_xfer->rx_buf = val;
	rx_xfer->len = sizeof(*val);

	ret = spi_sync(spi, &wcd_spi->msg2);
/*
 * wcd_spi_read_multi: fast-read @len bytes from @remote_addr using a
 * single full-duplex transfer. The FREAD command header occupies the
 * first WCD_SPI_CMD_FREAD_LEN bytes of the exchange, so the payload is
 * copied out of rx_buf starting at that offset.
 * NOTE(review): the len parameter line, frame/ret declarations and the
 * return paths are missing from this extract.
 */
static int wcd_spi_read_multi(struct spi_device *spi,
			      u32 remote_addr, u8 *data,
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *xfer = &wcd_spi->xfer1;
	u8 *tx_buf = wcd_spi->tx_buf;
	u8 *rx_buf = wcd_spi->rx_buf;

	dev_dbg(&spi->dev, "%s: addr 0x%x, len = %zd\n",
		__func__, remote_addr, len);

	/* Build frame: FREAD opcode in MSB plus masked target address */
	frame |= WCD_SPI_FREAD_FRAME_OPCODE;
	frame |= remote_addr & WCD_CMD_ADDR_MASK;

	if (!tx_buf || !rx_buf) {
		dev_err(&spi->dev, "%s: %s not allocated\n", __func__,
			(!tx_buf) ? "tx_buf" : "rx_buf");

	wcd_spi_reinit_xfer(xfer);
	frame = cpu_to_be32(frame);
	memcpy(tx_buf, &frame, sizeof(frame));
	xfer->tx_buf = tx_buf;
	xfer->rx_buf = rx_buf;
	xfer->len = WCD_SPI_CMD_FREAD_LEN + len;

	ret = spi_sync(spi, &wcd_spi->msg1);
	dev_err(&spi->dev, "%s: failed, err = %d\n",

	/* payload follows the command header in the receive buffer */
	memcpy(data, rx_buf + WCD_SPI_CMD_FREAD_LEN, len);
/*
 * wcd_spi_write_single: write one 32-bit word @val to @remote_addr.
 * The 8-byte command is built on the stack (frame + value) and sent
 * as a single transfer on msg1.
 * NOTE(review): the frame declaration, opening brace and the
 * xfer->tx_buf assignment are missing from this extract.
 */
static int wcd_spi_write_single(struct spi_device *spi,
				u32 remote_addr, u32 val)
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *xfer = &wcd_spi->xfer1;
	u8 buf[WCD_SPI_WRITE_SINGLE_LEN];

	dev_dbg(&spi->dev, "%s: remote_addr = 0x%x, val = 0x%x\n",
		__func__, remote_addr, val);

	memset(buf, 0, WCD_SPI_WRITE_SINGLE_LEN);
	/* Build frame: write opcode in MSB plus masked target address */
	frame |= WCD_SPI_WRITE_FRAME_OPCODE;
	frame |= (remote_addr & WCD_CMD_ADDR_MASK);

	frame = cpu_to_be32(frame);
	memcpy(buf, &frame, sizeof(frame));
	/* value follows the 4-byte frame; sent as-is (no byte swap) */
	memcpy(buf + sizeof(frame), &val, sizeof(val));

	wcd_spi_reinit_xfer(xfer);
	xfer->len = WCD_SPI_WRITE_SINGLE_LEN;

	return spi_sync(spi, &wcd_spi->msg1);
/*
 * wcd_spi_write_multi: write @len bytes from @data to @remote_addr in
 * one transfer. The command frame is prepended to the payload inside
 * the pre-allocated tx_buf.
 * NOTE(review): the len parameter line, frame/ret/xfer_len
 * declarations, the tx_buf NULL check braces and the return are
 * missing from this extract.
 */
static int wcd_spi_write_multi(struct spi_device *spi,
			       u32 remote_addr, u8 *data,
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *xfer = &wcd_spi->xfer1;
	u8 *tx_buf = wcd_spi->tx_buf;

	dev_dbg(&spi->dev, "%s: addr = 0x%x len = %zd\n",
		__func__, remote_addr, len);

	/* Build frame: write opcode in MSB plus masked target address */
	frame |= WCD_SPI_WRITE_FRAME_OPCODE;
	frame |= (remote_addr & WCD_CMD_ADDR_MASK);

	frame = cpu_to_be32(frame);
	xfer_len = len + sizeof(frame);

	dev_err(&spi->dev, "%s: tx_buf not allocated\n",

	/* header first, then the payload, in the DMA-safe buffer */
	memcpy(tx_buf, &frame, sizeof(frame));
	memcpy(tx_buf + sizeof(frame), data, len);

	wcd_spi_reinit_xfer(xfer);
	xfer->tx_buf = tx_buf;
	xfer->len = xfer_len;

	ret = spi_sync(spi, &wcd_spi->msg1);
	if (IS_ERR_VALUE(ret))
		"%s: Failed, addr = 0x%x, len = %zd\n",
		__func__, remote_addr, len);
/*
 * wcd_spi_transfer_split: carve an arbitrary word-aligned transfer
 * into hardware-friendly pieces, in four phases:
 *   1) single-word ops until the address hits multi-transfer alignment
 *   2) maximal WCD_SPI_RW_MULTI_MAX_LEN chunks
 *   3) one chunk rounded down to a WCD_SPI_RW_MULTI_MIN_LEN multiple
 *   4) single-word ops for the remainder
 * @xfer_req selects read vs write at each step.
 * NOTE(review): opening/closing braces, several else branches and the
 * loop_cnt handling lines are missing from this extract.
 */
static int wcd_spi_transfer_split(struct spi_device *spi,
				  struct wcd_spi_msg *data_msg,
				  enum xfer_request xfer_req)
	u32 addr = data_msg->remote_addr;
	u8 *data = data_msg->data;
	int remain_size = data_msg->len;
	int to_xfer, loop_cnt, ret = 0;

	/* Perform single writes until multi word alignment is met */
	while (remain_size &&
	       !IS_ALIGNED(addr, WCD_SPI_RW_MULTI_ALIGN)) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, addr,
			ret = wcd_spi_read_single(spi, addr,
		if (IS_ERR_VALUE(ret)) {
			"%s: %s fail iter(%d) start-word addr (0x%x)\n",
			__func__, wcd_spi_xfer_req_str(xfer_req),

		addr += WCD_SPI_WORD_BYTE_CNT;
		data += WCD_SPI_WORD_BYTE_CNT;
		remain_size -= WCD_SPI_WORD_BYTE_CNT;

	/* Perform multi writes for max allowed multi writes */
	while (remain_size >= WCD_SPI_RW_MULTI_MAX_LEN) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_multi(spi, addr, data,
						  WCD_SPI_RW_MULTI_MAX_LEN);
			ret = wcd_spi_read_multi(spi, addr, data,
						 WCD_SPI_RW_MULTI_MAX_LEN);
		if (IS_ERR_VALUE(ret)) {
			"%s: %s fail iter(%d) max-write addr (0x%x)\n",
			__func__, wcd_spi_xfer_req_str(xfer_req),

		addr += WCD_SPI_RW_MULTI_MAX_LEN;
		data += WCD_SPI_RW_MULTI_MAX_LEN;
		remain_size -= WCD_SPI_RW_MULTI_MAX_LEN;

	/*
	 * Perform write for max possible data that is multiple
	 * of the minimum size for multi-write commands.
	 */
	to_xfer = remain_size - (remain_size % WCD_SPI_RW_MULTI_MIN_LEN);
	if (remain_size >= WCD_SPI_RW_MULTI_MIN_LEN &&
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_multi(spi, addr, data, to_xfer);
			ret = wcd_spi_read_multi(spi, addr, data, to_xfer);
		if (IS_ERR_VALUE(ret)) {
			"%s: %s fail write addr (0x%x), size (0x%x)\n",
			__func__, wcd_spi_xfer_req_str(xfer_req),

		remain_size -= to_xfer;

	/* Perform single writes for the last remaining data */
	while (remain_size > 0) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, addr, (*((u32 *)data)));
			ret = wcd_spi_read_single(spi, addr, (u32 *) data);
		if (IS_ERR_VALUE(ret)) {
			"%s: %s fail iter(%d) end-write addr (0x%x)\n",
			__func__, wcd_spi_xfer_req_str(xfer_req),

		addr += WCD_SPI_WORD_BYTE_CNT;
		data += WCD_SPI_WORD_BYTE_CNT;
		remain_size -= WCD_SPI_WORD_BYTE_CNT;
487 static int wcd_spi_cmd_nop(struct spi_device *spi)
489 u8 nop = WCD_SPI_CMD_NOP;
491 return spi_write(spi, &nop, WCD_SPI_CMD_NOP_LEN);
/*
 * wcd_spi_cmd_clkreq: issue the clock-request command on msg1 with a
 * post-transfer delay of WCD_SPI_CLKREQ_DELAY_USECS so the slave has
 * time to honor the request.
 * NOTE(review): the cmd initializer contents, opening brace,
 * xfer->tx_buf assignment and closing brace are missing from this
 * extract.
 */
static int wcd_spi_cmd_clkreq(struct spi_device *spi)
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *xfer = &wcd_spi->xfer1;
	u8 cmd[WCD_SPI_CMD_CLKREQ_LEN] = {

	wcd_spi_reinit_xfer(xfer);
	xfer->len = WCD_SPI_CMD_CLKREQ_LEN;
	xfer->delay_usecs = WCD_SPI_CLKREQ_DELAY_USECS;

	return spi_sync(spi, &wcd_spi->msg1);
510 static int wcd_spi_cmd_wr_en(struct spi_device *spi)
512 u8 wr_en = WCD_SPI_CMD_WREN;
514 return spi_write(spi, &wr_en, WCD_SPI_CMD_WREN_LEN);
/*
 * wcd_spi_cmd_rdsr: read the slave status register. Sends the RDSR
 * opcode on xfer2[0] and receives the 32-bit status on xfer2[1];
 * the wire value is big-endian and converted before being stored in
 * *rdsr_status.
 * NOTE(review): the rdsr_status parameter line, local declarations
 * (rdsr_cmd, status, ret) and the return paths are missing from this
 * extract.
 */
static int wcd_spi_cmd_rdsr(struct spi_device *spi,
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
	struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];

	rdsr_cmd = WCD_SPI_CMD_RDSR;
	wcd_spi_reinit_xfer(tx_xfer);
	tx_xfer->tx_buf = &rdsr_cmd;
	tx_xfer->len = sizeof(rdsr_cmd);

	wcd_spi_reinit_xfer(rx_xfer);
	rx_xfer->rx_buf = &status;
	rx_xfer->len = sizeof(status);

	ret = spi_sync(spi, &wcd_spi->msg2);
	if (IS_ERR_VALUE(ret)) {
		dev_err(&spi->dev, "%s: RDSR failed, err = %d\n",

	/* status arrives big-endian on the wire */
	*rdsr_status = be32_to_cpu(status);

	dev_dbg(&spi->dev, "%s: RDSR success, value = 0x%x\n",
		__func__, *rdsr_status);
/*
 * wcd_spi_clk_enable: enable the slave's SPI clock with the sequence
 * NOP -> CLK_REQ -> NOP, then verify via RDSR. A non-zero RDSR value
 * means the bus is readable and WCD_SPI_CLK_STATE_ENABLED is set; a
 * zero value is treated as a failed clock request.
 * NOTE(review): declarations (ret, rd_status), the RDSR branch
 * structure and the return path are missing from this extract.
 */
static int wcd_spi_clk_enable(struct spi_device *spi)
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	ret = wcd_spi_cmd_nop(spi);
	if (IS_ERR_VALUE(ret)) {
		dev_err(&spi->dev, "%s: NOP1 failed, err = %d\n",

	ret = wcd_spi_cmd_clkreq(spi);
	if (IS_ERR_VALUE(ret)) {
		dev_err(&spi->dev, "%s: CLK_REQ failed, err = %d\n",

	ret = wcd_spi_cmd_nop(spi);
	if (IS_ERR_VALUE(ret)) {
		dev_err(&spi->dev, "%s: NOP2 failed, err = %d\n",
	wcd_spi_cmd_rdsr(spi, &rd_status);
	/*
	 * Read status zero means reads are not
	 * happenning on the bus, possibly because
	 * clock request failed.
	 */
	set_bit(WCD_SPI_CLK_STATE_ENABLED,
		&wcd_spi->status_mask);

	dev_err(&spi->dev, "%s: RDSR status is zero\n",
/*
 * wcd_spi_clk_disable: ask the slave to release the SPI clock by
 * writing 0x01 to the IPC control host register, then clear the
 * clock-enabled status bit unconditionally.
 * NOTE(review): the ret declaration, braces and return are missing
 * from this extract.
 */
static int wcd_spi_clk_disable(struct spi_device *spi)
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	ret = wcd_spi_write_single(spi, WCD_SPI_ADDR_IPC_CTL_HOST, 0x01);
	if (IS_ERR_VALUE(ret))
		dev_err(&spi->dev, "%s: Failed, err = %d\n",
	/*
	 * clear this bit even if clock disable failed
	 * as the source clocks might get turned off.
	 */
	clear_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask);
/*
 * wcd_spi_clk_ctrl: reference-counted clock voting. @request is
 * WCD_SPI_CLK_ENABLE/DISABLE; @flags selects immediate vs delayed
 * disable (delayed schedules clk_dwork after WCD_SPI_CLK_OFF_TIMER_MS
 * so back-to-back transfers avoid a clock bounce). Unbalanced disables
 * reset clk_users to 0. Enable fails while the bus is suspended.
 * clk_mutex is dropped around cancel_delayed_work_sync() to avoid
 * deadlock with the work item, which takes the same mutex.
 * NOTE(review): the ret declaration, several braces/else branches and
 * the return path are missing from this extract.
 */
static int wcd_spi_clk_ctrl(struct spi_device *spi,
			    bool request, u32 flags)
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	const char *delay_str;

	delay_str = (flags == WCD_SPI_CLK_FLAG_DELAYED) ?
		    "delayed" : "immediate";

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);

	/* Reject any unbalanced disable request */
	if (wcd_spi->clk_users < 0 ||
	    (!request && wcd_spi->clk_users == 0)) {
		dev_err(&spi->dev, "%s: Unbalanced clk_users %d for %s\n",
			__func__, wcd_spi->clk_users,
			request ? "enable" : "disable");

		/* Reset the clk_users to 0 */
		wcd_spi->clk_users = 0;

	if (request == WCD_SPI_CLK_ENABLE) {
		/*
		 * If the SPI bus is suspended, then return error
		 * as the transaction cannot be completed.
		 */
		if (wcd_spi_is_suspended(wcd_spi)) {
			"%s: SPI suspended, cannot enable clk\n",

		/* Cancel the disable clk work */
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		cancel_delayed_work_sync(&wcd_spi->clk_dwork);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);

		wcd_spi->clk_users++;

		/*
		 * If clk state is already set,
		 * then clk wasnt really disabled
		 */
		if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
		else if (wcd_spi->clk_users == 1)
			ret = wcd_spi_clk_enable(spi);

		wcd_spi->clk_users--;

		/* Clock is still voted for */
		if (wcd_spi->clk_users > 0)

		/*
		 * If we are here, clk_users must be 0 and needs
		 * to be disabled. Call the disable based on the
		 */
		if (flags == WCD_SPI_CLK_FLAG_DELAYED) {
			schedule_delayed_work(&wcd_spi->clk_dwork,
				msecs_to_jiffies(WCD_SPI_CLK_OFF_TIMER_MS));
			ret = wcd_spi_clk_disable(spi);
			if (IS_ERR_VALUE(ret))
				"%s: Failed to disable clk err = %d\n",

	dev_dbg(&spi->dev, "%s: updated clk_users = %d, request_%s %s\n",
		__func__, wcd_spi->clk_users, request ? "enable" : "disable",
		request ? "" : delay_str);
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
/*
 * wcd_spi_init: one-time slave initialization. Votes the clock on,
 * enables writes (WREN), syncs the regcache to hardware, programs the
 * slave config register, maxes out the transfer-length (MTU) field,
 * then drops the clock vote.
 * NOTE(review): the ret declaration, braces, the SLAVE_CONFIG value
 * line and the return path are missing from this extract.
 */
static int wcd_spi_init(struct spi_device *spi)
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
			       WCD_SPI_CLK_FLAG_IMMEDIATE);
	if (IS_ERR_VALUE(ret))

	ret = wcd_spi_cmd_wr_en(spi);
	if (IS_ERR_VALUE(ret))

	/*
	 * In case spi_init is called after component deinit,
	 * it is possible hardware register state is also reset.
	 * Sync the regcache here so hardware state is updated
	 * to reflect the cache.
	 */
	regcache_sync(wcd_spi->regmap);

	regmap_write(wcd_spi->regmap, WCD_SPI_SLAVE_CONFIG,

	/* Write the MTU to max allowed size */
	regmap_update_bits(wcd_spi->regmap,
			   WCD_SPI_SLAVE_TRNS_LEN,
			   0xFFFF0000, 0xFFFF0000);

	wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
			 WCD_SPI_CLK_FLAG_IMMEDIATE);
738 static void wcd_spi_clk_work(struct work_struct *work)
740 struct delayed_work *dwork;
741 struct wcd_spi_priv *wcd_spi;
742 struct spi_device *spi;
745 dwork = to_delayed_work(work);
746 wcd_spi = container_of(dwork, struct wcd_spi_priv, clk_dwork);
749 WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
750 ret = wcd_spi_clk_disable(spi);
751 if (IS_ERR_VALUE(ret))
753 "%s: Failed to disable clk, err = %d\n",
755 WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
/*
 * __wcd_spi_data_xfer: core transfer entry. Validates 4-byte address
 * alignment and 4-byte-multiple length, then, under xfer_mutex,
 * dispatches: exactly one word -> single read/write, anything larger
 * -> wcd_spi_transfer_split(). Caller is responsible for the clock
 * vote (see wcd_spi_data_xfer).
 * NOTE(review): the ret declaration, else branches and return are
 * missing from this extract.
 */
static int __wcd_spi_data_xfer(struct spi_device *spi,
			       struct wcd_spi_msg *msg,
			       enum xfer_request xfer_req)
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	/* Check for minimum alignment requirements */
	if (!IS_ALIGNED(msg->remote_addr, WCD_SPI_RW_MIN_ALIGN)) {
		"%s addr 0x%x is not aligned to 0x%x\n",
		__func__, msg->remote_addr, WCD_SPI_RW_MIN_ALIGN);
	} else if (msg->len % WCD_SPI_WORD_BYTE_CNT) {
		"%s len 0x%zx is not multiple of %d\n",
		__func__, msg->len, WCD_SPI_WORD_BYTE_CNT);

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->xfer_mutex);
	if (msg->len == WCD_SPI_WORD_BYTE_CNT) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, msg->remote_addr,
						   (*((u32 *)msg->data)));
			ret = wcd_spi_read_single(spi, msg->remote_addr,
		ret = wcd_spi_transfer_split(spi, msg, xfer_req);

	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->xfer_mutex);
/*
 * wcd_spi_data_xfer: clock-managed wrapper around __wcd_spi_data_xfer.
 * Takes an immediate clock vote, performs the transfer, and always
 * releases the vote with a delayed disable (even on transfer failure)
 * so bursts of transfers share one clock cycle.
 * NOTE(review): declarations (ret, ret1), the msg->len validation
 * branch and the return path are missing from this extract.
 */
static int wcd_spi_data_xfer(struct spi_device *spi,
			     struct wcd_spi_msg *msg,
			     enum xfer_request req)

	dev_err(&spi->dev, "%s: Invalid size %zd\n",

	/* Request for clock */
	ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
			       WCD_SPI_CLK_FLAG_IMMEDIATE);
	if (IS_ERR_VALUE(ret)) {
		dev_err(&spi->dev, "%s: clk enable failed %d\n",

	/* Perform the transaction */
	ret = __wcd_spi_data_xfer(spi, msg, req);
	if (IS_ERR_VALUE(ret))
		"%s: Failed %s, addr = 0x%x, size = 0x%zx, err = %d\n",
		__func__, wcd_spi_xfer_req_str(req),
		msg->remote_addr, msg->len, ret);

	/* Release the clock even if xfer failed */
	ret1 = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
				WCD_SPI_CLK_FLAG_DELAYED);
	if (IS_ERR_VALUE(ret1))
		dev_err(&spi->dev, "%s: clk disable failed %d\n",
/*
 * wcd_spi_data_write: Write data to WCD SPI
 * @spi: spi_device struct
 * @msg: msg that needs to be written to WCD
 *
 * This API writes length of data to address specified. These details
 * about the write are encapsulated in @msg. Write size should be multiple
 * of 4 bytes and write address should be 4-byte aligned.
 */
int wcd_spi_data_write(struct spi_device *spi,
		       struct wcd_spi_msg *msg)
	/* NOTE(review): the NULL-check condition line is missing here */
	pr_err("%s: Invalid %s\n", __func__,
	       (!spi) ? "spi device" : "msg");

	dev_dbg_ratelimited(&spi->dev, "%s: addr = 0x%x, len = %zu\n",
			    __func__, msg->remote_addr, msg->len);
	return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_WRITE);
EXPORT_SYMBOL(wcd_spi_data_write);
/*
 * wcd_spi_data_read: Read data from WCD SPI
 * @spi: spi_device struct
 * @msg: msg that needs to be read from WCD
 *
 * This API reads length of data from address specified. These details
 * about the read are encapsulated in @msg. Read size should be multiple
 * of 4 bytes and read address should be 4-byte aligned.
 */
int wcd_spi_data_read(struct spi_device *spi,
		      struct wcd_spi_msg *msg)
	/* NOTE(review): the NULL-check condition line is missing here */
	pr_err("%s: Invalid %s\n", __func__,
	       (!spi) ? "spi device" : "msg");

	dev_dbg_ratelimited(&spi->dev, "%s: addr = 0x%x,len = %zu\n",
			    __func__, msg->remote_addr, msg->len);
	return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_READ);
EXPORT_SYMBOL(wcd_spi_data_read);
/*
 * wdsp_spi_dload_section: download one DSP image section. Offsets the
 * section address by the device's mem_base_addr and writes it via the
 * internal xfer path (caller is expected to already hold a clock vote
 * from the PRE_DLOAD event).
 * NOTE(review): the second parameter line, ret declaration,
 * msg.len assignment and return are missing from this extract.
 */
static int wdsp_spi_dload_section(struct spi_device *spi,
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wdsp_img_section *sec = data;
	struct wcd_spi_msg msg;

	dev_dbg(&spi->dev, "%s: addr = 0x%x, size = 0x%zx\n",
		__func__, sec->addr, sec->size);

	/* section addresses are relative to the DSP memory base */
	msg.remote_addr = sec->addr + wcd_spi->mem_base_addr;
	msg.data = sec->data;

	ret = __wcd_spi_data_xfer(spi, &msg, WCD_SPI_XFER_WRITE);
	if (IS_ERR_VALUE(ret))
		dev_err(&spi->dev, "%s: fail addr (0x%x) size (0x%zx)\n",
			__func__, msg.remote_addr, msg.len);
/*
 * wdsp_spi_read_section: read one DSP image section back. Unlike the
 * download path this uses wcd_spi_data_xfer, which takes its own
 * clock vote.
 * NOTE(review): the ret declaration, msg.len assignment and return
 * are missing from this extract.
 */
static int wdsp_spi_read_section(struct spi_device *spi, void *data)
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wdsp_img_section *sec = data;
	struct wcd_spi_msg msg;

	/* section addresses are relative to the DSP memory base */
	msg.remote_addr = sec->addr + wcd_spi->mem_base_addr;
	msg.data = sec->data;

	dev_dbg(&spi->dev, "%s: addr = 0x%x, size = 0x%zx\n",
		__func__, msg.remote_addr, msg.len);

	ret = wcd_spi_data_xfer(spi, &msg, WCD_SPI_XFER_READ);
	if (IS_ERR_VALUE(ret))
		dev_err(&spi->dev, "%s: fail addr (0x%x) size (0x%zx)\n",
			__func__, msg.remote_addr, msg.len);
/*
 * wdsp_spi_event_handler: WDSP manager callback dispatching on the
 * event type: shutdown forces the clock off and clears all votes;
 * PRE_DLOAD_* takes a clock vote for the download, POST_DLOAD_*/
 * DLOAD_FAILED releases it; DLOAD/READ_SECTION move image data;
 * SUSPEND/RESUME gate power transitions.
 * NOTE(review): the final parameter line, ret declaration, switch
 * statement header, break statements and return are missing from this
 * extract.
 */
static int wdsp_spi_event_handler(struct device *dev, void *priv_data,
				  enum wdsp_event_type event,
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	dev_dbg(&spi->dev, "%s: event type %d\n",

	case WDSP_EVENT_POST_SHUTDOWN:
		/* force the clock off and drop every outstanding vote */
		cancel_delayed_work_sync(&wcd_spi->clk_dwork);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
		if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
			wcd_spi_clk_disable(spi);
		wcd_spi->clk_users = 0;
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);

	case WDSP_EVENT_PRE_DLOAD_CODE:
	case WDSP_EVENT_PRE_DLOAD_DATA:
		ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
				       WCD_SPI_CLK_FLAG_IMMEDIATE);
		if (IS_ERR_VALUE(ret))
			dev_err(&spi->dev, "%s: clk_req failed %d\n",

	case WDSP_EVENT_POST_DLOAD_CODE:
	case WDSP_EVENT_POST_DLOAD_DATA:
	case WDSP_EVENT_DLOAD_FAILED:

		ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
				       WCD_SPI_CLK_FLAG_IMMEDIATE);
		if (IS_ERR_VALUE(ret))
			dev_err(&spi->dev, "%s: clk unvote failed %d\n",

	case WDSP_EVENT_DLOAD_SECTION:
		ret = wdsp_spi_dload_section(spi, data);

	case WDSP_EVENT_READ_SECTION:
		ret = wdsp_spi_read_section(spi, data);

	case WDSP_EVENT_SUSPEND:
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
		if (!wcd_spi_can_suspend(wcd_spi))
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);

	case WDSP_EVENT_RESUME:
		ret = wcd_spi_wait_for_resume(wcd_spi);

		dev_dbg(&spi->dev, "%s: Unhandled event %d\n",
/*
 * wcd_spi_bus_gwrite: regmap gather-write callback. Builds an IRW
 * (internal register write) command on the stack: opcode, one register
 * byte, then the value bytes, and sends it with spi_write.
 * NOTE(review): opening brace, error-return braces and the val copy
 * length line are missing from this extract.
 */
static int wcd_spi_bus_gwrite(void *context, const void *reg,
			      size_t reg_len, const void *val,
	struct device *dev = context;
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	u8 tx_buf[WCD_SPI_CMD_IRW_LEN];

	/* regmap must hand us exactly reg_bytes + val_bytes */
	if (!reg || !val || reg_len != wcd_spi->reg_bytes ||
	    val_len != wcd_spi->val_bytes) {
		"%s: Invalid input, reg_len = %zd, val_len = %zd",
		__func__, reg_len, val_len);

	tx_buf[0] = WCD_SPI_CMD_IRW;
	tx_buf[1] = *((u8 *)reg);
	memcpy(&tx_buf[WCD_SPI_OPCODE_LEN + reg_len],

	return spi_write(spi, tx_buf, WCD_SPI_CMD_IRW_LEN);
/*
 * wcd_spi_bus_write: regmap write callback. The incoming buffer packs
 * register bytes followed by value bytes; split it and delegate to the
 * gather-write implementation.
 * NOTE(review): the count parameter line, opening brace and
 * error-return lines are missing from this extract.
 */
static int wcd_spi_bus_write(void *context, const void *data,
	struct device *dev = context;
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	if (count < (wcd_spi->reg_bytes + wcd_spi->val_bytes)) {
		dev_err(&spi->dev, "%s: Invalid size %zd\n",

	return wcd_spi_bus_gwrite(context, data, wcd_spi->reg_bytes,
				  data + wcd_spi->reg_bytes,
				  count - wcd_spi->reg_bytes);
/*
 * wcd_spi_bus_read: regmap read callback. Sends an IRR (internal
 * register read) command on xfer2[0] and receives @val_len bytes on
 * xfer2[1] via msg2.
 * NOTE(review): the val_len parameter line, opening brace,
 * error-return braces and the tx_buf tail padding line are missing
 * from this extract.
 */
static int wcd_spi_bus_read(void *context, const void *reg,
			    size_t reg_len, void *val,
	struct device *dev = context;
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
	struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
	u8 tx_buf[WCD_SPI_CMD_IRR_LEN];

	/* regmap must hand us exactly reg_bytes + val_bytes */
	if (!reg || !val || reg_len != wcd_spi->reg_bytes ||
	    val_len != wcd_spi->val_bytes) {
		"%s: Invalid input, reg_len = %zd, val_len = %zd",
		__func__, reg_len, val_len);

	memset(tx_buf, 0, WCD_SPI_OPCODE_LEN);
	tx_buf[0] = WCD_SPI_CMD_IRR;
	tx_buf[1] = *((u8 *)reg);

	wcd_spi_reinit_xfer(tx_xfer);
	tx_xfer->tx_buf = tx_buf;
	tx_xfer->rx_buf = NULL;
	tx_xfer->len = WCD_SPI_CMD_IRR_LEN;

	wcd_spi_reinit_xfer(rx_xfer);
	rx_xfer->tx_buf = NULL;
	rx_xfer->rx_buf = val;
	rx_xfer->len = val_len;

	return spi_sync(spi, &wcd_spi->msg2);
1073 static struct regmap_bus wcd_spi_regmap_bus = {
1074 .write = wcd_spi_bus_write,
1075 .gather_write = wcd_spi_bus_gwrite,
1076 .read = wcd_spi_bus_read,
1077 .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
1078 .val_format_endian_default = REGMAP_ENDIAN_BIG,
/*
 * wcd_spi_state_show: debugfs "state" file backend. Dumps clock state,
 * vote count and whether each mutex is currently held.
 * NOTE(review): opening brace, else keyword, the final seq_printf
 * argument line and return are missing from this extract.
 */
static int wcd_spi_state_show(struct seq_file *f, void *ptr)
	struct spi_device *spi = f->private;
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	const char *clk_state, *clk_mutex, *xfer_mutex;

	if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
		clk_state = "enabled";
		clk_state = "disabled";

	clk_mutex = mutex_is_locked(&wcd_spi->clk_mutex) ?
		    "locked" : "unlocked";

	xfer_mutex = mutex_is_locked(&wcd_spi->xfer_mutex) ?
		     "locked" : "unlocked";

	seq_printf(f, "clk_state = %s\nclk_users = %d\n"
		   "clk_mutex = %s\nxfer_mutex = %s\n",
		   clk_state, wcd_spi->clk_users, clk_mutex,
1105 static int wcd_spi_state_open(struct inode *inode, struct file *file)
1107 return single_open(file, wcd_spi_state_show, inode->i_private);
1110 static const struct file_operations state_fops = {
1111 .open = wcd_spi_state_open,
1113 .llseek = seq_lseek,
1114 .release = single_release,
/*
 * wcd_spi_debugfs_mem_read: debugfs "mem_read" backend. Reads
 * min(count, dbg_data->size) bytes from the slave address previously
 * programmed through the "addr"/"size" debugfs files, into a temporary
 * kernel buffer, then copies to userspace.
 * NOTE(review): local declarations (buf, ret), braces, msg.data/len
 * assignments, kfree and return paths are missing from this extract.
 */
static ssize_t wcd_spi_debugfs_mem_read(struct file *file, char __user *ubuf,
					size_t count, loff_t *ppos)
	struct spi_device *spi = file->private_data;
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;
	struct wcd_spi_msg msg;
	ssize_t buf_size, read_count = 0;

	if (*ppos < 0 || !count)

	/* addr/size must have been programmed via debugfs first */
	if (dbg_data->size == 0 || dbg_data->addr == 0) {
		"%s: Invalid request, size = %u, addr = 0x%x\n",
		__func__, dbg_data->size, dbg_data->addr);

	buf_size = count < dbg_data->size ? count : dbg_data->size;
	buf = kzalloc(buf_size, GFP_KERNEL);

	msg.remote_addr = dbg_data->addr;

	ret = wcd_spi_data_read(spi, &msg);
	if (IS_ERR_VALUE(ret)) {
		"%s: Failed to read %zu bytes from addr 0x%x\n",
		__func__, buf_size, msg.remote_addr);

	read_count = simple_read_from_buffer(ubuf, count, ppos, buf, buf_size);
1166 static const struct file_operations mem_read_fops = {
1167 .open = simple_open,
1168 .read = wcd_spi_debugfs_mem_read,
/*
 * wcd_spi_debugfs_init: create the "wcd_spi" debugfs directory with
 * "state" (dump), "addr"/"size" (mem_read parameters) and "mem_read"
 * entries.
 * NOTE(review): the ret declaration, opening brace, the error-return
 * lines in the dir check, the u32 target arguments and the return are
 * missing from this extract.
 */
static int wcd_spi_debugfs_init(struct spi_device *spi)
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;

	dbg_data->dir = debugfs_create_dir("wcd_spi", NULL);
	if (IS_ERR_OR_NULL(dbg_data->dir)) {
		dbg_data->dir = NULL;

	debugfs_create_file("state", 0444, dbg_data->dir, spi, &state_fops);
	debugfs_create_u32("addr", S_IRUGO | S_IWUSR, dbg_data->dir,
	debugfs_create_u32("size", S_IRUGO | S_IWUSR, dbg_data->dir,

	debugfs_create_file("mem_read", S_IRUGO, dbg_data->dir,
			    spi, &mem_read_fops);
1197 static const struct reg_default wcd_spi_defaults[] = {
1198 {WCD_SPI_SLAVE_SANITY, 0xDEADBEEF},
1199 {WCD_SPI_SLAVE_DEVICE_ID, 0x00500000},
1200 {WCD_SPI_SLAVE_STATUS, 0x80100000},
1201 {WCD_SPI_SLAVE_CONFIG, 0x0F200808},
1202 {WCD_SPI_SLAVE_SW_RESET, 0x00000000},
1203 {WCD_SPI_SLAVE_IRQ_STATUS, 0x00000000},
1204 {WCD_SPI_SLAVE_IRQ_EN, 0x00000000},
1205 {WCD_SPI_SLAVE_IRQ_CLR, 0x00000000},
1206 {WCD_SPI_SLAVE_IRQ_FORCE, 0x00000000},
1207 {WCD_SPI_SLAVE_TX, 0x00000000},
1208 {WCD_SPI_SLAVE_TEST_BUS_DATA, 0x00000000},
1209 {WCD_SPI_SLAVE_TEST_BUS_CTRL, 0x00000000},
1210 {WCD_SPI_SLAVE_SW_RST_IRQ, 0x00000000},
1211 {WCD_SPI_SLAVE_CHAR_CFG, 0x00000000},
1212 {WCD_SPI_SLAVE_CHAR_DATA_MOSI, 0x00000000},
1213 {WCD_SPI_SLAVE_CHAR_DATA_CS_N, 0x00000000},
1214 {WCD_SPI_SLAVE_CHAR_DATA_MISO, 0x00000000},
1215 {WCD_SPI_SLAVE_TRNS_BYTE_CNT, 0x00000000},
1216 {WCD_SPI_SLAVE_TRNS_LEN, 0x00000000},
1217 {WCD_SPI_SLAVE_FIFO_LEVEL, 0x00000000},
1218 {WCD_SPI_SLAVE_GENERICS, 0x80000000},
1219 {WCD_SPI_SLAVE_EXT_BASE_ADDR, 0x00000000},
1222 static bool wcd_spi_is_volatile_reg(struct device *dev,
1226 case WCD_SPI_SLAVE_SANITY:
1227 case WCD_SPI_SLAVE_STATUS:
1228 case WCD_SPI_SLAVE_IRQ_STATUS:
1229 case WCD_SPI_SLAVE_TX:
1230 case WCD_SPI_SLAVE_SW_RST_IRQ:
1231 case WCD_SPI_SLAVE_TRNS_BYTE_CNT:
1232 case WCD_SPI_SLAVE_FIFO_LEVEL:
1233 case WCD_SPI_SLAVE_GENERICS:
1240 static bool wcd_spi_is_readable_reg(struct device *dev,
1244 case WCD_SPI_SLAVE_SW_RESET:
1245 case WCD_SPI_SLAVE_IRQ_CLR:
1246 case WCD_SPI_SLAVE_IRQ_FORCE:
1253 static struct regmap_config wcd_spi_regmap_cfg = {
1256 .cache_type = REGCACHE_RBTREE,
1257 .reg_defaults = wcd_spi_defaults,
1258 .num_reg_defaults = ARRAY_SIZE(wcd_spi_defaults),
1259 .max_register = WCD_SPI_MAX_REGISTER,
1260 .volatile_reg = wcd_spi_is_volatile_reg,
1261 .readable_reg = wcd_spi_is_readable_reg,
1264 static int wdsp_spi_init(struct device *dev, void *priv_data)
1266 struct spi_device *spi = to_spi_device(dev);
1269 ret = wcd_spi_init(spi);
1270 if (IS_ERR_VALUE(ret))
1271 dev_err(&spi->dev, "%s: Init failed, err = %d\n",
1276 static int wdsp_spi_deinit(struct device *dev, void *priv_data)
1278 struct spi_device *spi = to_spi_device(dev);
1279 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1282 * Deinit means the hardware is reset. Mark the cache
1283 * as dirty here, so init will sync the cache
1285 regcache_mark_dirty(wcd_spi->regmap);
1290 static struct wdsp_cmpnt_ops wdsp_spi_ops = {
1291 .init = wdsp_spi_init,
1292 .deinit = wdsp_spi_deinit,
1293 .event_handler = wdsp_spi_event_handler,
/*
 * wcd_spi_component_bind: component-framework bind callback. Registers
 * our component ops with the WDSP master, creates the regmap over the
 * custom SPI bus, sets up debugfs, initializes the reusable SPI
 * messages and pre-allocates the DMA-safe tx/rx buffers.
 * NOTE(review): the data parameter line, ret declaration, several
 * error-path lines (goto/return) and the final return are missing
 * from this extract.
 */
static int wcd_spi_component_bind(struct device *dev,
				  struct device *master,
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	wcd_spi->m_dev = master;
	wcd_spi->m_ops = data;

	if (wcd_spi->m_ops &&
	    wcd_spi->m_ops->register_cmpnt_ops)
		ret = wcd_spi->m_ops->register_cmpnt_ops(master, dev,
	dev_err(dev, "%s: register_cmpnt_ops failed, err = %d\n",

	/* byte widths regmap will hand to the bus read/write callbacks */
	wcd_spi->reg_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.reg_bits, 8);
	wcd_spi->val_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.val_bits, 8);

	wcd_spi->regmap = devm_regmap_init(&spi->dev, &wcd_spi_regmap_bus,
					   &spi->dev, &wcd_spi_regmap_cfg);
	if (IS_ERR(wcd_spi->regmap)) {
		ret = PTR_ERR(wcd_spi->regmap);
		dev_err(&spi->dev, "%s: Failed to allocate regmap, err = %d\n",

	/* debugfs failure is non-fatal */
	if (wcd_spi_debugfs_init(spi))
		dev_err(&spi->dev, "%s: Failed debugfs init\n", __func__);

	spi_message_init(&wcd_spi->msg1);
	spi_message_add_tail(&wcd_spi->xfer1, &wcd_spi->msg1);

	spi_message_init(&wcd_spi->msg2);
	spi_message_add_tail(&wcd_spi->xfer2[0], &wcd_spi->msg2);
	spi_message_add_tail(&wcd_spi->xfer2[1], &wcd_spi->msg2);

	/* Pre-allocate the buffers */
	wcd_spi->tx_buf = kzalloc(WCD_SPI_RW_MAX_BUF_SIZE,
				  GFP_KERNEL | GFP_DMA);
	if (!wcd_spi->tx_buf) {

	wcd_spi->rx_buf = kzalloc(WCD_SPI_RW_MAX_BUF_SIZE,
				  GFP_KERNEL | GFP_DMA);
	if (!wcd_spi->rx_buf) {
		kfree(wcd_spi->tx_buf);
		wcd_spi->tx_buf = NULL;
/*
 * wcd_spi_component_unbind: undo bind — drop the master references,
 * detach the reusable transfers from their messages and free the
 * pre-allocated buffers.
 * NOTE(review): the data parameter line and closing brace are missing
 * from this extract.
 */
static void wcd_spi_component_unbind(struct device *dev,
				     struct device *master,
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	wcd_spi->m_dev = NULL;
	wcd_spi->m_ops = NULL;

	spi_transfer_del(&wcd_spi->xfer1);
	spi_transfer_del(&wcd_spi->xfer2[0]);
	spi_transfer_del(&wcd_spi->xfer2[1]);

	kfree(wcd_spi->tx_buf);
	kfree(wcd_spi->rx_buf);
	wcd_spi->tx_buf = NULL;
	wcd_spi->rx_buf = NULL;
1380 static const struct component_ops wcd_spi_component_ops = {
1381 .bind = wcd_spi_component_bind,
1382 .unbind = wcd_spi_component_unbind,
/*
 * wcd_spi_probe: allocate driver state, read the mandatory
 * "qcom,mem-base-addr" DT property, initialize locks/work/completion
 * and register with the component framework.
 * NOTE(review): the allocation NULL check, component_add return
 * handling and the success/error return lines are missing from this
 * extract.
 */
static int wcd_spi_probe(struct spi_device *spi)
	struct wcd_spi_priv *wcd_spi;

	wcd_spi = devm_kzalloc(&spi->dev, sizeof(*wcd_spi),

	ret = of_property_read_u32(spi->dev.of_node,
				   "qcom,mem-base-addr",
				   &wcd_spi->mem_base_addr);
	if (IS_ERR_VALUE(ret)) {
		dev_err(&spi->dev, "%s: Missing %s DT entry",
			__func__, "qcom,mem-base-addr");

	"%s: mem_base_addr 0x%x\n", __func__, wcd_spi->mem_base_addr);

	mutex_init(&wcd_spi->clk_mutex);
	mutex_init(&wcd_spi->xfer_mutex);
	INIT_DELAYED_WORK(&wcd_spi->clk_dwork, wcd_spi_clk_work);
	init_completion(&wcd_spi->resume_comp);

	spi_set_drvdata(spi, wcd_spi);

	ret = component_add(&spi->dev, &wcd_spi_component_ops);
	dev_err(&spi->dev, "%s: component_add failed err = %d\n",
	goto err_component_add;

	/* error unwind: destroy locks and release drvdata */
	mutex_destroy(&wcd_spi->clk_mutex);
	mutex_destroy(&wcd_spi->xfer_mutex);
	devm_kfree(&spi->dev, wcd_spi);
	spi_set_drvdata(spi, NULL);
/*
 * wcd_spi_remove: unregister from the component framework and tear
 * down locks and drvdata.
 * NOTE(review): opening brace and the return statement are missing
 * from this extract.
 */
static int wcd_spi_remove(struct spi_device *spi)
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	component_del(&spi->dev, &wcd_spi_component_ops);

	mutex_destroy(&wcd_spi->clk_mutex);
	mutex_destroy(&wcd_spi->xfer_mutex);

	devm_kfree(&spi->dev, wcd_spi);
	spi_set_drvdata(spi, NULL);
/*
 * wcd_spi_suspend: PM suspend callback. Refuses to suspend while
 * clock votes are outstanding; otherwise notifies the WDSP master
 * (dropping clk_mutex around the call) and sets the suspended bit only
 * if the master also agreed.
 * NOTE(review): the rc declaration, branch braces/else lines and the
 * return are missing from this extract.
 */
static int wcd_spi_suspend(struct device *dev)
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	if (!wcd_spi_can_suspend(wcd_spi)) {

	/*
	 * If we are here, it is okay to let the suspend go
	 * through for this driver. But, still need to notify
	 * the master to make sure all other components can suspend
	 */
	if (wcd_spi->m_dev && wcd_spi->m_ops &&
	    wcd_spi->m_ops->suspend) {
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		rc = wcd_spi->m_ops->suspend(wcd_spi->m_dev);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);

	set_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);

	dev_dbg(&spi->dev, "%s: cannot suspend, err = %d\n",

	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
1484 static int wcd_spi_resume(struct device *dev)
1486 struct spi_device *spi = to_spi_device(dev);
1487 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1489 WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
1490 clear_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
1491 complete(&wcd_spi->resume_comp);
1492 WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
1497 static const struct dev_pm_ops wcd_spi_pm_ops = {
1498 .suspend = wcd_spi_suspend,
1499 .resume = wcd_spi_resume,
1503 static const struct of_device_id wcd_spi_of_match[] = {
1504 { .compatible = "qcom,wcd-spi-v2", },
1507 MODULE_DEVICE_TABLE(of, wcd_spi_of_match);
1509 static struct spi_driver wcd_spi_driver = {
1511 .name = "wcd-spi-v2",
1512 .of_match_table = wcd_spi_of_match,
1514 .pm = &wcd_spi_pm_ops,
1517 .probe = wcd_spi_probe,
1518 .remove = wcd_spi_remove,
1521 module_spi_driver(wcd_spi_driver);
1523 MODULE_DESCRIPTION("WCD SPI driver");
1524 MODULE_LICENSE("GPL v2");