// SPDX-License-Identifier: GPL-2.0-only
/*
 * Designware SPI core controller driver (refer pxa2xx_spi.c)
 *
 * Copyright (c) 2009, Intel Corporation.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/string.h>
#include <linux/of.h>

#include "spi-dw.h"

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif

/* Slave spi_device related */
struct chip_data {
	u32 cr0;
	u32 rx_sample_dly;	/* RX sample delay */
};

#ifdef CONFIG_DEBUG_FS

#define DW_SPI_DBGFS_REG(_name, _off)	\
{					\
	.name = _name,			\
	.offset = _off,			\
}

static const struct debugfs_reg32 dw_spi_dbgfs_regs[] = {
	DW_SPI_DBGFS_REG("CTRLR0", DW_SPI_CTRLR0),
	DW_SPI_DBGFS_REG("CTRLR1", DW_SPI_CTRLR1),
	DW_SPI_DBGFS_REG("SSIENR", DW_SPI_SSIENR),
	DW_SPI_DBGFS_REG("SER", DW_SPI_SER),
	DW_SPI_DBGFS_REG("BAUDR", DW_SPI_BAUDR),
	DW_SPI_DBGFS_REG("TXFTLR", DW_SPI_TXFTLR),
	DW_SPI_DBGFS_REG("RXFTLR", DW_SPI_RXFTLR),
	DW_SPI_DBGFS_REG("TXFLR", DW_SPI_TXFLR),
	DW_SPI_DBGFS_REG("RXFLR", DW_SPI_RXFLR),
	DW_SPI_DBGFS_REG("SR", DW_SPI_SR),
	DW_SPI_DBGFS_REG("IMR", DW_SPI_IMR),
	DW_SPI_DBGFS_REG("ISR", DW_SPI_ISR),
	DW_SPI_DBGFS_REG("DMACR", DW_SPI_DMACR),
	DW_SPI_DBGFS_REG("DMATDLR", DW_SPI_DMATDLR),
	DW_SPI_DBGFS_REG("DMARDLR", DW_SPI_DMARDLR),
	DW_SPI_DBGFS_REG("RX_SAMPLE_DLY", DW_SPI_RX_SAMPLE_DLY),
};

static int dw_spi_debugfs_init(struct dw_spi *dws)
{
	char name[32];

	snprintf(name, 32, "dw_spi%d", dws->master->bus_num);
	dws->debugfs = debugfs_create_dir(name, NULL);
	if (!dws->debugfs)
		return -ENOMEM;

	dws->regset.regs = dw_spi_dbgfs_regs;
	dws->regset.nregs = ARRAY_SIZE(dw_spi_dbgfs_regs);
	dws->regset.base = dws->regs;
	debugfs_create_regset32("registers", 0400, dws->debugfs, &dws->regset);

	return 0;
}

static void dw_spi_debugfs_remove(struct dw_spi *dws)
{
	debugfs_remove_recursive(dws->debugfs);
}

#else
static inline int dw_spi_debugfs_init(struct dw_spi *dws)
{
	return 0;
}

static inline void dw_spi_debugfs_remove(struct dw_spi *dws)
{
}
#endif /* CONFIG_DEBUG_FS */

void dw_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
	bool cs_high = !!(spi->mode & SPI_CS_HIGH);

	/*
	 * DW SPI controller demands any native CS being set in order to
	 * proceed with data transfer. So in order to activate the SPI
	 * communications we must set a corresponding bit in the Slave
	 * Enable register no matter whether the SPI core is configured to
	 * support active-high or active-low CS level.
	 */
	if (cs_high == enable)
		dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
	else
		dw_writel(dws, DW_SPI_SER, 0);
}
EXPORT_SYMBOL_GPL(dw_spi_set_cs);

/* Return the max entries we can fill into tx fifo */
static inline u32 tx_max(struct dw_spi *dws)
{
	u32 tx_room, rxtx_gap;

	tx_room = dws->fifo_len - dw_readl(dws, DW_SPI_TXFLR);

	/*
	 * Another concern is the tx/rx mismatch: we thought of using
	 * (dws->fifo_len - rxflr - txflr) as the maximum value for tx, but
	 * it doesn't cover the data that is outside the tx/rx FIFOs and
	 * inside the shift registers. So the limit is tracked from the
	 * software point of view instead.
	 */
	rxtx_gap = dws->fifo_len - (dws->rx_len - dws->tx_len);

	return min3((u32)dws->tx_len, tx_room, rxtx_gap);
}
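
/*
 * Example: with fifo_len = 16, if 10 frames have been pushed out but only 4
 * have been read back, rx_len - tx_len evaluates to 6 frames still in flight,
 * so at most 16 - 6 = 10 further entries may be queued regardless of the
 * Tx FIFO room reported by TXFLR.
 */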

/* Return the max entries we should read out of rx fifo */
static inline u32 rx_max(struct dw_spi *dws)
{
	return min_t(u32, dws->rx_len, dw_readl(dws, DW_SPI_RXFLR));
}
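
/*
 * Fill the Tx FIFO with up to tx_max() frames, pushing zero frames if no Tx
 * buffer was supplied for this transfer.
 */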
static void dw_writer(struct dw_spi *dws)
{
	u32 max = tx_max(dws);
	u16 txw = 0;

	while (max--) {
		if (dws->tx) {
			if (dws->n_bytes == 1)
				txw = *(u8 *)(dws->tx);
			else
				txw = *(u16 *)(dws->tx);

			dws->tx += dws->n_bytes;
		}
		dw_write_io_reg(dws, DW_SPI_DR, txw);
		--dws->tx_len;
	}
}

static void dw_reader(struct dw_spi *dws)
{
	u32 max = rx_max(dws);
	u16 rxw;

	while (max--) {
		rxw = dw_read_io_reg(dws, DW_SPI_DR);
		if (dws->rx) {
			if (dws->n_bytes == 1)
				*(u8 *)(dws->rx) = rxw;
			else
				*(u16 *)(dws->rx) = rxw;

			dws->rx += dws->n_bytes;
		}
		--dws->rx_len;
	}
}
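
/*
 * Check the Rx/Tx FIFO error flags and reset the controller if any of them is
 * set. With @raw the unmasked (RISR) status is read, which the poll-based and
 * memory-operation paths rely on since they run with all interrupts masked.
 */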
int dw_spi_check_status(struct dw_spi *dws, bool raw)
{
	u32 irq_status;
	int ret = 0;

	if (raw)
		irq_status = dw_readl(dws, DW_SPI_RISR);
	else
		irq_status = dw_readl(dws, DW_SPI_ISR);

	if (irq_status & SPI_INT_RXOI) {
		dev_err(&dws->master->dev, "RX FIFO overflow detected\n");
		ret = -EIO;
	}

	if (irq_status & SPI_INT_RXUI) {
		dev_err(&dws->master->dev, "RX FIFO underflow detected\n");
		ret = -EIO;
	}

	if (irq_status & SPI_INT_TXOI) {
		dev_err(&dws->master->dev, "TX FIFO overflow detected\n");
		ret = -EIO;
	}

	/* Generically handle the erroneous situation */
	if (ret) {
		spi_reset_chip(dws);
		if (dws->master->cur_msg)
			dws->master->cur_msg->status = ret;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dw_spi_check_status);

static irqreturn_t dw_spi_transfer_handler(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	if (dw_spi_check_status(dws, false)) {
		spi_finalize_current_transfer(dws->master);
		return IRQ_HANDLED;
	}

	/*
	 * Read data from the Rx FIFO every time we've got a chance executing
	 * this method. If there is nothing left to receive, terminate the
	 * procedure. Otherwise adjust the Rx FIFO Threshold level if it's a
	 * final stage of the transfer. By doing so we'll get the next IRQ
	 * right when the leftover incoming data is received.
	 */
	dw_reader(dws);
	if (!dws->rx_len) {
		spi_mask_intr(dws, 0xff);
		spi_finalize_current_transfer(dws->master);
	} else if (dws->rx_len <= dw_readl(dws, DW_SPI_RXFTLR)) {
		dw_writel(dws, DW_SPI_RXFTLR, dws->rx_len - 1);
	}

	/*
	 * Send data out if Tx FIFO Empty IRQ is received. The IRQ will be
	 * disabled after the data transmission is finished so as not to
	 * have the TXE IRQ flood at the final stage of the transfer.
	 */
	if (irq_status & SPI_INT_TXEI) {
		dw_writer(dws);
		if (!dws->tx_len)
			spi_mask_intr(dws, SPI_INT_TXEI);
	}

	return IRQ_HANDLED;
}
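
/*
 * Since the IRQ line may be shared, bail out with IRQ_NONE when none of the
 * status bits is set, and simply mask everything off if an interrupt arrives
 * while no message is in progress.
 */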
static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
	struct spi_controller *master = dev_id;
	struct dw_spi *dws = spi_controller_get_devdata(master);
	u16 irq_status = dw_readl(dws, DW_SPI_ISR) & 0x3f;

	if (!irq_status)
		return IRQ_NONE;

	if (!master->cur_msg) {
		spi_mask_intr(dws, 0xff);
		return IRQ_HANDLED;
	}

	return dws->transfer_handler(dws);
}

static u32 dw_spi_prepare_cr0(struct dw_spi *dws, struct spi_device *spi)
{
	u32 cr0 = 0;

	if (!(dws->caps & DW_SPI_CAP_DWC_SSI)) {
		/* CTRLR0[ 5: 4] Frame Format */
		cr0 |= SSI_MOTO_SPI << SPI_FRF_OFFSET;

		/*
		 * SPI mode (SCPOL|SCPH)
		 * CTRLR0[ 6] Serial Clock Phase
		 * CTRLR0[ 7] Serial Clock Polarity
		 */
		cr0 |= ((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET;
		cr0 |= ((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET;

		/* CTRLR0[11] Shift Register Loop */
		cr0 |= ((spi->mode & SPI_LOOP) ? 1 : 0) << SPI_SRL_OFFSET;
	} else {
		/* CTRLR0[ 7: 6] Frame Format */
		cr0 |= SSI_MOTO_SPI << DWC_SSI_CTRLR0_FRF_OFFSET;

		/*
		 * SPI mode (SCPOL|SCPH)
		 * CTRLR0[ 8] Serial Clock Phase
		 * CTRLR0[ 9] Serial Clock Polarity
		 */
		cr0 |= ((spi->mode & SPI_CPOL) ? 1 : 0) << DWC_SSI_CTRLR0_SCPOL_OFFSET;
		cr0 |= ((spi->mode & SPI_CPHA) ? 1 : 0) << DWC_SSI_CTRLR0_SCPH_OFFSET;

		/* CTRLR0[13] Shift Register Loop */
		cr0 |= ((spi->mode & SPI_LOOP) ? 1 : 0) << DWC_SSI_CTRLR0_SRL_OFFSET;

		if (dws->caps & DW_SPI_CAP_KEEMBAY_MST)
			cr0 |= DWC_SSI_CTRLR0_KEEMBAY_MST;
	}

	return cr0;
}

void dw_spi_update_config(struct dw_spi *dws, struct spi_device *spi,
			  struct dw_spi_cfg *cfg)
{
	struct chip_data *chip = spi_get_ctldata(spi);
	u32 cr0 = chip->cr0;
	u32 speed_hz;
	u16 clk_div;

	/* CTRLR0[ 4/3: 0] Data Frame Size */
	cr0 |= (cfg->dfs - 1);

	if (!(dws->caps & DW_SPI_CAP_DWC_SSI))
		/* CTRLR0[ 9:8] Transfer Mode */
		cr0 |= cfg->tmode << SPI_TMOD_OFFSET;
	else
		/* CTRLR0[11:10] Transfer Mode */
		cr0 |= cfg->tmode << DWC_SSI_CTRLR0_TMOD_OFFSET;

	dw_writel(dws, DW_SPI_CTRLR0, cr0);

	if (cfg->tmode == SPI_TMOD_EPROMREAD || cfg->tmode == SPI_TMOD_RO)
		dw_writel(dws, DW_SPI_CTRLR1, cfg->ndf ? cfg->ndf - 1 : 0);

	/* Note the DW APB SSI clock divider doesn't support odd numbers */
	clk_div = (DIV_ROUND_UP(dws->max_freq, cfg->freq) + 1) & 0xfffe;
	speed_hz = dws->max_freq / clk_div;
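
	/*
	 * E.g. with a 100 MHz reference clock and a 33 MHz request:
	 * DIV_ROUND_UP(100000000, 33000000) = 4, (4 + 1) & 0xfffe = 4,
	 * so the bus ends up clocked at 100 MHz / 4 = 25 MHz.
	 */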

	if (dws->current_freq != speed_hz) {
		spi_set_clk(dws, clk_div);
		dws->current_freq = speed_hz;
	}

	/* Update RX sample delay if required */
	if (dws->cur_rx_sample_dly != chip->rx_sample_dly) {
		dw_writel(dws, DW_SPI_RX_SAMPLE_DLY, chip->rx_sample_dly);
		dws->cur_rx_sample_dly = chip->rx_sample_dly;
	}
}
EXPORT_SYMBOL_GPL(dw_spi_update_config);

static void dw_spi_irq_setup(struct dw_spi *dws)
{
	u16 level;
	u8 imask;

	/*
	 * Originally Tx and Rx data lengths match. Rx FIFO Threshold level
	 * will be adjusted at the final stage of the IRQ-based SPI transfer
	 * execution so as not to lose the leftover of the incoming data.
	 */
	level = min_t(u16, dws->fifo_len / 2, dws->tx_len);
	dw_writel(dws, DW_SPI_TXFTLR, level);
	dw_writel(dws, DW_SPI_RXFTLR, level - 1);

	imask = SPI_INT_TXEI | SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI |
		SPI_INT_RXFI;
	spi_umask_intr(dws, imask);

	dws->transfer_handler = dw_spi_transfer_handler;
}

/*
 * The iterative procedure of the poll-based transfer is simple: write as much
 * as possible to the Tx FIFO, wait until the pending receive data is ready to
 * be read, read it from the Rx FIFO and check whether the performed procedure
 * has been successful.
 *
 * Note that this method, just like the IRQ-based transfer, won't work well
 * for SPI devices connected to the controller with native CS due to the
 * automatic CS assertion/de-assertion.
 */
static int dw_spi_poll_transfer(struct dw_spi *dws,
				struct spi_transfer *transfer)
{
	struct spi_delay delay;
	u16 nbits;
	int ret;

	delay.unit = SPI_DELAY_UNIT_SCK;
	nbits = dws->n_bytes * BITS_PER_BYTE;

	do {
		dw_writer(dws);

		delay.value = nbits * (dws->rx_len - dws->tx_len);
		spi_delay_exec(&delay, transfer);

		dw_reader(dws);

		ret = dw_spi_check_status(dws, true);
		if (ret)
			return ret;
	} while (dws->rx_len);

	return 0;
}
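
/*
 * Note: returning 1 from transfer_one() tells the SPI core the transfer is
 * still in flight (IRQ- or DMA-driven) and that completion will be signalled
 * later via spi_finalize_current_transfer().
 */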
static int dw_spi_transfer_one(struct spi_controller *master,
			       struct spi_device *spi,
			       struct spi_transfer *transfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);
	struct dw_spi_cfg cfg = {
		.tmode = SPI_TMOD_TR,
		.dfs = transfer->bits_per_word,
		.freq = transfer->speed_hz,
	};
	int ret;

	dws->dma_mapped = 0;
	dws->n_bytes = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);
	dws->tx = (void *)transfer->tx_buf;
	dws->tx_len = transfer->len / dws->n_bytes;
	dws->rx = transfer->rx_buf;
	dws->rx_len = dws->tx_len;

	/* Ensure the data above is visible for all CPUs */
	smp_mb();

	spi_enable_chip(dws, 0);

	dw_spi_update_config(dws, spi, &cfg);

	transfer->effective_speed_hz = dws->current_freq;

	/* Check if current transfer is a DMA transaction */
	if (master->can_dma && master->can_dma(master, spi, transfer))
		dws->dma_mapped = master->cur_msg_mapped;

	/* For poll mode just disable all interrupts */
	spi_mask_intr(dws, 0xff);

	if (dws->dma_mapped) {
		ret = dws->dma_ops->dma_setup(dws, transfer);
		if (ret)
			return ret;
	}

	spi_enable_chip(dws, 1);

	if (dws->dma_mapped)
		return dws->dma_ops->dma_transfer(dws, transfer);
	else if (dws->irq == IRQ_NOTCONNECTED)
		return dw_spi_poll_transfer(dws, transfer);

	dw_spi_irq_setup(dws);

	return 1;
}

static void dw_spi_handle_err(struct spi_controller *master,
			      struct spi_message *msg)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);

	if (dws->dma_mapped)
		dws->dma_ops->dma_stop(dws);

	spi_reset_chip(dws);
}

static int dw_spi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	if (op->data.dir == SPI_MEM_DATA_IN)
		op->data.nbytes = clamp_val(op->data.nbytes, 0, SPI_NDF_MASK + 1);

	return 0;
}

static bool dw_spi_supports_mem_op(struct spi_mem *mem,
				   const struct spi_mem_op *op)
{
	if (op->data.buswidth > 1 || op->addr.buswidth > 1 ||
	    op->dummy.buswidth > 1 || op->cmd.buswidth > 1)
		return false;

	return spi_mem_default_supports_op(mem, op);
}

static int dw_spi_init_mem_buf(struct dw_spi *dws, const struct spi_mem_op *op)
{
	unsigned int i, j, len;
	u8 *out;

	/*
	 * Calculate the total length of the EEPROM command transfer and
	 * either use the pre-allocated buffer or create a temporary one.
	 */
	len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
	if (op->data.dir == SPI_MEM_DATA_OUT)
		len += op->data.nbytes;

	if (len <= SPI_BUF_SIZE) {
		out = dws->buf;
	} else {
		out = kzalloc(len, GFP_KERNEL);
		if (!out)
			return -ENOMEM;
	}

	/*
	 * Collect the operation code, address and dummy bytes into a single
	 * buffer. If it's a transfer with data to be sent, also copy it into
	 * that buffer in order to speed the data transmission up.
	 */
	for (i = 0; i < op->cmd.nbytes; ++i)
		out[i] = SPI_GET_BYTE(op->cmd.opcode, op->cmd.nbytes - i - 1);
	for (j = 0; j < op->addr.nbytes; ++i, ++j)
		out[i] = SPI_GET_BYTE(op->addr.val, op->addr.nbytes - j - 1);
	for (j = 0; j < op->dummy.nbytes; ++i, ++j)
		out[i] = 0x0;

	if (op->data.dir == SPI_MEM_DATA_OUT)
		memcpy(&out[i], op->data.buf.out, op->data.nbytes);

	dws->n_bytes = 1;
	dws->tx = out;
	dws->tx_len = len;
	if (op->data.dir == SPI_MEM_DATA_IN) {
		dws->rx = op->data.buf.in;
		dws->rx_len = op->data.nbytes;
	} else {
		dws->rx = NULL;
		dws->rx_len = 0;
	}

	return 0;
}

static void dw_spi_free_mem_buf(struct dw_spi *dws)
{
	if (dws->tx != dws->buf)
		kfree(dws->tx);
}

static int dw_spi_write_then_read(struct dw_spi *dws, struct spi_device *spi)
{
	u32 room, entries, sts;
	unsigned int len;
	u8 *buf;

	/*
	 * At the initial stage we just pre-fill the Tx FIFO with no rush,
	 * since the native CS hasn't been enabled yet and the automatic data
	 * transmission won't start until we do that.
	 */
	len = min(dws->fifo_len, dws->tx_len);
	buf = dws->tx;
	while (len--)
		dw_write_io_reg(dws, DW_SPI_DR, *buf++);

	/*
	 * After setting any bit in the SER register the transmission will
	 * start automatically. We have to keep up with that procedure
	 * otherwise the CS de-assertion will happen whereupon the memory
	 * operation will be pre-terminated.
	 */
	len = dws->tx_len - ((void *)buf - dws->tx);
	dw_spi_set_cs(spi, false);
	while (len) {
		entries = readl_relaxed(dws->regs + DW_SPI_TXFLR);
		if (!entries) {
			dev_err(&dws->master->dev, "CS de-assertion on Tx\n");
			return -EIO;
		}
		room = min(dws->fifo_len - entries, len);
		for (; room; --room, --len)
			dw_write_io_reg(dws, DW_SPI_DR, *buf++);
	}

	/*
	 * Data fetching will start automatically if the EEPROM-read mode is
	 * activated. We have to keep up with the incoming data pace to
	 * prevent an Rx FIFO overflow causing the inbound data loss.
	 */
	len = dws->rx_len;
	buf = dws->rx;
	while (len) {
		entries = readl_relaxed(dws->regs + DW_SPI_RXFLR);
		if (!entries) {
			sts = readl_relaxed(dws->regs + DW_SPI_RISR);
			if (sts & SPI_INT_RXOI) {
				dev_err(&dws->master->dev, "FIFO overflow on Rx\n");
				return -EIO;
			}
			continue;
		}
		entries = min(entries, len);
		for (; entries; --entries, --len)
			*buf++ = dw_read_io_reg(dws, DW_SPI_DR);
	}

	return 0;
}

static inline bool dw_spi_ctlr_busy(struct dw_spi *dws)
{
	return dw_readl(dws, DW_SPI_SR) & SR_BUSY;
}

static int dw_spi_wait_mem_op_done(struct dw_spi *dws)
{
	int retry = SPI_WAIT_RETRIES;
	struct spi_delay delay;
	unsigned long ns, us;
	u32 nents;

	nents = dw_readl(dws, DW_SPI_TXFLR);
	ns = NSEC_PER_SEC / dws->current_freq * nents;
	ns *= dws->n_bytes * BITS_PER_BYTE;
	if (ns <= NSEC_PER_USEC) {
		delay.unit = SPI_DELAY_UNIT_NSECS;
		delay.value = ns;
	} else {
		us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
		delay.unit = SPI_DELAY_UNIT_USECS;
		delay.value = clamp_val(us, 0, USHRT_MAX);
	}
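
	/*
	 * For example, 4 leftover entries of 1 byte each at 25 MHz give
	 * 40 ns * 4 * 8 = 1280 ns, i.e. a 2 us poll period after rounding up.
	 */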

	while (dw_spi_ctlr_busy(dws) && retry--)
		spi_delay_exec(&delay, NULL);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Mem op hung up\n");
		return -EIO;
	}

	return 0;
}

static void dw_spi_stop_mem_op(struct dw_spi *dws, struct spi_device *spi)
{
	spi_enable_chip(dws, 0);
	dw_spi_set_cs(spi, true);
	spi_enable_chip(dws, 1);
}

/*
 * The SPI memory operation implementation below is the best choice for
 * devices which are selected by the native chip-select lane. It's
 * specifically developed to work around the problem with the automatic
 * chip-select lane toggle when there is no data in the Tx FIFO buffer.
 * Luckily the current SPI-mem core calls the exec_op() callback only if a
 * GPIO-based CS is unavailable.
 */
static int dw_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct dw_spi *dws = spi_controller_get_devdata(mem->spi->controller);
	struct dw_spi_cfg cfg;
	unsigned long flags;
	int ret;

	/*
	 * Collect the outbound data into a single buffer to speed the
	 * transmission up at least on the initial stage.
	 */
	ret = dw_spi_init_mem_buf(dws, op);
	if (ret)
		return ret;

	/*
	 * DW SPI EEPROM-read mode is required only for the SPI memory Data-IN
	 * operation. Transmit-only mode is suitable for the rest of them.
	 */
	cfg.dfs = 8;
	cfg.freq = clamp(mem->spi->max_speed_hz, 0U, dws->max_mem_freq);
	if (op->data.dir == SPI_MEM_DATA_IN) {
		cfg.tmode = SPI_TMOD_EPROMREAD;
		cfg.ndf = op->data.nbytes;
	} else {
		cfg.tmode = SPI_TMOD_TO;
	}

	spi_enable_chip(dws, 0);

	dw_spi_update_config(dws, mem->spi, &cfg);

	spi_mask_intr(dws, 0xff);

	spi_enable_chip(dws, 1);

	/*
	 * The DW APB SSI controller has some very nasty peculiarities.
	 * First, originally (without any vendor-specific modifications) it
	 * doesn't provide a direct way to set and clear the native
	 * chip-select signal. Instead the controller asserts the CS lane if
	 * the Tx FIFO isn't empty and a transmission is going on, and
	 * automatically de-asserts it back to the high level if the Tx FIFO
	 * doesn't have anything to be pushed out. Due to that, multi-tasking
	 * or heavy IRQ activity might be fatal, since preemption of the
	 * transfer procedure may cause the Tx FIFO to get empty and a sudden
	 * CS de-assertion, which in the middle of the transfer will most
	 * likely cause data loss. Secondly, the EEPROM-read and Read-only DW
	 * SPI transfer modes imply the incoming data being automatically
	 * pulled into the Rx FIFO. So if the driver software is late in
	 * fetching the data from the FIFO before it overflows, the newly
	 * arrived data will be lost. In order to make sure the executed
	 * memory operations are CS-atomic and to prevent an Rx FIFO overflow,
	 * we have to disable the local interrupts and preemption for the
	 * duration of the subsequent IO operations.
	 *
	 * Note. In some circumstances disabling IRQs may not help to prevent
	 * the problems described above. The CS de-assertion and Rx FIFO
	 * overflow may still happen due to a relatively slow system bus or a
	 * CPU not working fast enough, so the write-then-read algo
	 * implemented here just won't keep up with the SPI bus data transfer.
	 * Such a situation is highly platform specific and is supposed to be
	 * fixed by manually restricting the SPI bus frequency using the
	 * dws->max_mem_freq parameter.
	 */
	local_irq_save(flags);
	preempt_disable();

	ret = dw_spi_write_then_read(dws, mem->spi);

	local_irq_restore(flags);
	preempt_enable();

	/*
	 * Wait for the operation to finish and check the controller status
	 * only if no run-time error has been detected. Otherwise waiting is
	 * pointless, and skipping the status check prevents an additional
	 * error message from being printed, since any HW error flag being
	 * set would be due to the error already detected on the data
	 * transfer.
	 */
	if (!ret) {
		ret = dw_spi_wait_mem_op_done(dws);
		if (!ret)
			ret = dw_spi_check_status(dws, true);
	}

	dw_spi_stop_mem_op(dws, mem->spi);

	dw_spi_free_mem_buf(dws);

	return ret;
}

/*
 * Initialize the default memory operations if a glue layer hasn't specified
 * custom ones. Direct mapping operations will be preserved anyway since the
 * DW SPI controller doesn't have an embedded dirmap interface. Note the
 * memory operations implemented in this driver are the best choice only for
 * the DW APB SSI controller with standard native CS functionality. If a
 * hardware vendor has fixed the automatic CS assertion/de-assertion
 * peculiarity, then it will be safer to use the normal SPI-messages-based
 * transfers implementation.
 */
static void dw_spi_init_mem_ops(struct dw_spi *dws)
{
	if (!dws->mem_ops.exec_op && !(dws->caps & DW_SPI_CAP_CS_OVERRIDE) &&
	    !dws->set_cs) {
		dws->mem_ops.adjust_op_size = dw_spi_adjust_mem_op_size;
		dws->mem_ops.supports_op = dw_spi_supports_mem_op;
		dws->mem_ops.exec_op = dw_spi_exec_mem_op;
		if (!dws->max_mem_freq)
			dws->max_mem_freq = dws->max_freq;
	}
}

/* This may be called twice for each spi dev */
static int dw_spi_setup(struct spi_device *spi)
{
	struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
	struct chip_data *chip;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (!chip) {
		u32 rx_sample_dly_ns;

		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
		spi_set_ctldata(spi, chip);

		/* Get specific / default rx-sample-delay */
		if (device_property_read_u32(&spi->dev,
					     "rx-sample-delay-ns",
					     &rx_sample_dly_ns) != 0)
			/* Use default controller value */
			rx_sample_dly_ns = dws->def_rx_sample_dly_ns;
		chip->rx_sample_dly = DIV_ROUND_CLOSEST(rx_sample_dly_ns,
							NSEC_PER_SEC /
							dws->max_freq);
	}

	/*
	 * Update CR0 data each time the setup callback is invoked since
	 * the device parameters could have been changed, for instance, by
	 * the MMC SPI driver or something else.
	 */
	chip->cr0 = dw_spi_prepare_cr0(dws, spi);

	return 0;
}

static void dw_spi_cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);

	kfree(chip);
	spi_set_ctldata(spi, NULL);
}

/* Restart the controller, disable all interrupts, clean rx fifo */
static void spi_hw_init(struct device *dev, struct dw_spi *dws)
{
	spi_reset_chip(dws);

	/*
	 * Try to detect the FIFO depth if not set by the interface driver;
	 * per the HW spec the depth can be anywhere from 2 to 256.
	 */
	if (!dws->fifo_len) {
		u32 fifo;

		for (fifo = 1; fifo < 256; fifo++) {
			dw_writel(dws, DW_SPI_TXFTLR, fifo);
			if (fifo != dw_readl(dws, DW_SPI_TXFTLR))
				break;
		}
		dw_writel(dws, DW_SPI_TXFTLR, 0);
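
		/*
		 * TXFTLR can only hold values below the FIFO depth, so the
		 * first threshold value that fails to read back equals the
		 * depth; if even 1 didn't stick, detection has failed and
		 * fifo_len is left at zero.
		 */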

		dws->fifo_len = (fifo == 1) ? 0 : fifo;
		dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
	}

	/* enable HW fixup for explicit CS deselect for Amazon's alpine chip */
	if (dws->caps & DW_SPI_CAP_CS_OVERRIDE)
		dw_writel(dws, DW_SPI_CS_OVERRIDE, 0xF);
}

int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
{
	struct spi_controller *master;
	int ret;

	if (!dws)
		return -EINVAL;

	master = spi_alloc_master(dev, 0);
	if (!master)
		return -ENOMEM;

	dws->master = master;
	dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);

	spi_controller_set_devdata(master, dws);

	/* Basic HW init */
	spi_hw_init(dev, dws);

	ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev),
			  master);
	if (ret < 0 && ret != -ENOTCONN) {
		dev_err(dev, "cannot get IRQ\n");
		goto err_free_master;
	}

	dw_spi_init_mem_ops(dws);

	master->use_gpio_descriptors = true;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
	master->bus_num = dws->bus_num;
	master->num_chipselect = dws->num_cs;
	master->setup = dw_spi_setup;
	master->cleanup = dw_spi_cleanup;
	if (dws->set_cs)
		master->set_cs = dws->set_cs;
	else
		master->set_cs = dw_spi_set_cs;
	master->transfer_one = dw_spi_transfer_one;
	master->handle_err = dw_spi_handle_err;
	master->mem_ops = &dws->mem_ops;
	master->max_speed_hz = dws->max_freq;
	master->dev.of_node = dev->of_node;
	master->dev.fwnode = dev->fwnode;
	master->flags = SPI_MASTER_GPIO_SS;
	master->auto_runtime_pm = true;

	/* Get default rx sample delay */
	device_property_read_u32(dev, "rx-sample-delay-ns",
				 &dws->def_rx_sample_dly_ns);

	if (dws->dma_ops && dws->dma_ops->dma_init) {
		ret = dws->dma_ops->dma_init(dev, dws);
		if (ret) {
			dev_warn(dev, "DMA init failed\n");
		} else {
			master->can_dma = dws->dma_ops->can_dma;
			master->flags |= SPI_CONTROLLER_MUST_TX;
		}
	}

	ret = spi_register_controller(master);
	if (ret) {
		dev_err(&master->dev, "problem registering spi master\n");
		goto err_dma_exit;
	}

	dw_spi_debugfs_init(dws);
	return 0;

err_dma_exit:
	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);
	spi_enable_chip(dws, 0);
	free_irq(dws->irq, master);
err_free_master:
	spi_controller_put(master);
	return ret;
}
EXPORT_SYMBOL_GPL(dw_spi_add_host);

void dw_spi_remove_host(struct dw_spi *dws)
{
	dw_spi_debugfs_remove(dws);

	spi_unregister_controller(dws->master);

	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);

	spi_shutdown_chip(dws);

	free_irq(dws->irq, dws->master);
}
EXPORT_SYMBOL_GPL(dw_spi_remove_host);

int dw_spi_suspend_host(struct dw_spi *dws)
{
	int ret;

	ret = spi_controller_suspend(dws->master);
	if (ret)
		return ret;

	spi_shutdown_chip(dws);
	return 0;
}
EXPORT_SYMBOL_GPL(dw_spi_suspend_host);

int dw_spi_resume_host(struct dw_spi *dws)
{
	spi_hw_init(&dws->master->dev, dws);
	return spi_controller_resume(dws->master);
}
EXPORT_SYMBOL_GPL(dw_spi_resume_host);

MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
MODULE_LICENSE("GPL v2");