2 * SPI driver for NVIDIA's Tegra114 SPI Controller.
4 * Copyright (c) 2013-2016, NVIDIA CORPORATION. All rights reserved.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 #include <linux/clk.h>
20 #include <linux/completion.h>
21 #include <linux/delay.h>
22 #include <linux/dmaengine.h>
23 #include <linux/dma-mapping.h>
24 #include <linux/dmapool.h>
25 #include <linux/err.h>
26 #include <linux/gpio.h>
27 #include <linux/init.h>
28 #include <linux/interrupt.h>
30 #include <linux/kernel.h>
31 #include <linux/kthread.h>
32 #include <linux/module.h>
33 #include <linux/platform_device.h>
34 #include <linux/pinctrl/consumer.h>
35 #include <linux/pm_runtime.h>
37 #include <linux/of_device.h>
38 #include <linux/of_gpio.h>
39 #include <linux/spi/spi.h>
40 #include <linux/spi/spi-tegra.h>
41 #include <linux/clk/tegra.h>
42 #include <linux/tegra_prod.h>
44 #define SPI_COMMAND1 0x000
45 #define SPI_BIT_LENGTH(x) (((x) & 0x1f) << 0)
46 #define SPI_PACKED (1 << 5)
47 #define SPI_TX_EN (1 << 11)
48 #define SPI_RX_EN (1 << 12)
49 #define SPI_BOTH_EN_BYTE (1 << 13)
50 #define SPI_BOTH_EN_BIT (1 << 14)
51 #define SPI_LSBYTE_FE (1 << 15)
52 #define SPI_LSBIT_FE (1 << 16)
53 #define SPI_BIDIROE (1 << 17)
54 #define SPI_IDLE_SDA_DRIVE_LOW (0 << 18)
55 #define SPI_IDLE_SDA_DRIVE_HIGH (1 << 18)
56 #define SPI_IDLE_SDA_PULL_LOW (2 << 18)
57 #define SPI_IDLE_SDA_PULL_HIGH (3 << 18)
58 #define SPI_IDLE_SDA_MASK (3 << 18)
59 #define SPI_CS_SS_VAL (1 << 20)
60 #define SPI_CS_SW_HW (1 << 21)
61 /* SPI_CS_POL_INACTIVE bits are default high */
62 #define SPI_CS_POL_INACTIVE 22
63 #define SPI_CS_POL_INACTIVE_0 (1 << 22)
64 #define SPI_CS_POL_INACTIVE_1 (1 << 23)
65 #define SPI_CS_POL_INACTIVE_2 (1 << 24)
66 #define SPI_CS_POL_INACTIVE_3 (1 << 25)
67 #define SPI_CS_POL_INACTIVE_MASK (0xF << 22)
69 #define SPI_CS_SEL_0 (0 << 26)
70 #define SPI_CS_SEL_1 (1 << 26)
71 #define SPI_CS_SEL_2 (2 << 26)
72 #define SPI_CS_SEL_3 (3 << 26)
73 #define SPI_CS_SEL_MASK (3 << 26)
74 #define SPI_CS_SEL(x) (((x) & 0x3) << 26)
75 #define SPI_CONTROL_MODE_0 (0 << 28)
76 #define SPI_CONTROL_MODE_1 (1 << 28)
77 #define SPI_CONTROL_MODE_2 (2 << 28)
78 #define SPI_CONTROL_MODE_3 (3 << 28)
79 #define SPI_CONTROL_MODE_MASK (3 << 28)
80 #define SPI_MODE_SEL(x) (((x) & 0x3) << 28)
81 #define SPI_MODE_VAL(x) (((x) >> 28) & 0x3)
82 #define SPI_M_S (1 << 30)
83 #define SPI_PIO (1 << 31)
85 #define SPI_COMMAND2 0x004
86 #define SPI_TX_TAP_DELAY(x) (((x) & 0x3F) << 6)
87 #define SPI_RX_TAP_DELAY(x) (((x) & 0x3F) << 0)
89 #define SPI_CS_TIMING1 0x008
90 #define SPI_SETUP_HOLD(setup, hold) (((setup) << 4) | (hold))
91 #define SPI_CS_SETUP_HOLD(reg, cs, val) \
92 ((((val) & 0xFFu) << ((cs) * 8)) | \
93 ((reg) & ~(0xFFu << ((cs) * 8))))
95 #define SPI_CS_TIMING2 0x00C
96 #define CYCLES_BETWEEN_PACKETS_0(x) (((x) & 0x1F) << 0)
97 #define CS_ACTIVE_BETWEEN_PACKETS_0 (1 << 5)
98 #define CYCLES_BETWEEN_PACKETS_1(x) (((x) & 0x1F) << 8)
99 #define CS_ACTIVE_BETWEEN_PACKETS_1 (1 << 13)
100 #define CYCLES_BETWEEN_PACKETS_2(x) (((x) & 0x1F) << 16)
101 #define CS_ACTIVE_BETWEEN_PACKETS_2 (1 << 21)
102 #define CYCLES_BETWEEN_PACKETS_3(x) (((x) & 0x1F) << 24)
103 #define CS_ACTIVE_BETWEEN_PACKETS_3 (1 << 29)
104 #define SPI_SET_CS_ACTIVE_BETWEEN_PACKETS(reg, cs, val) \
105 (reg = (((val) & 0x1) << ((cs) * 8 + 5)) | \
106 ((reg) & ~(1 << ((cs) * 8 + 5))))
107 #define SPI_SET_CYCLES_BETWEEN_PACKETS(reg, cs, val) \
108 (reg = (((val) & 0x1F) << ((cs) * 8)) | \
109 ((reg) & ~(0x1F << ((cs) * 8))))
111 #define SPI_TRANS_STATUS 0x010
112 #define SPI_BLK_CNT(val) (((val) >> 0) & 0xFFFF)
113 #define SPI_SLV_IDLE_COUNT(val) (((val) >> 16) & 0xFF)
114 #define SPI_RDY (1 << 30)
116 #define SPI_FIFO_STATUS 0x014
117 #define SPI_RX_FIFO_EMPTY (1 << 0)
118 #define SPI_RX_FIFO_FULL (1 << 1)
119 #define SPI_TX_FIFO_EMPTY (1 << 2)
120 #define SPI_TX_FIFO_FULL (1 << 3)
121 #define SPI_RX_FIFO_UNF (1 << 4)
122 #define SPI_RX_FIFO_OVF (1 << 5)
123 #define SPI_TX_FIFO_UNF (1 << 6)
124 #define SPI_TX_FIFO_OVF (1 << 7)
125 #define SPI_ERR (1 << 8)
126 #define SPI_TX_FIFO_FLUSH (1 << 14)
127 #define SPI_RX_FIFO_FLUSH (1 << 15)
128 #define SPI_TX_FIFO_EMPTY_COUNT(val) (((val) >> 16) & 0x7F)
129 #define SPI_RX_FIFO_FULL_COUNT(val) (((val) >> 23) & 0x7F)
130 #define SPI_FRAME_END (1 << 30)
131 #define SPI_CS_INACTIVE (1 << 31)
133 #define SPI_FIFO_ERROR (SPI_RX_FIFO_UNF | \
134 SPI_RX_FIFO_OVF | SPI_TX_FIFO_UNF | SPI_TX_FIFO_OVF)
135 #define SPI_FIFO_EMPTY (SPI_RX_FIFO_EMPTY | SPI_TX_FIFO_EMPTY)
137 #define SPI_TX_DATA 0x018
138 #define SPI_RX_DATA 0x01C
140 #define SPI_DMA_CTL 0x020
141 #define SPI_TX_TRIG_1 (0 << 15)
142 #define SPI_TX_TRIG_4 (1 << 15)
143 #define SPI_TX_TRIG_8 (2 << 15)
144 #define SPI_TX_TRIG_16 (3 << 15)
145 #define SPI_TX_TRIG_MASK (3 << 15)
146 #define SPI_RX_TRIG_1 (0 << 19)
147 #define SPI_RX_TRIG_4 (1 << 19)
148 #define SPI_RX_TRIG_8 (2 << 19)
149 #define SPI_RX_TRIG_16 (3 << 19)
150 #define SPI_RX_TRIG_MASK (3 << 19)
151 #define SPI_IE_TX (1 << 28)
152 #define SPI_IE_RX (1 << 29)
153 #define SPI_CONT (1 << 30)
154 #define SPI_DMA (1 << 31)
155 #define SPI_DMA_EN SPI_DMA
157 #define SPI_DMA_BLK 0x024
158 #define SPI_DMA_BLK_SET(x) (((x) & 0xFFFF) << 0)
160 #define SPI_TX_FIFO 0x108
161 #define SPI_RX_FIFO 0x188
/* Interrupt-mask register (present only on chips with intr_mask_reg set).
 * The five maskable sources occupy bits 25..29.
 * NOTE(review): the original defined SPI_INTR_ALL_MASK as (0xfe << 25),
 * which overflows a 32-bit int (bits 26..32) and omits the RX_FIFO_UNF
 * bit at position 25; (0x1f << 25) covers exactly the five bits above. */
#define SPI_INTR_MASK 0x18c
#define SPI_INTR_RX_FIFO_UNF_MASK (1 << 25)
#define SPI_INTR_RX_FIFO_OVF_MASK (1 << 26)
#define SPI_INTR_TX_FIFO_UNF_MASK (1 << 27)
#define SPI_INTR_TX_FIFO_OVF_MASK (1 << 28)
#define SPI_INTR_RDY_MASK (1 << 29)
#define SPI_INTR_ALL_MASK (0x1f << 25)
171 #define MAX_CHIP_SELECT 4
172 #define SPI_FIFO_DEPTH 64
173 #define DATA_DIR_TX (1 << 0)
174 #define DATA_DIR_RX (1 << 1)
176 #define SPI_DMA_TIMEOUT (msecs_to_jiffies(10000))
177 #define DEFAULT_SPI_DMA_BUF_LEN (16*1024)
178 #define TX_FIFO_EMPTY_COUNT_MAX (0x40)
179 #define RX_FIFO_FULL_COUNT_ZERO (0)
180 #define MAX_HOLD_CYCLES 16
181 #define SPI_DEFAULT_SPEED 25000000
/* NOTE(review): duplicate, identical #defines of MAX_CHIP_SELECT and
 * SPI_FIFO_DEPTH removed here; both are already defined earlier in
 * this file (original lines 171-172). */
185 #define SPI_FIFO_FLUSH_MAX_DELAY 2000
187 #define SPI_SPEED_TAP_DELAY_MARGIN 35000000
188 #define SPI_DEFAULT_RX_TAP_DELAY 10
189 #define SPI_POLL_TIMEOUT 10000
190 #define SPI_AUTOSUSPEND_DELAY 100 /* 100ms */
/*
 * tegra_spi_chip_data - per-SoC quirk data for the controller.
 * NOTE(review): further members (e.g. intr_mask_reg, referenced later in
 * this file) sit on lines elided from this view; the closing brace is
 * also not visible here.
 */
192 struct tegra_spi_chip_data {
/* true when a default RX tap delay must be programmed into COMMAND2 */
194 bool set_rx_tap_delay;
/*
 * tegra_spi_data - per-controller driver state.
 * NOTE(review): many fields referenced later in this file (dev, base, phys,
 * lock, clk, status_reg, cur_pos, cur_rx_pos, cur_tx_pos, is_packed,
 * polling_mode, min_div, command1_reg, dma_control_reg, spi_cs_timing,
 * def_chip_select, runtime_pm, is_hw_based_cs, cur_speed, rx/tx_dma_buf)
 * are on lines elided from this view, as is the closing brace.
 */
197 struct tegra_spi_data {
199 struct spi_master *master;
/* Clocking / register-access policy */
206 bool clock_always_on;
208 bool boost_reg_access;
210 u32 spi_max_frequency;
/* State of the transfer currently in flight */
214 struct spi_device *cur_spi;
217 unsigned words_per_32bit;
218 unsigned bytes_per_word;
219 unsigned curr_dma_words;
220 unsigned cur_direction;
225 unsigned dma_buf_size;
226 unsigned max_buf_size;
227 bool is_curr_dma_xfer;
229 bool transfer_in_progress;
231 struct completion rx_dma_complete;
232 struct completion tx_dma_complete;
238 unsigned long packed_size;
/* Reset-time defaults for COMMAND1/COMMAND2 */
242 u32 def_command1_reg;
243 u32 def_command2_reg;
247 struct completion xfer_completion;
248 struct spi_transfer *curr_xfer;
/* DMA channels, bounce buffers and in-flight descriptors */
249 struct dma_chan *rx_dma_chan;
251 dma_addr_t rx_dma_phys;
252 struct dma_async_tx_descriptor *rx_dma_desc;
254 struct dma_chan *tx_dma_chan;
256 dma_addr_t tx_dma_phys;
257 struct dma_async_tx_descriptor *tx_dma_desc;
258 const struct tegra_spi_chip_data *chip_data;
/* Per-chip-select controller data and CS-GPIO bookkeeping */
259 struct tegra_spi_device_controller_data cdata[MAX_CHIP_SELECT];
260 bool cs_gpio_reqstd[MAX_CHIP_SELECT];
261 struct tegra_prod_list *prod_list;
262 struct pinctrl *pinctrl;
263 struct pinctrl_state *enable_interface;
267 static int tegra_spi_runtime_suspend(struct device *dev);
268 static int tegra_spi_runtime_resume(struct device *dev);
269 static int tegra_spi_status_poll(struct tegra_spi_data *tspi);
270 static int tegra_spi_set_clock_rate(struct tegra_spi_data *tspi, u32 speed);
/*
 * tegra_spi_readl - read a 32-bit controller register.
 * 'reg' is the byte offset from the memory-mapped base (parameter line
 * elided in this view).
 */
273 static inline unsigned long tegra_spi_readl(struct tegra_spi_data *tspi,
276 return readl(tspi->base + reg);
/*
 * tegra_spi_writel - write a 32-bit controller register.
 * Before a COMMAND1 write that sets SPI_PIO (i.e. starts a PIO transfer),
 * COMMAND1 is read back first so previously posted writes are flushed
 * before the transfer is kicked off.
 */
279 static inline void tegra_spi_writel(struct tegra_spi_data *tspi,
280 unsigned long val, unsigned long reg)
282 /* Read back register to make sure that register writes completed */
283 if ((reg == SPI_COMMAND1) && (val & SPI_PIO))
284 readl(tspi->base + SPI_COMMAND1);
286 writel(val, tspi->base + reg);
/*
 * tegra_spi_runtime_get - take a runtime-PM reference on the controller.
 * No-op when runtime PM is not used (the early-return body after the
 * !tspi->runtime_pm check is elided in this view).
 */
289 static inline int tegra_spi_runtime_get(struct tegra_spi_data *tspi)
291 if (!tspi->runtime_pm)
294 return pm_runtime_get_sync(tspi->dev);
/*
 * tegra_spi_runtime_put - drop the runtime-PM reference.
 * Marks the device busy first so the autosuspend timer restarts; no-op
 * when runtime PM is not used (early-return body elided in this view).
 */
297 static inline int tegra_spi_runtime_put(struct tegra_spi_data *tspi)
299 if (!tspi->runtime_pm)
302 pm_runtime_mark_last_busy(tspi->dev);
303 return pm_runtime_put_autosuspend(tspi->dev);
/*
 * tegra_spi_clear_status - acknowledge pending transfer/FIFO status.
 * TRANS_STATUS and the FIFO error bits are write-1-to-clear, so the
 * current value is written back to ack them.  The FIFO status read is
 * cached in tspi->status_reg for later PIO/error paths.
 */
306 static void tegra_spi_clear_status(struct tegra_spi_data *tspi)
310 /* Write 1 to clear status register */
311 val = tegra_spi_readl(tspi, SPI_TRANS_STATUS);
313 tegra_spi_writel(tspi, val, SPI_TRANS_STATUS);
315 /* Clear fifo status error if any */
316 tspi->status_reg = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
317 if (tspi->status_reg & SPI_ERR)
/* second argument (SPI_FIFO_STATUS) is on a line elided in this view */
318 tegra_spi_writel(tspi, SPI_ERR | SPI_FIFO_ERROR,
/*
 * tegra_spi_calculate_curr_xfer_param - size the next chunk of transfer 't'.
 * Derives bytes_per_word from bits_per_word, decides packed mode
 * (8/16 bpw and more than 3 bytes left), sets tspi->curr_dma_words and
 * returns the number of 32-bit FIFO words the chunk needs.
 * NOTE(review): the is_packed assignment and the else-branch braces are
 * on lines elided in this view.
 */
322 static unsigned tegra_spi_calculate_curr_xfer_param(
323 struct spi_device *spi, struct tegra_spi_data *tspi,
324 struct spi_transfer *t)
326 unsigned remain_len = t->len - tspi->cur_pos;
328 unsigned bits_per_word ;
330 unsigned total_fifo_words;
/* fall back to the device default when t->bits_per_word is 0 (ternary
 * else-operand elided in this view) */
332 bits_per_word = t->bits_per_word ? t->bits_per_word :
334 tspi->bytes_per_word = (bits_per_word - 1) / 8 + 1;
336 if ((bits_per_word == 8 || bits_per_word == 16) && (t->len > 3)) {
338 tspi->words_per_32bit = 32/bits_per_word;
341 tspi->words_per_32bit = 1;
344 if (tspi->is_packed) {
/* packed: words are byte-packed, round length up to whole FIFO words */
345 max_len = min(remain_len, tspi->max_buf_size);
346 tspi->curr_dma_words = max_len/tspi->bytes_per_word;
347 total_fifo_words = (max_len + 3)/4;
/* unpacked: one SPI word per 32-bit FIFO word */
349 max_word = (remain_len - 1) / tspi->bytes_per_word + 1;
350 max_word = min(max_word, tspi->max_buf_size/4);
351 tspi->curr_dma_words = max_word;
352 total_fifo_words = max_word;
354 return total_fifo_words;
/*
 * tegra_spi_fill_tx_fifo_from_client_txbuf - PIO-fill the TX FIFO.
 * Packed mode packs consecutive client bytes into each 32-bit FIFO word;
 * unpacked mode writes one SPI word (bytes_per_word bytes) per FIFO word.
 * Advances tspi->cur_tx_pos and returns the number of SPI words written.
 */
357 static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
358 struct tegra_spi_data *tspi, struct spi_transfer *t)
361 unsigned tx_empty_count;
362 unsigned max_n_32bit;
365 unsigned int written_words;
366 unsigned fifo_words_left;
367 u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
/* assumes the whole FIFO is writable here — TODO confirm against the
 * elided lines (no live FIFO-empty-count read is visible) */
369 tx_empty_count = TX_FIFO_EMPTY_COUNT_MAX;
371 if (tspi->is_packed) {
372 fifo_words_left = tx_empty_count * tspi->words_per_32bit;
373 written_words = min(fifo_words_left, tspi->curr_dma_words);
374 nbytes = written_words * tspi->bytes_per_word;
375 max_n_32bit = DIV_ROUND_UP(nbytes, 4);
376 for (count = 0; count < max_n_32bit; count++) {
/* little-endian byte packing into the 32-bit FIFO word */
378 for (i = 0; (i < 4) && nbytes; i++, nbytes--)
379 x |= (*tx_buf++) << (i*8);
380 tegra_spi_writel(tspi, x, SPI_TX_FIFO);
383 max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
384 written_words = max_n_32bit;
385 nbytes = written_words * tspi->bytes_per_word;
386 for (count = 0; count < max_n_32bit; count++) {
388 for (i = 0; nbytes && (i < tspi->bytes_per_word);
390 x |= ((*tx_buf++) << i*8);
391 tegra_spi_writel(tspi, x, SPI_TX_FIFO);
394 tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
395 return written_words;
/*
 * tegra_spi_read_rx_fifo_to_client_rxbuf - PIO-drain the RX FIFO.
 * Uses the RX full count cached in tspi->status_reg to know how many
 * 32-bit words to read, then unpacks them into the client buffer.
 * Advances tspi->cur_rx_pos and returns the number of SPI words read.
 */
398 static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf(
399 struct tegra_spi_data *tspi, struct spi_transfer *t)
401 unsigned rx_full_count;
402 unsigned long fifo_status;
405 unsigned int read_words = 0;
407 u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
409 fifo_status = tspi->status_reg;
410 rx_full_count = SPI_RX_FIFO_FULL_COUNT(fifo_status);
411 if (tspi->is_packed) {
412 len = tspi->curr_dma_words * tspi->bytes_per_word;
413 for (count = 0; count < rx_full_count; count++) {
414 x = tegra_spi_readl(tspi, SPI_RX_FIFO);
/* little-endian unpack, bounded by remaining chunk length */
415 for (i = 0; len && (i < 4); i++, len--)
416 *rx_buf++ = (x >> i*8) & 0xFF;
418 tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
419 read_words += tspi->curr_dma_words;
421 unsigned int bits_per_word;
423 bits_per_word = t->bits_per_word ? t->bits_per_word :
424 tspi->cur_spi->bits_per_word;
/* NOTE(review): bits_per_word is computed here but its visible use (a
 * word mask, presumably) is on lines elided in this view */
425 for (count = 0; count < rx_full_count; count++) {
426 x = tegra_spi_readl(tspi, SPI_RX_FIFO);
427 for (i = 0; (i < tspi->bytes_per_word); i++)
428 *rx_buf++ = (x >> (i*8)) & 0xFF;
430 tspi->cur_rx_pos += rx_full_count * tspi->bytes_per_word;
431 read_words += rx_full_count;
/*
 * tegra_spi_copy_client_txbuf_to_spi_txbuf - stage TX data for DMA.
 * Copies (packed) or repacks (unpacked, one SPI word per 32-bit slot)
 * the next chunk of the client buffer into the TX bounce buffer, with
 * CPU/device cache synchronisation around the copy.
 */
436 static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
437 struct tegra_spi_data *tspi, struct spi_transfer *t)
441 /* Make the dma buffer to read by cpu */
442 dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys,
443 tspi->dma_buf_size, DMA_TO_DEVICE);
445 if (tspi->is_packed) {
446 len = tspi->curr_dma_words * tspi->bytes_per_word;
/* packed mode indexes by overall cur_pos, not cur_tx_pos */
447 memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
451 u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
452 unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
455 for (count = 0; count < tspi->curr_dma_words; count++) {
457 for (i = 0; consume && (i < tspi->bytes_per_word);
459 x |= ((*tx_buf++) << i * 8);
460 tspi->tx_dma_buf[count] = x;
463 tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
465 /* Make the dma buffer to read by dma */
466 dma_sync_single_for_device(tspi->dev, tspi->tx_dma_phys,
467 tspi->dma_buf_size, DMA_TO_DEVICE);
/*
 * tegra_spi_copy_spi_rxbuf_to_client_rxbuf - unstage RX data after DMA.
 * Copies (packed) or unpacks (unpacked, one SPI word per 32-bit slot)
 * the RX bounce buffer into the client buffer, with CPU/device cache
 * synchronisation around the copy.  Advances tspi->cur_rx_pos.
 */
470 static void tegra_spi_copy_spi_rxbuf_to_client_rxbuf(
471 struct tegra_spi_data *tspi, struct spi_transfer *t)
475 /* Make the dma buffer to read by cpu */
476 dma_sync_single_for_cpu(tspi->dev, tspi->rx_dma_phys,
477 tspi->dma_buf_size, DMA_FROM_DEVICE);
479 if (tspi->is_packed) {
480 len = tspi->curr_dma_words * tspi->bytes_per_word;
481 memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
485 unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
487 unsigned int rx_mask, bits_per_word;
489 bits_per_word = t->bits_per_word ? t->bits_per_word :
490 tspi->cur_spi->bits_per_word;
/* NOTE(review): rx_mask is built here but the line applying it to 'x'
 * appears to be elided in this view */
491 rx_mask = (1ULL << bits_per_word) - 1;
492 for (count = 0; count < tspi->curr_dma_words; count++) {
493 x = tspi->rx_dma_buf[count];
495 for (i = 0; (i < tspi->bytes_per_word); i++)
496 *rx_buf++ = (x >> (i*8)) & 0xFF;
499 tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
501 /* Make the dma buffer to read by dma */
502 dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
503 tspi->dma_buf_size, DMA_FROM_DEVICE);
/*
 * tegra_spi_dma_complete - dmaengine completion callback.
 * 'args' is the &tspi->rx/tx_dma_complete completion registered when the
 * descriptor was prepared; waiters are woken here.
 */
506 static void tegra_spi_dma_complete(void *args)
508 struct completion *dma_complete = args;
510 complete(dma_complete);
/*
 * tegra_spi_start_tx_dma - queue a mem-to-device DMA of 'len' bytes from
 * the TX bounce buffer into the TX FIFO.  Re-initialises the completion
 * (legacy INIT_COMPLETION API), registers the completion callback, then
 * submits and issues the descriptor.  Error return on prep failure is on
 * a line elided in this view.
 */
513 static int tegra_spi_start_tx_dma(struct tegra_spi_data *tspi, int len)
515 INIT_COMPLETION(tspi->tx_dma_complete);
516 tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan,
517 tspi->tx_dma_phys, len, DMA_MEM_TO_DEV,
518 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
519 if (!tspi->tx_dma_desc) {
520 dev_err(tspi->dev, "Not able to get desc for Tx\n");
524 tspi->tx_dma_desc->callback = tegra_spi_dma_complete;
525 tspi->tx_dma_desc->callback_param = &tspi->tx_dma_complete;
527 dmaengine_submit(tspi->tx_dma_desc);
528 dma_async_issue_pending(tspi->tx_dma_chan);
/*
 * tegra_spi_start_rx_dma - queue a device-to-mem DMA of 'len' bytes from
 * the RX FIFO into the RX bounce buffer.  Mirror image of
 * tegra_spi_start_tx_dma(); error return on prep failure is on a line
 * elided in this view.
 */
532 static int tegra_spi_start_rx_dma(struct tegra_spi_data *tspi, int len)
534 INIT_COMPLETION(tspi->rx_dma_complete);
535 tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan,
536 tspi->rx_dma_phys, len, DMA_DEV_TO_MEM,
537 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
538 if (!tspi->rx_dma_desc) {
539 dev_err(tspi->dev, "Not able to get desc for Rx\n");
543 tspi->rx_dma_desc->callback = tegra_spi_dma_complete;
544 tspi->rx_dma_desc->callback_param = &tspi->rx_dma_complete;
546 dmaengine_submit(tspi->rx_dma_desc);
547 dma_async_issue_pending(tspi->rx_dma_chan);
/*
 * check_and_clear_fifo - ensure RX and TX FIFOs are empty before a
 * transfer starts.  If not empty, request a flush of both FIFOs and poll
 * FIFO_STATUS (up to SPI_FIFO_FLUSH_MAX_DELAY iterations; the polling
 * loop/delay lines are elided in this view), logging an error if the
 * FIFOs never drain.
 */
551 static int check_and_clear_fifo(struct tegra_spi_data *tspi)
553 unsigned long status = tspi->status_reg;
554 int cnt = SPI_FIFO_FLUSH_MAX_DELAY;
556 /* Make sure that Rx and Tx fifo are empty */
557 if ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) {
559 status |= (SPI_RX_FIFO_FLUSH | SPI_TX_FIFO_FLUSH);
560 tegra_spi_writel(tspi, status, SPI_FIFO_STATUS);
562 status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
563 if ((status & SPI_FIFO_EMPTY) == SPI_FIFO_EMPTY)
568 "Rx/Tx fifo are not empty status 0x%08lx\n", status);
/*
 * tegra_spi_start_dma_based_transfer - program and kick a DMA transfer.
 * Flushes stale FIFO contents, programs the block count, picks TX/RX
 * FIFO trigger levels from the chunk length, configures the dmaengine
 * slave channels, queues TX and/or RX DMA, optionally boosts the clock,
 * then enables TX/RX in COMMAND1 and writes SPI_DMA_CTL under the lock
 * to start the hardware.
 */
574 static int tegra_spi_start_dma_based_transfer(
575 struct tegra_spi_data *tspi, struct spi_transfer *t)
577 unsigned long val, cmd1;
580 int ret = 0, maxburst;
581 struct dma_slave_config dma_sconfig;
584 /* Make sure that Rx and Tx fifo are empty */
585 ret = check_and_clear_fifo(tspi);
589 val = SPI_DMA_BLK_SET(tspi->curr_dma_words - 1);
590 tegra_spi_writel(tspi, val, SPI_DMA_BLK);
/* packed transfers are sized in bytes, unpacked in 32-bit words */
593 len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word,
596 len = tspi->curr_dma_words * 4;
598 /* Set attention level based on length of transfer */
600 val |= SPI_TX_TRIG_1 | SPI_RX_TRIG_1;
602 } else if (((len) >> 4) & 0x1) {
603 val |= SPI_TX_TRIG_4 | SPI_RX_TRIG_4;
606 val |= SPI_TX_TRIG_8 | SPI_RX_TRIG_8;
/* chips with a separate INTR_MASK register do not use the IE bits here */
610 if (!tspi->chip_data->intr_mask_reg) {
611 if (!tspi->polling_mode) {
612 if (tspi->cur_direction & DATA_DIR_TX)
614 if (tspi->cur_direction & DATA_DIR_RX)
619 tegra_spi_writel(tspi, val, SPI_DMA_CTL);
620 tspi->dma_control_reg = val;
622 if (tspi->cur_direction & DATA_DIR_TX) {
623 dma_sconfig.dst_addr = tspi->phys + SPI_TX_FIFO;
624 dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
625 dma_sconfig.dst_maxburst = maxburst;
626 dmaengine_slave_config(tspi->tx_dma_chan, &dma_sconfig);
628 tegra_spi_copy_client_txbuf_to_spi_txbuf(tspi, t);
629 ret = tegra_spi_start_tx_dma(tspi, len);
632 "Starting tx dma failed, err %d\n", ret);
637 if (tspi->cur_direction & DATA_DIR_RX) {
638 /* Make the dma buffer to read by dma */
639 dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
640 tspi->dma_buf_size, DMA_FROM_DEVICE);
641 dma_sconfig.src_addr = tspi->phys + SPI_RX_FIFO;
642 dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
643 dma_sconfig.src_maxburst = maxburst;
644 dmaengine_slave_config(tspi->rx_dma_chan, &dma_sconfig);
646 ret = tegra_spi_start_rx_dma(tspi, len);
649 "Starting rx dma failed, err %d\n", ret);
/* unwind the already-queued TX DMA if RX setup failed */
650 if (tspi->cur_direction & DATA_DIR_TX)
651 dmaengine_terminate_all(tspi->tx_dma_chan);
/* optionally raise the bus clock for the actual transfer speed */
656 if (tspi->boost_reg_access) {
657 speed = t->speed_hz ? t->speed_hz :
658 tspi->cur_spi->max_speed_hz;
659 ret = tegra_spi_set_clock_rate(tspi, speed);
664 spin_lock_irqsave(&tspi->lock, flags);
665 cmd1 = tspi->command1_reg;
666 if (tspi->cur_direction & DATA_DIR_TX)
668 if (tspi->cur_direction & DATA_DIR_RX)
670 tegra_spi_writel(tspi, cmd1, SPI_COMMAND1);
672 tspi->is_curr_dma_xfer = true;
673 tspi->dma_control_reg = val;
675 tspi->transfer_in_progress = true;
/* final SPI_DMA_CTL write starts the transfer (DMA_EN set on an elided
 * line, presumably) */
677 tegra_spi_writel(tspi, val, SPI_DMA_CTL);
678 spin_unlock_irqrestore(&tspi->lock, flags);
/*
 * tegra_spi_start_cpu_based_transfer - program and kick a PIO transfer.
 * Flushes stale FIFO contents, pre-fills the TX FIFO when transmitting,
 * programs the block count, optionally enables interrupts and boosts the
 * clock, then writes COMMAND1 (with TX/RX enables and, presumably on an
 * elided line, SPI_PIO) under the lock to start the hardware.
 */
682 static int tegra_spi_start_cpu_based_transfer(
683 struct tegra_spi_data *tspi, struct spi_transfer *t)
691 ret = check_and_clear_fifo(tspi);
695 if (tspi->cur_direction & DATA_DIR_TX)
696 cur_words = tegra_spi_fill_tx_fifo_from_client_txbuf(tspi, t);
698 cur_words = tspi->curr_dma_words;
700 val = SPI_DMA_BLK_SET(cur_words - 1);
701 tegra_spi_writel(tspi, val, SPI_DMA_BLK);
/* chips with a separate INTR_MASK register do not use the IE bits here */
705 if (!tspi->chip_data->intr_mask_reg) {
706 if (!tspi->polling_mode) {
707 if (tspi->cur_direction & DATA_DIR_TX)
709 if (tspi->cur_direction & DATA_DIR_RX)
712 tegra_spi_writel(tspi, val, SPI_DMA_CTL);
715 tspi->dma_control_reg = val;
717 if (tspi->boost_reg_access) {
718 speed = t->speed_hz ? t->speed_hz :
719 tspi->cur_spi->max_speed_hz;
720 ret = tegra_spi_set_clock_rate(tspi, speed);
725 spin_lock_irqsave(&tspi->lock, flags);
726 tspi->is_curr_dma_xfer = false;
727 val = tspi->command1_reg;
728 if (tspi->cur_direction & DATA_DIR_TX)
730 if (tspi->cur_direction & DATA_DIR_RX)
733 tspi->transfer_in_progress = true;
735 tegra_spi_writel(tspi, val, SPI_COMMAND1);
736 spin_unlock_irqrestore(&tspi->lock, flags);
/*
 * tegra_spi_init_dma_param - acquire one DMA channel and bounce buffer.
 * 'dma_to_memory' selects the RX ("rx") vs TX ("tx") channel.  Requests
 * the named channel from DT, allocates a coherent bounce buffer of
 * dma_buf_size, applies an initial slave config and stores the results
 * in tspi; on config failure the buffer and channel are released.
 */
740 static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
743 struct dma_chan *dma_chan;
747 struct dma_slave_config dma_sconfig;
749 dma_chan = dma_request_slave_channel_reason(tspi->dev,
750 dma_to_memory ? "rx" : "tx");
751 if (IS_ERR(dma_chan)) {
752 ret = PTR_ERR(dma_chan);
/* -EPROBE_DEFER is expected during boot ordering; stay quiet then */
753 if (ret != -EPROBE_DEFER)
755 "Dma channel is not available: %d\n", ret);
759 dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size,
760 &dma_phys, GFP_KERNEL);
762 dev_err(tspi->dev, "Not able to allocate the dma buffer\n");
763 dma_release_channel(dma_chan);
768 dma_sconfig.src_addr = tspi->phys + SPI_RX_FIFO;
769 dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
770 dma_sconfig.src_maxburst = 0;
772 dma_sconfig.dst_addr = tspi->phys + SPI_TX_FIFO;
773 dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
774 dma_sconfig.dst_maxburst = 0;
777 ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
781 tspi->rx_dma_chan = dma_chan;
782 tspi->rx_dma_buf = dma_buf;
783 tspi->rx_dma_phys = dma_phys;
785 tspi->tx_dma_chan = dma_chan;
786 tspi->tx_dma_buf = dma_buf;
787 tspi->tx_dma_phys = dma_phys;
/* error path: undo allocation and channel request */
792 dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
793 dma_release_channel(dma_chan);
/*
 * tegra_spi_deinit_dma_param - release one DMA channel and its bounce
 * buffer (RX or TX, mirroring tegra_spi_init_dma_param).  Pointers in
 * tspi are cleared before the resources are freed.
 */
797 static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
802 struct dma_chan *dma_chan;
805 dma_buf = tspi->rx_dma_buf;
806 dma_chan = tspi->rx_dma_chan;
807 dma_phys = tspi->rx_dma_phys;
808 tspi->rx_dma_chan = NULL;
809 tspi->rx_dma_buf = NULL;
811 dma_buf = tspi->tx_dma_buf;
812 dma_chan = tspi->tx_dma_chan;
813 dma_phys = tspi->tx_dma_phys;
814 tspi->tx_dma_buf = NULL;
815 tspi->tx_dma_chan = NULL;
820 dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
821 dma_release_channel(dma_chan);
/*
 * set_best_clk_source - pick the parent clock that best matches 'rate'.
 * Reads the candidate parents from the "nvidia,clk-parents" DT property.
 * On first use it computes tspi->min_div, the smallest divider that keeps
 * the SPI clock at or below spi_max_frequency for every parent; it then
 * tries each parent, rounds the target rate, and keeps the parent with
 * the smallest rate error.
 */
824 static void set_best_clk_source(struct tegra_spi_data *tspi,
828 unsigned long err_rate, crate, prate;
829 unsigned int cdiv, fin_err = rate;
831 struct clk *pclk, *fpclk = NULL;
832 const char *pclk_name, *fpclk_name;
833 struct device_node *node;
834 struct property *prop;
836 node = tspi->master->dev.of_node;
/* nothing to choose from when the DT property is absent */
837 if (!of_property_count_strings(node, "nvidia,clk-parents"))
840 /* when parent of a clk changes divider is not changed
841 * set a min div with which clk will not cross max rate
843 if (!tspi->min_div) {
844 of_property_for_each_string(node, "nvidia,clk-parents",
846 pclk = clk_get(tspi->dev, pclk_name);
849 prate = clk_get_rate(pclk);
850 crate = tspi->spi_max_frequency;
851 cdiv = DIV_ROUND_UP(prate, crate);
852 if (cdiv > tspi->min_div)
853 tspi->min_div = cdiv;
/* clamp the current rate so the eventual divider is >= min_div */
857 pclk = clk_get_parent(tspi->clk);
858 crate = clk_get_rate(tspi->clk);
859 prate = clk_get_rate(pclk);
860 cdiv = DIV_ROUND_UP(prate, crate);
861 if (cdiv < tspi->min_div) {
862 crate = DIV_ROUND_UP(prate, tspi->min_div);
863 clk_set_rate(tspi->clk, crate);
866 of_property_for_each_string(node, "nvidia,clk-parents",
868 pclk = clk_get(tspi->dev, pclk_name);
872 ret = clk_set_parent(tspi->clk, pclk);
875 "Error in setting parent clk src %s\n",
880 new_rate = clk_round_rate(tspi->clk, rate);
884 err_rate = abs(new_rate - rate);
885 if (err_rate < fin_err) {
888 fpclk_name = pclk_name;
/* commit the winning parent, if any was found */
893 dev_dbg(tspi->dev, "Setting clk_src %s\n",
895 clk_set_parent(tspi->clk, fpclk);
/*
 * tegra_spi_set_clock_rate - set the controller clock to 'speed' Hz.
 * Early-exits when the speed is already current; otherwise reselects the
 * best parent clock and applies the rate, caching it in tspi->cur_speed.
 */
899 static int tegra_spi_set_clock_rate(struct tegra_spi_data *tspi, u32 speed)
903 if (speed == tspi->cur_speed)
905 set_best_clk_source(tspi, speed);
906 ret = clk_set_rate(tspi->clk, speed);
908 dev_err(tspi->dev, "Failed to set clk freq %d\n", ret);
911 tspi->cur_speed = speed;
/*
 * tegra_spi_start_transfer_one - configure COMMAND1/COMMAND2 and timing
 * for one spi_transfer and start it via the DMA or CPU path.
 * On the first transfer of a message it also programs the SPI mode,
 * chip-select strategy (HW CS, SW CS or GPIO CS), CS setup/hold and
 * inter-packet timing, prod settings and RX tap delay.
 */
915 static int tegra_spi_start_transfer_one(struct spi_device *spi,
916 struct spi_transfer *t, bool is_first_of_msg,
919 struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
921 u32 spi_cs_timing2 = 0;
923 unsigned total_fifo_words;
925 struct tegra_spi_device_controller_data *cdata = spi->controller_data;
926 unsigned long command1;
930 bits_per_word = t->bits_per_word;
931 speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz;
932 /* set max clock for faster register access */
933 if (tspi->boost_reg_access)
934 ret = tegra_spi_set_clock_rate(tspi, tspi->spi_max_frequency);
936 ret = tegra_spi_set_clock_rate(tspi, speed);
942 tspi->cur_rx_pos = 0;
943 tspi->cur_tx_pos = 0;
947 total_fifo_words = tegra_spi_calculate_curr_xfer_param(spi, tspi, t);
949 /* Check that the all words are available */
950 if (t->len % tspi->bytes_per_word != 0)
953 if (is_first_of_msg) {
954 tegra_spi_clear_status(tspi);
956 command1 = tspi->def_command1_reg;
957 command1 |= SPI_BIT_LENGTH(bits_per_word - 1);
959 command1 &= ~SPI_CONTROL_MODE_MASK;
960 req_mode = spi->mode & 0x3;
961 if (req_mode == SPI_MODE_0)
962 command1 |= SPI_CONTROL_MODE_0;
963 else if (req_mode == SPI_MODE_1)
964 command1 |= SPI_CONTROL_MODE_1;
965 else if (req_mode == SPI_MODE_2)
966 command1 |= SPI_CONTROL_MODE_2;
967 else if (req_mode == SPI_MODE_3)
968 command1 |= SPI_CONTROL_MODE_3;
970 /* Apply mode setting before switching chip select */
971 if (SPI_MODE_VAL(command1) !=
972 SPI_MODE_VAL(tspi->def_command1_reg))
973 tegra_spi_writel(tspi, command1, SPI_COMMAND1);
975 /* possibly use the hw based chip select */
976 tspi->is_hw_based_cs = false;
/* HW CS only for a single transfer that fits one DMA chunk */
977 if (cdata && cdata->is_hw_based_cs && is_single_xfer &&
978 ((tspi->curr_dma_words * tspi->bytes_per_word) ==
979 (t->len - tspi->cur_pos))) {
985 set_count = min(cdata->cs_setup_clk_count, 16);
989 hold_count = min(cdata->cs_hold_clk_count, 16);
993 spi_cs_setup = SPI_SETUP_HOLD(set_count,
995 spi_cs_timing = tspi->spi_cs_timing;
996 spi_cs_timing = SPI_CS_SETUP_HOLD(spi_cs_timing,
999 tspi->spi_cs_timing = spi_cs_timing;
1000 tegra_spi_writel(tspi, spi_cs_timing,
1002 tspi->is_hw_based_cs = true;
/* program (or reset) inter-packet CS inactive cycles */
1005 if (cdata && cdata->cs_inactive_cycles) {
1006 u32 inactive_cycles;
1008 SPI_SET_CS_ACTIVE_BETWEEN_PACKETS(spi_cs_timing2,
1011 inactive_cycles = min(cdata->cs_inactive_cycles, 32);
1012 SPI_SET_CYCLES_BETWEEN_PACKETS(spi_cs_timing2,
1015 tegra_spi_writel(tspi, spi_cs_timing2, SPI_CS_TIMING2);
1016 tspi->is_hw_based_cs = true;
1018 SPI_SET_CS_ACTIVE_BETWEEN_PACKETS(spi_cs_timing2,
1019 spi->chip_select, 1);
1020 SPI_SET_CYCLES_BETWEEN_PACKETS(spi_cs_timing2,
1021 spi->chip_select, 0);
1022 tegra_spi_writel(tspi, spi_cs_timing2, SPI_CS_TIMING2);
1025 if (!tspi->is_hw_based_cs) {
1026 command1 |= SPI_CS_SW_HW;
1027 if (spi->mode & SPI_CS_HIGH)
1028 command1 |= SPI_CS_SS_VAL;
1030 command1 &= ~SPI_CS_SS_VAL;
1032 command1 &= ~SPI_CS_SW_HW;
1033 command1 &= ~SPI_CS_SS_VAL;
/* assert a GPIO chip select when one is configured */
1036 if (cdata && gpio_is_valid(cdata->cs_gpio)) {
1038 if (spi->mode & SPI_CS_HIGH)
1040 gpio_set_value(cdata->cs_gpio, gval)
1043 if (tspi->prod_list) {
1044 sprintf(prod_name, "prod_c_cs%d", spi->chip_select);
1045 tegra_prod_set_by_name(&tspi->base, prod_name,
1047 tegra_prod_set_by_name(&tspi->base, "prod",
/* raise RX tap delay for fast clocks on chips that need it */
1051 command2_reg = tspi->def_command2_reg;
1052 if (tspi->chip_data->set_rx_tap_delay) {
1053 if (speed > SPI_SPEED_TAP_DELAY_MARGIN) {
1054 command2_reg = command2_reg &
1055 (~SPI_RX_TAP_DELAY(63));
1056 command2_reg = command2_reg |
1058 SPI_DEFAULT_RX_TAP_DELAY);
1061 if (command2_reg != tspi->def_command2_reg)
1062 tegra_spi_writel(tspi, command2_reg,
/* subsequent transfers of the same message: only bit length changes */
1066 command1 = tspi->command1_reg;
1067 command1 &= ~SPI_BIT_LENGTH(~0);
1068 command1 |= SPI_BIT_LENGTH(bits_per_word - 1);
1071 if (spi->mode & SPI_LSBYTE_FIRST)
1072 command1 |= SPI_LSBYTE_FE;
1074 command1 &= ~SPI_LSBYTE_FE;
1076 if (spi->mode & SPI_LSB_FIRST)
1077 command1 |= SPI_LSBIT_FE;
1079 command1 &= ~SPI_LSBIT_FE;
1081 if (spi->mode & SPI_3WIRE)
1082 command1 |= SPI_BIDIROE;
1084 command1 &= ~SPI_BIDIROE;
1086 command1 &= ~SPI_BOTH_EN_BIT;
1087 if ((t->rx_nbits == SPI_NBITS_DUAL) ||
1088 (t->tx_nbits == SPI_NBITS_DUAL))
1089 command1 |= SPI_BOTH_EN_BIT;
1091 if (tspi->is_packed)
1092 command1 |= SPI_PACKED;
/* direction enables are applied later by the start_*_transfer paths */
1094 command1 &= ~(SPI_CS_SEL_MASK | SPI_TX_EN | SPI_RX_EN);
1095 tspi->cur_direction = 0;
1097 tspi->cur_direction |= DATA_DIR_RX;
1099 tspi->cur_direction |= DATA_DIR_TX;
1101 command1 |= SPI_CS_SEL(spi->chip_select);
1102 tspi->command1_reg = command1;
1104 dev_dbg(tspi->dev, "The def 0x%x and written 0x%lx\n",
1105 tspi->def_command1_reg, command1);
1107 tspi->status_reg = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
/* chunks larger than the FIFO go through DMA, smaller through PIO */
1109 if (total_fifo_words > SPI_FIFO_DEPTH)
1110 ret = tegra_spi_start_dma_based_transfer(tspi, t);
1112 ret = tegra_spi_start_cpu_based_transfer(tspi, t);
/*
 * tegra_spi_get_cdata_dt - parse per-device controller data from the
 * slave's "controller-data" DT child node into tspi->cdata[chip_select].
 * Reads CS setup/hold counts, tap delays, inter-packet timing and an
 * optional CS GPIO; rejects configurations where both cs-inactive-cycles
 * and clk-delay-between-packets are given, and requires a GPIO CS when a
 * packet delay is used.  Returns NULL-ish on the (elided) error paths.
 */
1116 static struct tegra_spi_device_controller_data
1117 *tegra_spi_get_cdata_dt(struct spi_device *spi,
1118 struct tegra_spi_data *tspi)
1120 struct tegra_spi_device_controller_data *cdata;
1121 struct device_node *slave_np, *data_np;
1124 slave_np = spi->dev.of_node;
1126 dev_dbg(&spi->dev, "device node not found\n");
1130 data_np = of_get_child_by_name(slave_np, "controller-data");
1132 dev_dbg(&spi->dev, "child node 'controller-data' not found\n");
1136 cdata = &tspi->cdata[spi->chip_select];
1137 memset(cdata, 0, sizeof(*cdata));
1139 ret = of_property_read_bool(data_np, "nvidia,enable-hw-based-cs");
1141 cdata->is_hw_based_cs = 1;
1143 of_property_read_u32(data_np, "nvidia,cs-setup-clk-count",
1144 &cdata->cs_setup_clk_count);
1145 of_property_read_u32(data_np, "nvidia,cs-hold-clk-count",
1146 &cdata->cs_hold_clk_count);
1147 of_property_read_u32(data_np, "nvidia,rx-clk-tap-delay",
1148 &cdata->rx_clk_tap_delay);
1149 of_property_read_u32(data_np, "nvidia,tx-clk-tap-delay",
1150 &cdata->tx_clk_tap_delay);
1151 of_property_read_u32(data_np, "nvidia,cs-inactive-cycles",
1152 &cdata->cs_inactive_cycles);
1153 of_property_read_u32(data_np, "nvidia,clk-delay-between-packets",
1154 &cdata->clk_delay_between_packets);
1156 if (cdata->cs_inactive_cycles && cdata->clk_delay_between_packets) {
1158 "CS inactive time and packet delay cannot coexist\n");
/* packet delay is implemented via the CS-inactive-cycles field */
1162 if (cdata->clk_delay_between_packets)
1163 cdata->cs_inactive_cycles = cdata->clk_delay_between_packets;
/* -EINVAL marks "no GPIO configured" (distinguished from lookup errors) */
1165 cdata->cs_gpio = -EINVAL;
1166 if (of_find_property(data_np, "nvidia,chipselect-gpio", NULL))
1167 cdata->cs_gpio = of_get_named_gpio(data_np,
1168 "nvidia,chipselect-gpio", 0);
1169 if ((cdata->cs_gpio < 0) && (cdata->cs_gpio != -EINVAL)) {
1171 "CS GPIO is not found on node %s: %d\n",
1172 data_np->name, cdata->cs_gpio);
1175 if ((cdata->cs_gpio < 0) && (cdata->clk_delay_between_packets)) {
1177 "CS packet delay requires gpio chip select\n");
1181 of_node_put(data_np);
/*
 * tegra_spi_setup - spi_master.setup() hook.
 * Parses controller data from DT when absent, defaults max_speed_hz to
 * the controller maximum, requests the CS GPIO once per chip select with
 * the correct idle level, programs the interrupt mask on chips that have
 * one, and sets the CS-inactive polarity (and default mode bits) in the
 * cached COMMAND1 default under the lock.
 */
1185 static int tegra_spi_setup(struct spi_device *spi)
1187 struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
1188 struct tegra_spi_device_controller_data *cdata = spi->controller_data;
1190 unsigned long flags;
1191 unsigned long intr_mask;
1193 unsigned int cs_pol_bit[MAX_CHIP_SELECT] = {
1194 SPI_CS_POL_INACTIVE_0,
1195 SPI_CS_POL_INACTIVE_1,
1196 SPI_CS_POL_INACTIVE_2,
1197 SPI_CS_POL_INACTIVE_3,
1200 dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
1202 spi->mode & SPI_CPOL ? "" : "~",
1203 spi->mode & SPI_CPHA ? "" : "~",
1207 cdata = tegra_spi_get_cdata_dt(spi, tspi);
1208 spi->controller_data = cdata;
1211 if (cdata->clk_delay_between_packets)
1212 cdata->cs_inactive_cycles =
1213 cdata->clk_delay_between_packets;
1215 /* Set speed to the spi max fequency if spi device has not set */
1216 spi->max_speed_hz = spi->max_speed_hz ? : tspi->spi_max_frequency;
1218 ret = tegra_spi_runtime_get(tspi);
1220 dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
/* request the CS GPIO once, idle level depends on SPI_CS_HIGH */
1224 if (cdata && gpio_is_valid(cdata->cs_gpio)) {
1225 if (!tspi->cs_gpio_reqstd[spi->chip_select]) {
1226 int gpio_flag = GPIOF_OUT_INIT_HIGH;
1227 if (spi->mode & SPI_CS_HIGH)
1228 gpio_flag = GPIOF_OUT_INIT_LOW;
1230 ret = devm_gpio_request_one(tspi->dev, cdata->cs_gpio,
1231 gpio_flag, "cs_gpio");
1234 "GPIO request failed: %d\n", ret);
1237 tspi->cs_gpio_reqstd[spi->chip_select] = true;
/* already requested: just drive it to the inactive level */
1239 int val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;
1240 gpio_set_value(cdata->cs_gpio, val);
1244 if (tspi->chip_data->intr_mask_reg) {
1245 intr_mask = tegra_spi_readl(tspi, SPI_INTR_MASK);
1246 if (!tspi->polling_mode)
1247 intr_mask &= ~(SPI_INTR_ALL_MASK);
1249 intr_mask |= SPI_INTR_ALL_MASK;
1250 tegra_spi_writel(tspi, intr_mask, SPI_INTR_MASK);
1252 spin_lock_irqsave(&tspi->lock, flags);
1253 val = tspi->def_command1_reg;
/* CS-inactive polarity bits default high; invert for SPI_CS_HIGH */
1254 if (spi->mode & SPI_CS_HIGH)
1255 val &= ~cs_pol_bit[spi->chip_select];
1257 val |= cs_pol_bit[spi->chip_select];
1258 if (tspi->def_chip_select == spi->chip_select)
1259 val |= SPI_MODE_SEL(spi->mode & 0x3);
1261 tspi->def_command1_reg = val;
1262 tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
1263 spin_unlock_irqrestore(&tspi->lock, flags);
1265 tegra_spi_runtime_put(tspi);
/*
 * tegra_spi_cs_low() - force the chip-select line to its active/inactive
 * state outside of a transfer (exported via master->spi_cs_low).
 * Drives the CS GPIO low when one is configured, and flips the CS polarity
 * bit in COMMAND1 for active-low devices, under tspi->lock and runtime PM.
 * NOTE(review): elided extract — the second parameter and several error
 * checks are not visible here.
 */
1269 static int tegra_spi_cs_low(struct spi_device *spi,
1272 struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
1273 struct tegra_spi_device_controller_data *cdata = spi->controller_data;
1276 unsigned long flags;
1277 unsigned int cs_pol_bit[MAX_CHIP_SELECT] = {
1278 SPI_CS_POL_INACTIVE_0,
1279 SPI_CS_POL_INACTIVE_1,
1280 SPI_CS_POL_INACTIVE_2,
1281 SPI_CS_POL_INACTIVE_3,
1284 ret = tegra_spi_runtime_get(tspi);
1286 dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
/* GPIO-based CS: simply drive the line low. */
1290 if (cdata && gpio_is_valid(cdata->cs_gpio))
1291 gpio_set_value(cdata->cs_gpio, 0);
1293 spin_lock_irqsave(&tspi->lock, flags);
1294 if (!(spi->mode & SPI_CS_HIGH)) {
1295 val = tegra_spi_readl(tspi, SPI_COMMAND1);
1297 val &= ~cs_pol_bit[spi->chip_select];
1299 val |= cs_pol_bit[spi->chip_select];
1300 tegra_spi_writel(tspi, val, SPI_COMMAND1);
1303 spin_unlock_irqrestore(&tspi->lock, flags);
1304 tegra_spi_runtime_put(tspi);
/*
 * tegra_spi_dump_regs() - log COMMAND1, FIFO_STATUS, DMA_CTL and
 * TRANS_STATUS for post-mortem debugging of failed transfers.
 * Called from the timeout/error paths; reads are live register reads.
 */
1309 static void tegra_spi_dump_regs(struct tegra_spi_data *tspi)
1312 u32 fifo_status_reg;
1314 u32 trans_status_reg;
1316 command1_reg = tegra_spi_readl(tspi, SPI_COMMAND1);
1317 fifo_status_reg = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
1318 dma_ctrl_reg = tegra_spi_readl(tspi, SPI_DMA_CTL);
1319 trans_status_reg = tegra_spi_readl(tspi, SPI_TRANS_STATUS);
1322 "SPI_ERR: CMD_0: 0x%08x, FIFO_STS: 0x%08x\n",
1323 command1_reg, fifo_status_reg);
1325 "SPI_ERR: DMA_CTL: 0x%08x, TRANS_STS: 0x%08x\n",
1326 dma_ctrl_reg, trans_status_reg);
/*
 * tegra_spi_wait_on_message_xfer() - wait for the in-flight transfer to
 * finish, either by polling TRANS_STATUS or by sleeping on
 * tspi->xfer_completion. On timeout: dump registers, pulse the peripheral
 * reset, and terminate any outstanding TX/RX DMA. Also reports (and clears
 * FIFOs for) transfers that completed with error status.
 * NOTE(review): "trasfer" in the timeout message is a runtime string and is
 * intentionally left untouched here; fixing it would change behavior.
 */
1329 static int tegra_spi_wait_on_message_xfer(struct tegra_spi_data *tspi)
1333 if (tspi->polling_mode)
1334 ret = tegra_spi_status_poll(tspi);
1336 ret = wait_for_completion_timeout(&tspi->xfer_completion,
/* ret == 0 means the completion timed out — hardware never signalled. */
1338 if (WARN_ON(ret == 0)) {
1340 "spi trasfer timeout, err %d\n", ret);
1341 tegra_spi_dump_regs(tspi);
/* Reset the SPI block to recover from a wedged transfer. */
1342 tegra_periph_reset_assert(tspi->clk);
1344 tegra_periph_reset_deassert(tspi->clk);
1345 if (tspi->is_curr_dma_xfer &&
1346 (tspi->cur_direction & DATA_DIR_TX))
1347 dmaengine_terminate_all(tspi->tx_dma_chan);
1348 if (tspi->is_curr_dma_xfer &&
1349 (tspi->cur_direction & DATA_DIR_RX))
1350 dmaengine_terminate_all(tspi->rx_dma_chan);
1354 if (tspi->tx_status || tspi->rx_status) {
1355 dev_err(tspi->dev, "Error in Transfer\n");
1356 tegra_spi_dump_regs(tspi);
1357 check_and_clear_fifo(tspi);
/*
 * tegra_spi_wait_remain_message() - kick off and wait for the remainder of
 * a transfer that did not complete in one shot. Recomputes the per-chunk
 * FIFO word count and chooses DMA when more than one FIFO depth remains,
 * otherwise falls back to CPU (PIO) mode, then waits for completion.
 */
1364 static int tegra_spi_wait_remain_message(struct tegra_spi_data *tspi,
1365 struct spi_transfer *xfer)
1367 unsigned total_fifo_words;
/* Re-arm the completion before starting the next chunk. */
1370 INIT_COMPLETION(tspi->xfer_completion);
1372 if (tspi->is_curr_dma_xfer) {
1373 total_fifo_words = tegra_spi_calculate_curr_xfer_param(
1374 tspi->cur_spi, tspi, xfer);
1375 if (total_fifo_words > SPI_FIFO_DEPTH)
1376 ret = tegra_spi_start_dma_based_transfer(tspi, xfer);
1378 ret = tegra_spi_start_cpu_based_transfer(tspi, xfer);
/* Previous chunk was PIO: stay in CPU mode for the remainder. */
1380 tegra_spi_calculate_curr_xfer_param(tspi->cur_spi, tspi, xfer);
1381 tegra_spi_start_cpu_based_transfer(tspi, xfer);
1384 ret = tegra_spi_wait_on_message_xfer(tspi);
/*
 * tegra_spi_handle_message() - post-completion bookkeeping for one chunk.
 * For PIO transfers: drain the RX FIFO into the client buffer and advance
 * cur_pos. For DMA transfers: wait (interruptibly, with timeout) for the
 * TX and/or RX DMA completions, copy the RX bounce buffer to the client,
 * and advance cur_pos. DMA wait failure terminates the channel and resets
 * the SPI block. Optionally raises the clock first for faster register
 * access when boost_reg_access is set.
 */
1389 static int tegra_spi_handle_message(struct tegra_spi_data *tspi,
1390 struct spi_transfer *xfer)
1395 if (tspi->boost_reg_access) {
1396 /* set max clock for faster register access */
1397 ret = tegra_spi_set_clock_rate(tspi, tspi->spi_max_frequency);
/* ---- PIO path: FIFO already holds the data. ---- */
1402 if (!tspi->is_curr_dma_xfer) {
1403 if (tspi->cur_direction & DATA_DIR_RX)
1404 tegra_spi_read_rx_fifo_to_client_rxbuf(tspi, xfer);
1405 if (tspi->cur_direction & DATA_DIR_TX)
1406 tspi->cur_pos = tspi->cur_tx_pos;
1407 else if (tspi->cur_direction & DATA_DIR_RX)
1408 tspi->cur_pos = tspi->cur_rx_pos;
/* ---- DMA path: wait for each direction's DMA-complete callback. ---- */
1412 if (tspi->cur_direction & DATA_DIR_TX) {
1413 wait_status = wait_for_completion_interruptible_timeout(
1414 &tspi->tx_dma_complete,
1416 if (wait_status <= 0) {
1417 dmaengine_terminate_all(tspi->tx_dma_chan);
1418 dev_err(tspi->dev, "TxDma Xfer failed, wait_status - %ld\n",
1420 tegra_spi_dump_regs(tspi);
1421 tegra_periph_reset_assert(tspi->clk);
1423 tegra_periph_reset_deassert(tspi->clk);
1428 if (tspi->cur_direction & DATA_DIR_RX) {
1429 wait_status = wait_for_completion_interruptible_timeout(
1430 &tspi->rx_dma_complete,
1432 if (wait_status <= 0) {
1433 dmaengine_terminate_all(tspi->rx_dma_chan);
1435 "RxDma Xfer failed, wait_status - %ld\n",
1437 tegra_spi_dump_regs(tspi);
1438 tegra_periph_reset_assert(tspi->clk);
1440 tegra_periph_reset_deassert(tspi->clk);
/* Copy the DMA bounce buffer out and advance the message position. */
1445 if (tspi->cur_direction & DATA_DIR_RX)
1446 tegra_spi_copy_spi_rxbuf_to_client_rxbuf(tspi, xfer);
1448 if (tspi->cur_direction & DATA_DIR_TX)
1449 tspi->cur_pos = tspi->cur_tx_pos;
1451 tspi->cur_pos = tspi->cur_rx_pos;
/*
 * tegra_spi_transfer_one_message() - spi_master .transfer_one_message hook.
 * Iterates over every spi_transfer in the message; each transfer is started
 * with tegra_spi_start_transfer_one() and, if it does not finish in one
 * chunk (cur_pos != xfer->len), continued via wait_remain_message until the
 * whole length is moved. Honors cs_change + delay_usecs between transfers
 * (toggling the GPIO CS when present), restores the default COMMAND1 at the
 * end, and always finalizes the message for the SPI core.
 * NOTE(review): elided extract — 'gval' initialisation and some loop/brace
 * structure are not visible here.
 */
1457 static int tegra_spi_transfer_one_message(struct spi_master *master,
1458 struct spi_message *msg)
1460 bool is_first_msg = true;
1461 bool is_new_msg = true;
1463 struct tegra_spi_data *tspi = spi_master_get_devdata(master);
1464 struct spi_transfer *xfer;
1465 struct spi_device *spi = msg->spi;
1466 struct tegra_spi_device_controller_data *cdata = spi->controller_data;
1471 msg->actual_length = 0;
1473 if (spi->mode & SPI_CS_HIGH)
1476 ret = tegra_spi_runtime_get(tspi);
1478 dev_err(tspi->dev, "runtime PM get failed: %d\n", ret);
1480 spi_finalize_current_message(master);
/* Single-transfer messages let the HW sequence CS by itself. */
1484 single_xfer = list_is_singular(&msg->transfers);
1485 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1488 INIT_COMPLETION(tspi->xfer_completion);
1489 ret = tegra_spi_start_transfer_one(spi, xfer,
1490 is_first_msg, single_xfer);
1493 "spi cannot start transfer,err %d\n",
1497 is_first_msg = false;
1499 ret = tegra_spi_wait_on_message_xfer(tspi);
1502 ret = tegra_spi_handle_message(tspi, xfer);
1505 if (tspi->cur_pos == xfer->len) {
/* Transfer longer than one chunk: keep issuing the remainder. */
1510 ret = tegra_spi_wait_remain_message(tspi, xfer);
1513 ret = tegra_spi_handle_message(tspi, xfer);
1516 if (tspi->cur_pos == xfer->len) {
1521 } /* End of while */
1522 msg->actual_length += xfer->len;
/* cs_change + delay: drop CS, wait, raise it again before next xfer. */
1523 if (xfer->cs_change && xfer->delay_usecs) {
1524 tegra_spi_writel(tspi, tspi->def_command1_reg,
1527 if (cdata && gpio_is_valid(cdata->cs_gpio))
1528 gpio_set_value(cdata->cs_gpio, gval);
1530 udelay(xfer->delay_usecs);
1532 if (cdata && gpio_is_valid(cdata->cs_gpio))
1533 gpio_set_value(cdata->cs_gpio, !gval);
/* Restore idle CS state and hand the message back to the core. */
1538 tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
1539 if (cdata && gpio_is_valid(cdata->cs_gpio))
1540 gpio_set_value(cdata->cs_gpio, gval);
1542 tegra_spi_runtime_put(tspi);
1544 spi_finalize_current_message(master);
/*
 * handle_cpu_based_err_xfer() - report and recover from FIFO errors that
 * occurred during a PIO (CPU-driven) transfer. Decodes the cached
 * FIFO_STATUS bits into per-condition messages, dumps registers, and
 * pulses the peripheral reset, all under tspi->lock.
 */
1548 static void handle_cpu_based_err_xfer(struct tegra_spi_data *tspi)
1550 unsigned long flags;
1552 spin_lock_irqsave(&tspi->lock, flags);
1553 if (tspi->tx_status || tspi->rx_status) {
1554 dev_err(tspi->dev, "CpuXfer ERROR bit set 0x%x\n",
1557 if (tspi->status_reg & SPI_TX_FIFO_UNF)
1558 dev_err(tspi->dev, "CpuXfer: TX FIFO UNDERRUN ERROR\n");
1560 if (tspi->status_reg & SPI_TX_FIFO_OVF)
1561 dev_err(tspi->dev, "CpuXfer: TX FIFO OVERFLOW ERROR\n");
1563 if (tspi->status_reg & SPI_RX_FIFO_UNF)
1564 dev_err(tspi->dev, "CpuXfer: RX FIFO UNDERRUN ERROR\n");
1566 if (tspi->status_reg & SPI_RX_FIFO_OVF)
1567 dev_err(tspi->dev, "CpuXfer: RX FIFO OVERFLOW ERROR\n");
1569 dev_err(tspi->dev, "CpuXfer 0x%08x:0x%08x\n",
1570 tspi->command1_reg, tspi->dma_control_reg);
1571 tegra_spi_dump_regs(tspi);
/* Reset the controller so the next transfer starts clean. */
1572 tegra_periph_reset_assert(tspi->clk);
1574 tegra_periph_reset_deassert(tspi->clk);
1576 spin_unlock_irqrestore(&tspi->lock, flags);
/*
 * handle_dma_based_err_xfer() - DMA-mode counterpart of the CPU error
 * handler. Terminates whichever DMA channel (TX/RX) recorded an error,
 * decodes the cached FIFO_STATUS bits, dumps registers, and pulses the
 * peripheral reset, all under tspi->lock.
 */
1579 static void handle_dma_based_err_xfer(struct tegra_spi_data *tspi)
1582 unsigned long flags;
1584 spin_lock_irqsave(&tspi->lock, flags);
1585 /* Abort dmas if any error */
1586 if (tspi->cur_direction & DATA_DIR_TX) {
1587 if (tspi->tx_status) {
1588 dmaengine_terminate_all(tspi->tx_dma_chan);
1593 if (tspi->cur_direction & DATA_DIR_RX) {
1594 if (tspi->rx_status) {
1595 dmaengine_terminate_all(tspi->rx_dma_chan);
1601 dev_err(tspi->dev, "DmaXfer: ERROR bit set 0x%x\n",
1604 if (tspi->status_reg & SPI_TX_FIFO_UNF)
1605 dev_err(tspi->dev, "DmaXfer: TX FIFO UNDERRUN ERROR\n");
1607 if (tspi->status_reg & SPI_TX_FIFO_OVF)
1608 dev_err(tspi->dev, "DmaXfer: TX FIFO OVERFLOW ERROR\n");
1610 if (tspi->status_reg & SPI_RX_FIFO_UNF)
1611 dev_err(tspi->dev, "DmaXfer: RX FIFO UNDERRUN ERROR\n");
1613 if (tspi->status_reg & SPI_RX_FIFO_OVF)
1614 dev_err(tspi->dev, "DmaXfer: RX FIFO OVERFLOW ERROR\n");
1616 dev_err(tspi->dev, "DmaXfer 0x%08x:0x%08x\n",
1617 tspi->command1_reg, tspi->dma_control_reg);
1618 tegra_spi_dump_regs(tspi);
/* Reset the controller so the next transfer starts clean. */
1619 tegra_periph_reset_assert(tspi->clk);
1621 tegra_periph_reset_deassert(tspi->clk);
1623 spin_unlock_irqrestore(&tspi->lock, flags);
/*
 * tegra_spi_isr() - SPI controller interrupt handler.
 * Clears the interrupt status, snapshots FIFO_STATUS into tspi->status_reg,
 * latches TX/RX error bits, dispatches to the CPU- or DMA-mode error
 * handler, then marks the transfer done and wakes the waiter.
 * A spurious interrupt (no transfer in progress) is logged and dropped;
 * in polling mode an interrupt should never fire, so warn if it does.
 * NOTE(review): lines 1640/1644 test cur_direction against SPI_TX_EN /
 * SPI_RX_EN, while every other site uses DATA_DIR_TX / DATA_DIR_RX —
 * verify against the full file that these bit values coincide.
 */
1626 static irqreturn_t tegra_spi_isr(int irq, void *context_data)
1628 struct tegra_spi_data *tspi = context_data;
1630 if (tspi->polling_mode)
1631 dev_warn(tspi->dev, "interrupt raised in polling mode\n");
1633 tegra_spi_clear_status(tspi);
1634 if (!tspi->transfer_in_progress) {
1635 dev_err(tspi->dev, "spurious interrupt, status_reg = 0x%x\n",
1639 tspi->status_reg = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
1640 if (tspi->cur_direction & SPI_TX_EN)
1641 tspi->tx_status = tspi->status_reg &
1642 (SPI_TX_FIFO_UNF | SPI_TX_FIFO_OVF);
1644 if (tspi->cur_direction & SPI_RX_EN)
1645 tspi->rx_status = tspi->status_reg &
1646 (SPI_RX_FIFO_OVF | SPI_RX_FIFO_UNF);
1648 if (!tspi->is_curr_dma_xfer)
1649 handle_cpu_based_err_xfer(tspi);
1651 handle_dma_based_err_xfer(tspi);
1653 tspi->transfer_in_progress = false;
1654 complete(&tspi->xfer_completion);
/*
 * tegra_spi_status_poll() - polling-mode replacement for the ISR.
 * Busy-polls TRANS_STATUS for SPI_RDY (with a bounded timeout), then
 * performs the same status capture / error latching / error-handler
 * dispatch as tegra_spi_isr().
 */
1658 static int tegra_spi_status_poll(struct tegra_spi_data *tspi)
1660 unsigned int status;
1661 unsigned long timeout;
1663 timeout = SPI_POLL_TIMEOUT;
1665 * Read register would take between 1~3us and 1us delay added in loop
1666 * Calculate timeout taking this into consideration
1669 status = tegra_spi_readl(tspi, SPI_TRANS_STATUS);
1670 if (status & SPI_RDY)
1677 dev_err(tspi->dev, "transfer timeout (polling)\n");
/* Transfer signalled ready: capture FIFO status and latch errors. */
1681 tspi->status_reg = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
1682 if (tspi->cur_direction & DATA_DIR_TX)
1683 tspi->tx_status = tspi->status_reg &
1684 (SPI_TX_FIFO_UNF | SPI_TX_FIFO_OVF);
1686 if (tspi->cur_direction & DATA_DIR_RX)
1687 tspi->rx_status = tspi->status_reg &
1688 (SPI_RX_FIFO_OVF | SPI_RX_FIFO_UNF);
1690 if (!(tspi->cur_direction & DATA_DIR_TX) &&
1691 !(tspi->cur_direction & DATA_DIR_RX))
1692 dev_err(tspi->dev, "spurious interrupt, status_reg = 0x%x\n",
1695 tegra_spi_clear_status(tspi);
1697 if (!tspi->is_curr_dma_xfer)
1698 handle_cpu_based_err_xfer(tspi);
1700 handle_dma_based_err_xfer(tspi);
/*
 * tegra_spi_parse_dt() - build a tegra_spi_platform_data from the device
 * tree node: max frequency, clock-always-on / polling / boost-reg-access /
 * runtime-PM flags, max DMA buffer size, and the default chip select
 * (taken from the child node marked "nvidia,default-chipselect", else the
 * last child; falls back to CS 0 when no client node exists).
 */
1705 static struct tegra_spi_platform_data *tegra_spi_parse_dt(
1706 struct platform_device *pdev)
1708 struct tegra_spi_platform_data *pdata;
1709 const unsigned int *prop;
1710 struct device_node *np = pdev->dev.of_node;
1711 struct device_node *nc = NULL;
1712 struct device_node *found_nc = NULL;
1717 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1719 dev_err(&pdev->dev, "Memory alloc for pdata failed\n");
1723 prop = of_get_property(np, "spi-max-frequency", NULL);
1725 pdata->spi_max_frequency = be32_to_cpup(prop);
1727 if (of_find_property(np, "nvidia,clock-always-on", NULL))
1728 pdata->is_clkon_always = true;
1730 if (of_find_property(np, "nvidia,polling-mode", NULL))
1731 pdata->is_polling_mode = true;
1733 if (of_find_property(np, "nvidia,boost-reg-access", NULL))
1734 pdata->boost_reg_access = true;
1736 if (of_find_property(np, "nvidia,disable-runtime-pm", NULL))
1737 pdata->runtime_pm = false;
1739 pdata->runtime_pm = true;
1741 ret = of_property_read_u32(np, "nvidia,maximum-dma-buffer-size", &pval);
1743 pdata->max_dma_buffer_size = pval;
1745 /* when no client is defined, default chipselect is zero */
1746 pdata->def_chip_select = 0;
1749 * Last child node or first node which has property as default-cs will
1750 * become the default.
1752 for_each_available_child_of_node(np, nc) {
1754 ret = of_property_read_bool(nc, "nvidia,default-chipselect");
/* The chosen child's "reg" property holds the default CS number. */
1759 prop = of_get_property(found_nc, "reg", &len);
1760 if (!prop || len < sizeof(*prop))
1761 dev_err(&pdev->dev, "%s has no reg property\n",
1762 found_nc->full_name);
1764 pdata->def_chip_select = be32_to_cpup(prop);
/*
 * Per-SoC capability tables: whether the chip has a dedicated SPI_INTR_MASK
 * register and whether RX tap delay must be programmed. Tegra186 reuses the
 * Tegra210 data.
 */
1770 static struct tegra_spi_chip_data tegra114_spi_chip_data = {
1771 .intr_mask_reg = false,
1772 .set_rx_tap_delay = false,
1775 static struct tegra_spi_chip_data tegra124_spi_chip_data = {
1776 .intr_mask_reg = false,
1777 .set_rx_tap_delay = true,
1780 static struct tegra_spi_chip_data tegra210_spi_chip_data = {
1781 .intr_mask_reg = true,
1782 .set_rx_tap_delay = false,
/* OF match table binding each compatible string to its chip data. */
1785 static struct of_device_id tegra_spi_of_match[] = {
1787 .compatible = "nvidia,tegra114-spi",
1788 .data = &tegra114_spi_chip_data,
1790 .compatible = "nvidia,tegra124-spi",
1791 .data = &tegra124_spi_chip_data,
1793 .compatible = "nvidia,tegra210-spi",
1794 .data = &tegra210_spi_chip_data,
1796 .compatible = "nvidia,tegra186-spi",
1797 .data = &tegra210_spi_chip_data,
1801 MODULE_DEVICE_TABLE(of, tegra_spi_of_match);
/*
 * tegra_spi_probe() - platform driver probe.
 * Sequence: resolve bus number from the "spi" DT alias; obtain/parse
 * platform data (default max frequency 25 MHz); allocate and configure the
 * spi_master (mode bits, 4-32 bpw, setup/transfer hooks); copy pdata flags
 * into tspi; pick per-SoC chip data via the OF match table; fetch prod
 * settings; map MMIO, get IRQ, clock and optional pinctrl state
 * ("interface-enable"); set up TX/RX DMA and completions; enable clocks /
 * runtime PM with autosuspend; reset the controller and program the default
 * COMMAND1/COMMAND2; register the ISR and the master; finally select the
 * pin interface state. Error labels unwind in reverse order.
 * NOTE(review): elided extract — many `if (ret ...)` guards and the label
 * definitions themselves are not visible. Also, the visible
 * devm_request_irq() failure path (orig line 1982) jumps to
 * exit_free_master, apparently skipping pm_runtime_disable/clk unwind that
 * exit_pm_disable performs — confirm against the full file (this matches a
 * known upstream fix for this driver).
 */
1803 static int tegra_spi_probe(struct platform_device *pdev)
1805 struct spi_master *master;
1806 struct tegra_spi_data *tspi;
1808 struct tegra_spi_platform_data *pdata = pdev->dev.platform_data;
1809 const struct of_device_id *match;
/* Default to Tegra114 capabilities when no OF match applies. */
1810 const struct tegra_spi_chip_data *chip_data = &tegra114_spi_chip_data;
1814 if (pdev->dev.of_node) {
1815 bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
1817 dev_warn(&pdev->dev,
1818 "Dynamic bus number will be registerd\n");
1825 if (!pdata && pdev->dev.of_node)
1826 pdata = tegra_spi_parse_dt(pdev);
1829 dev_err(&pdev->dev, "No platform data, exiting\n");
1833 if (!pdata->spi_max_frequency)
1834 pdata->spi_max_frequency = 25000000; /* 25MHz */
1836 master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
1838 dev_err(&pdev->dev, "master allocation failed\n");
1842 /* the spi->mode bits understood by this driver: */
1843 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST |
1844 SPI_TX_DUAL | SPI_RX_DUAL;
1845 /* supported bpw 4-32 */
1846 master->bits_per_word_mask = (u32) ~(BIT(0)|BIT(1)|BIT(2));
1847 master->setup = tegra_spi_setup;
1848 master->transfer_one_message = tegra_spi_transfer_one_message;
1849 master->num_chipselect = MAX_CHIP_SELECT;
1850 master->bus_num = bus_num;
1851 master->spi_cs_low = tegra_spi_cs_low;
1853 dev_set_drvdata(&pdev->dev, master);
1854 tspi = spi_master_get_devdata(master);
1855 tspi->master = master;
1856 tspi->clock_always_on = pdata->is_clkon_always;
1857 tspi->polling_mode = pdata->is_polling_mode;
1858 tspi->boost_reg_access = pdata->boost_reg_access;
1859 tspi->runtime_pm = pdata->runtime_pm;
/* Without runtime PM the clock must stay on permanently. */
1860 if (!tspi->runtime_pm)
1861 tspi->clock_always_on = true;
1862 tspi->def_chip_select = pdata->def_chip_select;
1863 tspi->dev = &pdev->dev;
1865 if (pdev->dev.of_node) {
1866 match = of_match_device(tegra_spi_of_match,
1869 chip_data = match->data;
1871 tspi->chip_data = chip_data;
/* Prod settings are optional; degrade gracefully when absent. */
1873 tspi->prod_list = tegra_prod_get(&pdev->dev, NULL);
1874 if (IS_ERR(tspi->prod_list)) {
1875 dev_err(&pdev->dev, "Prod settings list not initialized\n");
1876 tspi->prod_list = NULL;
1879 spin_lock_init(&tspi->lock);
1881 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1883 dev_err(&pdev->dev, "No IO memory resource\n");
1885 goto exit_free_master;
1887 tspi->phys = r->start;
1888 tspi->base = devm_ioremap_resource(&pdev->dev, r);
1889 if (IS_ERR(tspi->base)) {
1891 "Cannot request memregion/iomap dma address\n");
1892 ret = PTR_ERR(tspi->base);
1893 goto exit_free_master;
1896 spi_irq = platform_get_irq(pdev, 0);
1897 tspi->irq = spi_irq;
1899 tspi->clk = devm_clk_get(&pdev->dev, "spi");
1900 if (IS_ERR(tspi->clk)) {
1901 dev_err(&pdev->dev, "can not get clock\n");
1902 ret = PTR_ERR(tspi->clk);
1903 goto exit_free_master;
/* Pinctrl and the "interface-enable" state are both optional. */
1906 tspi->pinctrl = devm_pinctrl_get(&pdev->dev);
1907 if (IS_ERR(tspi->pinctrl)) {
1908 dev_info(&pdev->dev, "Pincontrol not found\n");
1909 tspi->pinctrl = NULL;
1912 if (tspi->pinctrl) {
1913 tspi->enable_interface = pinctrl_lookup_state(tspi->pinctrl,
1914 "interface-enable");
1915 if (IS_ERR(tspi->enable_interface)) {
1916 dev_info(&pdev->dev, "Static pin configuration used\n");
1917 tspi->enable_interface = NULL;
1921 tspi->max_buf_size = SPI_FIFO_DEPTH << 2;
1922 tspi->dma_buf_size = (pdata->max_dma_buffer_size) ?
1923 pdata->max_dma_buffer_size :
1924 DEFAULT_SPI_DMA_BUF_LEN;
1925 tspi->spi_max_frequency = pdata->spi_max_frequency;
/* DMA: true = RX channel, false = TX channel (per the unwind labels). */
1928 ret = tegra_spi_init_dma_param(tspi, true);
1930 goto exit_free_master;
1931 ret = tegra_spi_init_dma_param(tspi, false);
1933 goto exit_rx_dma_free;
1934 tspi->max_buf_size = tspi->dma_buf_size;
1935 init_completion(&tspi->tx_dma_complete);
1936 init_completion(&tspi->rx_dma_complete);
1938 init_completion(&tspi->xfer_completion);
1940 if (tspi->clock_always_on) {
1941 ret = clk_prepare_enable(tspi->clk);
1943 dev_err(tspi->dev, "clk_prepare failed: %d\n", ret);
1944 goto exit_deinit_dma;
1947 if (tspi->runtime_pm) {
1948 pm_runtime_enable(tspi->dev);
1949 if (!pm_runtime_enabled(tspi->dev)) {
1950 ret = tegra_spi_runtime_resume(tspi->dev);
1952 goto exit_pm_disable;
1955 /* set autosuspend delay for the adapter device */
1956 pm_runtime_set_autosuspend_delay(tspi->dev,
1957 SPI_AUTOSUSPEND_DELAY);
1958 pm_runtime_use_autosuspend(tspi->dev);
1961 ret = tegra_spi_runtime_get(tspi);
1963 dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
1964 goto exit_pm_disable;
/* Hardware bring-up: reset, then program default command registers. */
1967 tegra_periph_reset_assert(tspi->clk);
1969 tegra_periph_reset_deassert(tspi->clk);
1971 tspi->def_command1_reg = SPI_M_S | SPI_LSBYTE_FE;
1972 tspi->def_command1_reg |= SPI_CS_SEL(tspi->def_chip_select);
1973 tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
1974 tspi->def_command2_reg = tegra_spi_readl(tspi, SPI_COMMAND2);
1975 tegra_spi_runtime_put(tspi);
1977 ret = devm_request_irq(&pdev->dev, tspi->irq, tegra_spi_isr, 0,
1978 dev_name(&pdev->dev), tspi);
1980 dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
1982 goto exit_free_master;
1985 master->dev.of_node = pdev->dev.of_node;
1986 ret = spi_register_master(master);
1988 dev_err(&pdev->dev, "can not register to master err %d\n", ret);
1989 goto exit_pm_disable;
1992 if (tspi->enable_interface) {
1993 ret = pinctrl_select_state(tspi->pinctrl,
1994 tspi->enable_interface);
1996 dev_err(&pdev->dev, "Enable pin interface failed: %d\n",
1998 goto exit_master_unreg;
/* ---- error unwind (labels elided in this extract) ---- */
2005 spi_unregister_master(master);
2008 pm_runtime_disable(&pdev->dev);
2009 if (!pm_runtime_status_suspended(&pdev->dev))
2010 tegra_spi_runtime_suspend(&pdev->dev);
2011 if (tspi->clock_always_on)
2012 clk_disable_unprepare(tspi->clk);
2014 tegra_spi_deinit_dma_param(tspi, false);
2016 tegra_spi_deinit_dma_param(tspi, true);
2018 spi_master_put(master);
/*
 * tegra_spi_remove() - platform driver remove: unregister the master,
 * tear down the DMA channels that were actually created, disable runtime
 * PM (forcing a runtime suspend if the device is still active), drop the
 * always-on clock, and release the prod settings list.
 */
2022 static int tegra_spi_remove(struct platform_device *pdev)
2024 struct spi_master *master = dev_get_drvdata(&pdev->dev);
2025 struct tegra_spi_data *tspi = spi_master_get_devdata(master);
2027 spi_unregister_master(master);
2029 if (tspi->tx_dma_chan)
2030 tegra_spi_deinit_dma_param(tspi, false);
2032 if (tspi->rx_dma_chan)
2033 tegra_spi_deinit_dma_param(tspi, true);
2035 pm_runtime_disable(&pdev->dev);
/* If runtime PM never suspended us, do the clock teardown manually. */
2036 if (!pm_runtime_status_suspended(&pdev->dev))
2037 tegra_spi_runtime_suspend(&pdev->dev);
2039 if (tspi->clock_always_on)
2040 clk_disable_unprepare(tspi->clk);
2042 if (tspi->prod_list)
2043 tegra_prod_release(&tspi->prod_list);
2048 #ifdef CONFIG_PM_SLEEP
/*
 * tegra_spi_suspend() - system sleep: quiesce the SPI core queue, then
 * gate the clock if it is held on permanently (runtime PM handles the
 * normal case).
 */
2049 static int tegra_spi_suspend(struct device *dev)
2051 struct spi_master *master = dev_get_drvdata(dev);
2052 struct tegra_spi_data *tspi = spi_master_get_devdata(master);
2055 ret = spi_master_suspend(master);
2057 if (tspi->clock_always_on)
2058 clk_disable_unprepare(tspi->clk);
/*
 * tegra_spi_resume() - system resume: re-enable the always-on clock,
 * restore COMMAND1/COMMAND2 (registers are lost across suspend), rebuild
 * the interrupt mask on chips that have SPI_INTR_MASK, then resume the
 * SPI core queue.
 * NOTE(review): line 2083 restores tspi->command1_reg while probe/setup
 * cache tspi->def_command1_reg — confirm which snapshot is intended.
 */
2063 static int tegra_spi_resume(struct device *dev)
2065 struct spi_master *master = dev_get_drvdata(dev);
2066 struct tegra_spi_data *tspi = spi_master_get_devdata(master);
2067 unsigned long intr_mask;
2070 if (tspi->clock_always_on) {
2071 ret = clk_prepare_enable(tspi->clk);
2073 dev_err(tspi->dev, "clk_prepare failed: %d\n", ret);
2078 ret = tegra_spi_runtime_get(tspi);
2080 dev_err(dev, "pm runtime failed, e = %d\n", ret);
2083 tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
2084 tegra_spi_writel(tspi, tspi->def_command2_reg, SPI_COMMAND2);
2085 if (tspi->chip_data->intr_mask_reg) {
2086 if ((tspi->cur_direction & DATA_DIR_TX) ||
2087 (tspi->cur_direction & DATA_DIR_RX)) {
2088 intr_mask = tegra_spi_readl(tspi, SPI_INTR_MASK);
2089 if (!tspi->polling_mode)
2090 intr_mask &= ~(SPI_INTR_ALL_MASK);
2092 intr_mask |= SPI_INTR_ALL_MASK;
2093 tegra_spi_writel(tspi, intr_mask, SPI_INTR_MASK);
2096 tegra_spi_runtime_put(tspi);
2098 return spi_master_resume(master);
/*
 * tegra_spi_runtime_suspend() - runtime PM: flush posted PPSB writes with a
 * dummy register read, then gate the controller clock.
 */
2102 static int tegra_spi_runtime_suspend(struct device *dev)
2104 struct spi_master *master = dev_get_drvdata(dev);
2105 struct tegra_spi_data *tspi = spi_master_get_devdata(master);
2107 /* Flush all write which are in PPSB queue by reading back */
2108 tegra_spi_readl(tspi, SPI_COMMAND1);
2110 clk_disable_unprepare(tspi->clk);
/*
 * tegra_spi_runtime_resume() - runtime PM: ungate the controller clock,
 * logging (and, per the error path elided here, presumably returning) any
 * clk_prepare_enable() failure.
 */
2114 static int tegra_spi_runtime_resume(struct device *dev)
2116 struct spi_master *master = dev_get_drvdata(dev);
2117 struct tegra_spi_data *tspi = spi_master_get_devdata(master);
2120 ret = clk_prepare_enable(tspi->clk);
2122 dev_err(tspi->dev, "clk_prepare failed: %d\n", ret);
/* PM operations: runtime suspend/resume plus system sleep hooks. */
2128 static const struct dev_pm_ops tegra_spi_pm_ops = {
2129 SET_RUNTIME_PM_OPS(tegra_spi_runtime_suspend,
2130 tegra_spi_runtime_resume, NULL)
2131 SET_SYSTEM_SLEEP_PM_OPS(tegra_spi_suspend, tegra_spi_resume)
/* Platform driver registration and module metadata. */
2133 static struct platform_driver tegra_spi_driver = {
2135 .name = "spi-tegra114",
2136 .owner = THIS_MODULE,
2137 .pm = &tegra_spi_pm_ops,
2138 .of_match_table = of_match_ptr(tegra_spi_of_match),
2140 .probe = tegra_spi_probe,
2141 .remove = tegra_spi_remove,
2143 module_platform_driver(tegra_spi_driver);
2145 MODULE_ALIAS("platform:spi-tegra114");
2146 MODULE_DESCRIPTION("NVIDIA Tegra114/124 SPI Controller Driver");
2147 MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
2148 MODULE_LICENSE("GPL v2");