// SPDX-License-Identifier: GPL-2.0

/*
 * ASR QSPI driver
 *
 * Copyright (C) 2019 ASR Micro Limited
 *
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>
#include <linux/genalloc.h>
#include <linux/cputype.h>

#include <soc/asr/regs-addr.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>


/* #define ASR_DUMP_QSPI_REG 0 */

#define QSPI_WAIT_TIMEOUT		(300) /* ms */
#define QSPI_AUTOSUSPEND_TIMEOUT	2000
#define ASR_MPMU_ACGR			0x1024

/* QSPI PMUap register */
#define PMUA_QSPI_CLK_RES_CTRL		0x01282860
#define QSPI_CLK_SEL(x)			((x) << 6)
#define QSPI_CLK_SEL_MASK		GENMASK(8, 6)
#define QSPI_CLK_EN			BIT(4)
#define QSPI_BUS_CLK_EN			BIT(3)
#define QSPI_CLK_RST			BIT(1)
#define QSPI_BUS_RST			BIT(0)

/* QSPI memory base */
#if 0
#define QSPI_AMBA_BASE			0x300000
#define QSPI_FLASH_A1_BASE		QSPI_AMBA_BASE
#define QSPI_FLASH_A1_TOP		(QSPI_FLASH_A1_BASE + 0xa00000)
#define QSPI_FLASH_A2_BASE		QSPI_FLASH_A1_TOP
#define QSPI_FLASH_A2_TOP		(QSPI_FLASH_A2_BASE + 0x100000)
#define QSPI_FLASH_B1_BASE		QSPI_FLASH_A2_TOP
#define QSPI_FLASH_B1_TOP		(QSPI_FLASH_B1_BASE + 0x100000)
#define QSPI_FLASH_B2_BASE		QSPI_FLASH_B1_TOP
#define QSPI_FLASH_B2_TOP		(QSPI_FLASH_B2_BASE + 0x100000)
#else
/* AHB base addr */
#define QSPI_AMBA_BASE			0x80000000
#define QSPI_FLASH_A1_BASE		0x80000000
#define QSPI_FLASH_A1_TOP		0x88000000
#define QSPI_FLASH_A2_BASE		0x88000000
#define QSPI_FLASH_A2_TOP		0x90000000
#define QSPI_FLASH_B1_BASE		0x90000000
#define QSPI_FLASH_B1_TOP		0x98000000
#define QSPI_FLASH_B2_BASE		0x98000000
#define QSPI_FLASH_B2_TOP		0xa0000000

#endif
/* TX/RX/AHB buffer max */
#define QSPI_RX_BUFF_MAX		SZ_128
#define QSPI_TX_BUFF_MAX		SZ_256
#define QSPI_TX_BUFF_POP_MIN		16
#define QSPI_AHB_BUFF_MAX_SIZE		SZ_512
#define QSPI_TX_DMA_BURST		SZ_32

#define QSPI_WAIT_BIT_CLEAR		0
#define QSPI_WAIT_BIT_SET		1

/* clk source from PLL1 */
#define QSPI_CLK_PLL1_51P2		51200000
#define QSPI_CLK_PLL1_76P8		76800000
#define QSPI_CLK_PLL1_102P4		102400000
#define QSPI_CLK_PLL1_153P6		153600000
#define QSPI_CLK_PLL1_204P8		204800000
#define QSPI_CLK_PLL1_307P2		307200000
#define QSPI_CLK_PLL1_409P6		409600000
#define ASR_QSPI_DEFAULT_CLK_FREQ	(QSPI_CLK_PLL1_102P4 >> 2)
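/* Illustrative: the default SCK is PLL1 102.4 MHz divided by 4, i.e. 25.6 MHz. */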

/* QSPI Host Registers used by the driver */
#define QSPI_MCR			0x00
#define QSPI_MCR_DQS_INV_EN		BIT(26)
#define QSPI_MCR_DQS_LP_EN		BIT(25)
#define QSPI_MCR_ISD_MASK		GENMASK(19, 16)
#define QSPI_MCR_MDIS_MASK		BIT(14)
#define QSPI_MCR_CLR_TXF_MASK		BIT(11)
#define QSPI_MCR_CLR_RXF_MASK		BIT(10)
#define QSPI_MCR_DDR_EN_MASK		BIT(7)
#define QSPI_MCR_DQS_EN			BIT(6)
#define QSPI_MCR_END_CFG_MASK		GENMASK(3, 2)
#define QSPI_MCR_SWRSTHD_MASK		BIT(1)
#define QSPI_MCR_SWRSTSD_MASK		BIT(0)

#define QSPI_TCR			0x04
#define QSPI_IPCR			0x08
#define QSPI_IPCR_SEQID(x)		((x) << 24)

#define QSPI_FLSHCR			0x0c

#define QSPI_BUF0CR			0x10
#define QSPI_BUF1CR			0x14
#define QSPI_BUF2CR			0x18
#define QSPI_BUF3CR			0x1c
#define QSPI_BUF3CR_ALLMST_MASK		BIT(31)
#define QSPI_BUF3CR_ADATSZ(x)		((x) << 8)
#define QSPI_BUF3CR_ADATSZ_MASK		GENMASK(15, 8)

#define QSPI_BFGENCR			0x20
#define QSPI_BFGENCR_SEQID(x)		((x) << 12)

#define QSPI_SOCCR			0x24
#define QSPI_SOCCR_DLINE_EN		BIT(8)

#define QSPI_DLACR_DLINE_CODE_SHIFT	0
#define QSPI_DLACR_DLINE_CODE_MASK	GENMASK(7, 0)
#define QSPI_DLACR_DLINE_STEP_SHIFT	8
#define QSPI_DLACR_DLINE_STEP_MASK	GENMASK(15, 8)

#define QSPI_BUF0IND			0x30
#define QSPI_BUF1IND			0x34
#define QSPI_BUF2IND			0x38
#define QSPI_DLACR			0x3C

#define QSPI_SFAR			0x100
#define QSPI_SFACR			0x104

#define QSPI_SMPR			0x108
#define QSPI_SMPR_DDRSMP_SHIFT		16
#define QSPI_SMPR_DDRSMP_MASK		GENMASK(18, 16)
#define QSPI_SMPR_FSDLY_MASK		BIT(6)
#define QSPI_SMPR_FSPHS_MASK		BIT(5)
#define QSPI_SMPR_HSENA_MASK		BIT(0)

#define QSPI_RBSR			0x10c

#define QSPI_RBCT			0x110
#define QSPI_RBCT_WMRK_MASK		GENMASK(4, 0)
#define QSPI_RBCT_RXBRD_MASK		BIT(8)

#define QSPI_TBSR			0x150
#define QSPI_TBDR			0x154
#define QSPI_TBCT			0x158
#define QSPI_TX_WMRK			(QSPI_TX_DMA_BURST / 4 - 1)

#define QSPI_SR				0x15c
#define QSPI_SR_BUSY			BIT(0)
#define QSPI_SR_IP_ACC_MASK		BIT(1)
#define QSPI_SR_AHB_ACC_MASK		BIT(2)
#define QSPI_SR_TXFULL			BIT(27)

#define QSPI_FR				0x160
#define QSPI_FR_TFF_MASK		BIT(0)
#define QSPI_FR_IPGEF			BIT(4)
#define QSPI_FR_IPIEF			BIT(6)
#define QSPI_FR_IPAEF			BIT(7)
#define QSPI_FR_IUEF			BIT(11)
#define QSPI_FR_ABOF			BIT(12)
#define QSPI_FR_AIBSEF			BIT(13)
#define QSPI_FR_AITEF			BIT(14)
#define QSPI_FR_ABSEF			BIT(15)
#define QSPI_FR_RBDF			BIT(16)
#define QSPI_FR_RBOF			BIT(17)
#define QSPI_FR_ILLINE			BIT(23)
#define QSPI_FR_TBUF			BIT(26)
#define QSPI_FR_TBFF			BIT(27)
#define BUFFER_FR_FLAG			(QSPI_FR_ABOF | QSPI_FR_RBOF | \
					 QSPI_FR_TBUF)

#define COMMAND_FR_FLAG			(QSPI_FR_ABSEF | QSPI_FR_AITEF | \
					 QSPI_FR_AIBSEF | QSPI_FR_IUEF | \
					 QSPI_FR_IPAEF | QSPI_FR_IPIEF | \
					 QSPI_FR_IPGEF)

#define QSPI_RSER			0x164
#define QSPI_RSER_TFIE			BIT(0)
#define QSPI_RSER_IPGEIE		BIT(4)
#define QSPI_RSER_IPIEIE		BIT(6)
#define QSPI_RSER_IPAEIE		BIT(7)
#define QSPI_RSER_IUEIE			BIT(11)
#define QSPI_RSER_ABOIE			BIT(12)
#define QSPI_RSER_AIBSIE		BIT(13)
#define QSPI_RSER_AITIE			BIT(14)
#define QSPI_RSER_ABSEIE		BIT(15)
#define QSPI_RSER_RBDIE			BIT(16)
#define QSPI_RSER_RBOIE			BIT(17)
#define QSPI_RSER_RBDDE			BIT(21)
#define QSPI_RSER_ILLINIE		BIT(23)
#define QSPI_RSER_TBFDE			BIT(25)
#define QSPI_RSER_TBUIE			BIT(26)
#define QSPI_RSER_TBFIE			BIT(27)
#define BUFFER_ERROR_INT		(QSPI_RSER_ABOIE | QSPI_RSER_RBOIE | \
					 QSPI_RSER_TBUIE)

#define COMMAND_ERROR_INT		(QSPI_RSER_ABSEIE | QSPI_RSER_AITIE | \
					 QSPI_RSER_AIBSIE | QSPI_RSER_IUEIE | \
					 QSPI_RSER_IPAEIE | QSPI_RSER_IPIEIE | \
					 QSPI_RSER_IPGEIE)

#define QSPI_SPNDST			0x168
#define QSPI_SPTRCLR			0x16c
#define QSPI_SPTRCLR_IPPTRC		BIT(8)
#define QSPI_SPTRCLR_BFPTRC		BIT(0)

#define QSPI_SFA1AD			0x180
#define QSPI_SFA2AD			0x184
#define QSPI_SFB1AD			0x188
#define QSPI_SFB2AD			0x18c
#define QSPI_DLPR			0x190
#define QSPI_RBDR(x)			(0x200 + ((x) * 4))

#define QSPI_LUTKEY			0x300
#define QSPI_LUTKEY_VALUE		0x5af05af0

#define QSPI_LCKCR			0x304
#define QSPI_LCKER_LOCK			BIT(0)
#define QSPI_LCKER_UNLOCK		BIT(1)

#define QSPI_LUT_BASE			0x310
/* 16 bytes per sequence */
#define QSPI_LUT_REG(seqid, i)		(QSPI_LUT_BASE + (seqid) * 16 + (i) * 4)
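/*
 * Illustrative: QSPI_LUT_REG(0, 3) = 0x310 + 0x0c = 0x31c is the last word
 * of the AHB-read sequence, and QSPI_LUT_REG(1, 0) = 0x320 is the first
 * word of the shared sequence.
 */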

/*
 * QSPI sequence index.
 * index 0 is preset at boot for AHB read,
 * index 1 is used for other commands.
 */
#define SEQID_LUT_AHBREAD_ID		0
#define SEQID_LUT_SHARED_ID		1

/* QSPI Instruction set for the LUT register */
#define LUT_INSTR_STOP			0
#define LUT_INSTR_CMD			1
#define LUT_INSTR_ADDR			2
#define LUT_INSTR_DUMMY			3
#define LUT_INSTR_MODE			4
#define LUT_INSTR_MODE2			5
#define LUT_INSTR_MODE4			6
#define LUT_INSTR_READ			7
#define LUT_INSTR_WRITE			8
#define LUT_INSTR_JMP_ON_CS		9
#define LUT_INSTR_ADDR_DDR		10
#define LUT_INSTR_MODE_DDR		11
#define LUT_INSTR_MODE2_DDR		12
#define LUT_INSTR_MODE4_DDR		13
#define LUT_INSTR_READ_DDR		14
#define LUT_INSTR_WRITE_DDR		15
#define LUT_INSTR_DATA_LEARN		16
#define LUT_INSTR_CMD_DDR		17

/*
 * The PAD definitions for LUT register.
 *
 * The pad stands for the number of IO lines [0:3].
 * For example, the quad read needs four IO lines,
 * so you should use LUT_PAD(4).
 */
#define LUT_PAD(x)			(fls(x) - 1)
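/* Illustrative: LUT_PAD(1) = 0, LUT_PAD(2) = 1, LUT_PAD(4) = 2. */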

/*
 * One sequence must consist of 4 LUT entries (16 bytes).
 * LUT entries have the following register layout:
 * b'31                                                                    b'0
 * ---------------------------------------------------------------------------
 * |INSTR1[15~10]|PAD1[9~8]|OPRND1[7~0] | INSTR0[15~10]|PAD0[9~8]|OPRND0[7~0]|
 * ---------------------------------------------------------------------------
 */
#define LUT_DEF(idx, ins, pad, opr)	\
	((((ins) << 10) | ((pad) << 8) | (opr)) << (((idx) & 0x1) * 16))
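/*
 * Worked example (illustrative, not from a datasheet): a quad-I/O read
 * sequence packs its command into the low half-word of the first LUT word
 * and a 24-bit address descriptor into the high half-word:
 *
 *	lutval[0]  = LUT_DEF(0, LUT_INSTR_CMD, LUT_PAD(1),
 *			     READ_FROM_CACHE_OP_QUADIO);	// 0x000004eb
 *	lutval[0] |= LUT_DEF(1, LUT_INSTR_ADDR, LUT_PAD(4), 24);// 0x0a180000
 *
 * giving lutval[0] = 0x0a1804eb, which matches the layout drawn above.
 */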

#define READ_FROM_CACHE_OP		0x03
#define READ_FROM_CACHE_OP_Fast		0x0b
#define READ_FROM_CACHE_OP_X2		0x3b
#define READ_FROM_CACHE_OP_X4		0x6b
#define READ_FROM_CACHE_OP_DUALIO	0xbb
#define READ_FROM_CACHE_OP_QUADIO	0xeb

u32 reg_offset_table[] = {
	QSPI_MCR, QSPI_TCR, QSPI_IPCR, QSPI_FLSHCR,
	QSPI_BUF0CR, QSPI_BUF1CR, QSPI_BUF2CR, QSPI_BUF3CR,
	QSPI_BFGENCR, QSPI_SOCCR, QSPI_BUF0IND, QSPI_BUF1IND,
	QSPI_BUF2IND, QSPI_SFAR, QSPI_SFACR, QSPI_SMPR,
	QSPI_RBSR, QSPI_RBCT, QSPI_TBSR, QSPI_TBDR,
	QSPI_TBCT, QSPI_SR, QSPI_FR, QSPI_RSER,
	QSPI_SPNDST, QSPI_SPTRCLR, QSPI_SFA1AD, QSPI_SFA2AD,
	QSPI_SFB1AD, QSPI_SFB2AD, QSPI_DLPR, QSPI_LUTKEY,
	QSPI_LCKCR
};

#define QSPI_MAX_SEQ_NUM	16

/* asr qspi host priv */
struct asr_qspi {
	struct device *dev;
	struct spi_controller *ctrl;
	void __iomem *io_map;
	phys_addr_t io_phys;

	void __iomem *ahb_map;
	phys_addr_t memmap_base;
	u32 memmap_size;
	struct spi_mem_op *ahb_op;

	struct {
		struct gen_pool *pool;
		void __iomem *virt;
		dma_addr_t dma;
	} sram;

	u32 sfa1ad;
	u32 sfa2ad;
	u32 sfb1ad;
	u32 sfb2ad;

	u32 pmuap_reg;
	void __iomem *pmuap_addr;

	u32 rx_buf_size;
	u32 tx_buf_size;
	u32 ahb_buf_size;
	u32 ahb_read_enable;
	u32 tx_unit_size;
	u32 rx_unit_size;

	u32 has_dtr;
	u32 support_dqs;
	u32 dtr_tx_delay;
	u32 dtr_rx_delay;
	u32 cmd_interrupt;
	u32 fr_error_flag;

	u32 tx_dma_enable;
	u32 tx_wmrk;
	struct dma_chan *tx_dma;
	struct dma_slave_config tx_dma_cfg;

	u32 rx_dma_enable;
	struct dma_chan *rx_dma;

	struct sg_table sgt;
	struct completion dma_completion;

	u32 cs_selected;
	u32 max_hz;
	u32 endian_xchg;
	u32 dma_enable;

	struct clk *clk, *bus_clk;
	struct completion cmd_completion;
	struct mutex lock;
	struct pm_qos_request pm_qos_req;
	struct pm_qos_request pm_ddr_qos;
	u32 lpm_qos;
	bool rst_protect;

	/* seq ids 0 and 1 are reserved */
	u8 seq_opcode[QSPI_MAX_SEQ_NUM];
};

enum qspi_cs {
	QSPI_CS_A1 = 0,
	QSPI_CS_A2,
	QSPI_CS_B1,
	QSPI_CS_B2,
	QSPI_CS_MAX,
};
#define QSPI_DEFAULT_CS		(QSPI_CS_A1)


enum qspi_mode {
	QSPI_NORMAL_MODE = 0,
	QSPI_DISABLE_MODE,
	QSPI_STOP_MODE,
};


static void qspi_writel(struct asr_qspi *qspi, u32 val, void __iomem *addr)
{
	if (qspi->endian_xchg)
		iowrite32be(val, addr);
	else
		iowrite32(val, addr);
}

static u32 qspi_readl(struct asr_qspi *qspi, void __iomem *addr)
{
	if (qspi->endian_xchg)
		return ioread32be(addr);
	else
		return ioread32(addr);
}

static void qspi_enter_mode(struct asr_qspi *qspi, uint32_t mode)
{
	uint32_t mcr;

	mcr = qspi_readl(qspi, qspi->io_map + QSPI_MCR);
	if (mode == QSPI_NORMAL_MODE)
		mcr &= ~QSPI_MCR_MDIS_MASK;
	else if (mode == QSPI_DISABLE_MODE)
		mcr |= QSPI_MCR_MDIS_MASK;
	qspi_writel(qspi, mcr, qspi->io_map + QSPI_MCR);
}

static int asr_qspi_set_default_timing(struct asr_qspi *qspi, int clk_hz)
{
	void __iomem *base = qspi->io_map;
	u32 reg;

	/* clock settings */
	qspi_enter_mode(qspi, QSPI_DISABLE_MODE);

	/* disable DQS */
	reg = qspi_readl(qspi, base + QSPI_MCR);
	reg &= ~(QSPI_MCR_DQS_EN | QSPI_MCR_DQS_LP_EN | QSPI_MCR_DQS_INV_EN);
	qspi_writel(qspi, reg, base + QSPI_MCR);

	reg = 0;
	qspi_writel(qspi, reg, base + QSPI_SMPR);

	/* set tx hold time */
	reg = 0x202;
	if (qspi->has_dtr)
		reg |= qspi->dtr_tx_delay << 16;
	qspi_writel(qspi, reg, base + QSPI_FLSHCR);

	/* Module enabled */
	qspi_enter_mode(qspi, QSPI_NORMAL_MODE);

	return 0;
}

static int qspi_set_func_clk(struct asr_qspi *qspi, int max_hz)
{
	int ret = 0;

	if (qspi->has_dtr) {
		qspi->clk = devm_clk_get(qspi->dev, "qspi_clk_dtr");
		if (IS_ERR_OR_NULL(qspi->clk)) {
			dev_err(qspi->dev, "can not find the clock\n");
			return -EINVAL;
		}
	} else {
		qspi->clk = devm_clk_get(qspi->dev, "qspi_clk");
		if (IS_ERR_OR_NULL(qspi->clk)) {
			dev_err(qspi->dev, "can not find the clock\n");
			return -EINVAL;
		}
	}

	qspi->bus_clk = devm_clk_get(qspi->dev, "qspi_bus_clk");
	if (IS_ERR_OR_NULL(qspi->bus_clk)) {
		dev_err(qspi->dev, "can not find the bus clock\n");
		return -EINVAL;
	}
	clk_prepare_enable(qspi->bus_clk);

	ret = clk_set_rate(qspi->clk, max_hz);
	if (ret) {
		dev_err(qspi->dev, "fail to set clk, ret:%d\n", ret);
		return ret;
	}

	ret = clk_prepare_enable(qspi->clk);
	if (ret) {
		dev_err(qspi->dev, "fail to enable clk, ret:%d\n", ret);
		return ret;
	}

	asr_qspi_set_default_timing(qspi, max_hz);

	dev_notice(qspi->dev, "bus clock %dHz, PMUap reg[0x%08x]:0x%08x\n",
		   max_hz, qspi->pmuap_reg, qspi_readl(qspi, qspi->pmuap_addr));

	return 0;
}

static void qspi_config_mfp(struct asr_qspi *qspi)
{
	int cs = qspi->cs_selected;

	/* TODO: only for FPGA */
#if 0
	if (cs == QSPI_CS_A1 || cs == QSPI_CS_A2) {
		writel(0x1002, 0x0101e2c4); // QSPI_DAT3
		writel(0x1002, 0x0101e2c8); // QSPI_DAT2
		writel(0x1002, 0x0101e2cc); // QSPI_DAT1
		writel(0x1002, 0x0101e2d0); // QSPI_DAT0
		writel(0x1002, 0x0101e2d4); // QSPI_CLK
		writel(0xd002, 0x0101e2d8); // QSPI_CS1
		writel(0xd002, 0x0101e2dc); // QSPI_CS2
	}
#endif
	dev_info(qspi->dev, "config mfp for cs:[%d]\n", cs);
}

static int asr_qspi_readl_poll_tout(struct asr_qspi *qspi, void __iomem *base,
				    u32 mask, u32 timeout_us, u8 wait_set)
{
	u32 reg;

	if (qspi->endian_xchg)
		mask = swab32(mask);

	if (wait_set)
		return readl_poll_timeout_atomic(base, reg, (reg & mask),
						 10, timeout_us);
	else
		return readl_poll_timeout_atomic(base, reg, !(reg & mask),
						 10, timeout_us);
}

static void qspi_reset(struct asr_qspi *qspi)
{
	uint32_t reg;
	int err;

	/* QSPI_SR[QSPI_SR_BUSY] must be 0 */
	err = asr_qspi_readl_poll_tout(qspi, qspi->io_map + QSPI_SR,
			QSPI_SR_BUSY, QSPI_WAIT_TIMEOUT*1000, QSPI_WAIT_BIT_CLEAR);
	if (err) {
		dev_err(qspi->dev, "failed to reset qspi host.\n");
	} else {
		/* qspi softreset first */
		reg = qspi_readl(qspi, qspi->io_map + QSPI_MCR);
		reg |= QSPI_MCR_SWRSTHD_MASK | QSPI_MCR_SWRSTSD_MASK;
		qspi_writel(qspi, reg, qspi->io_map + QSPI_MCR);
		reg = qspi_readl(qspi, qspi->io_map + QSPI_MCR);
		if ((reg & 0x3) != 0x3)
			dev_info(qspi->dev, "reset ignored 0x%x.\n", reg);

		udelay(1);
		reg &= ~(QSPI_MCR_SWRSTHD_MASK | QSPI_MCR_SWRSTSD_MASK);
		qspi_writel(qspi, reg, qspi->io_map + QSPI_MCR);
	}
}

static void qspi_write_sfar(struct asr_qspi *qspi, uint32_t val)
{
	int err;

	/* QSPI_SR[IP_ACC] must be 0 */
	err = asr_qspi_readl_poll_tout(qspi, qspi->io_map + QSPI_SR,
			QSPI_SR_IP_ACC_MASK, QSPI_WAIT_TIMEOUT*1000, QSPI_WAIT_BIT_CLEAR);
	if (err)
		dev_err(qspi->dev, "failed to set QSPI_SFAR.\n");
	else
		qspi_writel(qspi, val, qspi->io_map + QSPI_SFAR);
}

/*
 * The "IP Command Trigger could not be executed" error flag may be raised
 * on write access to the RBCT/SFAR registers, so writes to these two
 * registers need a retry.
 */
static void qspi_write_rbct(struct asr_qspi *qspi, uint32_t val)
{
	int err;

	/* QSPI_SR[IP_ACC] must be 0 */
	err = asr_qspi_readl_poll_tout(qspi, qspi->io_map + QSPI_SR,
			QSPI_SR_IP_ACC_MASK, QSPI_WAIT_TIMEOUT*1000, QSPI_WAIT_BIT_CLEAR);
	if (err)
		dev_err(qspi->dev, "failed to set QSPI_RBCT.\n");
	else
		qspi_writel(qspi, val, qspi->io_map + QSPI_RBCT);
}

void qspi_init_ahbread(struct asr_qspi *qspi, int seq_id)
{
	u32 buf_cfg = 0;

	buf_cfg = QSPI_BUF3CR_ALLMST_MASK |
		  QSPI_BUF3CR_ADATSZ((qspi->ahb_buf_size / 8));
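	/*
	 * Illustrative: with the default 512-byte AHB buffer, ADATSZ is
	 * 512 / 8 = 64; the field is presumably in units of 8 bytes (an
	 * assumption based on the division above, not on a datasheet).
	 */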

#ifdef CONFIG_CPU_ASR1903
	/* Disable BUF1~BUF2, use BUF0 for all masters */
	qspi_writel(qspi, (512/8 - 1) * 8, qspi->io_map + QSPI_BUF0IND);
	qspi_writel(qspi, 512, qspi->io_map + QSPI_BUF1IND);
	qspi_writel(qspi, 512, qspi->io_map + QSPI_BUF2IND);

	/* AHB Master port */
	qspi_writel(qspi, buf_cfg, qspi->io_map + QSPI_BUF0CR); // other masters
	qspi_writel(qspi, 0xe, qspi->io_map + QSPI_BUF1CR);
	qspi_writel(qspi, 0xe, qspi->io_map + QSPI_BUF2CR);
	qspi_writel(qspi, 0xe, qspi->io_map + QSPI_BUF3CR);
#else
	/* Disable BUF0~BUF1, use BUF3 for all masters */
	qspi_writel(qspi, 0, qspi->io_map + QSPI_BUF0IND);
	qspi_writel(qspi, 0, qspi->io_map + QSPI_BUF1IND);
	qspi_writel(qspi, 0, qspi->io_map + QSPI_BUF2IND);

	/* AHB Master port */
	qspi_writel(qspi, 0xe, qspi->io_map + QSPI_BUF0CR);
	qspi_writel(qspi, 0xe, qspi->io_map + QSPI_BUF1CR);
	qspi_writel(qspi, 0xe, qspi->io_map + QSPI_BUF2CR);
	qspi_writel(qspi, buf_cfg, qspi->io_map + QSPI_BUF3CR); // other masters
#endif
	/* set AHB read sequence id */
	qspi_writel(qspi, QSPI_BFGENCR_SEQID(seq_id), qspi->io_map + QSPI_BFGENCR);
}

void qspi_dump_reg(struct asr_qspi *qspi)
{
	u32 reg = 0;
	void __iomem *base = qspi->io_map;
	int i;

	dev_notice(qspi->dev, "dump qspi host register:\n");
	for (i = 0; i < ARRAY_SIZE(reg_offset_table); i++) {
		if (i > 0 && (i % 4 == 0))
			dev_notice(qspi->dev, "\n");
		reg = qspi_readl(qspi, base + reg_offset_table[i]);
		dev_notice(qspi->dev, "offset[0x%03x]:0x%08x\t\t",
			   reg_offset_table[i], reg);
	}

	dev_notice(qspi->dev, "\ndump AHB read LUT:\n");
	for (i = 0; i < 4; i++) {
		reg = qspi_readl(qspi, base + QSPI_LUT_REG(SEQID_LUT_AHBREAD_ID, i));
		dev_notice(qspi->dev, "lut_reg[0x%03x]:0x%08x\t\t",
			   QSPI_LUT_REG(SEQID_LUT_AHBREAD_ID, i), reg);
	}

	dev_notice(qspi->dev, "\ndump shared LUT:\n");
	for (i = 0; i < 4; i++) {
		reg = qspi_readl(qspi, base + QSPI_LUT_REG(SEQID_LUT_SHARED_ID, i));
		dev_notice(qspi->dev, "lut_reg[0x%03x]:0x%08x\t\t",
			   QSPI_LUT_REG(SEQID_LUT_SHARED_ID, i), reg);
	}
	dev_notice(qspi->dev, "\n");
}

/*
 * If the slave device content is changed by a Write/Erase, the AHB buffer
 * must be invalidated. This is achieved by toggling the controller's
 * software reset bits, QSPI_MCR[SWRSTHD/SWRSTSD].
 */
static inline void asr_qspi_invalid(struct asr_qspi *qspi)
{
	u32 reg;

	reg = qspi_readl(qspi, qspi->io_map + QSPI_MCR);
	reg |= QSPI_MCR_SWRSTHD_MASK | QSPI_MCR_SWRSTSD_MASK;
	qspi_writel(qspi, reg, qspi->io_map + QSPI_MCR);

	/*
	 * The minimum delay : 1 AHB + 2 SFCK clocks.
	 * Delay 1 us is enough.
	 */
	udelay(1);

	reg &= ~(QSPI_MCR_SWRSTHD_MASK | QSPI_MCR_SWRSTSD_MASK);
	qspi_writel(qspi, reg, qspi->io_map + QSPI_MCR);
}

static u8 asr_qspi_prepare_lut(struct asr_qspi *qspi,
			       const struct spi_mem_op *op, u32 seq_id)
{
	u32 lutval[4] = {0,};
	int lutidx = 0;
	int i;
	u8 opcode;

	if (seq_id != SEQID_LUT_AHBREAD_ID) {
		for (i = 2; i < QSPI_MAX_SEQ_NUM; i++) {
			opcode = qspi->seq_opcode[i];
			if (!opcode) {
				seq_id = i;
				break;
			} else if (opcode == op->cmd.opcode) {
				return i;
			}
		}
	}

	/* qspi cmd */
	lutval[0] |= LUT_DEF(lutidx,
			     (op->cmd.dtr ? LUT_INSTR_CMD_DDR : LUT_INSTR_CMD),
			     LUT_PAD(op->cmd.buswidth),
			     op->cmd.opcode);
	lutidx++;

	/* addr bytes */
	if (op->addr.nbytes) {
		lutval[lutidx / 2] |=
			LUT_DEF(lutidx,
				(op->addr.dtr ? LUT_INSTR_ADDR_DDR : LUT_INSTR_ADDR),
				LUT_PAD(op->addr.buswidth),
				op->addr.nbytes * 8);
		lutidx++;
	}

	/* dummy bytes, if needed */
	if (op->dummy.nbytes) {
		lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_INSTR_DUMMY,
					      LUT_PAD(op->dummy.buswidth),
					      op->dummy.nbytes * 8 /
					      op->dummy.buswidth /
					      (op->dummy.dtr ? 2 : 1));
		lutidx++;
	}
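	/*
	 * Illustrative dummy-cycle math: 3 dummy bytes on 1 line in SDR give
	 * 3 * 8 / 1 / 1 = 24 cycles; 8 dummy bytes on 4 lines in DTR give
	 * 8 * 8 / 4 / 2 = 8 cycles.
	 */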

	/* read/write data bytes */
	if (op->data.buswidth) {
		u8 inst;

		if (op->data.dir == SPI_MEM_DATA_IN) {
			if (op->data.dtr)
				inst = LUT_INSTR_READ_DDR;
			else
				inst = LUT_INSTR_READ;
		} else {
			if (op->data.dtr)
				inst = LUT_INSTR_WRITE_DDR;
			else
				inst = LUT_INSTR_WRITE;
		}
		lutval[lutidx / 2] |= LUT_DEF(lutidx, inst,
					      LUT_PAD(op->data.buswidth),
					      0);
		lutidx++;
	}

	/* stop condition. */
	lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_INSTR_STOP, 0, 0);

	/* unlock LUT */
	qspi_writel(qspi, QSPI_LUTKEY_VALUE, qspi->io_map + QSPI_LUTKEY);
	qspi_writel(qspi, QSPI_LCKER_UNLOCK, qspi->io_map + QSPI_LCKCR);

	/* fill LUT register */
	for (i = 0; i < ARRAY_SIZE(lutval); i++)
		qspi_writel(qspi, lutval[i], qspi->io_map + QSPI_LUT_REG(seq_id, i));

	/* lock LUT */
	qspi_writel(qspi, QSPI_LUTKEY_VALUE, qspi->io_map + QSPI_LUTKEY);
	qspi_writel(qspi, QSPI_LCKER_LOCK, qspi->io_map + QSPI_LCKCR);

	dev_dbg(qspi->dev, "opcode:0x%x, lut_reg[0:0x%x, 1:0x%x, 2:0x%x, 3:0x%x]\n",
		op->cmd.opcode, lutval[0], lutval[1], lutval[2], lutval[3]);

	qspi->seq_opcode[seq_id] = op->cmd.opcode;
	return seq_id;
}

static void asr_qspi_enable_interrupt(struct asr_qspi *qspi, u32 val)
{
	u32 resr = 0;

	resr = qspi_readl(qspi, qspi->io_map + QSPI_RSER);
	resr |= val;
	qspi_writel(qspi, resr, qspi->io_map + QSPI_RSER);
}

static void asr_qspi_disable_interrupt(struct asr_qspi *qspi, u32 val)
{
	u32 resr = 0;

	resr = qspi_readl(qspi, qspi->io_map + QSPI_RSER);
	resr &= ~val;
	qspi_writel(qspi, resr, qspi->io_map + QSPI_RSER);
}

static void asr_qspi_prepare_dma(struct asr_qspi *qspi)
{
	struct dma_slave_config dma_cfg;
	struct device *dev = qspi->dev;
	dma_cap_mask_t mask;

	/* RX DMA: DMA_MEMCPY type */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	if (qspi->rx_dma_enable) {
		qspi->rx_dma = dma_request_chan_by_mask(&mask);
		if (IS_ERR_OR_NULL(qspi->rx_dma)) {
			dev_err(dev, "rx dma request channel failed\n");
			qspi->rx_dma = NULL;
			qspi->rx_dma_enable = 0;
		} else {
			dev_notice(dev, "rx dma enabled, channel:%d\n",
				   qspi->rx_dma->chan_id);
		}
	} else {
		dev_notice(dev, "rx dma not enabled\n");
	}

	/* TX DMA: DMA_SLAVE type */
	if (qspi->tx_dma_enable) {
		qspi->tx_dma = dma_request_slave_channel(dev, "tx-dma");
		if (qspi->tx_dma) {
			memset(&dma_cfg, 0, sizeof(struct dma_slave_config));
			dma_cfg.direction = DMA_MEM_TO_DEV;
			dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
			dma_cfg.dst_addr = qspi->io_phys + QSPI_TBDR - 4;
			dma_cfg.dst_maxburst = QSPI_TX_DMA_BURST;
			if (dmaengine_slave_config(qspi->tx_dma, &dma_cfg)) {
				dev_err(dev, "tx dma slave config failed\n");
				dma_release_channel(qspi->tx_dma);
				qspi->tx_dma = NULL;
				qspi->tx_dma_enable = 0;
			} else {
				dev_notice(dev, "tx dma enabled, channel:%d\n",
					   qspi->tx_dma->chan_id);
			}
		}
	} else {
		dev_notice(dev, "tx dma not enabled\n");
	}

	if (qspi->tx_dma || qspi->rx_dma)
		init_completion(&qspi->dma_completion);
}

static void asr_qspi_dma_callback(void *arg)
{
	struct completion *dma_completion = arg;

	complete(dma_completion);
}

int asr_qspi_tx_dma_exec(struct asr_qspi *qspi,
			 const struct spi_mem_op *op)
{
	struct dma_async_tx_descriptor *desc;
	enum dma_transfer_direction dma_dir = DMA_MEM_TO_DEV;
	dma_cookie_t cookie;
	int err = 0;

	if (qspi->sram.virt) {
		/* use buffer from sram to avoid tx underrun error */
		memcpy(qspi->sram.virt, op->data.buf.in, op->data.nbytes);

		desc = dmaengine_prep_slave_single(qspi->tx_dma,
				qspi->sram.dma, op->data.nbytes, dma_dir,
				DMA_PREP_INTERRUPT);
		if (!desc) {
			dev_err(qspi->dev, "tx dma prep error\n");
			return -ENOMEM;
		}
	} else {
		if (!virt_addr_valid(op->data.buf.in) ||
		    spi_controller_dma_map_mem_op_data(qspi->ctrl,
						       op, &qspi->sgt)) {
			dev_err(qspi->dev, "tx dma map error\n");
			return -EIO;
		}

		desc = dmaengine_prep_slave_sg(
				qspi->tx_dma, qspi->sgt.sgl, qspi->sgt.nents,
				dma_dir, DMA_PREP_INTERRUPT);
		if (!desc) {
			dev_err(qspi->dev, "tx dma prep error\n");
			err = -ENOMEM;
			goto out;
		}
	}

	reinit_completion(&qspi->dma_completion);
	desc->callback = asr_qspi_dma_callback;
	desc->callback_param = &qspi->dma_completion;

	cookie = dmaengine_submit(desc);
	err = dma_submit_error(cookie);
	if (err) {
		dev_err(qspi->dev, "tx dma dmaengine_submit error\n");
		goto out;
	}

	dma_async_issue_pending(qspi->tx_dma);

	return 0;

out:
	if (!qspi->sram.virt)
		spi_controller_dma_unmap_mem_op_data(qspi->ctrl, op, &qspi->sgt);
	return err;
}

int asr_qspi_rx_dma_exec(struct asr_qspi *qspi, dma_addr_t dma_dst,
			 dma_addr_t dma_src, size_t len)
{
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct dma_async_tx_descriptor *desc;
	int ret;

	desc = dmaengine_prep_dma_memcpy(qspi->rx_dma, dma_dst, dma_src, len, flags);
	if (!desc) {
		dev_err(qspi->dev, "dmaengine_prep_dma_memcpy error\n");
		return -EIO;
	}

	reinit_completion(&qspi->dma_completion);
	desc->callback = asr_qspi_dma_callback;
	desc->callback_param = &qspi->dma_completion;
	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(qspi->dev, "dma_submit_error %d\n", cookie);
		return -EIO;
	}

	dma_async_issue_pending(qspi->rx_dma);
	ret = wait_for_completion_timeout(&qspi->dma_completion,
					  msecs_to_jiffies(len));
	if (ret <= 0) {
		dmaengine_terminate_sync(qspi->rx_dma);
		dev_err(qspi->dev, "DMA wait_for_completion_timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int asr_qspi_rx_dma_sg(struct asr_qspi *qspi, struct sg_table rx_sg,
			      loff_t from)
{
	struct scatterlist *sg;
	dma_addr_t dma_src = qspi->memmap_base + from;
	dma_addr_t dma_dst;
	int i, len, ret;

	for_each_sg(rx_sg.sgl, sg, rx_sg.nents, i) {
		dma_dst = sg_dma_address(sg);
		len = sg_dma_len(sg);
		dev_dbg(qspi->dev, "rx dma, dst:0x%08x, src:0x%08x, len:%d\n",
			dma_dst, dma_src, len);
		ret = asr_qspi_rx_dma_exec(qspi, dma_dst, dma_src, len);
		if (ret)
			return ret;
		dma_src += len;
	}

	return 0;
}

static int asr_qspi_ahb_read(struct asr_qspi *qspi,
			     const struct spi_mem_op *op)
{
	int ret = 0;
	u32 len = op->data.nbytes;
	u32 from = op->addr.val;
	struct sg_table sgt;

	/* Read out the data directly from the AHB buffer. */
	dev_dbg(qspi->dev, "ahb read %d bytes from address:0x%llx\n",
		len, (qspi->memmap_base + op->addr.val));
	if (from + len > qspi->memmap_size)
		return -ENOTSUPP;

	/* first try the DMA */
	if (qspi->rx_dma_enable) {
		if (virt_addr_valid(op->data.buf.in) &&
		    !spi_controller_dma_map_mem_op_data(qspi->ctrl, op, &sgt)) {
			ret = asr_qspi_rx_dma_sg(qspi, sgt, from);
			spi_controller_dma_unmap_mem_op_data(qspi->ctrl, op, &sgt);
		} else {
			ret = -EIO;
			dev_err(qspi->dev, "spi_controller_dma_map_mem_op_data error\n");
		}

		/* DMA completed */
		if (!ret)
			return 0;
	}

	if (qspi->rx_dma_enable && ret) {
		dev_notice(qspi->dev, "rx dma read falls back to memcpy read.\n");
	}

restart:
	qspi->rst_protect = false;
	if (!qspi->rx_dma_enable || (qspi->rx_dma_enable && ret)) {
		memcpy_fromio(op->data.buf.in, (qspi->ahb_map + op->addr.val), len);
	}

	if (qspi->rst_protect) {
		dev_info_ratelimited(qspi->dev, "retry read for reset protect\n");
		goto restart;
	}

	return 0;
}

static int asr_qspi_fill_txfifo(struct asr_qspi *qspi,
				const struct spi_mem_op *op)
{
	void __iomem *base = qspi->io_map;
	int i;
	u32 val;
	u32 tbsr;
	u32 wait_cnt;

	if (!qspi->tx_dma_enable || op->data.nbytes < QSPI_TX_BUFF_POP_MIN*2 ||
	    (op->data.nbytes % QSPI_TX_DMA_BURST)) {
		int tbdr_cnt = 0;

		qspi->tx_wmrk = 0;
		for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) {
			memcpy(&val, op->data.buf.out + i, 4);
			qspi_writel(qspi, val, base + QSPI_TBDR);
			tbdr_cnt += 4;
		}

		if (i < op->data.nbytes) {
			memcpy(&val, op->data.buf.out + i, op->data.nbytes - i);
			qspi_writel(qspi, val, base + QSPI_TBDR);
			tbdr_cnt += 4;
		}

		/*
		 * There must be at least 128 bits of data available in the TX
		 * FIFO for any pop operation, otherwise QSPI_FR[TBUF] is set.
		 */
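		/*
		 * Illustrative: a 10-byte write pushes 12 bytes (three words)
		 * above, then pads one more zero word below to reach the
		 * 16-byte (128-bit) minimum.
		 */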
		tbdr_cnt = tbdr_cnt % QSPI_TX_BUFF_POP_MIN;
		for (i = tbdr_cnt; i < QSPI_TX_BUFF_POP_MIN; i += 4)
			qspi_writel(qspi, 0, base + QSPI_TBDR);
	} else {
		/*
		 * Note that the number of bytes per DMA loop is determined
		 * by the size of QSPI_TBCT[WMRK]:
		 * bytes per DMA loop = (QSPI_TBCT[WMRK] + 1) * 4.
		 * Set QSPI_TX_WMRK as the TX watermark.
		 */
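		/*
		 * Illustrative: with QSPI_TX_DMA_BURST = 32, QSPI_TX_WMRK is
		 * 32 / 4 - 1 = 7, so each DMA request pops
		 * (7 + 1) * 4 = 32 bytes.
		 */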
		qspi->tx_wmrk = QSPI_TX_WMRK;
		qspi_writel(qspi, qspi->tx_wmrk, base + QSPI_TBCT);

		/* increase ddr freq for tx dma, avoid fifo underrun */
		if (!qspi->sram.virt && qspi->tx_unit_size > qspi->tx_buf_size)
			pm_qos_update_request_timeout(
				&qspi->pm_ddr_qos, INT_MAX, 100*1000);

		/* config DMA channel and start */
		if (asr_qspi_tx_dma_exec(qspi, op)) {
			qspi->tx_wmrk = 0;
			dev_err(qspi->dev, "failed to start tx dma\n");
			return -EIO;
		}
		/* enable DMA request */
		asr_qspi_enable_interrupt(qspi, QSPI_RSER_TBFDE);

		/*
		 * Before triggering qspi to send data to the external bus,
		 * the TX buffer needs to hold some data, or an underrun error
		 * may happen. DMA needs some time to write data to the TX
		 * buffer, so add a delay here for this requirement.
		 */
		wait_cnt = 0;
		do {
			tbsr = qspi_readl(qspi, base + QSPI_TBSR);
			tbsr = 4 * (tbsr >> 16);
			if (tbsr >= min_t(unsigned int, qspi->tx_buf_size,
					  op->data.nbytes))
				break;

			if (wait_cnt > 10050) {
				dev_err(qspi->dev,
					"TX DMA failed, TBSR=0x%x\n", tbsr);
				qspi_dump_reg(qspi);

				/* disable all interrupts */
				qspi_writel(qspi, 0, qspi->io_map + QSPI_RSER);
				dmaengine_dump_status(qspi->tx_dma);
				dmaengine_terminate_all(qspi->tx_dma);
				if (!qspi->sram.virt)
					spi_controller_dma_unmap_mem_op_data(
						qspi->ctrl, op, &qspi->sgt);
				qspi->tx_wmrk = 0;

				return -EAGAIN;
			}

			if (wait_cnt++ >= 10000)
				msleep(10);
			else
				udelay(1);
		} while (1);
	}

	return 0;
}

static void asr_qspi_read_rxfifo(struct asr_qspi *qspi,
				 const struct spi_mem_op *op)
{
	void __iomem *base = qspi->io_map;
	int i;
	u8 *buf = op->data.buf.in;
	u32 val;

	dev_dbg(qspi->dev, "ip read %d bytes\n", op->data.nbytes);
	for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) {
		val = qspi_readl(qspi, base + QSPI_RBDR(i / 4));
		memcpy(buf + i, &val, 4);
	}

	if (i < op->data.nbytes) {
		val = qspi_readl(qspi, base + QSPI_RBDR(i / 4));
		memcpy(buf + i, &val, op->data.nbytes - i);
	}
}

static irqreturn_t asr_qspi_reset_handler(int irq, void *dev_id)
{
	struct asr_qspi *qspi = dev_id;

	qspi->rst_protect = true;
	dev_info_ratelimited(qspi->dev, "qspi catch reset signal\n");

	return IRQ_HANDLED;
}

static irqreturn_t asr_qspi_irq_handler(int irq, void *dev_id)
{
	struct asr_qspi *qspi = dev_id;
	u32 fr;

	fr = qspi_readl(qspi, qspi->io_map + QSPI_FR);
	qspi_writel(qspi, fr & ~QSPI_FR_RBDF, qspi->io_map + QSPI_FR);
	dev_dbg(qspi->dev, "QSPI_FR:0x%08x\n", fr);

	/* check QSPI_FR error flag */
	if (fr & (COMMAND_FR_FLAG | BUFFER_FR_FLAG)) {
		qspi->fr_error_flag = fr & (COMMAND_FR_FLAG | BUFFER_FR_FLAG);

		if (fr & QSPI_FR_IPGEF)
			dev_err(qspi->dev, "IP command trigger during AHB grant\n");
		if (fr & QSPI_FR_IPIEF)
			dev_err(qspi->dev, "IP command trigger could not be executed\n");
		if (fr & QSPI_FR_IPAEF)
			dev_err(qspi->dev, "IP command trigger during AHB access\n");
		if (fr & QSPI_FR_IUEF)
			dev_err(qspi->dev, "IP command usage error\n");
		if (fr & QSPI_FR_AIBSEF)
			dev_err(qspi->dev, "AHB illegal burst size error\n");
		if (fr & QSPI_FR_AITEF)
			dev_err(qspi->dev, "AHB illegal transaction error\n");
		if (fr & QSPI_FR_ABSEF)
			dev_err(qspi->dev, "AHB sequence error\n");

		if (fr & QSPI_FR_TBUF)
			dev_err_ratelimited(qspi->dev, "TX buffer underrun\n");
		if (fr & QSPI_FR_RBOF)
			dev_err(qspi->dev, "RX buffer overflow\n");
		if (fr & QSPI_FR_ABOF)
			dev_err(qspi->dev, "AHB buffer overflow\n");
	}

	if (qspi->cmd_interrupt && (fr & (QSPI_FR_TFF_MASK | COMMAND_FR_FLAG | BUFFER_FR_FLAG)))
		complete(&qspi->cmd_completion);

	return IRQ_HANDLED;
}

static int asr_qspi_do_op(struct asr_qspi *qspi, const struct spi_mem_op *op,
			  u8 seq_id)
{
	void __iomem *base = qspi->io_map;
	int err = 0;
	u32 mcr;
	//void __iomem *mpmu_acgr = regs_addr_get_va(REGS_ADDR_MPMU) + ASR_MPMU_ACGR;

#ifdef ASR_DUMP_QSPI_REG
	/* dump reg if needed */
	qspi_dump_reg(qspi);
#endif

	if (qspi->cmd_interrupt) {
		asr_qspi_enable_interrupt(qspi, QSPI_RSER_TFIE | BUFFER_ERROR_INT | COMMAND_ERROR_INT);
		init_completion(&qspi->cmd_completion);
	}

	/* trigger LUT */
	qspi_writel(qspi, op->data.nbytes | QSPI_IPCR_SEQID(seq_id),
		    base + QSPI_IPCR);

	/* wait for the transaction to complete */
	if (qspi->cmd_interrupt) {
		if (!wait_for_completion_timeout(&qspi->cmd_completion,
						 msecs_to_jiffies(1000)))
			err = -ETIMEDOUT;
	} else {
		err = asr_qspi_readl_poll_tout(qspi, base + QSPI_FR, QSPI_FR_TFF_MASK,
					       QSPI_WAIT_TIMEOUT*1000, QSPI_WAIT_BIT_SET);
	}
	if (err) {
		dev_err(qspi->dev, "opcode:0x%x transaction abort, ret:%d, error flag:0x%08x\n",
			op->cmd.opcode, err, qspi->fr_error_flag);
		dev_err(qspi->dev, "pmuap[0x%08x]:0x%08x\n", qspi->pmuap_reg, qspi_readl(qspi, qspi->pmuap_addr));
		//dev_err(qspi->dev, "mpmu[0x%08x]:0x%08x\n", ASR_MPMU_ACGR, qspi_readl(qspi, mpmu_acgr));
		qspi_dump_reg(qspi);
		goto tx_dma_unmap;
	}

	/* read RX buffer for IP command read */
	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN) {
#ifdef ASR_DUMP_QSPI_REG
		qspi_dump_reg(qspi);
#endif
		asr_qspi_read_rxfifo(qspi, op);
	}

	if (qspi->fr_error_flag & QSPI_FR_TBUF) {
		/* abort current dma transfer */
		if (qspi->tx_dma_enable)
			dmaengine_terminate_all(qspi->tx_dma);

		/* clear TX buf */
		mcr = qspi_readl(qspi, qspi->io_map + QSPI_MCR);
		mcr |= QSPI_MCR_CLR_TXF_MASK;
		qspi_writel(qspi, mcr, qspi->io_map + QSPI_MCR);

		err = -EAGAIN;
	}

tx_dma_unmap:
	if (qspi->tx_wmrk) {
		if (!qspi->sram.virt)
			spi_controller_dma_unmap_mem_op_data(qspi->ctrl, op, &qspi->sgt);
		qspi->tx_wmrk = 0;
	}

	/* disable all interrupts */
	qspi_writel(qspi, 0, qspi->io_map + QSPI_RSER);
	return err;
}

static void dump_spi_mem_op_info(struct asr_qspi *qspi,
				 const struct spi_mem_op *op)
{
	dev_dbg(qspi->dev, "cmd.opcode:0x%x\n", op->cmd.opcode);
	dev_dbg(qspi->dev, "cmd.buswidth:%d\n", op->cmd.buswidth);
	dev_dbg(qspi->dev, "addr.nbytes:%d,\n", op->addr.nbytes);
	dev_dbg(qspi->dev, "addr.buswidth:%d\n", op->addr.buswidth);
	dev_dbg(qspi->dev, "addr.val:0x%llx\n", op->addr.val);
	dev_dbg(qspi->dev, "dummy.nbytes:%d\n", op->dummy.nbytes);
	dev_dbg(qspi->dev, "dummy.buswidth:%d\n", op->dummy.buswidth);
	dev_dbg(qspi->dev, "%s data.nbytes:%d\n",
		(op->data.dir == SPI_MEM_DATA_IN) ? "read" : "write",
		op->data.nbytes);
	dev_dbg(qspi->dev, "data.buswidth:%d\n", op->data.buswidth);
	dev_dbg(qspi->dev, "data.buf:0x%p\n", op->data.buf.in);
}

static int asr_qspi_check_buswidth(struct asr_qspi *qspi, u8 width)
{
	switch (width) {
	case 1:
	case 2:
	case 4:
		return 0;
	}

	return -ENOTSUPP;
}

static bool asr_qspi_supports_op(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	struct asr_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
	int ret;
	int op_dtr;

	ret = asr_qspi_check_buswidth(qspi, op->cmd.buswidth);

	if (op->addr.nbytes)
		ret |= asr_qspi_check_buswidth(qspi, op->addr.buswidth);

	if (op->dummy.nbytes)
		ret |= asr_qspi_check_buswidth(qspi, op->dummy.buswidth);

	if (op->data.nbytes)
		ret |= asr_qspi_check_buswidth(qspi, op->data.buswidth);

	if (ret)
		return false;

	/* address bytes should be equal to or less than 4 bytes */
	if (op->addr.nbytes > 4)
		return false;

	/* check controller TX/RX buffer limits and alignment */
	if (op->data.dir == SPI_MEM_DATA_IN &&
	    (op->data.nbytes > qspi->rx_unit_size ||
	     (op->data.nbytes > qspi->rx_buf_size - 4 && !IS_ALIGNED(op->data.nbytes, 4)))) {
		return false;
	}

	if (op->data.dir == SPI_MEM_DATA_OUT && op->data.nbytes > qspi->tx_unit_size)
		return false;

	/*
	 * If the requested address value is greater than the controller's
	 * assigned memory-mapped space, return an error as it does not fit
	 * in the range.
	 */
	if (op->addr.val >= qspi->memmap_size) {
		pr_err("asr_qspi_supports_op: addr.val:%lld greater than the map size\n", op->addr.val);
		return false;
	}

	/* number of dummy clock cycles should be <= 64 cycles */
	if (op->dummy.buswidth &&
	    (op->dummy.nbytes * 8 / op->dummy.buswidth > 64))
		return false;

	if (op->cmd.dtr || op->addr.dtr || op->data.dtr)
		op_dtr = 1;
	else
		op_dtr = 0;

	if (!qspi->has_dtr && op_dtr)
		return false;

	return true;
}

void asr_qspi_adjust_tx_size(struct spi_mem *mem, u32 reduce_sz)
{
	struct asr_qspi *qspi = spi_controller_get_devdata(mem->spi->master);

	if (qspi->tx_dma_enable) {
		if (reduce_sz)
			qspi->tx_unit_size = qspi->tx_buf_size;
		else
			qspi->tx_unit_size = SZ_4K;
	}
}

static int __asr_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct asr_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
	void __iomem *mpmu_acgr = regs_addr_get_va(REGS_ADDR_MPMU) + ASR_MPMU_ACGR;
	void __iomem *base = qspi->io_map;
	int err = 0;
	u32 mask;
	u32 reg;
	u8 seq_id;

	mutex_lock(&qspi->lock);

	dump_spi_mem_op_info(qspi, op);

	/* wait for the controller to be ready */
	mask = QSPI_SR_BUSY | QSPI_SR_IP_ACC_MASK | QSPI_SR_AHB_ACC_MASK;
	err = asr_qspi_readl_poll_tout(qspi, base + QSPI_SR, mask,
				       QSPI_WAIT_TIMEOUT*1000, QSPI_WAIT_BIT_CLEAR);
	if (err) {
		dev_err(qspi->dev, "controller not ready!\n");
		dev_err(qspi->dev, "pmuap[0x%08x]:0x%08x\n", qspi->pmuap_reg,
			qspi_readl(qspi, qspi->pmuap_addr));
		dev_err(qspi->dev, "mpmu[0x%08x]:0x%08x\n", ASR_MPMU_ACGR,
			qspi_readl(qspi, mpmu_acgr));
		qspi_dump_reg(qspi);
		mutex_unlock(&qspi->lock);
		return err;
	}

	/* clear TX/RX buffer before transaction */
	reg = qspi_readl(qspi, base + QSPI_MCR);
	reg |= QSPI_MCR_CLR_TXF_MASK | QSPI_MCR_CLR_RXF_MASK;
	qspi_writel(qspi, reg, base + QSPI_MCR);

1383 * reset the sequence pointers whenever the sequence ID is changed by
1384 * updating the SEDID filed in QSPI_IPCR OR QSPI_BFGENCR.
1385 */
1386 reg = qspi_readl(qspi, base + QSPI_SPTRCLR);
	reg = qspi_readl(qspi, base + QSPI_SPTRCLR);
	reg |= (QSPI_SPTRCLR_IPPTRC | QSPI_SPTRCLR_BFPTRC);
	qspi_writel(qspi, reg, base + QSPI_SPTRCLR);

	/* set the flash address into the QSPI_SFAR */
	qspi_write_sfar(qspi, qspi->memmap_base + op->addr.val);

	/* clear QSPI_FR before trigger LUT command */
	reg = qspi_readl(qspi, base + QSPI_FR);
	if (reg)
		qspi_writel(qspi, reg, base + QSPI_FR);
	qspi->fr_error_flag = 0;

	/* IP command */
	seq_id = asr_qspi_prepare_lut(qspi, op, SEQID_LUT_SHARED_ID);
	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
		err = asr_qspi_fill_txfifo(qspi, op);
	if (!err)
		err = asr_qspi_do_op(qspi, op, seq_id);

	/* invalidate the data in the AHB buffer. */
	if (op->cmd.opcode != 0x1F && op->cmd.opcode != 0x0F)
		asr_qspi_invalid(qspi);

	mutex_unlock(&qspi->lock);

	return err;
}

static int asr_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct asr_qspi *qspi = spi_controller_get_devdata(mem->spi->master);

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		if (op->data.nbytes > qspi->tx_unit_size)
			op->data.nbytes = qspi->tx_unit_size;
	} else {
		if (op->data.nbytes > qspi->rx_unit_size)
			op->data.nbytes = qspi->rx_unit_size;
	}

	return 0;
}

static int asr_qspi_config_dqs_clk(struct asr_qspi *qspi, int dcode)
{
	void __iomem *base = qspi->io_map;
	u32 reg;

	reg = qspi_readl(qspi, base + QSPI_MCR);

	if (dcode <= 0 || dcode > 255) {
		reg &= ~(QSPI_MCR_DQS_EN | QSPI_MCR_DQS_LP_EN |
			 QSPI_MCR_DQS_INV_EN);
		qspi_writel(qspi, reg, base + QSPI_MCR);
		return -1;
	}

	reg |= QSPI_MCR_DQS_EN | QSPI_MCR_DQS_LP_EN | QSPI_MCR_DQS_INV_EN;
	qspi_writel(qspi, reg, base + QSPI_MCR);

	/* DQS enabled, use sample point N/1 */
	qspi_writel(qspi, 0x0, base + QSPI_SMPR);

	reg = qspi_readl(qspi, base + QSPI_SOCCR);
	reg |= QSPI_SOCCR_DLINE_EN;
	qspi_writel(qspi, reg, base + QSPI_SOCCR);

	reg = qspi_readl(qspi, base + QSPI_DLACR);
	reg &= ~QSPI_DLACR_DLINE_STEP_MASK;
	reg = 0x7 << QSPI_DLACR_DLINE_STEP_SHIFT;
	reg |= dcode & QSPI_DLACR_DLINE_CODE_MASK;
	qspi_writel(qspi, reg, base + QSPI_DLACR);

	asr_qspi_invalid(qspi);
	dev_info(qspi->dev, "enable DQS clock, QSPI_DLACR=0x%x\n", reg);
	return 0;
}

static int __asr_qspi_adjust_timing(struct asr_qspi *qspi,
				    struct spi_mem_timing *timing, int clk_hz)
{
	void __iomem *base = qspi->io_map;
	u32 t, delay = 0;
	u32 reg;
	int dcode;

	if (clk_hz <= 13000000)
		return 0;

	t = 1000000000 / (clk_hz/1000); /* in picoseconds */
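	/*
	 * Illustrative: clk_hz = 102400000 (PLL1 102.4 MHz) gives
	 * t = 1000000000 / 102400 = ~9765 ps per SCK period.
	 */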

	/* clock settings */
	qspi_enter_mode(qspi, QSPI_DISABLE_MODE);

	if (timing->tclqv == 0) {
		timing->tclqv = 8;
		timing->tset = timing->thold = 2;
	}

	delay = (timing->tclqv + timing->tset + 1) * 1000;
	if (delay <= t)
		reg = 0; /* sample point N1 */
	else
		reg = QSPI_SMPR_FSPHS_MASK; /* sample point I1 */

	if (timing->use_dtr && qspi->has_dtr) {
		int ddr_point;

		delay -= t/2;
		if (delay > 0)
			ddr_point = (delay + t/8 - 1) / (t/8);
		else
			ddr_point = qspi->dtr_rx_delay;

		reg |= ddr_point << QSPI_SMPR_DDRSMP_SHIFT;
	} else if (qspi->support_dqs && clk_hz > 52000000) {
		/*
		 * Do not use DQS for DDR, since SDR/DDR can not share the
		 * same delay code.
		 * If DQS is enabled, sample point N/1 must be used, so clear
		 * SMPR.
		 *
		 * delay step: 52ps
		 */
		delay = timing->tclqv * 1000 - t/2;
		dcode = delay / 52;
		if (!asr_qspi_config_dqs_clk(qspi, dcode))
			reg = 0;
	}

	qspi_writel(qspi, reg, base + QSPI_SMPR);
	reg = qspi_readl(qspi, base + QSPI_SMPR);
	dev_info(qspi->dev, "QSPI_SMPR=0x%x t=%d\n", reg, t);

	/* set tx hold time */
	reg = 0x202;
	if (timing->use_dtr && qspi->has_dtr)
		reg |= qspi->dtr_tx_delay << 16;
	qspi_writel(qspi, reg, base + QSPI_FLSHCR);

	reg = qspi_readl(qspi, base + QSPI_FLSHCR);
	dev_info(qspi->dev, "QSPI_FLSHCR=0x%x, delay=%d\n", reg, delay);

	/* Module enabled */
	qspi_enter_mode(qspi, QSPI_NORMAL_MODE);

	return 0;
}

static int asr_qspi_adjust_timing(struct spi_mem *mem, struct spi_mem_timing *timing)
{
	struct asr_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
	int ret = 0;

	dev_notice(qspi->dev, "tclqv=%dns tset=%dns thold=%dns\n",
		   timing->tclqv, timing->tset, timing->thold);

	if (timing->max_hz > 0 && timing->max_hz < qspi->max_hz)
		qspi->max_hz = timing->max_hz;

	__asr_qspi_adjust_timing(qspi, timing, qspi->max_hz);

	ret = clk_set_rate(qspi->clk, qspi->max_hz);
	if (ret) {
		dev_err(qspi->dev, "fail to set clk, ret:%d\n", ret);
		return ret;
	}

	dev_notice(qspi->dev, "bus clock %dHz, PMUap reg[0x%08x]:0x%08x\n",
		   qspi->max_hz, qspi->pmuap_reg,
		   qspi_readl(qspi, qspi->pmuap_addr));

	return 0;
}

#define ASR_QSPI_MAX_RETRY	3
static int asr_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct asr_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
	int ret, i;

restart:
	qspi->rst_protect = false;

	ret = __asr_qspi_exec_op(mem, op);
	if (ret == -EAGAIN) {
		/*
		 * For a tx underrun error, reduce the data length to less
		 * than the tx fifo size and try again.
		 */
		asr_qspi_adjust_tx_size(mem, 1);
		asr_qspi_adjust_op_size(mem, (struct spi_mem_op *)op);

		i = 0;
		do {
			ret = __asr_qspi_exec_op(mem, op);
		} while (ret == -EAGAIN && ++i < ASR_QSPI_MAX_RETRY);

		BUG_ON(ret);

		dev_dbg(qspi->dev, "pass after %dth retry.\n", i+1);
		asr_qspi_adjust_tx_size(mem, 0);
	}

	if (qspi->rst_protect) {
		dev_info_ratelimited(qspi->dev, "retry for reset protect\n");
		goto restart;
	}

	return ret;
}

static int asr_qspi_host_init(struct asr_qspi *qspi)
{
	void __iomem *base = qspi->io_map;
	u32 reg;

	/* reset qspi */
	qspi_reset(qspi);

	/* clock settings */
	qspi_enter_mode(qspi, QSPI_DISABLE_MODE);

	/* fix write failure issue */
	reg = qspi_readl(qspi, base + QSPI_SOCCR);
	reg &= ~0xFF;
	reg |= 0x8;
	qspi_writel(qspi, reg, base + QSPI_SOCCR);

	/* set the default source address QSPI_AMBA_BASE */
	qspi_write_sfar(qspi, qspi->memmap_base);
	qspi_writel(qspi, 0x0, base + QSPI_SFACR);

	/* config ahb read */
	qspi_init_ahbread(qspi, SEQID_LUT_AHBREAD_ID);

	/* set flash memory map */
	qspi_writel(qspi, qspi->sfa1ad & 0xfffffc00, base + QSPI_SFA1AD);
	qspi_writel(qspi, qspi->sfa2ad & 0xfffffc00, base + QSPI_SFA2AD);
	qspi_writel(qspi, qspi->sfb1ad & 0xfffffc00, base + QSPI_SFB1AD);
	qspi_writel(qspi, qspi->sfb2ad & 0xfffffc00, base + QSPI_SFB2AD);
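	/*
	 * The 0xfffffc00 mask drops the low 10 bits, so the per-CS top
	 * addresses are presumably 1 KiB-aligned (an assumption based on the
	 * mask above, not on a datasheet).
	 */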

	/* ISD3FB, ISD2FB, ISD3FA, ISD2FA = 1; END_CFG=0x3 */
	reg = qspi_readl(qspi, base + QSPI_MCR);
	reg |= QSPI_MCR_END_CFG_MASK | QSPI_MCR_ISD_MASK;
	if (qspi->has_dtr)
		reg |= QSPI_MCR_DDR_EN_MASK;
	else
		reg &= ~QSPI_MCR_DDR_EN_MASK;
	qspi_writel(qspi, reg, base + QSPI_MCR);

	/* Module enabled */
	qspi_enter_mode(qspi, QSPI_NORMAL_MODE);

	/* Read using the IP Bus registers QSPI_RBDR0 to QSPI_RBDR31 */
	qspi_write_rbct(qspi, QSPI_RBCT_RXBRD_MASK);

	if (!qspi->cmd_interrupt)
		asr_qspi_disable_interrupt(qspi, 0xffffffff);

	/* clear all interrupt status */
	qspi_writel(qspi, 0xffffffff, base + QSPI_FR);

#ifdef ASR_DUMP_QSPI_REG
	qspi_dump_reg(qspi);
#endif
	return 0;
}

static int asr_qspi_dirmap_create(struct spi_mem_dirmap_desc *desc)
{
	struct spi_controller *ctrl = desc->mem->spi->master;
	struct asr_qspi *qspi = spi_controller_get_devdata(ctrl);
	struct spi_mem_op *op = &desc->info.op_tmpl;

	if (op->data.dir != SPI_MEM_DATA_IN || !qspi->ahb_read_enable)
		return -ENOTSUPP;

	asr_qspi_prepare_lut(qspi, op, SEQID_LUT_AHBREAD_ID);
	qspi->ahb_op = op;

	if (op->cmd.dtr || op->addr.dtr || op->data.dtr)
		printk("enable dtr command 0x%x\n", op->cmd.opcode);

	return 0;
}

static ssize_t asr_qspi_direct_read(struct spi_mem_dirmap_desc *desc,
				    u64 offs, size_t len, void *buf)
{
	struct spi_controller *ctrl = desc->mem->spi->master;
	struct asr_qspi *qspi = spi_controller_get_devdata(ctrl);
	struct spi_mem_op op = desc->info.op_tmpl;
	int err;

1682#if 0
1683 void __iomem *base = qspi->io_map;
1684 u32 mask;
1685
1686 mutex_lock(&qspi->lock);
1687
1688 /* wait for controller being ready */
1689 mask = QSPI_SR_BUSY | QSPI_SR_IP_ACC_MASK | QSPI_SR_AHB_ACC_MASK;
1690 err = asr_qspi_readl_poll_tout(base, base + QSPI_SR, mask,
1691 QSPI_WAIT_TIMEOUT*1000, QSPI_WAIT_BIT_CLEAR);
1692 if (err) {
1693 dev_err(qspi->dev, "controller not ready!\n");
1694 mutex_unlock(&qspi->lock);
1695 return err;
1696 }
1697
1698 mutex_unlock(&qspi->lock);
1699#endif
1700
1701 op.addr.val = desc->info.offset + offs;
1702 op.data.buf.in = buf;
1703 op.data.nbytes = len;
1704 asr_qspi_adjust_op_size(desc->mem, &op);
1705
1706 err = asr_qspi_ahb_read(qspi, &op);
1707 if (err)
1708 return err;
1709
1710 return op.data.nbytes;
1711}
1712
1713static const struct spi_controller_mem_ops asr_qspi_mem_ops = {
1714 .adjust_op_size = asr_qspi_adjust_op_size,
1715 .adjust_timing = asr_qspi_adjust_timing,
1716 .supports_op = asr_qspi_supports_op,
1717 .exec_op = asr_qspi_exec_op,
1718 .dirmap_create = asr_qspi_dirmap_create,
1719 .dirmap_read = asr_qspi_direct_read,
1720};
1721
1722static int asr_qspi_probe(struct platform_device *pdev)
1723{
1724 struct spi_controller *ctlr;
1725 struct device *dev = &pdev->dev;
1726 struct device_node *np = dev->of_node;
1727 struct asr_qspi *qspi;
1728 struct resource *res;
1729
1730 int ret = 0;
1731 u32 qspi_bus_num = 0;
1732 int host_irq = 0;
1733
1734 ctlr = spi_alloc_master(&pdev->dev, sizeof(struct asr_qspi));
1735 if (!ctlr)
1736 return -ENOMEM;
1737
1738 ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD ;
1739 qspi = spi_controller_get_devdata(ctlr);
1740 qspi->dev = dev;
1741 qspi->ctrl = ctlr;
1742
1743 platform_set_drvdata(pdev, qspi);
1744
1745 /* get qspi register base address */
1746 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi-base");
1747 qspi->io_map = devm_ioremap_resource(dev, res);
1748 if (IS_ERR(qspi->io_map)) {
1749 ret = PTR_ERR(qspi->io_map);
1750 goto err_put_ctrl;
1751 }
1752 qspi->io_phys = res->start;
1753
1754 /* get qspi memory-map address */
1755 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi-mmap");
1756 qspi->ahb_map = devm_ioremap_resource(dev, res);
1757 if (IS_ERR(qspi->ahb_map)) {
1758 ret = PTR_ERR(qspi->ahb_map);
1759 goto err_put_ctrl;
1760 }
1761
1762 qspi->memmap_base = res->start;
1763 qspi->memmap_size = resource_size(res);
1764
1765 if (of_property_read_u32(dev->of_node, "asr,qspi-sfa1ad", &qspi->sfa1ad))
1766 qspi->sfa1ad = QSPI_FLASH_A1_TOP;
1767 else
1768 qspi->sfa1ad += qspi->memmap_base;
1769 if (of_property_read_u32(dev->of_node, "asr,qspi-sfa2ad", &qspi->sfa2ad))
1770 qspi->sfa2ad = QSPI_FLASH_A2_TOP;
1771 else
1772 qspi->sfa2ad += qspi->sfa1ad;
1773 if (of_property_read_u32(dev->of_node, "asr,qspi-sfb1ad", &qspi->sfb1ad))
1774 qspi->sfb1ad = QSPI_FLASH_B1_TOP;
1775 else
1776 qspi->sfb1ad = qspi->sfa2ad;
1777 if (of_property_read_u32(dev->of_node, "asr,qspi-sfb2ad", &qspi->sfb2ad))
1778 qspi->sfb2ad = QSPI_FLASH_B2_TOP;
1779 else
1780 qspi->sfb2ad += qspi->sfb1ad;
1781
1782 dev_notice(dev, "asr_qspi_probe:memmap base:0x%08x, memmap size:0x%x\n",
1783 qspi->memmap_base, qspi->memmap_size);
1784
1785 qspi->sram.pool = of_gen_pool_get(dev->of_node, "asr,qspi-sram", 0);
1786 if (qspi->sram.pool) {
1787 qspi->sram.virt =
1788 (void __iomem *)gen_pool_dma_alloc(
1789 qspi->sram.pool, SZ_4K, &qspi->sram.dma);
1790 dev_notice(dev, "use sram as tx buf, virt=0x%x phy=0x%x\n",
1791 (unsigned)qspi->sram.virt, (unsigned)qspi->sram.dma);
1792 }
1793
1794 host_irq = platform_get_irq(pdev, 0);
1795 if (host_irq < 0) {
1796 dev_err(dev, "invalid host irq:%d\n", host_irq);
1797 goto err_put_ctrl;
1798 }
1799 ret = devm_request_irq(dev, host_irq, asr_qspi_irq_handler,
1800 0, pdev->name, qspi);
1801 if (ret) {
1802 dev_err(dev, "failed to request irq:%d\n", ret);
1803 goto err_put_ctrl;
1804 }
1805 init_completion(&qspi->cmd_completion);
1806 dev_notice(qspi->dev, "host_irq:%d\n", host_irq);
1807
1808 host_irq = platform_get_irq(pdev, 1);
1809 if (host_irq >= 0) {
1810 ret = devm_request_irq(dev, host_irq, asr_qspi_reset_handler,
1811 0, pdev->name, qspi);
1812 if (ret) {
1813 dev_err(dev, "failed to request irq:%d\n", ret);
1814 goto err_put_ctrl;
1815 }
1816
1817 dev_notice(qspi->dev, "reset irq:%d\n", host_irq);
1818 }
1819
1820 /* map QSPI PMUap register address */
1821 if (of_property_read_u32(dev->of_node, "asr,qspi-pmuap-reg", &qspi->pmuap_reg)) {
1822 qspi->pmuap_reg = PMUA_QSPI_CLK_RES_CTRL;
1823 }
1824 qspi->pmuap_addr = ioremap(qspi->pmuap_reg, 4);
1825
1826 if (of_property_read_u32(dev->of_node, "asr,qspi-freq", &qspi->max_hz)) {
1827 qspi->max_hz = ASR_QSPI_DEFAULT_CLK_FREQ;
1828 }
1829
1830 if (of_property_read_u32(dev->of_node, "asr,qspi-rx-buf", &qspi->rx_buf_size)) {
1831 qspi->rx_buf_size = QSPI_RX_BUFF_MAX;
1832 }
1833
1834 if (of_property_read_u32(dev->of_node, "asr,qspi-tx-buf", &qspi->tx_buf_size)) {
1835 qspi->tx_buf_size = QSPI_TX_BUFF_MAX;
1836 }
1837
1838 if (of_property_read_u32(dev->of_node, "asr,qspi-ahb-buf", &qspi->ahb_buf_size)) {
1839 qspi->ahb_buf_size = QSPI_AHB_BUFF_MAX_SIZE;
1840 }
1841
1842 if (of_property_read_u32(dev->of_node, "asr,qspi-ahb-enable", &qspi->ahb_read_enable)) {
1843 qspi->ahb_read_enable = 1;
1844 }
1845
1846 if (of_property_read_u32(dev->of_node, "asr,qspi-interrupt", &qspi->cmd_interrupt)) {
1847 qspi->cmd_interrupt = 1;
1848 }
1849
	/* RX does not use DMA by default; reading from AHB directly shows better performance */
	if (of_property_read_u32(dev->of_node, "asr,en-rx-dma", &qspi->rx_dma_enable)) {
		qspi->rx_dma_enable = 0;
	}

	if (of_property_read_u32(dev->of_node, "asr,en-tx-dma", &qspi->tx_dma_enable)) {
		qspi->tx_dma_enable = 1;
	}

	if (of_property_read_u32(dev->of_node, "asr,qspi-support-dtr", &qspi->has_dtr)) {
		qspi->has_dtr = 0;
	} else {
		if (of_property_read_u32(dev->of_node, "asr,qspi-dtr-tx-delay", &qspi->dtr_tx_delay))
			qspi->dtr_tx_delay = 1;
		if (of_property_read_u32(dev->of_node, "asr,qspi-dtr-rx-delay", &qspi->dtr_rx_delay))
			qspi->dtr_rx_delay = 0;
	}

	if (of_property_read_u32(dev->of_node, "asr,qspi-support-dqs", &qspi->support_dqs))
		qspi->support_dqs = 0;
	if (cpu_is_asr1903_a0() || cpu_is_asr1903_z1())
		qspi->support_dqs = 0;

	if (of_property_read_u32(dev->of_node, "asr,qspi-endian-xchg", &qspi->endian_xchg)) {
		qspi->endian_xchg = 0;
	}

	if (of_property_read_u32(dev->of_node, "asr,qspi-cs", &qspi->cs_selected)) {
		qspi->cs_selected = QSPI_DEFAULT_CS;
	}

	if (of_property_read_u32(dev->of_node, "asr,qspi-lpm-qos", &qspi->lpm_qos)) {
		qspi->lpm_qos = PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE;
	}

	asr_qspi_prepare_dma(qspi);
	mutex_init(&qspi->lock);

	/* set the qspi device default index */
	if (of_property_read_u32(dev->of_node, "asr,qspi-id", &qspi_bus_num))
		ctlr->bus_num = -1;
	else
		ctlr->bus_num = qspi_bus_num;
	ctlr->num_chipselect = 1;
	ctlr->mem_ops = &asr_qspi_mem_ops;

	dev_notice(dev, "asr_qspi_probe: rx_buf_size:%d, tx_buf_size:%d\n",
		   qspi->rx_buf_size, qspi->tx_buf_size);
	dev_notice(dev, "asr_qspi_probe: ahb_buf_size:%d, ahb_read:%d\n",
		   qspi->ahb_buf_size, qspi->ahb_read_enable);

	if (qspi->tx_dma_enable)
		qspi->tx_unit_size = SZ_4K;
	else
		qspi->tx_unit_size = qspi->tx_buf_size;

	if (qspi->ahb_read_enable)
		qspi->rx_unit_size = SZ_4K;
	else
		qspi->rx_unit_size = qspi->rx_buf_size;

	/* config mfp */
	qspi_config_mfp(qspi);
	/* set PMUap */
	qspi_set_func_clk(qspi, 13000000);
	asr_qspi_host_init(qspi);
	dev_info(qspi->dev, "AHB buf size: %d\n", qspi->ahb_buf_size);
	dev_notice(qspi->dev, "qspi host init done.\n");

	qspi->pm_qos_req.name = pdev->name;
	ctlr->auto_runtime_pm = true;
	pm_qos_add_request(&qspi->pm_qos_req, PM_QOS_CPUIDLE_BLOCK,
			   PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);

	qspi->pm_ddr_qos.name = pdev->name;
	pm_qos_add_request(&qspi->pm_ddr_qos, PM_QOS_DDR_DEVFREQ_MIN,
			   PM_QOS_DEFAULT_VALUE);

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, QSPI_AUTOSUSPEND_TIMEOUT);
	pm_runtime_set_active(&pdev->dev);
	pm_suspend_ignore_children(&pdev->dev, 0);
	pm_runtime_enable(&pdev->dev);

	/* get qos */
	pm_qos_update_request(&qspi->pm_qos_req, qspi->lpm_qos);
	ctlr->dev.of_node = np;
	ret = spi_register_controller(ctlr);
	if (ret)
		goto err_destroy_mutex;

	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

err_destroy_mutex:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
	pm_qos_remove_request(&qspi->pm_qos_req);
	pm_qos_remove_request(&qspi->pm_ddr_qos);

	mutex_destroy(&qspi->lock);
	iounmap(qspi->pmuap_addr);

err_put_ctrl:
	spi_controller_put(ctlr);

	dev_err(dev, "ASR QSPI probe failed\n");
	return ret;
}

static int asr_qspi_remove(struct platform_device *pdev)
{
	struct asr_qspi *qspi = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&pdev->dev);

	/* set disable mode */
	qspi_writel(qspi, QSPI_MCR_MDIS_MASK, qspi->io_map + QSPI_MCR);
	qspi_writel(qspi, 0x0, qspi->io_map + QSPI_RSER);

	pm_runtime_disable(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
	pm_qos_remove_request(&qspi->pm_qos_req);
	pm_qos_remove_request(&qspi->pm_ddr_qos);

	if (qspi->tx_dma)
		dma_release_channel(qspi->tx_dma);
	if (qspi->rx_dma)
		dma_release_channel(qspi->rx_dma);

	mutex_destroy(&qspi->lock);
	iounmap(qspi->pmuap_addr);

	clk_disable_unprepare(qspi->clk);
	clk_disable_unprepare(qspi->bus_clk);

	if (qspi->sram.pool)
		gen_pool_free(qspi->sram.pool,
			      (unsigned long)qspi->sram.virt, SZ_4K);
	return 0;
}

static void asr_qspi_default_setup(struct asr_qspi *qspi)
{
	struct spi_mem_op *op = qspi->ahb_op;
	int i;

	asr_qspi_host_init(qspi);

	for (i = 0; i < QSPI_MAX_SEQ_NUM; i++)
		qspi->seq_opcode[i] = 0;

	if (op)
		asr_qspi_prepare_lut(qspi, op, SEQID_LUT_AHBREAD_ID);

	return;
}

#ifdef CONFIG_PM_SLEEP
static int asr_qspi_suspend(struct device *dev)
{
	int ret;
	u32 sr;
	struct asr_qspi *qspi = dev_get_drvdata(dev);

	pm_runtime_get_sync(qspi->dev);

	sr = qspi_readl(qspi, qspi->io_map + QSPI_SR);
	if (sr & QSPI_SR_BUSY) {
		dev_err(dev, "qspi busy with ongoing cmd\n");
		return -EBUSY;
	}

	ret = pm_runtime_force_suspend(dev);
	if (ret) {
		dev_err(dev, "failed to suspend(ret:%d)\n", ret);
		return ret;
	}

	return 0;
}

static int asr_qspi_resume(struct device *dev)
{
	struct asr_qspi *qspi = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret) {
		dev_err(dev, "failed to resume(ret:%d)\n", ret);
		return ret;
	}

	/* reset qspi via bus reset */
	clk_disable_unprepare(qspi->bus_clk);
	udelay(1000);
	clk_prepare_enable(qspi->bus_clk);
	asr_qspi_default_setup(qspi);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;
}
#endif

#ifdef CONFIG_PM
static int asr_qspi_runtime_suspend(struct device *dev)
{
	u32 sr;
	struct asr_qspi *qspi = dev_get_drvdata(dev);

	mutex_lock(&qspi->lock);
	sr = qspi_readl(qspi, qspi->io_map + QSPI_SR);
	if (sr & QSPI_SR_BUSY) {
		dev_err(dev, "qspi busy with ongoing cmd\n");
		mutex_unlock(&qspi->lock);
		return -EBUSY;
	}
	qspi_enter_mode(qspi, QSPI_DISABLE_MODE);
	mutex_unlock(&qspi->lock);

	clk_disable_unprepare(qspi->clk);

	/* put qos */
	pm_qos_update_request(&qspi->pm_qos_req, PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);

	return 0;
}

static int asr_qspi_runtime_resume(struct device *dev)
{
	struct asr_qspi *qspi = dev_get_drvdata(dev);

	/* get qos */
	pm_qos_update_request(&qspi->pm_qos_req, qspi->lpm_qos);

	clk_prepare_enable(qspi->clk);
	qspi_enter_mode(qspi, QSPI_NORMAL_MODE);

	return 0;
}
#endif

static const struct dev_pm_ops asr_qspi_pmops = {
	SET_SYSTEM_SLEEP_PM_OPS(asr_qspi_suspend, asr_qspi_resume)
	SET_RUNTIME_PM_OPS(asr_qspi_runtime_suspend,
			   asr_qspi_runtime_resume, NULL)
};

static const struct of_device_id asr_qspi_dt_ids[] = {
	{ .compatible = "asr,qspi", },
	{}
};
MODULE_DEVICE_TABLE(of, asr_qspi_dt_ids);

static struct platform_driver asr_qspi_driver = {
	.driver = {
		.name	= "asr-qspi",
		.of_match_table = asr_qspi_dt_ids,
		.pm	= &asr_qspi_pmops,
	},
	.probe	= asr_qspi_probe,
	.remove	= asr_qspi_remove,
};
module_platform_driver(asr_qspi_driver);

MODULE_DESCRIPTION("ASR QSPI Host Controller Driver");
MODULE_AUTHOR("ASR Micro");
MODULE_LICENSE("GPL v2");