blob: e0d173734db73d90c1fa69f67c7ead50de589282 [file] [log] [blame]
lh9ed821d2023-04-07 01:36:19 -07001/*
2 * zx297520v2 spi controller driver
3 * Author: ZTER
4 * from original zx297520v2 driver
5 *
6 * Copyright (C) 2005, 2006 ZTE Corporation
7 * Author: ZTER
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 */
24#include <linux/kernel.h>
25#include <linux/init.h>
26#include <linux/interrupt.h>
27#include <linux/module.h>
28#include <linux/device.h>
29#include <linux/delay.h>
30#include <linux/platform_device.h>
31#include <linux/err.h>
32#include <linux/clk.h>
33#include <linux/io.h>
34#include <linux/gpio.h>
35#include <linux/slab.h>
36#include <linux/dmaengine.h>
37#include <linux/dma-mapping.h>
38#include <linux/scatterlist.h>
39#include <linux/pm_runtime.h>
40#include <linux/semaphore.h>
41
42#include <linux/spi/spi.h>
43
44#include <mach/clk.h>
45#include <mach/spi.h>
46#include <mach/gpio.h>
47#include <mach/dma.h>
48#include <mach/iomap.h>
49
50#define CONFIG_SPI_DMA_ENGINE
51/*
52 * This macro is used to define some register default values.
53 * reg is masked with mask, the OR:ed with an (again masked)
54 * val shifted sb steps to the left.
55 */
56#define SPI_WRITE_BITS(reg, val, mask, sb) \
57 ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask))))
58
59/*
60 * This macro is also used to define some default values.
61 * It will just shift val by sb steps to the left and mask
62 * the result with mask.
63 */
64#define GEN_MASK_BITS(val, mask, sb) \
65 (((val)<<(sb)) & (mask))
66
67#define DRIVE_TX 0
68#define DO_NOT_DRIVE_TX 1
69
70#define DO_NOT_QUEUE_DMA 0
71#define QUEUE_DMA 1
72
73#define RX_TRANSFER 1
74#define TX_TRANSFER 2
75
76/* registers */
77#define SPI_VER_REG(r) (r + 0x00)
78#define SPI_COM_CTRL(r) (r + 0x04)
79#define SPI_FMT_CTRL(r) (r + 0x08)
80#define SPI_DR(r) (r + 0x0C)
81#define SPI_FIFO_CTRL(r) (r + 0x10)
82#define SPI_FIFO_SR(r) (r + 0x14)
83#define SPI_INTR_EN(r) (r + 0x18)
84#define SPI_INTR_SR_SCLR(r) (r + 0x1C)
85#define SPI_TIMING_SCLR(r) (r + 0x20)
86
87/*
88 * SPI Version Register - SPI_VER_REG
89 */
90#define SPI_VER_REG_MASK_Y (0xFFUL << 16)
91#define SPI_VER_REG_MASK_X (0xFFUL << 24)
92
93/*
94 * SPI Common Control Register - SPI_COM_CTRL
95 */
96#define SPI_COM_CTRL_MASK_LBM (0x1UL << 0)
97#define SPI_COM_CTRL_MASK_SSPE (0x1UL << 1)
98#define SPI_COM_CTRL_MASK_MS (0x1UL << 2)
99#define SPI_COM_CTRL_MASK_SOD (0x1UL << 3)
100
101/*
102 * SPI Format Control Register - SPI_FMT_CTRL
103 */
104#define SPI_FMT_CTRL_MASK_FRF (0x3UL << 0)
105#define SPI_FMT_CTRL_MASK_POL (0x1UL << 2)
106#define SPI_FMT_CTRL_MASK_PHA (0x1UL << 3)
107#define SPI_FMT_CTRL_MASK_DSS (0x1FUL << 4)
108
109/*
110 * SPI FIFO Control Register - SPI_FIFO_CTRL
111 */
112#define SPI_FIFO_CTRL_MASK_RX_DMA_EN (0x1UL << 2)
113#define SPI_FIFO_CTRL_MASK_TX_DMA_EN (0x1UL << 3)
114#define SPI_FIFO_CTRL_MASK_RX_FIFO_THRES (0xFUL << 4)
115#define SPI_FIFO_CTRL_MASK_TX_FIFO_THRES (0xFUL << 8)
116/*
117 * SPI FIFO Status Register - SPI_FIFO_SR
118 */
119
120#define SPI_FIFO_SR_MASK_RX_BEYOND_THRES (0x1UL << 0)
121#define SPI_FIFO_SR_MASK_TX_BEYOND_THRES (0x1UL << 1)
122#define SPI_FIFO_SR_MASK_RX_FIFO_FULL (0x1UL << 2)
123#define SPI_FIFO_SR_MASK_TX_FIFO_EMPTY (0x1UL << 3)
124#define SPI_FIFO_SR_MASK_BUSY (0x1UL << 4)
125#define SPI_FIFO_SR_MASK_RX_FIFO_CNTR (0x1FUL << 5)
126#define SPI_FIFO_SR_MASK_TX_FIFO_CNTR (0x1FUL << 10)
127
128/*
129 * SPI Interrupt Enable Register - SPI_INTR_EN
130 */
131#define SPI_INTR_EN_MASK_RX_OVERRUN_IE (0x1UL << 0)
132#define SPI_INTR_EN_MASK_TX_UNDERRUN_IE (0x1UL << 1)
133#define SPI_INTR_EN_MASK_RX_FULL_IE (0x1UL << 2)
134#define SPI_INTR_EN_MASK_TX_EMPTY_IE (0x1UL << 3)
135#define SPI_INTR_EN_MASK_RX_THRES_IE (0x1UL << 4)
136#define SPI_INTR_EN_MASK_TX_THRES_IE (0x1UL << 5)
137
138/*
139 * SPI Interrupt Status Register OR Interrupt Clear Register - SPI_INTR_SR_SCLR
140 */
141
142#define SPI_INTR_SR_SCLR_MASK_RX_OVERRUN_INTR (0x1UL << 0)
143#define SPI_INTR_SR_SCLR_MASK_TX_UNDERRUN_INTR (0x1UL << 1)
144#define SPI_INTR_SR_SCLR_MASK_RX_FULL_INTR (0x1UL << 2)
145#define SPI_INTR_SR_SCLR_MASK_TX_EMPTY_INTR (0x1UL << 3)
146#define SPI_INTR_SR_SCLR_MASK_RX_THRES_INTR (0x1UL << 4)
147#define SPI_INTR_SR_SCLR_MASK_TX_THRES_INTR (0x1UL << 5)
148
149/* SPI State */
150#define SPI_RUNNING 0
151#define SPI_SHUTDOWN 1
152
/* SPI WCLK Frequency */
154#define SPI_SPICLK_FREQ_104M 104000000
155
156#define CLEAR_ALL_INTERRUPTS 0x3FUL
157#define ENABLE_ALL_INTERRUPTS 0x3FUL
158#define DISABLE_ALL_INTERRUPTS 0x0UL
159/*
160 * Message State
161 * we use the spi_message.state (void *) pointer to
162 * hold a single state value, that's why all this
163 * (void *) casting is done here.
164 */
165#define STATE_START ((void *) 0)
166#define STATE_RUNNING ((void *) 1)
167#define STATE_DONE ((void *) 2)
168#define STATE_ERROR ((void *) -1)
169
170/*
171 * SPI State - Whether Enabled or Disabled
172 */
173#define SPI_DISABLED (0)
174#define SPI_ENABLED (1)
175
176/*
177 * SPI DMA State - Whether DMA Enabled or Disabled
178 */
179#define SPI_DMA_DISABLED (0)
180#define SPI_DMA_ENABLED (1)
181
182/*
183 * SPI SOD State - Whether SOD Enabled or Disabled
184 */
185#define SPI_SOD_DISABLED (1)
186#define SPI_SOD_ENABLED (0)
187
188#define GPIO_AP_SPI0_CS 30
189#define GPIO_AP_SPI0_CLK 31
190#define GPIO_AP_SPI0_RXD 32
191#define GPIO_AP_SPI0_TXD 33
192#define GPIO_AP_SPI1_CS 7
193#define GPIO_AP_SPI1_CLK 8
194#define GPIO_AP_SPI1_RXD 13
195#define GPIO_AP_SPI1_TXD 14
196
/*
 * FIFO watermark selectors programmed into SPI_FIFO_CTRL.
 * SPI_FIFO_THRES_n has the value n-1; presumably it selects a
 * threshold of n FIFO entries -- TODO confirm against the datasheet.
 */
enum spi_fifo_threshold_level {
	SPI_FIFO_THRES_1,
	SPI_FIFO_THRES_2,
	SPI_FIFO_THRES_3,
	SPI_FIFO_THRES_4,
	SPI_FIFO_THRES_5,
	SPI_FIFO_THRES_6,
	SPI_FIFO_THRES_7,
	SPI_FIFO_THRES_8,
	SPI_FIFO_THRES_9,
	SPI_FIFO_THRES_10,
	SPI_FIFO_THRES_11,
	SPI_FIFO_THRES_12,
	SPI_FIFO_THRES_13,
	SPI_FIFO_THRES_14,
	SPI_FIFO_THRES_15,
	SPI_FIFO_THRES_16

};
216
217
218/*
219 * SPI Clock Parameter ranges
220 */
221#define DIV_MIN 0x00
222#define DIV_MAX 0x0F
223
224#define SPI_POLLING_TIMEOUT 1000
225
226/*
227 * The type of reading going on on this chip
228 */
enum spi_reading {
	READING_NULL,	/* read the data register but discard the value */
	READING_U8,	/* store each received word as a u8 */
	READING_U16,	/* store each received word as a u16 */
	READING_U32	/* store each received word as a u32 */
};
235
236/**
237 * The type of writing going on on this chip
238 */
enum spi_writing {
	WRITING_NULL,	/* drive the bus with zeroes (no TX buffer) */
	WRITING_U8,	/* fetch each outgoing word as a u8 */
	WRITING_U16,	/* fetch each outgoing word as a u16 */
	WRITING_U32	/* fetch each outgoing word as a u32 */
};
245
/**
 * struct vendor_data - vendor-specific config parameters
 * for this SPI controller variant
 * @fifodepth: depth of the FIFOs (TX and RX are the same depth)
 * @max_bpw: maximum number of bits per word
 * @loopback: whether the variant supports loopback mode
 */
struct vendor_data {
	int fifodepth;	/* FIFO depth in entries; bounds TX fill in readwriter() */
	int max_bpw;	/* maximum number of bits per word */
	bool loopback;	/* variant supports loopback mode -- TODO confirm, unused in this chunk */
};
/**
 * struct zx297520v2_spi - This is the private SPI driver data structure
 * @pdev: platform device model hookup
 * @vendor: vendor data for the IP block
 * @phybase: the physical memory where the SPI device resides
 * @virtbase: the virtual memory where the SPI is mapped
 * @pclk: the SPI controller's working (bus) clock
 * @spi_clk: clock for the outgoing SPI clock line
 * @clkfreq: SPI clock frequency
 * @master: SPI framework hookup
 * @master_info: controller-specific data from machine setup
 * @pump_transfers: tasklet used in interrupt transfer mode
 * @cur_msg: pointer to current spi_message being processed
 * @cur_transfer: pointer to current spi_transfer
 * @cur_chip: pointer to current client's chip (assigned from controller_state)
 * @next_msg_cs_active: the next message in the queue has been examined
 *  and it was found that it uses the same chip select as the previous
 *  message, so we left it active after the previous transfer, and it's
 *  active already.
 * @tx: current position in TX buffer to be read
 * @tx_end: end position in TX buffer to be read
 * @rx: current position in RX buffer to be written
 * @rx_end: end position in RX buffer to be written
 * @read: the type of read currently going on
 * @write: the type of write currently going on
 * @exp_fifo_level: expected FIFO level
 * @rx_lev_trig: RX FIFO watermark level
 * @tx_lev_trig: TX FIFO watermark level
 * @dma_rx_channel: optional channel for RX DMA
 * @dma_tx_channel: optional channel for TX DMA
 * @sgt_rx: scattertable for the RX transfer
 * @sgt_tx: scattertable for the TX transfer
 * @dummypage: a dummy page used for driving data on the bus with DMA
 * @dma_running: whether a DMA transfer is currently in flight
 */
/* Private runtime state for one zx297520v2 SPI controller instance. */
struct zx297520v2_spi {
	struct platform_device *pdev;
	struct vendor_data *vendor;
	resource_size_t phybase;
	void __iomem *virtbase;
	struct clk *pclk;/* spi controller work clock */
	struct clk *spi_clk;/* spi clk line clock */
	u32 clkfreq;
	struct spi_master *master;
	struct zx297520v2_spi_controller *master_info;
	/* Message per-transfer pump */
	struct tasklet_struct pump_transfers;
	struct spi_message *cur_msg;
	struct spi_transfer *cur_transfer;
	struct chip_data *cur_chip;
	bool next_msg_cs_active;
	/* Cursor and end pointers into the current transfer's buffers. */
	void *tx;
	void *tx_end;
	void *rx;
	void *rx_end;
	enum spi_reading read;
	enum spi_writing write;
	/* Words queued in TX but not yet drained from RX (overrun guard). */
	u32 exp_fifo_level;
	enum spi_rx_level_trig rx_lev_trig;
	enum spi_tx_level_trig tx_lev_trig;
	/* DMA settings */
	/* NOTE(review): this guard tests CONFIG_DMA_ENGINE, but the file
	 * defines CONFIG_SPI_DMA_ENGINE near the top -- confirm which
	 * symbol is intended; as written the local #define has no effect
	 * on these sections. */
#ifdef CONFIG_DMA_ENGINE
	struct dma_chan *dma_rx_channel;
	struct dma_chan *dma_tx_channel;
	struct sg_table sgt_rx;
	struct sg_table sgt_tx;
	char *dummypage;
	bool dma_running;
#endif
};
333
/**
 * struct chip_data - To maintain runtime state of SPI for each client chip
 * @ver_reg: shadow of the SPI_VER_REG version register
 * @com_ctrl: shadow value for the SPI_COM_CTRL common control register
 * @fmt_ctrl: shadow value for the SPI_FMT_CTRL format control register
 * @fifo_ctrl: shadow value for the SPI_FIFO_CTRL FIFO control register
 * @n_bytes: how many bytes(power of 2) reqd for a given data width of client
 * @clk_div: SPI clock divider
 * @enable_dma: Whether to enable DMA or not
 * @read: the type of read (enum spi_reading) used for this chip's transfers
 * @write: the type of write (enum spi_writing) used for this chip's transfers
 * @xfer_type: polling/interrupt/DMA
 *
 * Runtime state of the SPI controller, maintained per chip,
 * This would be set according to the current message that would be served
 */
/* Per-client-chip register shadows and transfer-mode settings. */
struct chip_data {
	u32 ver_reg;
	u32 com_ctrl;
	u32 fmt_ctrl;
	u32 fifo_ctrl;
//	u32 intr_en;
	u8 n_bytes;	/* bytes per data word; FIFO cursors advance by this */
	u8 clk_div;/* spi clk divider */
	bool enable_dma;
	enum spi_reading read;
	enum spi_writing write;
	//void (*cs_control) (u32 command);
	int xfer_type;	/* polling / interrupt / DMA */
};
/* Released (up) by dma_callback() when a DMA transfer completes; the
 * transfer path presumably blocks on it -- waiter not visible in this chunk. */
struct semaphore g_SpiTransferSemaphore;
/**
 * null_cs_control - Dummy chip select function
 * @command: select/deselect the chip
 *
 * If no chip select function is provided by client this is used as dummy
 * chip select
 */
static void null_cs_control(u32 command)
{
	/* Intentionally a no-op: only trace the request for debugging. */
	pr_debug("zx297520v2 spi: dummy chip select control, CS=0x%x\n", command);
}
378
/**
 * giveback - current spi_message is over, schedule next message and call
 * callback of this message. Assumes that caller already
 * set message->status; dma and pio irqs are blocked
 * @zx297520v2spi: SPI driver private data structure
 */
static void giveback(struct zx297520v2_spi *zx297520v2spi)
{
	struct spi_transfer *last_transfer;
	zx297520v2spi->next_msg_cs_active = false;

	/* The message's last transfer sits at the tail of its transfer list. */
	last_transfer = list_entry(zx297520v2spi->cur_msg->transfers.prev,
					struct spi_transfer,
					transfer_list);

	/* Delay if requested before any change in chip select */
	if (last_transfer->delay_usecs)
		/*
		 * FIXME: This runs in interrupt context.
		 * Is this really smart?
		 */
		udelay(last_transfer->delay_usecs);

	if (!last_transfer->cs_change) {
		struct spi_message *next_msg;

		/*
		 * cs_change was not set. We can keep the chip select
		 * enabled if there is message in the queue and it is
		 * for the same spi device.
		 *
		 * We cannot postpone this until pump_messages, because
		 * after calling msg->complete (below) the driver that
		 * sent the current message could be unloaded, which
		 * could invalidate the cs_control() callback...
		 */
		/* get a pointer to the next message, if any */
		next_msg = spi_get_next_queued_message(zx297520v2spi->master);

		/*
		 * see if the next and current messages point
		 * to the same spi device.
		 */
		if (next_msg && next_msg->spi != zx297520v2spi->cur_msg->spi)
			next_msg = NULL;
		/* NOTE(review): CS deselect on message end is disabled below;
		 * next_msg is computed but currently unused as a result. */
		//if (!next_msg || zx297520v2spi->cur_msg->state == STATE_ERROR)
		//	zx297520v2spi->cur_chip->cs_control(SSP_CHIP_DESELECT);
		//else
		//	zx297520v2spi->next_msg_cs_active = true;

	}

	/* Drop per-message state and hand the message back to the SPI core. */
	zx297520v2spi->cur_msg = NULL;
	zx297520v2spi->cur_transfer = NULL;
	zx297520v2spi->cur_chip = NULL;
	spi_finalize_current_message(zx297520v2spi->master);
}
436
/**
 * flush - flush the FIFO to reach a clean state
 * @zx297520v2spi: SPI driver private data structure
 *
 * Drains the RX FIFO repeatedly until the controller deasserts its busy
 * flag or the loop budget runs out, then resets the expected FIFO level.
 * Returns the remaining budget.  NOTE(review): @limit is unsigned long
 * truncated to int on return, and the post-decrement wraps it to
 * ULONG_MAX on timeout -- confirm callers only test the result loosely.
 */
static int flush(struct zx297520v2_spi *zx297520v2spi)
{
	unsigned long limit = loops_per_jiffy << 1;

	dev_dbg(&zx297520v2spi->pdev->dev, "flush\n");
	do {
		/* Discard everything currently sitting in the RX FIFO. */
		while (readl(SPI_FIFO_SR(zx297520v2spi->virtbase)) & SPI_FIFO_SR_MASK_RX_FIFO_CNTR)
			readl(SPI_DR(zx297520v2spi->virtbase));
	} while ((readl(SPI_FIFO_SR(zx297520v2spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY) && limit--);

	zx297520v2spi->exp_fifo_level = 0;

	return limit;
}
455
456/**
457 * restore_state - Load configuration of current chip
458 * @pl022: SSP driver private data structure
459 */
460static void restore_state(struct zx297520v2_spi *zx297520v2spi)
461{
462 struct chip_data *chip = zx297520v2spi->cur_chip;
463
464 writel(chip->com_ctrl, SPI_COM_CTRL(zx297520v2spi->virtbase));
465 writel(chip->fmt_ctrl, SPI_FMT_CTRL(zx297520v2spi->virtbase));
466 writel(chip->fifo_ctrl, SPI_FIFO_CTRL(zx297520v2spi->virtbase));
467// writel(chip->intr_en, SPI_INTR_EN(zx297520v2spi->virtbase));
468 /* disable all interrupts */
469 writel(DISABLE_ALL_INTERRUPTS, SPI_INTR_EN(zx297520v2spi->virtbase));
470 writel(CLEAR_ALL_INTERRUPTS, SPI_INTR_SR_SCLR(zx297520v2spi->virtbase));
471}
472
473/*
474 * Default spi Register Values
475 */
476#define DEFAULT_SPI_COM_CTRL ( \
477 GEN_MASK_BITS(LOOPBACK_DISABLED, SPI_COM_CTRL_MASK_LBM, 0) | \
478 GEN_MASK_BITS(SPI_DISABLED, SPI_COM_CTRL_MASK_SSPE, 1) | \
479 GEN_MASK_BITS(SPI_MASTER, SPI_COM_CTRL_MASK_MS, 2) | \
480 GEN_MASK_BITS(SPI_SOD_DISABLED, SPI_COM_CTRL_MASK_SOD, 3) \
481)
482
483#define DEFAULT_SPI_FMT_CTRL ( \
484 GEN_MASK_BITS(SPI_INTERFACE_MOTOROLA_SPI, SPI_FMT_CTRL_MASK_FRF, 0) | \
485 GEN_MASK_BITS(SPI_CLK_POL_IDLE_LOW, SPI_FMT_CTRL_MASK_POL, 2) | \
486 GEN_MASK_BITS(SPI_CLK_FIRST_EDGE, SPI_FMT_CTRL_MASK_PHA, 3) | \
487 GEN_MASK_BITS(SPI_DATA_BITS_8, SPI_FMT_CTRL_MASK_DSS, 4) \
488)
489
490#define DEFAULT_SPI_FIFO_CTRL ( \
491 GEN_MASK_BITS(SPI_DMA_DISABLED, SPI_FIFO_CTRL_MASK_RX_DMA_EN, 2) | \
492 GEN_MASK_BITS(SPI_DMA_DISABLED, SPI_FIFO_CTRL_MASK_TX_DMA_EN, 3) | \
493 GEN_MASK_BITS(SPI_FIFO_THRES_8, SPI_FIFO_CTRL_MASK_RX_FIFO_THRES, 4) | \
494 GEN_MASK_BITS(SPI_FIFO_THRES_8, SPI_FIFO_CTRL_MASK_TX_FIFO_THRES, 8) \
495)
496
497
498/**
499 * load_ssp_default_config - Load default configuration for SSP
500 * @pl022: SSP driver private data structure
501 */
502static void load_spi_default_config(struct zx297520v2_spi *zx297520v2spi)
503{
504 writel(DEFAULT_SPI_COM_CTRL, SPI_COM_CTRL(zx297520v2spi->virtbase));
505 writel(DEFAULT_SPI_FMT_CTRL, SPI_FMT_CTRL(zx297520v2spi->virtbase));
506 writel(DEFAULT_SPI_FIFO_CTRL, SPI_FIFO_CTRL(zx297520v2spi->virtbase));
507 writel(CLEAR_ALL_INTERRUPTS, SPI_INTR_SR_SCLR(zx297520v2spi->virtbase));
508 writel(DISABLE_ALL_INTERRUPTS, SPI_INTR_EN(zx297520v2spi->virtbase));
509}
510
/**
 * write - push words from the current transfer's TX buffer into the FIFO
 * @zx297520v2spi: SPI driver private data structure
 *
 * While the TX-FIFO-empty flag is set and TX data remains, writes one
 * word of the width selected by zx297520v2spi->write to the data
 * register, then polls the busy flag until the word has been shifted
 * out, and advances the tx cursor by the chip's n_bytes.
 */
static void write(struct zx297520v2_spi *zx297520v2spi)
{

	dev_dbg(&zx297520v2spi->pdev->dev,
		"%s, rx: %p, rxend: %p, tx: %p, txend: %p\n",
		__func__, zx297520v2spi->rx, zx297520v2spi->rx_end, zx297520v2spi->tx, zx297520v2spi->tx_end);

	while ((readl(SPI_FIFO_SR(zx297520v2spi->virtbase)) & SPI_FIFO_SR_MASK_TX_FIFO_EMPTY)
		&& (zx297520v2spi->tx < zx297520v2spi->tx_end)) {
		switch (zx297520v2spi->write) {
		case WRITING_NULL:
			/* No TX buffer: clock the bus by sending zeroes. */
			writew(0x0, SPI_DR(zx297520v2spi->virtbase));
			break;
		case WRITING_U8:
			writew(*(u8 *) (zx297520v2spi->tx), SPI_DR(zx297520v2spi->virtbase));
			break;
		case WRITING_U16:
			writew((*(u16 *) (zx297520v2spi->tx)), SPI_DR(zx297520v2spi->virtbase));
			break;
		case WRITING_U32:
			writel(*(u32 *) (zx297520v2spi->tx), SPI_DR(zx297520v2spi->virtbase));
			break;
		}
		/* NOTE(review): unbounded busy-wait with no timeout; a wedged
		 * controller would hang the CPU here. */
		while(readl(SPI_FIFO_SR(zx297520v2spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY) ;
		zx297520v2spi->tx += (zx297520v2spi->cur_chip->n_bytes);
	}
}
552
/**
 * readwriter - drain the RX FIFO and refill the TX FIFO for one pass
 * @zx297520v2spi: SPI driver private data structure
 *
 * To prevent RX overrun, the TX FIFO is only filled up to the expected
 * unused RX FIFO space (tracked in exp_fifo_level), regardless of what
 * the TX FIFO status flag indicates.  The RX FIFO is drained both before
 * and while transmitting, since the bus clock starts running as soon as
 * data is queued in the TX FIFO.
 */
static void readwriter(struct zx297520v2_spi *zx297520v2spi)
{

	dev_dbg(&zx297520v2spi->pdev->dev,
		"%s, rx: %p, rxend: %p, tx: %p, txend: %p\n",
		__func__, zx297520v2spi->rx, zx297520v2spi->rx_end, zx297520v2spi->tx, zx297520v2spi->tx_end);

	/* Read as much as you can */
	while ((readl(SPI_FIFO_SR(zx297520v2spi->virtbase)) & SPI_FIFO_SR_MASK_RX_FIFO_CNTR)
		&& (zx297520v2spi->rx < zx297520v2spi->rx_end)) {
		switch (zx297520v2spi->read) {
		case READING_NULL:
			/* NOTE(review): this drain uses readl while the inner
			 * drain below uses readw for READING_NULL -- confirm
			 * the intended data-register access width. */
			readl(SPI_DR(zx297520v2spi->virtbase));
			break;
		case READING_U8:
			*(u8 *) (zx297520v2spi->rx) =
				readw(SPI_DR(zx297520v2spi->virtbase)) & 0xFFU;
			break;
		case READING_U16:
			*(u16 *) (zx297520v2spi->rx) =
				(u16) readw(SPI_DR(zx297520v2spi->virtbase));
			break;
		case READING_U32:
			*(u32 *) (zx297520v2spi->rx) =
				readl(SPI_DR(zx297520v2spi->virtbase));
			break;
		}
		zx297520v2spi->rx += (zx297520v2spi->cur_chip->n_bytes);
		zx297520v2spi->exp_fifo_level--;
	}
	/*
	 * Write as much as possible up to the TX FIFO size
	 */
	while ((zx297520v2spi->exp_fifo_level < zx297520v2spi->vendor->fifodepth)
		&& (zx297520v2spi->tx < zx297520v2spi->tx_end)) {
		switch (zx297520v2spi->write) {
		case WRITING_NULL:
			writew(0x0, SPI_DR(zx297520v2spi->virtbase));
			break;
		case WRITING_U8:
			writew(*(u8 *) (zx297520v2spi->tx), SPI_DR(zx297520v2spi->virtbase));
			break;
		case WRITING_U16:
			writew((*(u16 *) (zx297520v2spi->tx)), SPI_DR(zx297520v2spi->virtbase));
			break;
		case WRITING_U32:
			writel(*(u32 *) (zx297520v2spi->tx), SPI_DR(zx297520v2spi->virtbase));
			break;
		}
		zx297520v2spi->tx += (zx297520v2spi->cur_chip->n_bytes);
		zx297520v2spi->exp_fifo_level++;
		/*
		 * This inner reader takes care of things appearing in the RX
		 * FIFO as we're transmitting. This will happen a lot since the
		 * clock starts running when you put things into the TX FIFO,
		 * and then things are continuously clocked into the RX FIFO.
		 */
		while ((readl(SPI_FIFO_SR(zx297520v2spi->virtbase)) & SPI_FIFO_SR_MASK_RX_FIFO_CNTR)
			&& (zx297520v2spi->rx < zx297520v2spi->rx_end)) {
			switch (zx297520v2spi->read) {
			case READING_NULL:
				readw(SPI_DR(zx297520v2spi->virtbase));
				break;
			case READING_U8:
				*(u8 *) (zx297520v2spi->rx) =
					readw(SPI_DR(zx297520v2spi->virtbase)) & 0xFFU;
				break;
			case READING_U16:
				*(u16 *) (zx297520v2spi->rx) =
					(u16) readw(SPI_DR(zx297520v2spi->virtbase));
				break;
			case READING_U32:
				*(u32 *) (zx297520v2spi->rx) =
					readl(SPI_DR(zx297520v2spi->virtbase));
				break;
			}
			zx297520v2spi->rx += (zx297520v2spi->cur_chip->n_bytes);
			zx297520v2spi->exp_fifo_level--;
		}
	}
	/*
	 * When we exit here the TX FIFO should be full and the RX FIFO
	 * should be empty
	 */
}
652
653/**
654 * next_transfer - Move to the Next transfer in the current spi message
655 * @pl022: SSP driver private data structure
656 *
657 * This function moves though the linked list of spi transfers in the
658 * current spi message and returns with the state of current spi
659 * message i.e whether its last transfer is done(STATE_DONE) or
660 * Next transfer is ready(STATE_RUNNING)
661 */
662static void *next_transfer(struct zx297520v2_spi *zx297520v2spi)
663{
664 struct spi_message *msg = zx297520v2spi->cur_msg;
665 struct spi_transfer *trans = zx297520v2spi->cur_transfer;
666
667 /* Move to next transfer */
668 if (trans->transfer_list.next != &msg->transfers) {
669 zx297520v2spi->cur_transfer =
670 list_entry(trans->transfer_list.next,
671 struct spi_transfer, transfer_list);
672 return STATE_RUNNING;
673 }
674 return STATE_DONE;
675}
676
677/*
678 * This DMA functionality is only compiled in if we have
679 * access to the generic DMA devices/DMA engine.
680 */
681#ifdef CONFIG_DMA_ENGINE
682static void unmap_free_dma_scatter(struct zx297520v2_spi *zx297520v2spi)
683{
684 /* Unmap and free the SG tables */
685 dma_unmap_sg(zx297520v2spi->dma_tx_channel->device->dev, zx297520v2spi->sgt_tx.sgl,
686 zx297520v2spi->sgt_tx.nents, DMA_TO_DEVICE);
687 dma_unmap_sg(zx297520v2spi->dma_rx_channel->device->dev, zx297520v2spi->sgt_rx.sgl,
688 zx297520v2spi->sgt_rx.nents, DMA_FROM_DEVICE);
689 sg_free_table(&zx297520v2spi->sgt_rx);
690 sg_free_table(&zx297520v2spi->sgt_tx);
691}
692
/* DMA completion callback: releases the global transfer semaphore to wake
 * whoever is waiting for the DMA to finish.  @data is unused here. */
static void dma_callback(void *data)
{
	//printk(KERN_INFO "spi:dma transfer complete\n");//YXY
	up(&g_SpiTransferSemaphore);
}
698
699/*modify by yxy:not support scatterlists dma transfer*/
700#if 0
/*
 * Disabled (#if 0) scatterlist-based DMA completion callback: optionally
 * dumps the RX/TX buffers, unmaps and frees the SG tables, accounts the
 * transferred length, and advances the message via the pump_transfers
 * tasklet.  Superseded by the semaphore-based dma_callback() above.
 */
static void dma_callback(void *data)
{
	struct zx297520v2_spi *zx297520v2spi = data;
	struct spi_message *msg = zx297520v2spi->cur_msg;

	BUG_ON(!zx297520v2spi->sgt_rx.sgl);

#ifdef VERBOSE_DEBUG
	/*
	 * Optionally dump out buffers to inspect contents, this is
	 * good if you want to convince yourself that the loopback
	 * read/write contents are the same, when adopting to a new
	 * DMA engine.
	 */
	{
		struct scatterlist *sg;
		unsigned int i;

		dma_sync_sg_for_cpu(&zx297520v2spi->adev->dev,
				    zx297520v2spi->sgt_rx.sgl,
				    zx297520v2spi->sgt_rx.nents,
				    DMA_FROM_DEVICE);

		for_each_sg(zx297520v2spi->sgt_rx.sgl, sg, zx297520v2spi->sgt_rx.nents, i) {
			dev_dbg(&zx297520v2spi->adev->dev, "SPI RX SG ENTRY: %d", i);
			print_hex_dump(KERN_ERR, "SPI RX: ",
				       DUMP_PREFIX_OFFSET,
				       16,
				       1,
				       sg_virt(sg),
				       sg_dma_len(sg),
				       1);
		}
		for_each_sg(zx297520v2spi->sgt_tx.sgl, sg, zx297520v2spi->sgt_tx.nents, i) {
			dev_dbg(&zx297520v2spi->adev->dev, "SPI TX SG ENTRY: %d", i);
			print_hex_dump(KERN_ERR, "SPI TX: ",
				       DUMP_PREFIX_OFFSET,
				       16,
				       1,
				       sg_virt(sg),
				       sg_dma_len(sg),
				       1);
		}
	}
#endif

	unmap_free_dma_scatter(zx297520v2spi);

	/* Update total bytes transferred */
	msg->actual_length += zx297520v2spi->cur_transfer->len;
	/*if (zx297520v2spi->cur_transfer->cs_change)
		zx297520v2spi->cur_chip->
			cs_control(SSP_CHIP_DESELECT);*/

	/* Move to next transfer */
	msg->state = next_transfer(zx297520v2spi);
	tasklet_schedule(&zx297520v2spi->pump_transfers);
}
759
/*
 * setup_dma_scatter - fill a pre-allocated SG table for one transfer buffer
 * (part of the disabled #if 0 scatterlist DMA path).
 *
 * Splits @length bytes of @buffer across the table's entries at page
 * granularity.  When @buffer is NULL, every entry maps the driver's dummy
 * page instead so the bus can still be driven/drained without a real buffer.
 */
static void setup_dma_scatter(struct zx297520v2_spi *zx297520v2spi,
			       void *buffer,
			       unsigned int length,
			       struct sg_table *sgtab)
{
	struct scatterlist *sg;
	int bytesleft = length;
	void *bufp = buffer;
	int mapbytes;
	int i;

	if (buffer) {
		for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
			/*
			 * If there are less bytes left than what fits
			 * in the current page (plus page alignment offset)
			 * we just feed in this, else we stuff in as much
			 * as we can.
			 */
			if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
				mapbytes = bytesleft;
			else
				mapbytes = PAGE_SIZE - offset_in_page(bufp);
			sg_set_page(sg, virt_to_page(bufp),
				    mapbytes, offset_in_page(bufp));
			bufp += mapbytes;
			bytesleft -= mapbytes;
			dev_dbg(&zx297520v2spi->pdev->dev,
				"set RX/TX target page @ %p, %d bytes, %d left\n",
				bufp, mapbytes, bytesleft);
		}
	} else {
		/* Map the dummy buffer on every page */
		for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
			if (bytesleft < PAGE_SIZE)
				mapbytes = bytesleft;
			else
				mapbytes = PAGE_SIZE;
			sg_set_page(sg, virt_to_page(zx297520v2spi->dummypage),
				    mapbytes, 0);
			bytesleft -= mapbytes;
			dev_dbg(&zx297520v2spi->pdev->dev,
				"set RX/TX to dummy page %d bytes, %d left\n",
				mapbytes, bytesleft);

		}
	}
	/* Every byte of the transfer must have been placed in some entry. */
	BUG_ON(bytesleft);
}
809#endif
810/**
811 * configure_dma - configures the channels for the next transfer
812 * @pl022: SSP driver's private data structure
813 */
814static int configure_dma(struct zx297520v2_spi *zx297520v2spi)
815{
816 dma_channel_def rx_conf ={
817 .src_addr = SPI_DR(zx297520v2spi->phybase),
818 .dest_addr = zx297520v2spi->rx,
819 .dma_control.tran_mode = TRAN_PERI_TO_MEM,
820 .dma_control.irq_mode = DMA_ALL_IRQ_ENABLE,
821 .link_addr = 0,
822 };
823 dma_channel_def tx_conf ={
824 .src_addr = zx297520v2spi->tx,
825 .dest_addr = SPI_DR(zx297520v2spi->phybase),
826 .dma_control.tran_mode = TRAN_MEM_TO_PERI,
827 .dma_control.irq_mode = DMA_ALL_IRQ_ENABLE,
828 .link_addr = 0,
829 };
830 unsigned int pages;
831 int ret;
832 int rx_sglen, tx_sglen;
833 struct dma_chan *rxchan = zx297520v2spi->dma_rx_channel;
834 struct dma_chan *txchan = zx297520v2spi->dma_tx_channel;
835 struct dma_async_tx_descriptor *rxdesc;
836 struct dma_async_tx_descriptor *txdesc;
837
838 /* Check that the channels are available */
839 if (!rxchan || !txchan)
840 return -ENODEV;
841
842 /*
843 * If supplied, the DMA burstsize should equal the FIFO trigger level.
844 * Notice that the DMA engine uses one-to-one mapping. Since we can
845 * not trigger on 2 elements this needs explicit mapping rather than
846 * calculation.
847 */
848 switch (zx297520v2spi->rx_lev_trig) {
849 case SPI_RX_1_OR_MORE_ELEM:
850 rx_conf.dma_control.src_burst_len = DMA_BURST_LEN_1;
851 rx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_1;
852 break;
853 case SPI_RX_4_OR_MORE_ELEM:
854 rx_conf.dma_control.src_burst_len = DMA_BURST_LEN_4;
855 rx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_4;
856 break;
857 case SPI_RX_8_OR_MORE_ELEM:
858 rx_conf.dma_control.src_burst_len = DMA_BURST_LEN_8;
859 rx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_8;
860 break;
861 case SPI_RX_16_OR_MORE_ELEM:
862 rx_conf.dma_control.src_burst_len = DMA_BURST_LEN_16;
863 rx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_16;
864 break;
865 case SPI_RX_32_OR_MORE_ELEM:
866 rx_conf.dma_control.src_burst_len = DMA_BURST_LEN_ALL;
867 rx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_ALL;
868 break;
869 default:
870 rx_conf.dma_control.src_burst_len = zx297520v2spi->vendor->fifodepth >> 1;
871 rx_conf.dma_control.dest_burst_len = zx297520v2spi->vendor->fifodepth >> 1;
872 break;
873 }
874
875 switch (zx297520v2spi->tx_lev_trig) {
876 case SPI_TX_1_OR_MORE_EMPTY_LOC:
877 tx_conf.dma_control.src_burst_len = DMA_BURST_LEN_1;
878 tx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_1;
879 break;
880 case SPI_TX_4_OR_MORE_EMPTY_LOC:
881 tx_conf.dma_control.src_burst_len = DMA_BURST_LEN_4;
882 tx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_4;
883 break;
884 case SPI_TX_8_OR_MORE_EMPTY_LOC:
885 tx_conf.dma_control.src_burst_len = DMA_BURST_LEN_8;
886 tx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_8;
887 break;
888 case SPI_TX_16_OR_MORE_EMPTY_LOC:
889 tx_conf.dma_control.src_burst_len = DMA_BURST_LEN_16;
890 tx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_16;
891 break;
892 case SPI_TX_32_OR_MORE_EMPTY_LOC:
893 tx_conf.dma_control.src_burst_len = DMA_BURST_LEN_ALL;
894 tx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_ALL;
895 break;
896 default:
897 tx_conf.dma_control.src_burst_len = zx297520v2spi->vendor->fifodepth >> 1;
898 tx_conf.dma_control.dest_burst_len = zx297520v2spi->vendor->fifodepth >> 1;
899 break;
900 }
901
902 switch (zx297520v2spi->read) {
903 case READING_NULL:
904 /* Use the same as for writing */
905 rx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
906 rx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
907 rx_conf.count = zx297520v2spi->cur_transfer->len;
908 break;
909 case READING_U8:
910 rx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
911 rx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
912 rx_conf.count = zx297520v2spi->cur_transfer->len;
913 break;
914 case READING_U16:
915 rx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_16BIT;
916 rx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_16BIT;
917 rx_conf.count = zx297520v2spi->cur_transfer->len;
918 break;
919 case READING_U32:
920 rx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_32BIT;
921 rx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_32BIT;
922 rx_conf.count = zx297520v2spi->cur_transfer->len;
923 break;
924 }
925
926 switch (zx297520v2spi->write) {
927 case WRITING_NULL:
928 /* Use the same as for reading */
929 tx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
930 tx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
931 tx_conf.count = zx297520v2spi->cur_transfer->len;
932 break;
933 case READING_U8:
934 tx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
935 tx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
936 tx_conf.count = zx297520v2spi->cur_transfer->len;
937 break;
938 case READING_U16:
939 tx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_16BIT;
940 tx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_16BIT;
941 tx_conf.count = zx297520v2spi->cur_transfer->len;
942 break;
943 case READING_U32:
944 tx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_32BIT;
945 tx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_32BIT;
946 tx_conf.count = zx297520v2spi->cur_transfer->len;
947 break;
948 }
949#if 0
950 /* SPI pecularity: we need to read and write the same width */
951 if (rx_conf.src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
952 rx_conf.src_addr_width = tx_conf.dst_addr_width;
953 if (tx_conf.dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
954 tx_conf.dst_addr_width = rx_conf.src_addr_width;
955 BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width);
956#endif
957 dmaengine_slave_config(rxchan,(struct dma_slave_config*) &rx_conf);
958 dmaengine_slave_config(txchan,(struct dma_slave_config*)&tx_conf);
959
960/*modify by yxy:not support scatterlists dma transfer*/
961#if 0
962 /* Create sglists for the transfers */
963 pages = DIV_ROUND_UP(zx297520v2spi->cur_transfer->len, PAGE_SIZE);
964 dev_dbg(&zx297520v2spi->pdev->dev, "using %d pages for transfer\n", pages);
965
966 ret = sg_alloc_table(&zx297520v2spi->sgt_rx, pages, GFP_ATOMIC);
967 if (ret)
968 goto err_alloc_rx_sg;
969
970 ret = sg_alloc_table(&zx297520v2spi->sgt_tx, pages, GFP_ATOMIC);
971 if (ret)
972 goto err_alloc_tx_sg;
973
974 /* Fill in the scatterlists for the RX+TX buffers */
975 setup_dma_scatter(zx297520v2spi, zx297520v2spi->rx,
976 zx297520v2spi->cur_transfer->len, &zx297520v2spi->sgt_rx);
977 setup_dma_scatter(zx297520v2spi, zx297520v2spi->tx,
978 zx297520v2spi->cur_transfer->len, &zx297520v2spi->sgt_tx);
979
980 /* Map DMA buffers */
981 rx_sglen = dma_map_sg(rxchan->device->dev, zx297520v2spi->sgt_rx.sgl,
982 zx297520v2spi->sgt_rx.nents, DMA_FROM_DEVICE);
983 if (!rx_sglen)
984 goto err_rx_sgmap;
985
986 tx_sglen = dma_map_sg(txchan->device->dev, zx297520v2spi->sgt_tx.sgl,
987 zx297520v2spi->sgt_tx.nents, DMA_TO_DEVICE);
988 if (!tx_sglen)
989 goto err_tx_sgmap;
990
991 /* Send both scatterlists */
992 rxdesc = dmaengine_prep_slave_sg(rxchan,
993 zx297520v2spi->sgt_rx.sgl,
994 rx_sglen,
995 DMA_DEV_TO_MEM,
996 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
997 if (!rxdesc)
998 goto err_rxdesc;
999
1000 txdesc = dmaengine_prep_slave_sg(txchan,
1001 zx297520v2spi->sgt_tx.sgl,
1002 tx_sglen,
1003 DMA_MEM_TO_DEV,
1004 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1005 if (!txdesc)
1006 goto err_txdesc;
1007#endif
1008
1009 /* Submit and fire RX and TX with TX last so we're ready to read! */
1010 if (zx297520v2spi->rx){
1011 rxdesc= rxchan->device->device_prep_interleaved_dma(rxchan,NULL,0);
1012 /* Put the callback on the RX transfer only, that should finish last */
1013 rxdesc->callback = dma_callback;
1014 rxdesc->callback_param = zx297520v2spi;
1015
1016 dmaengine_submit(rxdesc);
1017 dmaengine_submit(txdesc);
1018 dma_async_issue_pending(rxchan);
1019 dma_async_issue_pending(txchan);
1020 }
1021 else if (zx297520v2spi->tx){
1022 txdesc= txchan->device->device_prep_interleaved_dma(txchan,NULL,0);
1023 txdesc->callback = dma_callback;
1024 txdesc->callback_param = zx297520v2spi;
1025 dmaengine_submit(txdesc);
1026 dma_async_issue_pending(txchan);
1027 }
1028 zx297520v2spi->dma_running = true;
1029 ret = down_interruptible(&g_SpiTransferSemaphore);
1030
1031 return 0;
1032
1033err_txdesc:
1034 dmaengine_terminate_all(txchan);
1035err_rxdesc:
1036 dmaengine_terminate_all(rxchan);
1037 dma_unmap_sg(txchan->device->dev, zx297520v2spi->sgt_tx.sgl,
1038 zx297520v2spi->sgt_tx.nents, DMA_TO_DEVICE);
1039/*modify by yxy:not support scatterlists dma transfer*/
1040#if 0
1041err_tx_sgmap:
1042 dma_unmap_sg(rxchan->device->dev, zx297520v2spi->sgt_rx.sgl,
1043 zx297520v2spi->sgt_tx.nents, DMA_FROM_DEVICE);
1044err_rx_sgmap:
1045 sg_free_table(&zx297520v2spi->sgt_tx);
1046err_alloc_tx_sg:
1047 sg_free_table(&zx297520v2spi->sgt_rx);
1048err_alloc_rx_sg:
1049 return -ENOMEM;
1050 #endif
1051}
1052
1053extern bool zx29_dma_filter_fn(struct dma_chan *chan, void *param);
1054static int __devinit zx297520v2_dma_probe(struct zx297520v2_spi *zx297520v2spi)
1055{
1056 dma_cap_mask_t mask;
1057
1058 /* Try to acquire a generic DMA engine slave channel */
1059 dma_cap_zero(mask);
1060 dma_cap_set(DMA_SLAVE, mask);
1061 /*
1062 * We need both RX and TX channels to do DMA, else do none
1063 * of them.
1064 */
1065 zx297520v2spi->dma_rx_channel = dma_request_channel(mask,
1066 zx29_dma_filter_fn,
1067 zx297520v2spi->master_info->dma_rx_param);
1068 if (!zx297520v2spi->dma_rx_channel) {
1069 dev_dbg(&zx297520v2spi->pdev->dev, "no RX DMA channel!\n");
1070 goto err_no_rxchan;
1071 }
1072
1073 zx297520v2spi->dma_tx_channel = dma_request_channel(mask,
1074 zx29_dma_filter_fn,
1075 zx297520v2spi->master_info->dma_tx_param);
1076 if (!zx297520v2spi->dma_tx_channel) {
1077 dev_dbg(&zx297520v2spi->pdev->dev, "no TX DMA channel!\n");
1078 goto err_no_txchan;
1079 }
1080
1081 zx297520v2spi->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
1082 if (!zx297520v2spi->dummypage) {
1083 dev_dbg(&zx297520v2spi->pdev->dev, "no DMA dummypage!\n");
1084 goto err_no_dummypage;
1085 }
1086
1087 dev_info(&zx297520v2spi->pdev->dev, "setup for DMA on RX %s, TX %s\n",
1088 dma_chan_name(zx297520v2spi->dma_rx_channel),
1089 dma_chan_name(zx297520v2spi->dma_tx_channel));
1090
1091 return 0;
1092
1093err_no_dummypage:
1094 dma_release_channel(zx297520v2spi->dma_tx_channel);
1095err_no_txchan:
1096 dma_release_channel(zx297520v2spi->dma_rx_channel);
1097 zx297520v2spi->dma_rx_channel = NULL;
1098err_no_rxchan:
1099 dev_err(&zx297520v2spi->pdev->dev,
1100 "Failed to work in dma mode, work without dma!\n");
1101 return -ENODEV;
1102}
1103
1104static void terminate_dma(struct zx297520v2_spi *zx297520v2spi)
1105{
1106 struct dma_chan *rxchan = zx297520v2spi->dma_rx_channel;
1107 struct dma_chan *txchan = zx297520v2spi->dma_tx_channel;
1108
1109 dmaengine_terminate_all(rxchan);
1110 dmaengine_terminate_all(txchan);
1111 unmap_free_dma_scatter(zx297520v2spi);
1112 zx297520v2spi->dma_running = false;
1113}
1114
1115static void zx297520v2_dma_remove(struct zx297520v2_spi *zx297520v2spi)
1116{
1117 if (zx297520v2spi->dma_running)
1118 terminate_dma(zx297520v2spi);
1119 if (zx297520v2spi->dma_tx_channel)
1120 dma_release_channel(zx297520v2spi->dma_tx_channel);
1121 if (zx297520v2spi->dma_rx_channel)
1122 dma_release_channel(zx297520v2spi->dma_rx_channel);
1123 kfree(zx297520v2spi->dummypage);
1124}
1125
1126#else
/* DMA engine support compiled out: always report failure so that callers
 * fall back to interrupt or polling transfer mode. */
static inline int configure_dma(struct zx297520v2_spi *zx297520v2spi)
{
	return -ENODEV;
}
1131
/* DMA engine support compiled out: probing is a successful no-op. */
static inline int zx297520v2_dma_probe(struct zx297520v2_spi *zx297520v2spi)
{
	return 0;
}
1136
/* DMA engine support compiled out: nothing to release. */
static inline void zx297520v2_dma_remove(struct zx297520v2_spi *zx297520v2spi)
{
}
1140#endif
1141
1142/**
1143 * pl022_interrupt_handler - Interrupt handler for SSP controller
1144 *
1145 * This function handles interrupts generated for an interrupt based transfer.
1146 * If a receive overrun (ROR) interrupt is there then we disable SSP, flag the
1147 * current message's state as STATE_ERROR and schedule the tasklet
1148 * pump_transfers which will do the postprocessing of the current message by
1149 * calling giveback(). Otherwise it reads data from RX FIFO till there is no
1150 * more data, and writes data in TX FIFO till it is not full. If we complete
1151 * the transfer we move to the next transfer and schedule the tasklet.
1152 */
1153static irqreturn_t zx297520v2_interrupt_handler(int irq, void *dev_id)
1154{
1155 struct zx297520v2_spi *zx297520v2spi = dev_id;
1156 struct spi_message *msg = zx297520v2spi->cur_msg;
1157 u32 irq_status = 0;
1158 u16 flag = 0;
1159
1160 dev_dbg(&zx297520v2spi->pdev->dev,"in function %s \n", __FUNCTION__);
1161
1162 if (unlikely(!msg)) {
1163 dev_err(&zx297520v2spi->pdev->dev,
1164 "bad message state in interrupt handler");
1165 /* Never fail */
1166 return IRQ_HANDLED;
1167 }
1168
1169 /* Read the Interrupt Status Register */
1170 irq_status = readl(SPI_INTR_SR_SCLR(zx297520v2spi->virtbase));
1171 /* clear all Interrupt */
1172 writel(CLEAR_ALL_INTERRUPTS, SPI_INTR_SR_SCLR(zx297520v2spi->virtbase));
1173
1174 dev_dbg(&zx297520v2spi->pdev->dev, "irq status 0x%X", irq_status);
1175
1176 if (unlikely(!irq_status))
1177 return IRQ_NONE;
1178
1179 /*
1180 * This handles the FIFO interrupts, the timeout
1181 * interrupts are flatly ignored, they cannot be
1182 * trusted.
1183 */
1184 if ( unlikely(irq_status & SPI_INTR_SR_SCLR_MASK_RX_OVERRUN_INTR)
1185 || unlikely(irq_status & SPI_INTR_SR_SCLR_MASK_TX_UNDERRUN_INTR) ) {
1186 /*
1187 * Overrun interrupt - bail out since our Data has been
1188 * corrupted
1189 */
1190 if ( unlikely(irq_status & SPI_INTR_SR_SCLR_MASK_RX_OVERRUN_INTR) )
1191 dev_err(&zx297520v2spi->pdev->dev, "RXFIFO is OVERRUN \n");
1192 if ( unlikely(irq_status & SPI_INTR_SR_SCLR_MASK_TX_UNDERRUN_INTR))
1193 dev_err(&zx297520v2spi->pdev->dev, "TXFIFO is UNDERRUN \n");
1194
1195 /*
1196 * Disable and clear interrupts, disable SSP,
1197 * mark message with bad status so it can be
1198 * retried.
1199 */
1200 writel(DISABLE_ALL_INTERRUPTS, SPI_INTR_EN(zx297520v2spi->virtbase));
1201 writel(CLEAR_ALL_INTERRUPTS, SPI_INTR_SR_SCLR(zx297520v2spi->virtbase));
1202 writel((readl(SPI_COM_CTRL(zx297520v2spi->virtbase)) & (~SPI_COM_CTRL_MASK_SSPE)),
1203 SPI_COM_CTRL(zx297520v2spi->virtbase));
1204 msg->state = STATE_ERROR;
1205
1206 /* Schedule message queue handler */
1207 tasklet_schedule(&zx297520v2spi->pump_transfers);
1208 return IRQ_HANDLED;
1209 }
1210
1211 if (zx297520v2spi->rx != NULL )
1212 readwriter(zx297520v2spi);
1213 else
1214 //write(zx297520v2spi);
1215
1216 dev_dbg( &zx297520v2spi->pdev->dev, "%s tx %p tx_end %p rx %p rx_end %p\n", __FUNCTION__,
1217 zx297520v2spi->tx,
1218 zx297520v2spi->tx_end,
1219 zx297520v2spi->rx,
1220 zx297520v2spi->rx_end);
1221
1222 if ((zx297520v2spi->tx == zx297520v2spi->tx_end) && (flag == 0)) {
1223 u32 irq_flag = SPI_INTR_EN_MASK_RX_FULL_IE|SPI_INTR_EN_MASK_RX_OVERRUN_IE|SPI_INTR_EN_MASK_RX_THRES_IE;
1224 flag = 1;
1225 /* Disable Transmit interrupt, enable receive interrupt */
1226 /*writel((readl(SPI_INTR_EN(zx297520v2spi->virtbase)) &
1227 ~SSP_CR1_MASK_TIE) | SSP_CR1_MASK_RIE,
1228 SSP_CR1(zx297520v27502ssp->virtbase));*/
1229 writel(irq_flag, SPI_INTR_EN(zx297520v2spi ->virtbase));
1230 }
1231
1232 /*
1233 * Since all transactions must write as much as shall be read,
1234 * we can conclude the entire transaction once RX is complete.
1235 * At this point, all TX will always be finished.
1236 */
1237 if (zx297520v2spi->rx >= zx297520v2spi->rx_end) {
1238 /*writew(DISABLE_ALL_INTERRUPTS,
1239 SSP_IMSC(pl022->virtbase));*/
1240 writel(DISABLE_ALL_INTERRUPTS, SPI_INTR_EN(zx297520v2spi->virtbase));
1241 writel(CLEAR_ALL_INTERRUPTS, SPI_INTR_SR_SCLR(zx297520v2spi->virtbase));
1242 if (unlikely(zx297520v2spi->rx > zx297520v2spi->rx_end)) {
1243 dev_warn(&zx297520v2spi->pdev->dev, "read %u surplus "
1244 "bytes (did you request an odd "
1245 "number of bytes on a 16bit bus?)\n",
1246 (u32) (zx297520v2spi->rx - zx297520v2spi->rx_end));
1247 }
1248 /* Update total bytes transferred */
1249 msg->actual_length += zx297520v2spi->cur_transfer->len;
1250// if (zx297520v27502ssp->cur_transfer->cs_change)
1251// zx297520v27502ssp->cur_chip->cs_control(SSP_CHIP_DESELECT);
1252 /* Move to next transfer */
1253 msg->state = next_transfer(zx297520v2spi);
1254 tasklet_schedule(&zx297520v2spi->pump_transfers);
1255 return IRQ_HANDLED;
1256 }
1257 return IRQ_HANDLED;
1258}
1259
1260/**
1261 * This sets up the pointers to memory for the next message to
1262 * send out on the SPI bus.
1263 */
1264static int set_up_next_transfer(struct zx297520v2_spi *zx297520v2spi,
1265 struct spi_transfer *transfer)
1266{
1267 int residue;
1268
1269 /* Sanity check the message for this bus width */
1270 residue = zx297520v2spi->cur_transfer->len % zx297520v2spi->cur_chip->n_bytes;
1271 if (unlikely(residue != 0)) {
1272 dev_err(&zx297520v2spi->pdev->dev,
1273 "message of %u bytes to transmit but the current "
1274 "chip bus has a data width of %u bytes!\n",
1275 zx297520v2spi->cur_transfer->len,
1276 zx297520v2spi->cur_chip->n_bytes);
1277 dev_err(&zx297520v2spi->pdev->dev, "skipping this message\n");
1278 return -EIO;
1279 }
1280 if((void *)transfer->tx_buf != NULL){
1281 zx297520v2spi->tx = (void *)transfer->tx_buf;
1282 zx297520v2spi->tx_end = zx297520v2spi->tx + zx297520v2spi->cur_transfer->len;
1283 }
1284 if((void *)transfer->rx_buf != NULL){
1285 zx297520v2spi->rx = (void *)transfer->rx_buf;
1286 zx297520v2spi->rx_end = zx297520v2spi->rx + zx297520v2spi->cur_transfer->len;
1287 }
1288 zx297520v2spi->write =
1289 zx297520v2spi->tx ? zx297520v2spi->cur_chip->write : WRITING_NULL;
1290 zx297520v2spi->read = zx297520v2spi->rx ? zx297520v2spi->cur_chip->read : READING_NULL;
1291 return 0;
1292}
1293
1294/**
1295 * pump_transfers - Tasklet function which schedules next transfer
1296 * when running in interrupt or DMA transfer mode.
1297 * @data: SSP driver private data structure
1298 *
1299 */
1300static void pump_transfers(unsigned long data)
1301{
1302 struct zx297520v2_spi *zx297520v2spi = (struct zx297520v2_spi *) data;
1303 struct spi_message *message = NULL;
1304 struct spi_transfer *transfer = NULL;
1305 struct spi_transfer *previous = NULL;
1306
1307 dev_dbg(&zx297520v2spi->pdev->dev,"in function %s\n", __FUNCTION__);
1308
1309 /* Get current state information */
1310 message = zx297520v2spi->cur_msg;
1311 transfer = zx297520v2spi->cur_transfer;
1312
1313 /* Handle for abort */
1314 if (message->state == STATE_ERROR) {
1315 message->status = -EIO;
1316 giveback(zx297520v2spi);
1317 return;
1318 }
1319
1320 /* Handle end of message */
1321 if (message->state == STATE_DONE) {
1322 message->status = 0;
1323 giveback(zx297520v2spi);
1324 return;
1325 }
1326
1327 /* Delay if requested at end of transfer before CS change */
1328 if (message->state == STATE_RUNNING) {
1329 previous = list_entry(transfer->transfer_list.prev,
1330 struct spi_transfer,
1331 transfer_list);
1332 if (previous->delay_usecs)
1333 /*
1334 * FIXME: This runs in interrupt context.
1335 * Is this really smart?
1336 */
1337 udelay(previous->delay_usecs);
1338
1339 /* Reselect chip select only if cs_change was requested */
1340// if (previous->cs_change)
1341// zx297520v2spi->cur_chip->cs_control(SSP_CHIP_SELECT);
1342 } else {
1343 /* STATE_START */
1344 message->state = STATE_RUNNING;
1345 }
1346
1347 if (set_up_next_transfer(zx297520v2spi, transfer)) {
1348 message->state = STATE_ERROR;
1349 message->status = -EIO;
1350 giveback(zx297520v2spi);
1351 return;
1352 }
1353 /* Flush the FIFOs and let's go! */
1354 flush(zx297520v2spi);
1355
1356 if (zx297520v2spi->cur_chip->enable_dma) {
1357 if (configure_dma(zx297520v2spi)) {
1358 dev_dbg(&zx297520v2spi->pdev->dev,
1359 "configuration of DMA failed, fall back to interrupt mode\n");
1360 goto err_config_dma;
1361 }
1362 return;
1363 }
1364
1365err_config_dma:
1366 /* enable all interrupts except RX */
1367 writel( (SPI_INTR_EN_MASK_TX_UNDERRUN_IE | SPI_INTR_EN_MASK_TX_THRES_IE | SPI_INTR_EN_MASK_TX_EMPTY_IE),
1368 SPI_INTR_EN(zx297520v2spi->virtbase) );
1369 // writew((readl(SSP_CR1(zx297520v27502ssp->virtbase))|SSP_CR1_MASK_TIE|SSP_CR1_MASK_RORIE)&(~SSP_CR1_MASK_RIE),
1370 // SSP_CR1(zx297520v27502ssp->virtbase));
1371}
1372
/*
 * do_interrupt_dma_transfer - kick off the current transfer, in DMA mode
 * when the chip requests it (falling back to interrupt mode if DMA setup
 * fails), then enable the SSP block with the chosen interrupt mask.
 */
static void do_interrupt_dma_transfer(struct zx297520v2_spi *zx297520v2spi)
{
	/*
	 * Default is to enable all interrupts except RX -
	 * this will be enabled once TX is complete
	 */
	u32 irqflags = ENABLE_ALL_INTERRUPTS;

	dev_dbg(&zx297520v2spi->pdev->dev,"in function %s\n", __FUNCTION__);

	/* Enable target chip, if not already active */
	//if (!zx297520v27502ssp->next_msg_cs_active)
	//	zx297520v27502ssp->cur_chip->cs_control(SSP_CHIP_SELECT);

	if (set_up_next_transfer(zx297520v2spi, zx297520v2spi->cur_transfer)) {
		/* Error path */
		zx297520v2spi->cur_msg->state = STATE_ERROR;
		zx297520v2spi->cur_msg->status = -EIO;
		giveback(zx297520v2spi);
		return;
	}
	/* If we're using DMA, set up DMA here */
	if (zx297520v2spi->cur_chip->enable_dma) {
		/* Configure DMA transfer */
		if (configure_dma(zx297520v2spi)) {
			dev_dbg(&zx297520v2spi->pdev->dev,
				"configuration of DMA failed, fall back to interrupt mode\n");
			goto err_config_dma;
		}
		/* Disable interrupts in DMA mode, IRQ from DMA controller */
		irqflags = DISABLE_ALL_INTERRUPTS;
	}

	/*
	 * NOTE(review): this block also runs after a *successful* DMA setup
	 * (only the goto above skips it), overwriting the
	 * DISABLE_ALL_INTERRUPTS chosen for DMA mode whenever tx is
	 * non-NULL.  Confirm whether DMA mode really wants the FIFO
	 * interrupts re-enabled here.
	 */
	if(zx297520v2spi ->tx != NULL && zx297520v2spi ->rx != NULL){
		/* enable all interrupts */
		irqflags = ENABLE_ALL_INTERRUPTS;
	}else if(zx297520v2spi->tx != NULL){
		/*enable tx interrupts*/
		irqflags = SPI_INTR_EN_MASK_TX_EMPTY_IE
				  |SPI_INTR_EN_MASK_TX_THRES_IE
				  |SPI_INTR_EN_MASK_TX_UNDERRUN_IE;
	}
err_config_dma:
	/* Enable SSP, turn on interrupts */
	writel(readl(SPI_COM_CTRL(zx297520v2spi->virtbase)) | SPI_COM_CTRL_MASK_SSPE,
	       SPI_COM_CTRL(zx297520v2spi->virtbase));

	/* config interrupts */
	writel(irqflags, SPI_INTR_EN(zx297520v2spi->virtbase));

	/*writew(irqflags, SSP_IMSC(zx297520v27502ssp->virtbase));*/
}
1425
/*
 * do_polling_transfer - run every transfer of the current message by
 * busy-waiting on the FIFOs, then hand the message back via giveback().
 * Each transfer is bounded by SPI_POLLING_TIMEOUT; on timeout the message
 * is failed with -EIO.
 */
static void do_polling_transfer(struct zx297520v2_spi *zx297520v2spi)
{
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;
	struct chip_data *chip;
	unsigned long time, timeout;

	chip = zx297520v2spi->cur_chip;
	message = zx297520v2spi->cur_msg;

	while (message->state != STATE_DONE) {
		/* Handle for abort */
		if (message->state == STATE_ERROR)
			break;
		transfer = zx297520v2spi->cur_transfer;

		/* Delay if requested at end of transfer */
		if (message->state == STATE_RUNNING) {
			previous =
			    list_entry(transfer->transfer_list.prev,
				       struct spi_transfer, transfer_list);
			if (previous->delay_usecs)
				udelay(previous->delay_usecs);
			//if (previous->cs_change)
			//	zx297520v27502ssp->cur_chip->cs_control(SSP_CHIP_SELECT);
		} else {
			/* STATE_START */
			message->state = STATE_RUNNING;
			//if (!zx297520v27502ssp->next_msg_cs_active)
			//	zx297520v27502ssp->cur_chip->cs_control(SSP_CHIP_SELECT);
		}

		/* Configuration Changing Per Transfer */
		if (set_up_next_transfer(zx297520v2spi, transfer)) {
			/* Error path */
			message->state = STATE_ERROR;
			break;
		}
		/* Flush FIFOs and enable SSP */
		flush(zx297520v2spi);
		//writel((readl(SSP_CR1(zx297520v27502ssp->virtbase)) | SSP_CR1_MASK_SSE),
		//	   SSP_CR1(zx297520v27502ssp->virtbase));
		writel(readl(SPI_COM_CTRL(zx297520v2spi->virtbase)) | SPI_COM_CTRL_MASK_SSPE,
		   	SPI_COM_CTRL(zx297520v2spi->virtbase));

		dev_dbg(&zx297520v2spi->pdev->dev, "polling transfer ongoing ...\n");

		timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT);

		/*
		 * NOTE(review): an rx-only transfer (tx == NULL, rx != NULL)
		 * matches neither branch below and moves no data — confirm
		 * rx-only polling transfers are unsupported by design.
		 * Also note "time" is sampled *before* each FIFO pass, so
		 * the timeout check compares a pre-operation timestamp.
		 */
		if(zx297520v2spi->tx != NULL && zx297520v2spi->rx != NULL )
		{	/*read and write*/
			while (zx297520v2spi->tx < zx297520v2spi->tx_end || zx297520v2spi->rx < zx297520v2spi->rx_end) {
				time = jiffies;
				readwriter(zx297520v2spi);
				if (time_after(time, timeout)) {
					dev_warn(&zx297520v2spi->pdev->dev,
					"%s: timeout!\n", __func__);
					message->state = STATE_ERROR;
					goto out;
				}
				cpu_relax();
			}
		}
		else if (zx297520v2spi->tx != NULL )
		{/* only write */
			while (zx297520v2spi->tx < zx297520v2spi->tx_end ) {
				time = jiffies;
				write(zx297520v2spi);
				if (time_after(time, timeout)) {
					dev_warn(&zx297520v2spi->pdev->dev,
					"%s: timeout!\n", __func__);
					message->state = STATE_ERROR;
					goto out;
				}
				cpu_relax();
			}
		}
		/* Update total byte transferred */
		message->actual_length += zx297520v2spi->cur_transfer->len;
//		if (zx297520v2spi->cur_transfer->cs_change)
//			zx297520v2spi->cur_chip->cs_control(SSP_CHIP_DESELECT);
		/* Move to next transfer */
		message->state = next_transfer(zx297520v2spi);
	}
out:
	/* Handle end of message */
	if (message->state == STATE_DONE)
		message->status = 0;
	else
		message->status = -EIO;

	giveback(zx297520v2spi);
	return;
}
1521
1522static int zx297520v2_transfer_one_message(struct spi_master *master,
1523 struct spi_message *msg)
1524{
1525 struct zx297520v2_spi *zx297520v2spi = spi_master_get_devdata(master);
1526
1527 //printk(KERN_INFO "ssp:in function %s \n", __FUNCTION__);
1528
1529 /* Initial message state */
1530 zx297520v2spi->cur_msg = msg;
1531 msg->state = STATE_START;
1532
1533 zx297520v2spi->cur_transfer = list_entry(msg->transfers.next,
1534 struct spi_transfer, transfer_list);
1535
1536 /* Setup the SPI using the per chip configuration */
1537 zx297520v2spi->cur_chip = spi_get_ctldata(msg->spi);
1538
1539 restore_state(zx297520v2spi);
1540 flush(zx297520v2spi);
1541
1542 if (zx297520v2spi->cur_chip->xfer_type == POLLING_TRANSFER ||zx297520v2spi->cur_transfer->len <=16)
1543 do_polling_transfer(zx297520v2spi);
1544 else
1545 do_interrupt_dma_transfer(zx297520v2spi);
1546
1547 return 0;
1548}
1549
/*
 * zx297520v2_prepare_transfer_hardware - spi_master prepare hook.
 * Nothing to do: the SSP clock is enabled in setup() and the block is
 * enabled per transfer.  (The pl022 driver syncs with runtime PM here;
 * this driver does not use runtime PM.)
 */
static int zx297520v2_prepare_transfer_hardware(struct spi_master *master)
{
	return 0;
}
1565
1566static int zx297520v2_unprepare_transfer_hardware(struct spi_master *master)
1567{
1568 struct zx297520v2_spi *zx297520v2spi = spi_master_get_devdata(master);
1569
1570 //dev_warn(&zx297520v27502ssp->pdev->dev,"in function %s\n", __FUNCTION__);
1571
1572 /* nothing more to do - disable spi/ssp and power off */
1573 writel(readl(SPI_COM_CTRL(zx297520v2spi->virtbase)) & ~ SPI_COM_CTRL_MASK_SSPE,
1574 SPI_COM_CTRL(zx297520v2spi->virtbase));
1575 #if 0
1576 if (pl022->master_info->autosuspend_delay > 0) {
1577 pm_runtime_mark_last_busy(&pl022->adev->dev);
1578 pm_runtime_put_autosuspend(&pl022->adev->dev);
1579 } else {
1580 pm_runtime_put(&pl022->adev->dev);
1581 }
1582 #endif
1583 return 0;
1584}
1585
1586static int verify_controller_parameters(struct zx297520v2_spi *zx297520v2spi,
1587 struct spi_config_chip const *chip_info)
1588{
1589 if ((chip_info->iface < SPI_INTERFACE_MOTOROLA_SPI)
1590 || (chip_info->iface > SPI_INTERFACE_TI_SYNC_SERIAL)) {
1591 dev_err(&zx297520v2spi->pdev->dev,
1592 "interface is configured incorrectly\n");
1593 return -EINVAL;
1594 }
1595
1596 if ((chip_info->hierarchy != SPI_MASTER)
1597 && (chip_info->hierarchy != SPI_SLAVE)) {
1598 dev_err(&zx297520v2spi->pdev->dev,
1599 "hierarchy is configured incorrectly\n");
1600 return -EINVAL;
1601 }
1602 if ((chip_info->com_mode != INTERRUPT_TRANSFER)
1603 && (chip_info->com_mode != DMA_TRANSFER)
1604 && (chip_info->com_mode != POLLING_TRANSFER)) {
1605 dev_err(&zx297520v2spi->pdev->dev,
1606 "Communication mode is configured incorrectly\n");
1607 return -EINVAL;
1608 }
1609 switch (chip_info->rx_lev_trig) {
1610 case SPI_RX_1_OR_MORE_ELEM:
1611 case SPI_RX_4_OR_MORE_ELEM:
1612 case SPI_RX_8_OR_MORE_ELEM:
1613 /* These are always OK, all variants can handle this */
1614 break;
1615 case SPI_RX_16_OR_MORE_ELEM:
1616 if (zx297520v2spi->vendor->fifodepth < 16) {
1617 dev_err(&zx297520v2spi->pdev->dev,
1618 "RX FIFO Trigger Level is configured incorrectly\n");
1619 return -EINVAL;
1620 }
1621 break;
1622 case SPI_RX_32_OR_MORE_ELEM:
1623 if (zx297520v2spi->vendor->fifodepth < 32) {
1624 dev_err(&zx297520v2spi->pdev->dev,
1625 "RX FIFO Trigger Level is configured incorrectly\n");
1626 return -EINVAL;
1627 }
1628 break;
1629 default:
1630 dev_err(&zx297520v2spi->pdev->dev,
1631 "RX FIFO Trigger Level is configured incorrectly\n");
1632 return -EINVAL;
1633 break;
1634 }
1635 switch (chip_info->tx_lev_trig) {
1636 case SPI_TX_1_OR_MORE_EMPTY_LOC:
1637 case SPI_TX_4_OR_MORE_EMPTY_LOC:
1638 case SPI_TX_8_OR_MORE_EMPTY_LOC:
1639 /* These are always OK, all variants can handle this */
1640 break;
1641 case SPI_TX_16_OR_MORE_EMPTY_LOC:
1642 if (zx297520v2spi->vendor->fifodepth < 16) {
1643 dev_err(&zx297520v2spi->pdev->dev,
1644 "TX FIFO Trigger Level is configured incorrectly\n");
1645 return -EINVAL;
1646 }
1647 break;
1648 case SPI_TX_32_OR_MORE_EMPTY_LOC:
1649 if (zx297520v2spi->vendor->fifodepth < 32) {
1650 dev_err(&zx297520v2spi->pdev->dev,
1651 "TX FIFO Trigger Level is configured incorrectly\n");
1652 return -EINVAL;
1653 }
1654 break;
1655 default:
1656 dev_err(&zx297520v2spi->pdev->dev,
1657 "TX FIFO Trigger Level is configured incorrectly\n");
1658 return -EINVAL;
1659 break;
1660 }
1661
1662 return 0;
1663}
1664
1665static inline u32 spi_rate(u32 rate, u16 cpsdvsr, u16 scr)
1666{
1667 return rate / (cpsdvsr * (1 + scr));
1668}
1669
1670static int calculate_effective_freq(struct zx297520v2_spi *zx297520v2spi, u32 freq, u8* div)
1671{
1672 u8 clk_div;
1673 /*div from src clk 104M*/
1674 /* f(ssp_clk) = 2*f(ssp_sclk_out) */
1675 clk_div = zx297520v2spi->clkfreq /( freq *2);
1676 if( clk_div < DIV_MIN+1 || clk_div > DIV_MAX+1 )
1677 {
1678 dev_err(&zx297520v2spi->pdev->dev, "error!!! speed is %d Hz out of rang",freq );
1679 return -ENOTSUPP;
1680 }
1681 *div = clk_div;
1682 return 0;
1683}
1684
/* Capabilities of the ARM SSP variant: 16-entry FIFOs, up to 32 bits per
 * word, loopback mode supported. */
static struct vendor_data vendor_arm = {
	.fifodepth = 16,
	.max_bpw = 32,
	.loopback = true,
};
/* GPIO pad resources for SPI0 (TXD/CLK/CS); the RXD entry is compiled
 * out below. */
static struct resource spi0_gpio_resources[] ={
	[0]={
		.start = GPIO_AP_SPI0_TXD,
		.end = GPIO_AP_SPI0_TXD,
		.name = "txd",
		.flags = IORESOURCE_IO,
	},
	[1]={
		.start = GPIO_AP_SPI0_CLK,
		.end = GPIO_AP_SPI0_CLK,
		.name = "clk",
		.flags = IORESOURCE_IO,
	},
	[2]={
		.start = GPIO_AP_SPI0_CS,
		.end = GPIO_AP_SPI0_CS,
		.name = "cs",
		.flags = IORESOURCE_IO,
	},
#if 0
	[3]={
		.start = GPIO_AP_SPI0_RXD,
		.end = GPIO_AP_SPI0_RXD,
		.name = "rxd",
		.flags = IORESOURCE_IO,
	}
#endif
};
/* GPIO pad resources for SPI1 (TXD/CLK/CS); the RXD entry is compiled
 * out below, mirroring the SPI0 table. */
static struct resource spi1_gpio_resources[] ={
	[0]={
		.start = GPIO_AP_SPI1_TXD,
		.end = GPIO_AP_SPI1_TXD,
		.name = "txd",
		.flags = IORESOURCE_IO,
	},
	[1]={
		.start = GPIO_AP_SPI1_CLK,
		.end = GPIO_AP_SPI1_CLK,
		.name = "clk",
		.flags = IORESOURCE_IO,
	},
	[2]={
		.start = GPIO_AP_SPI1_CS,
		.end = GPIO_AP_SPI1_CS,
		.name = "cs",
		.flags = IORESOURCE_IO,
	},
#if 0
	[3]={
		.start = GPIO_AP_SPI1_RXD,
		.end = GPIO_AP_SPI1_RXD,
		.name = "rxd",
		.flags = IORESOURCE_IO,
	}
#endif
};
1746/*
1747 * A piece of default chip info unless the platform
1748 * supplies it.
1749 */
static const struct spi_config_chip spi_default_chip_info = {
	.com_mode = POLLING_TRANSFER,		/* simplest/safest default mode */
	.iface = SPI_INTERFACE_MOTOROLA_SPI,
	.hierarchy = SPI_MASTER,
	.slave_tx_disable = DO_NOT_DRIVE_TX,
	.rx_lev_trig = SPI_RX_8_OR_MORE_ELEM,	/* half of the 16-entry FIFO */
	.tx_lev_trig = SPI_TX_8_OR_MORE_EMPTY_LOC,
//	.ctrl_len = SSP_BITS_8,
//	.wait_state = SSP_MWIRE_WAIT_ZERO,
//	.duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
	.cs_control = null_cs_control,		/* no-op chip-select handler */
};
1762
1763/*
1764* spi ʹÓÃGPIOģʽ¶ÁÈ¡LCD µÄID Begin
1765*/
1766#define SPI_GPIO_FUNCTION 1
1767#define SPI_GPIO_GPIO 0
1768
1769#define SPI_GPIO_HIGH 1
1770#define SPI_GPIO_LOW 0
1771
/* Mux the SPI0 pads back to their SPI controller (alternate) function. */
static void spi_set_gpio_function(void)
{
	/* Select the SPI function on each pad */
	zx29_gpio_function_sel(GPIO_AP_SPI0_CS, SPI_GPIO_FUNCTION);
	zx29_gpio_function_sel(GPIO_AP_SPI0_CLK,SPI_GPIO_FUNCTION);
	zx29_gpio_function_sel(GPIO_AP_SPI0_RXD,SPI_GPIO_FUNCTION);
	zx29_gpio_function_sel(GPIO_AP_SPI0_TXD,SPI_GPIO_FUNCTION);
}
/* Mux the SPI0 pads to plain GPIO mode for bit-banging.
 * NOTE(review): these use the GPIO3x_GPIO3x selector constants rather
 * than SPI_GPIO_GPIO defined above -- confirm they are equivalent. */
static void spi_set_gpio_gpio(void)
{
	/* Select GPIO mode on each pad */
	zx29_gpio_function_sel(GPIO_AP_SPI0_CS, GPIO30_GPIO30);
	zx29_gpio_function_sel(GPIO_AP_SPI0_CLK,GPIO31_GPIO31);
	zx29_gpio_function_sel(GPIO_AP_SPI0_RXD,GPIO32_GPIO32);
	zx29_gpio_function_sel(GPIO_AP_SPI0_TXD,GPIO33_GPIO33);
}
/* Drive a GPIO pad high or low. */
static void spi_set_gpio_val(int gpio_num, int val)
{
	zx29_gpio_output_data(gpio_num, val);
}
1792
/* Read the current input level of a GPIO pad. */
static int spi_get_gpio_val(int gpio_num)
{
	//zx29_gpio_set_direction(gpio,GPIO_IN);

	return zx29_gpio_input_data(gpio_num);
}
1799
/* Busy-wait for the given number of microseconds. */
static void spi_time_delay(int delay/*us*/)
{
	udelay(delay);
}
1804
/*
 * spi_gpio_mode_start - switch the SPI0 pads into GPIO bit-bang mode.
 * CS/CLK/TXD become outputs and RXD an input; CS is parked high
 * (it is active low) and the clock low (its idle level).
 */
void spi_gpio_mode_start(void)
{
	/* set clk tx rx cs to gpio */
	spi_set_gpio_gpio();


	zx29_gpio_set_direction(GPIO_AP_SPI0_CS, GPIO_OUT);
	zx29_gpio_set_direction(GPIO_AP_SPI0_CLK, GPIO_OUT);
	zx29_gpio_set_direction(GPIO_AP_SPI0_TXD, GPIO_OUT);
	zx29_gpio_set_direction(GPIO_AP_SPI0_RXD, GPIO_IN);

	spi_set_gpio_val(GPIO_AP_SPI0_CS, SPI_GPIO_HIGH);/* CS is active low */
	spi_set_gpio_val(GPIO_AP_SPI0_CLK, SPI_GPIO_LOW);/* clk is low when idle */
#if 0
	spi_set_gpio_val(GPIO_AP_SPI0_CLK, SPI_GPIO_LOW);
	spi_set_gpio_val(GPIO_AP_SPI0_CLK, SPI_GPIO_LOW);
#endif
}
EXPORT_SYMBOL(spi_gpio_mode_start);
/* spi_gpio_mode_stop - return the SPI0 pads to controller (function) mode. */
void spi_gpio_mode_stop(void)
{
	/* set clk tx rx cs to function */
	spi_set_gpio_function();
}
EXPORT_SYMBOL(spi_gpio_mode_stop);
1830/*******************************************************************************
1831 * Function:
1832 * Description:
1833 * Parameters:
1834 * Input:
1835 *
1836 * Output:
1837 *
1838 * Returns:
1839 *
1840 *
1841 * Others:
1842 ********************************************************************************/
1843void spi_gpio_write_single8(unsigned char data)
1844{
1845 int i;
1846
1847 //printk("howard spi_gpio_write_single8 %x\n", data);
1848
1849 spi_set_gpio_val(GPIO_AP_SPI0_CLK, SPI_GPIO_HIGH);
1850 spi_set_gpio_val(GPIO_AP_SPI0_CS, SPI_GPIO_LOW);/* CSµÍÓÐЧ */
1851
1852 for( i=7; i>=0; i-- )
1853 {
1854 spi_set_gpio_val(GPIO_AP_SPI0_CLK, SPI_GPIO_LOW);
1855 if ((data >> i) & 0x1)
1856 {
1857 spi_set_gpio_val(GPIO_AP_SPI0_TXD, SPI_GPIO_HIGH);
1858 }
1859 else
1860 {
1861 spi_set_gpio_val(GPIO_AP_SPI0_TXD, SPI_GPIO_LOW);
1862 }
1863 spi_time_delay(1);
1864 spi_set_gpio_val(GPIO_AP_SPI0_CLK, SPI_GPIO_HIGH);
1865 spi_time_delay(1);
1866 }
1867 spi_set_gpio_val(GPIO_AP_SPI0_CS, SPI_GPIO_HIGH);
1868 spi_set_gpio_val(GPIO_AP_SPI0_TXD, SPI_GPIO_LOW);
1869
1870}
1871EXPORT_SYMBOL(spi_gpio_write_single8);
1872/*******************************************************************************
1873 * Function:
1874 * Description:
1875 * Parameters:
1876 * Input:
1877 *
1878 * Output:
1879 *
1880 * Returns:
1881 *
1882 *
1883 * Others:
1884 ********************************************************************************/
1885unsigned char spi_gpio_read_single8(void)
1886{
1887 int i;
1888 unsigned char readData = 0;
1889
1890 spi_set_gpio_val(GPIO_AP_SPI0_CLK, SPI_GPIO_HIGH);
1891 spi_set_gpio_val(GPIO_AP_SPI0_CS, SPI_GPIO_LOW);/* CSµÍÓÐЧ */
1892
1893 for( i=7; i>=0; i-- )
1894 {
1895 spi_set_gpio_val(GPIO_AP_SPI0_CLK, SPI_GPIO_LOW);
1896 spi_time_delay(1);
1897 spi_set_gpio_val(GPIO_AP_SPI0_CLK, SPI_GPIO_HIGH);
1898 if( spi_get_gpio_val(GPIO_AP_SPI0_RXD) )/* lcd ¸´ÓÃtx rx */
1899 {
1900 readData |= (1 << i);
1901 }
1902 spi_time_delay(1);
1903 }
1904 spi_set_gpio_val(GPIO_AP_SPI0_CS, SPI_GPIO_HIGH);
1905
1906 //printk("howard spi_gpio_read_single8 %x\n", readData);
1907 return readData;
1908}
1909EXPORT_SYMBOL(spi_gpio_read_single8);
1910
1911/*
1912* spi ʹÓÃGPIOģʽ¶ÁÈ¡LCD µÄID End
1913*/
1914
1915/**
1916 * pl022_setup - setup function registered to SPI master framework
1917 * @spi: spi device which is requesting setup
1918 *
1919 * This function is registered to the SPI framework for this SPI master
1920 * controller. If it is the first time when setup is called by this device,
1921 * this function will initialize the runtime state for this chip and save
1922 * the same in the device structure. Else it will update the runtime info
1923 * with the updated chip info. Nothing is really being written to the
1924 * controller hardware here, that is not done until the actual transfer
1925 * commence.
1926 */
1927static int zx297520v2_setup(struct spi_device *spi)
1928{
1929 struct spi_config_chip const *chip_info;
1930 struct chip_data *chip;
1931 u8 clk_div = 0;
1932 int status = 0;
1933 struct zx297520v2_spi *zx297520v2spi = spi_master_get_devdata(spi->master);
1934 unsigned int bits = spi->bits_per_word;
1935 u32 tmp;
1936
1937
1938 if (!spi->max_speed_hz)
1939 return -EINVAL;
1940
1941 /* Get controller_state if one is supplied */
1942 chip = spi_get_ctldata(spi);
1943
1944 if (chip == NULL) {
1945 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
1946 if (!chip) {
1947 dev_err(&spi->dev,
1948 "cannot allocate controller state\n");
1949 return -ENOMEM;
1950 }
1951 dev_dbg(&spi->dev,
1952 "allocated memory for controller's runtime state\n");
1953 }
1954
1955 /* Get controller data if one is supplied */
1956 chip_info = spi->controller_data;
1957
1958 if (chip_info == NULL) {
1959 chip_info = &spi_default_chip_info;
1960 /* spi_board_info.controller_data not is supplied */
1961 dev_dbg(&spi->dev,
1962 "using default controller_data settings\n");
1963 } else
1964 dev_dbg(&spi->dev,
1965 "using user supplied controller_data settings\n");
1966
1967 /*
1968 * We can override with custom divisors, else we use the board
1969 * frequency setting
1970 */
1971
1972 status = calculate_effective_freq(zx297520v2spi,
1973 spi->max_speed_hz,
1974 &clk_div);
1975 if (status < 0)
1976 goto err_config_params;
1977
1978 chip ->clk_div = clk_div;
1979
1980 dev_dbg(&spi->dev, "clk dividor is %d\n", clk_div);
1981
1982 /* enable ssp clock source */
1983 clk_enable(zx297520v2spi->spi_clk);
1984
1985 /* set spi clock source at 104MHz/1 */
1986 // zx297520v2spi->spi_clk->ops->set_division(zx297520v2spi->spi_clk,chip ->clk_div-1);
1987 //writel(chip ->clk_div-1, M0_SSP_CLKDIV_REG_VA);
1988 clk_set_rate(zx297520v2spi->spi_clk, spi->max_speed_hz*2); /* f(ssp_clk) = 2*f(ssp_sclk_out) */
1989
1990 status = verify_controller_parameters(zx297520v2spi, chip_info);
1991 if (status) {
1992 dev_err(&spi->dev, "controller data is incorrect");
1993 goto err_config_params;
1994 }
1995
1996 zx297520v2spi->rx_lev_trig = chip_info->rx_lev_trig;
1997 zx297520v2spi->tx_lev_trig = chip_info->tx_lev_trig;
1998
1999 /* Now set controller state based on controller data */
2000 chip->xfer_type = chip_info->com_mode;
2001 /*
2002 if (!chip_info->cs_control) {
2003 chip->cs_control = null_cs_control;
2004 dev_warn(&spi->dev,
2005 "chip select function is NULL for this chip\n");
2006 } else
2007 chip->cs_control = chip_info->cs_control;*/
2008
2009 /* Check bits per word with vendor specific range */
2010 if ((bits <= 3) || (bits > zx297520v2spi->vendor->max_bpw)) {
2011 status = -ENOTSUPP;
2012 dev_err(&spi->dev, "illegal data size for this controller!\n");
2013 dev_err(&spi->dev, "This controller can only handle 4 <= n <= %d bit words\n",
2014 zx297520v2spi->vendor->max_bpw);
2015 goto err_config_params;
2016 } else if (bits <= 8) {
2017 dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n");
2018 chip->n_bytes = 1;
2019 chip->read = READING_U8;
2020 chip->write = WRITING_U8;
2021 } else if (bits <= 16) {
2022 dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n");
2023 chip->n_bytes = 2;
2024 chip->read = READING_U16;
2025 chip->write = WRITING_U16;
2026 } else {
2027 dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
2028 chip->n_bytes = 4;
2029 chip->read = READING_U32;
2030 chip->write = WRITING_U32;
2031 }
2032
2033 /* Now Initialize all register settings required for this chip */
2034 chip->com_ctrl = 0;
2035 chip->fmt_ctrl = 0;
2036 chip->fifo_ctrl = 0;
2037
2038 if ((chip_info->com_mode == DMA_TRANSFER)
2039 && ((zx297520v2spi->master_info)->enable_dma)) {
2040 chip->enable_dma = true;
2041 dev_dbg(&spi->dev, "DMA mode set in controller state\n");
2042 SPI_WRITE_BITS(chip->fifo_ctrl, SPI_DMA_ENABLED,
2043 SPI_FIFO_CTRL_MASK_RX_DMA_EN, 2);
2044 SPI_WRITE_BITS(chip->fifo_ctrl,
2045 SPI_DMA_ENABLED, SPI_FIFO_CTRL_MASK_TX_DMA_EN, 3);
2046 } else {
2047 chip->enable_dma = false;
2048 dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n");
2049 SPI_WRITE_BITS(chip->fifo_ctrl,
2050 SPI_DMA_DISABLED, SPI_FIFO_CTRL_MASK_RX_DMA_EN, 2);
2051 SPI_WRITE_BITS(chip->fifo_ctrl,
2052 SPI_DMA_DISABLED, SPI_FIFO_CTRL_MASK_TX_DMA_EN, 3);
2053 }
2054
2055
2056 SPI_WRITE_BITS(chip->fifo_ctrl,
2057 SPI_FIFO_THRES_8, SPI_FIFO_CTRL_MASK_RX_FIFO_THRES, 4);
2058 SPI_WRITE_BITS(chip->fifo_ctrl,
2059 SPI_FIFO_THRES_4, SPI_FIFO_CTRL_MASK_TX_FIFO_THRES, 8);
2060
2061 SPI_WRITE_BITS(chip->fmt_ctrl, bits - 1,SPI_FMT_CTRL_MASK_DSS, 4);
2062 SPI_WRITE_BITS(chip->fmt_ctrl, chip_info->iface, SPI_FMT_CTRL_MASK_FRF, 0);
2063
2064 /* Stuff that is common for all versions */
2065 if (spi->mode & SPI_CPOL)
2066 tmp = SPI_CLK_POL_IDLE_HIGH;
2067 else
2068 tmp = SPI_CLK_POL_IDLE_LOW;
2069 SPI_WRITE_BITS(chip->fmt_ctrl, tmp, SPI_FMT_CTRL_MASK_POL,2);
2070
2071 if (spi->mode & SPI_CPHA)
2072 tmp = SPI_CLK_SECOND_EDGE;
2073 else
2074 tmp = SPI_CLK_FIRST_EDGE;
2075
2076 SPI_WRITE_BITS(chip->fmt_ctrl, tmp, SPI_FMT_CTRL_MASK_PHA, 3);
2077
2078 /* Loopback is available on all versions except PL023 */
2079 if (zx297520v2spi->vendor->loopback) {
2080 if (spi->mode & SPI_LOOP)
2081 tmp = LOOPBACK_ENABLED;
2082 else
2083 tmp = LOOPBACK_DISABLED;
2084 SPI_WRITE_BITS(chip->com_ctrl, tmp, SPI_COM_CTRL_MASK_LBM, 0);
2085 }
2086 SPI_WRITE_BITS(chip->com_ctrl, SPI_ENABLED, SPI_COM_CTRL_MASK_SSPE, 1);
2087 SPI_WRITE_BITS(chip->com_ctrl, chip_info->hierarchy, SPI_COM_CTRL_MASK_MS, 2);
2088 SPI_WRITE_BITS(chip->com_ctrl, chip_info->slave_tx_disable, SPI_COM_CTRL_MASK_SOD, 3);
2089
2090 /* Save controller_state */
2091 spi_set_ctldata(spi, chip);
2092 return status;
2093 err_config_params:
2094 spi_set_ctldata(spi, NULL);
2095 kfree(chip);
2096 return status;
2097}
2098
2099/**
2100 * pl022_cleanup - cleanup function registered to SPI master framework
2101 * @spi: spi device which is requesting cleanup
2102 *
2103 * This function is registered to the SPI framework for this SPI master
2104 * controller. It will free the runtime state of chip.
2105 */
2106static void zx297520v2_cleanup(struct spi_device *spi)
2107{
2108 struct chip_data *chip = spi_get_ctldata(spi);
2109
2110 spi_set_ctldata(spi, NULL);
2111 kfree(chip);
2112}
2113
/**
 * zx297520v2_spi_probe - probe one zx297520v2 SSP controller instance
 * @pdev: platform device carrying the MEM/IRQ resources and the
 *        zx297520v2_spi_controller platform data
 *
 * Allocates and registers an SPI master, requests the pin set for the
 * instance (by pdev->id), acquires the work/APB clocks, installs the
 * interrupt handler and (optionally) the DMA channels, then hands the
 * device over to runtime PM. Returns 0 on success or a negative errno.
 */
static int __devinit zx297520v2_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct zx297520v2_spi_controller *platform_info = pdev->dev.platform_data;
	struct spi_master *master;
	struct zx297520v2_spi *zx297520v2spi = NULL;	/*Data for this driver */
	struct resource * regs = NULL;
	struct resource * gpio = NULL;
	struct resource * irq = NULL;
	int status = 0, i;
	u32 regval = 0;

	printk(KERN_INFO "spi:zx297520v2_spi_probe \n");
	/* Transfer-completion semaphore starts unavailable (count 0) */
	sema_init(&g_SpiTransferSemaphore,0);

	if (platform_info == NULL) {
		dev_err(&pdev->dev, "probe - no platform data supplied\n");
		status = -ENODEV;
		goto err_no_pdata;
	}

	/* Allocate master with space for data */
	master = spi_alloc_master(dev, sizeof(struct zx297520v2_spi));
	if (master == NULL) {
		dev_err(&pdev->dev, "probe - cannot alloc SPI master\n");
		status = -ENOMEM;
		goto err_no_master;
	}

	/* Wire up the driver-private state embedded in the master */
	zx297520v2spi = spi_master_get_devdata(master);
	zx297520v2spi->master = master;
	zx297520v2spi->master_info = platform_info;
	zx297520v2spi->pdev = pdev;
	zx297520v2spi->vendor = &vendor_arm;

	dev_set_drvdata(&pdev->dev, zx297520v2spi);
	/*
	 * Bus Number Which has been Assigned to this SSP controller
	 * on this board
	 */
	master->bus_num = platform_info->bus_id;
	master->num_chipselect = platform_info->num_chipselect;
	master->cleanup = zx297520v2_cleanup;
	master->setup = zx297520v2_setup;
	master->prepare_transfer_hardware = zx297520v2_prepare_transfer_hardware;
	master->transfer_one_message = zx297520v2_transfer_one_message;
	master->unprepare_transfer_hardware = zx297520v2_unprepare_transfer_hardware;
	master->rt = platform_info->rt;

	/*
	 * Supports mode 0-3, loopback, and active low CS. Transfers are
	 * always MS bit first on the original pl022.
	 * NOTE(review): SPI_LOOP is not in mode_bits even though setup
	 * handles it — confirm whether loopback should be advertised.
	 */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_NO_CS;

	dev_dbg(&pdev->dev, "BUSNO: %d\n", master->bus_num);

	/* registers */
	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if ( regs == NULL ){
		dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
		status = -ENOENT;
		goto err_no_registers;
	}
	if (master->bus_num == 0){
		zx297520v2spi->phybase = ZX297520V2_SSP0_PHYS;
	}
	else if (master->bus_num == 1){
		zx297520v2spi->phybase = ZX297520V2_SSP1_PHYS;
	}
	/*
	 * NOTE(review): the MEM resource start is stored directly as the
	 * virtual base with no ioremap() visible here, yet it is passed to
	 * iounmap() on the error/remove paths — presumably the board
	 * resource table already publishes a statically mapped VA. Confirm.
	 */
	zx297520v2spi->virtbase = regs->start;

	if (zx297520v2spi->virtbase == NULL) {
		status = -ENOMEM;
		goto err_no_ioremap;
	}
	dev_dbg( &pdev->dev," mapped registers from 0x%08x to 0x%p\n",
		regs->start, zx297520v2spi->virtbase);
	if(0 == pdev->id) {
		/* gpios txd rxd sclk cs */
		for(i = 0; i < ARRAY_SIZE(spi0_gpio_resources); i++){
			//gpio = platform_get_resource(pdev, IORESOURCE_IO, i);
			gpio = &spi0_gpio_resources[i];
			/* NOTE(review): &array[i] can never be NULL, so this
			 * check is vacuous; kept as-is. */
			if( gpio == NULL )
			{
				dev_err(&pdev->dev, "Cannot get IORESOURCE_IO\n");
				status = -ENOENT;
				goto err_gpios;
			}
			dev_dbg(&pdev->dev, "used gpio num %d as %s \n", gpio->start, gpio ->name);

			status = gpio_request(gpio->start,gpio->name);
			if( status < 0 )
				goto err_gpios;
			//zte_gpio_config(gpio->start, SET_FUNCTION);
			/* Switch the pad to its alternate (SSP) function */
			zx29_gpio_function_sel(gpio->start,1);
		}
	} else if(1 == pdev->id){
		/* gpios txd rxd sclk cs */
		for(i = 0; i < ARRAY_SIZE(spi1_gpio_resources); i++){
			//gpio = platform_get_resource(pdev, IORESOURCE_IO, i);
			gpio = &spi1_gpio_resources[i];
			if( gpio == NULL )
			{
				dev_err(&pdev->dev, "Cannot get IORESOURCE_IO\n");
				status = -ENOENT;
				goto err_gpios;
			}
			dev_dbg(&pdev->dev, "used gpio num %d as %s \n", gpio->start, gpio ->name);

			status = gpio_request(gpio->start,gpio->name);
			if( status < 0 )
				goto err_gpios;
			//zte_gpio_config(gpio->start, SET_FUNCTION);
			zx29_gpio_function_sel(gpio->start,1);
		}

	}

	/* work clock */
	zx297520v2spi->spi_clk = clk_get(&pdev->dev, "work_clk");//clk_get_sys("zx297520v2_ssp.0", const char * con_id);//
	if (IS_ERR(zx297520v2spi->spi_clk)) {
		status = PTR_ERR(zx297520v2spi->spi_clk);
		dev_err(&pdev->dev, "could not retrieve SPI work clock\n");
		goto err_no_clk;
	}
	/* enable spiclk at function zx297520v2_setup */

	zx297520v2spi->clkfreq = SPI_SPICLK_FREQ_104M;


	/* apb clock */
	zx297520v2spi->pclk = clk_get(&pdev->dev, "apb_clk");
	if (IS_ERR(zx297520v2spi->pclk)) {
		status = PTR_ERR(zx297520v2spi->pclk);
		/* NOTE(review): jumping to err_no_clk here skips the
		 * clk_put() of spi_clk acquired above — it leaks a clock
		 * reference on this path. */
		dev_err(&pdev->dev, "could not retrieve SPI work clock\n");
		goto err_no_clk;
	}
	clk_enable(zx297520v2spi->pclk);

	/* Initialize transfer pump */
	tasklet_init(&zx297520v2spi->pump_transfers, pump_transfers,
		     (unsigned long)zx297520v2spi);

	/* Disable SPI */
	regval = readl(SPI_COM_CTRL(zx297520v2spi->virtbase)) & (~SPI_COM_CTRL_MASK_SSPE);

	writel(regval, SPI_COM_CTRL(zx297520v2spi->virtbase));

	load_spi_default_config(zx297520v2spi);

	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if( irq == NULL ){
		dev_err(&pdev->dev, "Cannot get IORESOURCE_IRQ\n");
		status = -ENOENT;
		goto err_no_irq;
	}

	dev_dbg(&pdev->dev, "used interrupt num is %d\n", irq->start);

	status = request_irq(irq->start, zx297520v2_interrupt_handler, 0, "zx297520v2_spi",
			     zx297520v2spi);
	if (status < 0) {
		dev_err(&pdev->dev, "probe - cannot get IRQ (%d)\n", status);
		goto err_no_irq;
	}

	/* Get DMA channels; fall back to PIO if the DMA probe fails */
	if (platform_info->enable_dma) {
		status = zx297520v2_dma_probe(zx297520v2spi);
		if (status != 0)
			platform_info->enable_dma = 0;
	}

	status = spi_register_master(master);
	if (status != 0) {
		dev_err(&pdev->dev,
			"probe - problem registering spi master\n");
		goto err_spi_register;
	}
	dev_dbg(&pdev->dev," probe succeeded\n");
	printk(KERN_INFO "spi:zx297520v2_spi_probe OK\n");

	/* let runtime pm put suspend */
	if (platform_info->autosuspend_delay > 0) {
		dev_info(&pdev->dev,
			 "will use autosuspend for runtime pm, delay %dms\n",
			 platform_info->autosuspend_delay);
		pm_runtime_set_autosuspend_delay(dev,
						 platform_info->autosuspend_delay);
		pm_runtime_use_autosuspend(dev);
		pm_runtime_put_autosuspend(dev);
	} else {
		pm_runtime_put(dev);
	}
	return 0;

 err_spi_register:
	if (platform_info->enable_dma)
		zx297520v2_dma_remove(zx297520v2spi);

	free_irq(irq->start, zx297520v2spi);
 err_no_irq:
	/* NOTE(review): spi_clk is enabled in setup(), not probe; this
	 * disable may unbalance the enable count if setup never ran. */
	clk_disable(zx297520v2spi->spi_clk);
// err_no_clk_en:
	//clk_unprepare(pl022->clk);
	//err_clk_prep:
	clk_put(zx297520v2spi->spi_clk);
 err_no_clk:
	iounmap(zx297520v2spi->virtbase);
 err_gpios:
	/* NOTE(review): GPIOs already gpio_request()ed are not freed here —
	 * they leak when a later resource acquisition fails. */
	/* add */
 err_no_ioremap:
 err_no_registers:
	spi_master_put(master);
 err_no_master:
 err_no_pdata:
	return status;
}
2333
2334static int __exit zx297520v2_spi_remove(struct platform_device *pdev)
2335{
2336 struct zx297520v2_spi *zx297520v2spi = dev_get_drvdata(&pdev->dev);
2337 struct resource * gpio = NULL;
2338 struct resource * irq = NULL;
2339 int i;
2340
2341 if (!zx297520v2spi)
2342 return 0;
2343
2344 /*
2345 * undo pm_runtime_put() in probe. I assume that we're not
2346 * accessing the primecell here.
2347 */
2348 pm_runtime_get_noresume(&pdev->dev);
2349
2350 load_spi_default_config(zx297520v2spi);
2351 if (zx297520v2spi->master_info->enable_dma)
2352 zx297520v2_dma_remove(zx297520v2spi);
2353
2354 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2355 if( irq != NULL )
2356 {
2357 free_irq(irq->start, zx297520v2spi);
2358 }
2359
2360 clk_disable(zx297520v2spi->spi_clk);
2361 clk_put(zx297520v2spi->spi_clk);
2362
2363 clk_disable(zx297520v2spi->pclk);
2364 clk_put(zx297520v2spi->pclk);
2365
2366 if(0 == pdev->id) {
2367 /* gpios txd rxd sclk sfr */
2368 for(i = 0; i < ARRAY_SIZE(spi0_gpio_resources); i++){
2369 //gpio = platform_get_resource(pdev, IORESOURCE_IO, i);
2370 gpio = &spi0_gpio_resources[i];
2371
2372 if( gpio != NULL )
2373 {
2374 gpio_free(gpio->start);
2375 }
2376 }
2377 } else if(1 == pdev->id){
2378 /* gpios txd rxd sclk sfr */
2379 for(i = 0; i < ARRAY_SIZE(spi1_gpio_resources); i++){
2380 //gpio = platform_get_resource(pdev, IORESOURCE_IO, i);
2381 gpio = &spi1_gpio_resources[i];
2382
2383 if( gpio != NULL )
2384 {
2385 gpio_free(gpio->start);
2386 }
2387 }
2388 }
2389
2390 iounmap(zx297520v2spi->virtbase);
2391 //amba_release_regions(adev);
2392 tasklet_disable(&zx297520v2spi->pump_transfers);
2393 spi_unregister_master(zx297520v2spi->master);
2394 spi_master_put(zx297520v2spi->master);
2395 //amba_set_drvdata(adev, NULL);
2396 dev_set_drvdata(&pdev->dev, NULL);
2397 return 0;
2398}
2399
2400static struct platform_driver zx297520v2_spi_driver = {
2401 .driver = {
2402 //.name = "zx297520_ssp",
2403 .name = "zx29_ssp",
2404 .owner = THIS_MODULE,
2405 },
2406 .probe = zx297520v2_spi_probe,
2407 .remove = __exit_p(zx297520v2_spi_remove),
2408};
2409
2410static int __init zx297520v2_spi_init(void)
2411{
2412 return platform_driver_register(&zx297520v2_spi_driver);
2413}
2414
2415static void __exit zx297520v2_spi_exit(void)
2416{
2417 platform_driver_unregister(&zx297520v2_spi_driver);
2418}
2419
2420module_init(zx297520v2_spi_init);
2421module_exit(zx297520v2_spi_exit);
2422
2423MODULE_DESCRIPTION("zx297520v2 spi controller driver");
2424MODULE_AUTHOR("ZTER");
2425MODULE_LICENSE("ZTE");
2426