[T106][ZXW-22]7520V3SCV2.01.01.02P42U09_VEC_V0.8_AP_VEC origin source commit
Change-Id: Ic6e05d89ecd62fc34f82b23dcf306c93764aec4b
diff --git a/ap/os/linux/linux-3.4.x/drivers/spi/spi-zx297510.c b/ap/os/linux/linux-3.4.x/drivers/spi/spi-zx297510.c
new file mode 100755
index 0000000..d3ed5fa
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/spi/spi-zx297510.c
@@ -0,0 +1,2268 @@
+/*
+ * zx297510 spi controller driver
+ * Author: ZTER
+ * from original zx297510 driver
+ *
+ * Copyright (C) 2005, 2006 ZTE Corporation
+ * Author: ZTER
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/spi/spi.h>
+
+#include <mach/clock.h>
+#include <mach/spi.h>
+#include <mach/gpio.h>
+/*
+ * This macro is used to define some register default values.
+ * reg is masked with mask, the OR:ed with an (again masked)
+ * val shifted sb steps to the left.
+ */
+#define SPI_WRITE_BITS(reg, val, mask, sb) \
+ ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask))))
+
+/*
+ * This macro is also used to define some default values.
+ * It will just shift val by sb steps to the left and mask
+ * the result with mask.
+ */
+#define GEN_MASK_BITS(val, mask, sb) \
+ (((val)<<(sb)) & (mask))
+
+#define DRIVE_TX 0
+#define DO_NOT_DRIVE_TX 1
+
+#define DO_NOT_QUEUE_DMA 0
+#define QUEUE_DMA 1
+
+#define RX_TRANSFER 1
+#define TX_TRANSFER 2
+
+/* registers */
+#define SPI_VER_REG(r) (r + 0x00)
+#define SPI_COM_CTRL(r) (r + 0x04)
+#define SPI_FMT_CTRL(r) (r + 0x08)
+#define SPI_DR(r) (r + 0x0C)
+#define SPI_FIFO_CTRL(r) (r + 0x10)
+#define SPI_FIFO_SR(r) (r + 0x14)
+#define SPI_INTR_EN(r) (r + 0x18)
+#define SPI_INTR_SR_SCLR(r) (r + 0x1C)
+
+/*
+ * SPI Version Register - SPI_VER_REG
+ */
+#define SPI_VER_REG_MASK_Y (0xFFUL << 16)
+#define SPI_VER_REG_MASK_X (0xFFUL << 24)
+
+/*
+ * SPI Common Control Register - SPI_COM_CTRL
+ */
+#define SPI_COM_CTRL_MASK_LBM (0x1UL << 0)
+#define SPI_COM_CTRL_MASK_SSPE (0x1UL << 1)
+#define SPI_COM_CTRL_MASK_MS (0x1UL << 2)
+#define SPI_COM_CTRL_MASK_SOD (0x1UL << 3)
+
+/*
+ * SPI Format Control Register - SPI_FMT_CTRL
+ */
+#define SPI_FMT_CTRL_MASK_FRF (0x3UL << 0)
+#define SPI_FMT_CTRL_MASK_POL (0x1UL << 2)
+#define SPI_FMT_CTRL_MASK_PHA (0x1UL << 3)
+#define SPI_FMT_CTRL_MASK_DSS (0x1FUL << 4)
+
+/*
+ * SPI FIFO Control Register - SPI_FIFO_CTRL
+ */
+#define SPI_FIFO_CTRL_MASK_RX_DMA_EN (0x1UL << 2)
+#define SPI_FIFO_CTRL_MASK_TX_DMA_EN (0x1UL << 3)
+#define SPI_FIFO_CTRL_MASK_RX_FIFO_THRES (0xFUL << 4)
+#define SPI_FIFO_CTRL_MASK_TX_FIFO_THRES (0xFUL << 8)
+/*
+ * SPI FIFO Status Register - SPI_FIFO_SR
+ */
+
+#define SPI_FIFO_SR_MASK_RX_BEYOND_THRES (0x1UL << 0)
+#define SPI_FIFO_SR_MASK_TX_BEYOND_THRES (0x1UL << 1)
+#define SPI_FIFO_SR_MASK_RX_FIFO_FULL (0x1UL << 2)
+#define SPI_FIFO_SR_MASK_TX_FIFO_EMPTY (0x1UL << 3)
+#define SPI_FIFO_SR_MASK_BUSY (0x1UL << 4)
+#define SPI_FIFO_SR_MASK_RX_FIFO_CNTR (0x1FUL << 5)
+#define SPI_FIFO_SR_MASK_TX_FIFO_CNTR (0x1FUL << 10)
+
+/*
+ * SPI Interrupt Enable Register - SPI_INTR_EN
+ */
+#define SPI_INTR_EN_MASK_RX_OVERRUN_IE (0x1UL << 0)
+#define SPI_INTR_EN_MASK_TX_UNDERRUN_IE (0x1UL << 1)
+#define SPI_INTR_EN_MASK_RX_FULL_IE (0x1UL << 2)
+#define SPI_INTR_EN_MASK_TX_EMPTY_IE (0x1UL << 3)
+#define SPI_INTR_EN_MASK_RX_THRES_IE (0x1UL << 4)
+#define SPI_INTR_EN_MASK_TX_THRES_IE (0x1UL << 5)
+
+/*
+ * SPI Interrupt Status Register OR Interrupt Clear Register - SPI_INTR_SR_SCLR
+ */
+
+#define SPI_INTR_SR_SCLR_MASK_RX_OVERRUN_INTR (0x1UL << 0)
+#define SPI_INTR_SR_SCLR_MASK_TX_UNDERRUN_INTR (0x1UL << 1)
+#define SPI_INTR_SR_SCLR_MASK_RX_FULL_INTR (0x1UL << 2)
+#define SPI_INTR_SR_SCLR_MASK_TX_EMPTY_INTR (0x1UL << 3)
+#define SPI_INTR_SR_SCLR_MASK_RX_THRES_INTR (0x1UL << 4)
+#define SPI_INTR_SR_SCLR_MASK_TX_THRES_INTR (0x1UL << 5)
+
+/* SPI State */
+#define SPI_RUNNING 0
+#define SPI_SHUTDOWN 1
+
+/* SPI WCLK Freqency */
+#define SPI_SPICLK_FREQ_104M 104000000
+
+#define CLEAR_ALL_INTERRUPTS 0x3FUL
+#define ENABLE_ALL_INTERRUPTS 0x3FUL
+#define DISABLE_ALL_INTERRUPTS 0x0UL
+/*
+ * Message State
+ * we use the spi_message.state (void *) pointer to
+ * hold a single state value, that's why all this
+ * (void *) casting is done here.
+ */
+#define STATE_START ((void *) 0)
+#define STATE_RUNNING ((void *) 1)
+#define STATE_DONE ((void *) 2)
+#define STATE_ERROR ((void *) -1)
+
+/*
+ * SPI State - Whether Enabled or Disabled
+ */
+#define SPI_DISABLED (0)
+#define SPI_ENABLED (1)
+
+/*
+ * SPI DMA State - Whether DMA Enabled or Disabled
+ */
+#define SPI_DMA_DISABLED (0)
+#define SPI_DMA_ENABLED (1)
+
+/*
+ * SPI SOD State - Whether SOD Enabled or Disabled
+ */
+#define SPI_SOD_DISABLED (1)
+#define SPI_SOD_ENABLED (0)
+
+
+enum spi_fifo_threshold_level {
+ SPI_FIFO_THRES_1,
+ SPI_FIFO_THRES_2,
+ SPI_FIFO_THRES_3,
+ SPI_FIFO_THRES_4,
+ SPI_FIFO_THRES_5,
+ SPI_FIFO_THRES_6,
+ SPI_FIFO_THRES_7,
+ SPI_FIFO_THRES_8,
+ SPI_FIFO_THRES_9,
+ SPI_FIFO_THRES_10,
+ SPI_FIFO_THRES_11,
+ SPI_FIFO_THRES_12,
+ SPI_FIFO_THRES_13,
+ SPI_FIFO_THRES_14,
+ SPI_FIFO_THRES_15,
+ SPI_FIFO_THRES_16
+
+};
+
+
+/*
+ * SPI Clock Parameter ranges
+ */
+#define DIV_MIN 0x00
+#define DIV_MAX 0x0F
+
+#define SPI_POLLING_TIMEOUT 1000
+
+/*
+ * The type of reading going on on this chip
+ */
+enum spi_reading {
+ READING_NULL,
+ READING_U8,
+ READING_U16,
+ READING_U32
+};
+
+/**
+ * The type of writing going on on this chip
+ */
+enum spi_writing {
+ WRITING_NULL,
+ WRITING_U8,
+ WRITING_U16,
+ WRITING_U32
+};
+
+/**
+ * struct vendor_data - vendor-specific configuration parameters
+ * @fifodepth: depth of the TX/RX FIFOs (both directions)
+ * @max_bpw: maximum number of bits per word supported
+ * @loopback: whether the controller variant supports loopback mode
+ *
+ * NOTE(review): the previous comment documented PL022-only fields
+ * (unidir, extended_cr, pl023) that are not present in this struct.
+ */
+struct vendor_data {
+	int fifodepth;
+	int max_bpw;
+	bool loopback;
+};
+/**
+ * struct zx297510_spi - runtime state of one SPI controller instance
+ * @pdev: platform device model hookup
+ * @vendor: vendor data for the IP block
+ * @phybase: physical base address of the controller registers
+ * @virtbase: ioremapped virtual base of the controller registers
+ * @pclk: SPI controller working clock
+ * @spi_clk: clock feeding the SPI CLK line
+ * @clkfreq: cached clock frequency in Hz
+ * @master: SPI framework hookup
+ * @master_info: controller-specific data from machine setup
+ * @pump_transfers: tasklet used in interrupt transfer mode
+ * @cur_msg: pointer to the spi_message currently being processed
+ * @cur_transfer: pointer to the current spi_transfer
+ * @cur_chip: current client's chip state (from controller_state)
+ * @next_msg_cs_active: the next queued message uses the same chip
+ * select as the previous one, so chip select was left active
+ * @tx: current position in the TX buffer to be read
+ * @tx_end: end position of the TX buffer
+ * @rx: current position in the RX buffer to be written
+ * @rx_end: end position of the RX buffer
+ * @read: the width of read currently going on
+ * @write: the width of write currently going on
+ * @exp_fifo_level: expected FIFO fill level
+ * @rx_lev_trig: RX FIFO watermark used to pick the DMA burst size
+ * @tx_lev_trig: TX FIFO watermark used to pick the DMA burst size
+ * @dma_rx_channel: optional dmaengine channel for RX
+ * @dma_tx_channel: optional dmaengine channel for TX
+ * @sgt_rx: scatter table for the RX transfer
+ * @sgt_tx: scatter table for the TX transfer
+ * @dummypage: dummy page used to drive the bus during RX-only DMA
+ * @dma_running: a DMA transfer is currently in flight
+ */
+struct zx297510_spi {
+	struct platform_device		*pdev;
+	struct vendor_data		*vendor;
+	resource_size_t			phybase;
+	void __iomem			*virtbase;
+	struct clk			*pclk;/* spi controller work clock */
+	struct clk			*spi_clk;/* spi clk line clock */
+	u32 				clkfreq;
+	struct spi_master		*master;
+	struct zx297510_spi_controller	*master_info;
+	/* Message per-transfer pump */
+	struct tasklet_struct		pump_transfers;
+	struct spi_message		*cur_msg;
+	struct spi_transfer		*cur_transfer;
+	struct chip_data		*cur_chip;
+	bool				next_msg_cs_active;
+	void				*tx;
+	void				*tx_end;
+	void				*rx;
+	void				*rx_end;
+	enum spi_reading		read;
+	enum spi_writing		write;
+	u32				exp_fifo_level;
+	enum spi_rx_level_trig		rx_lev_trig;
+	enum spi_tx_level_trig		tx_lev_trig;
+	/* DMA settings */
+#ifdef CONFIG_DMA_ENGINE
+	struct dma_chan			*dma_rx_channel;
+	struct dma_chan			*dma_tx_channel;
+	struct sg_table			sgt_rx;
+	struct sg_table			sgt_tx;
+	char				*dummypage;
+	bool				dma_running;
+#endif
+};
+
+/**
+ * struct chip_data - runtime register state kept per client chip
+ * @ver_reg: cached value for SPI_VER_REG
+ * @com_ctrl: cached value for the common control register SPI_COM_CTRL
+ * @fmt_ctrl: cached value for the format control register SPI_FMT_CTRL
+ * @fifo_ctrl: cached value for the FIFO control register SPI_FIFO_CTRL
+ * @n_bytes: bytes (power of 2) needed per word for this client's width
+ * @clk_div: SPI clock divider for this client
+ * @enable_dma: whether to use DMA for this client's transfers
+ * @read: read width to use when transferring for this chip
+ * @write: write width to use when transferring for this chip
+ * @xfer_type: polling/interrupt/DMA transfer mode
+ *
+ * This is restored into the controller registers (see restore_state())
+ * before each message for the chip is served.
+ * NOTE(review): previous comment documented PL022 registers
+ * (cr0/cr1/dmacr/cpsr/cs) that this struct does not contain.
+ */
+struct chip_data {
+	u32 ver_reg;
+	u32 com_ctrl;
+	u32 fmt_ctrl;
+	u32 fifo_ctrl;
+//	u32 intr_en;
+	u8 n_bytes;
+	u8 clk_div;/* spi clk divider */
+	bool enable_dma;
+	enum spi_reading read;
+	enum spi_writing write;
+	//void (*cs_control) (u32 command);
+	int xfer_type;
+};
+/**
+ * null_cs_control - dummy chip select function
+ * @command: select/deselect command; only logged, never acted upon
+ *
+ * Installed as a placeholder when the client chip provides no chip
+ * select callback of its own. Performs no hardware access.
+ */
+static void null_cs_control(u32 command)
+{
+	pr_debug("zx297510 spi: dummy chip select control, CS=0x%x\n", command);
+}
+
+/**
+ * giveback - current spi_message is over, schedule next message and call
+ * callback of this message. Assumes that caller already
+ * set message->status; dma and pio irqs are blocked
+ * @zx297510spi: driver private data structure
+ */
+static void giveback(struct zx297510_spi *zx297510spi)
+{
+	struct spi_transfer *last_transfer;
+	zx297510spi->next_msg_cs_active = false;
+
+	/* Fetch the last transfer of the finished message (list tail). */
+	last_transfer = list_entry(zx297510spi->cur_msg->transfers.prev,
+					struct spi_transfer,
+					transfer_list);
+
+	/* Delay if requested before any change in chip select */
+	if (last_transfer->delay_usecs)
+		/*
+		 * FIXME: This runs in interrupt context.
+		 * Is this really smart?
+		 */
+		udelay(last_transfer->delay_usecs);
+
+	if (!last_transfer->cs_change) {
+		struct spi_message *next_msg;
+
+		/*
+		 * cs_change was not set. We can keep the chip select
+		 * enabled if there is message in the queue and it is
+		 * for the same spi device.
+		 *
+		 * We cannot postpone this until pump_messages, because
+		 * after calling msg->complete (below) the driver that
+		 * sent the current message could be unloaded, which
+		 * could invalidate the cs_control() callback...
+		 */
+		/* get a pointer to the next message, if any */
+		next_msg = spi_get_next_queued_message(zx297510spi->master);
+
+		/*
+		 * see if the next and current messages point
+		 * to the same spi device.
+		 */
+		if (next_msg && next_msg->spi != zx297510spi->cur_msg->spi)
+			next_msg = NULL;
+		/* NOTE(review): chip-select deassert is commented out below,
+		 * so CS is currently never deselected here — confirm this is
+		 * intentional for this board. */
+		//if (!next_msg || zx297510spi->cur_msg->state == STATE_ERROR)
+		//	zx297510spi->cur_chip->cs_control(SSP_CHIP_DESELECT);
+		//else
+		//	zx297510spi->next_msg_cs_active = true;
+
+	}
+
+	/* Drop all current-message references before finalizing. */
+	zx297510spi->cur_msg = NULL;
+	zx297510spi->cur_transfer = NULL;
+	zx297510spi->cur_chip = NULL;
+	spi_finalize_current_message(zx297510spi->master);
+}
+
+/**
+ * flush - flush the FIFO to reach a clean state
+ * @zx297510spi: driver private data structure
+ *
+ * Drains the RX FIFO and spins until the controller clears BUSY or the
+ * loop budget is exhausted. Also resets the expected FIFO level.
+ *
+ * NOTE(review): on timeout the post-decrement leaves @limit wrapped to
+ * ULONG_MAX rather than 0 before it is returned (and truncated to int)
+ * — verify how callers interpret the return value.
+ */
+static int flush(struct zx297510_spi *zx297510spi)
+{
+	unsigned long limit = loops_per_jiffy << 1;
+
+	dev_dbg(&zx297510spi->pdev->dev, "flush\n");
+	do {
+		/* Discard everything currently in the RX FIFO. */
+		while (readl(SPI_FIFO_SR(zx297510spi->virtbase)) & SPI_FIFO_SR_MASK_RX_FIFO_CNTR)
+			readl(SPI_DR(zx297510spi->virtbase));
+	} while ((readl(SPI_FIFO_SR(zx297510spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY) && limit--);
+
+	zx297510spi->exp_fifo_level = 0;
+
+	return limit;
+}
+
+/**
+ * restore_state - load the current chip's cached register configuration
+ * @zx297510spi: driver private data structure
+ *
+ * Writes the per-chip shadow values (see struct chip_data) into the
+ * controller registers, then disables and clears all interrupts so the
+ * transfer starts from a known interrupt state.
+ */
+static void restore_state(struct zx297510_spi *zx297510spi)
+{
+	struct chip_data *chip = zx297510spi->cur_chip;
+
+	writel(chip->com_ctrl, SPI_COM_CTRL(zx297510spi->virtbase));
+	writel(chip->fmt_ctrl, SPI_FMT_CTRL(zx297510spi->virtbase));
+	writel(chip->fifo_ctrl, SPI_FIFO_CTRL(zx297510spi->virtbase));
+//	writel(chip->intr_en, SPI_INTR_EN(zx297510spi->virtbase));
+	/* disable all interrupts */
+	writel(DISABLE_ALL_INTERRUPTS, SPI_INTR_EN(zx297510spi->virtbase));
+	writel(CLEAR_ALL_INTERRUPTS, SPI_INTR_SR_SCLR(zx297510spi->virtbase));
+}
+
+/*
+ * Default spi Register Values
+ */
+#define DEFAULT_SPI_COM_CTRL ( \
+ GEN_MASK_BITS(LOOPBACK_DISABLED, SPI_COM_CTRL_MASK_LBM, 0) | \
+ GEN_MASK_BITS(SPI_DISABLED, SPI_COM_CTRL_MASK_SSPE, 1) | \
+ GEN_MASK_BITS(SPI_MASTER, SPI_COM_CTRL_MASK_MS, 2) | \
+ GEN_MASK_BITS(SPI_SOD_DISABLED, SPI_COM_CTRL_MASK_SOD, 3) \
+)
+
+#define DEFAULT_SPI_FMT_CTRL ( \
+ GEN_MASK_BITS(SPI_INTERFACE_MOTOROLA_SPI, SPI_FMT_CTRL_MASK_FRF, 0) | \
+ GEN_MASK_BITS(SPI_CLK_POL_IDLE_LOW, SPI_FMT_CTRL_MASK_POL, 2) | \
+ GEN_MASK_BITS(SPI_CLK_FIRST_EDGE, SPI_FMT_CTRL_MASK_PHA, 3) | \
+ GEN_MASK_BITS(SPI_DATA_BITS_8, SPI_FMT_CTRL_MASK_DSS, 4) \
+)
+
+#define DEFAULT_SPI_FIFO_CTRL ( \
+ GEN_MASK_BITS(SPI_DMA_DISABLED, SPI_FIFO_CTRL_MASK_RX_DMA_EN, 2) | \
+ GEN_MASK_BITS(SPI_DMA_DISABLED, SPI_FIFO_CTRL_MASK_TX_DMA_EN, 3) | \
+ GEN_MASK_BITS(SPI_FIFO_THRES_8, SPI_FIFO_CTRL_MASK_RX_FIFO_THRES, 4) | \
+ GEN_MASK_BITS(SPI_FIFO_THRES_8, SPI_FIFO_CTRL_MASK_TX_FIFO_THRES, 8) \
+)
+
+
+/**
+ * load_spi_default_config - load the default configuration for the SPI
+ * @zx297510spi: driver private data structure
+ *
+ * Programs the DEFAULT_SPI_* register values (master mode, Motorola
+ * format, 8-bit words, DMA off, FIFO thresholds at 8), then clears and
+ * disables all interrupts.
+ */
+static void load_spi_default_config(struct zx297510_spi *zx297510spi)
+{
+	writel(DEFAULT_SPI_COM_CTRL, SPI_COM_CTRL(zx297510spi->virtbase));
+	writel(DEFAULT_SPI_FMT_CTRL, SPI_FMT_CTRL(zx297510spi->virtbase));
+	writel(DEFAULT_SPI_FIFO_CTRL, SPI_FIFO_CTRL(zx297510spi->virtbase));
+	writel(CLEAR_ALL_INTERRUPTS, SPI_INTR_SR_SCLR(zx297510spi->virtbase));
+	writel(DISABLE_ALL_INTERRUPTS, SPI_INTR_EN(zx297510spi->virtbase));
+}
+
+/**
+ * write - push outgoing data into the TX FIFO
+ *
+ * Writes words from the current TX buffer into the data register while
+ * the TX FIFO reports empty and data remains, using the word width
+ * selected in zx297510spi->write. A WRITING_NULL transfer drives zeros.
+ */
+static void write(struct zx297510_spi *zx297510spi)
+{
+
+	/*
+	 * The FIFO depth is different between primecell variants.
+	 * I believe filling in too much in the FIFO might cause
+	 * errons in 8bit wide transfers on ARM variants (just 8 words
+	 * FIFO, means only 8x8 = 64 bits in FIFO) at least.
+	 *
+	 * To prevent this issue, the TX FIFO is only filled to the
+	 * unused RX FIFO fill length, regardless of what the TX
+	 * FIFO status flag indicates.
+	 */
+	dev_dbg(&zx297510spi->pdev->dev,
+		"%s, rx: %p, rxend: %p, tx: %p, txend: %p\n",
+		__func__, zx297510spi->rx, zx297510spi->rx_end, zx297510spi->tx, zx297510spi->tx_end);
+
+	while ((readl(SPI_FIFO_SR(zx297510spi->virtbase)) & SPI_FIFO_SR_MASK_TX_FIFO_EMPTY)
+	       && (zx297510spi->tx < zx297510spi->tx_end)) {
+		switch (zx297510spi->write) {
+		case WRITING_NULL:
+			writew(0x0, SPI_DR(zx297510spi->virtbase));
+			break;
+		case WRITING_U8:
+			writew(*(u8 *) (zx297510spi->tx), SPI_DR(zx297510spi->virtbase));
+			break;
+		case WRITING_U16:
+			writew((*(u16 *) (zx297510spi->tx)), SPI_DR(zx297510spi->virtbase));
+			break;
+		case WRITING_U32:
+			writel(*(u32 *) (zx297510spi->tx), SPI_DR(zx297510spi->virtbase));
+			break;
+		}
+		/* NOTE(review): unbounded busy-wait — if the controller never
+		 * clears BUSY this spins forever; consider a timeout like
+		 * flush() uses. */
+		while(readl(SPI_FIFO_SR(zx297510spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY) ;
+		zx297510spi->tx += (zx297510spi->cur_chip->n_bytes);
+	}
+}
+
+/**
+ * readwriter - interleaved polled TX fill and RX drain
+ *
+ * Drains the RX FIFO into the RX buffer, then fills the TX FIFO up to
+ * the expected free space (exp_fifo_level vs. vendor fifodepth),
+ * draining RX again after each word written since transmitted words
+ * clock data back into the RX FIFO. Word width follows the read/write
+ * enums in the driver state.
+ */
+static void readwriter(struct zx297510_spi *zx297510spi)
+{
+
+	/*
+	 * The FIFO depth is different between primecell variants.
+	 * I believe filling in too much in the FIFO might cause
+	 * errons in 8bit wide transfers on ARM variants (just 8 words
+	 * FIFO, means only 8x8 = 64 bits in FIFO) at least.
+	 *
+	 * To prevent this issue, the TX FIFO is only filled to the
+	 * unused RX FIFO fill length, regardless of what the TX
+	 * FIFO status flag indicates.
+	 */
+	dev_dbg(&zx297510spi->pdev->dev,
+		"%s, rx: %p, rxend: %p, tx: %p, txend: %p\n",
+		__func__, zx297510spi->rx, zx297510spi->rx_end, zx297510spi->tx, zx297510spi->tx_end);
+
+	/* Read as much as you can */
+	while ((readl(SPI_FIFO_SR(zx297510spi->virtbase)) & SPI_FIFO_SR_MASK_RX_FIFO_CNTR)
+	       && (zx297510spi->rx < zx297510spi->rx_end)) {
+		switch (zx297510spi->read) {
+		case READING_NULL:
+			readl(SPI_DR(zx297510spi->virtbase));
+			break;
+		case READING_U8:
+			*(u8 *) (zx297510spi->rx) =
+				readw(SPI_DR(zx297510spi->virtbase)) & 0xFFU;
+			break;
+		case READING_U16:
+			*(u16 *) (zx297510spi->rx) =
+				(u16) readw(SPI_DR(zx297510spi->virtbase));
+			break;
+		case READING_U32:
+			*(u32 *) (zx297510spi->rx) =
+				readl(SPI_DR(zx297510spi->virtbase));
+			break;
+		}
+		zx297510spi->rx += (zx297510spi->cur_chip->n_bytes);
+		zx297510spi->exp_fifo_level--;
+	}
+	/*
+	 * Write as much as possible up to the TX FIFO size
+	 */
+	while ((zx297510spi->exp_fifo_level < zx297510spi->vendor->fifodepth)
+	       && (zx297510spi->tx < zx297510spi->tx_end)) {
+		switch (zx297510spi->write) {
+		case WRITING_NULL:
+			writew(0x0, SPI_DR(zx297510spi->virtbase));
+			break;
+		case WRITING_U8:
+			writew(*(u8 *) (zx297510spi->tx), SPI_DR(zx297510spi->virtbase));
+			break;
+		case WRITING_U16:
+			writew((*(u16 *) (zx297510spi->tx)), SPI_DR(zx297510spi->virtbase));
+			break;
+		case WRITING_U32:
+			writel(*(u32 *) (zx297510spi->tx), SPI_DR(zx297510spi->virtbase));
+			break;
+		}
+		zx297510spi->tx += (zx297510spi->cur_chip->n_bytes);
+		zx297510spi->exp_fifo_level++;
+		/*
+		 * This inner reader takes care of things appearing in the RX
+		 * FIFO as we're transmitting. This will happen a lot since the
+		 * clock starts running when you put things into the TX FIFO,
+		 * and then things are continuously clocked into the RX FIFO.
+		 */
+		while ((readl(SPI_FIFO_SR(zx297510spi->virtbase)) & SPI_FIFO_SR_MASK_RX_FIFO_CNTR)
+		       && (zx297510spi->rx < zx297510spi->rx_end)) {
+			switch (zx297510spi->read) {
+			case READING_NULL:
+				/* NOTE(review): uses readw here but readl in the
+				 * outer READING_NULL case — confirm intended. */
+				readw(SPI_DR(zx297510spi->virtbase));
+				break;
+			case READING_U8:
+				*(u8 *) (zx297510spi->rx) =
+					readw(SPI_DR(zx297510spi->virtbase)) & 0xFFU;
+				break;
+			case READING_U16:
+				*(u16 *) (zx297510spi->rx) =
+					(u16) readw(SPI_DR(zx297510spi->virtbase));
+				break;
+			case READING_U32:
+				*(u32 *) (zx297510spi->rx) =
+					readl(SPI_DR(zx297510spi->virtbase));
+				break;
+			}
+			zx297510spi->rx += (zx297510spi->cur_chip->n_bytes);
+			zx297510spi->exp_fifo_level--;
+		}
+	}
+	/*
+	 * When we exit here the TX FIFO should be full and the RX FIFO
+	 * should be empty
+	 */
+}
+
+/**
+ * next_transfer - advance to the next transfer in the current message
+ * @zx297510spi: driver private data structure
+ *
+ * Steps through the linked list of spi transfers in the current spi
+ * message. Returns STATE_RUNNING after loading the next transfer into
+ * cur_transfer, or STATE_DONE when the current transfer was the last
+ * one in the message.
+ */
+static void *next_transfer(struct zx297510_spi *zx297510spi)
+{
+	struct spi_transfer *trans = zx297510spi->cur_transfer;
+	struct list_head *next = trans->transfer_list.next;
+
+	/* At the list head again? Then the message is complete. */
+	if (next == &zx297510spi->cur_msg->transfers)
+		return STATE_DONE;
+
+	zx297510spi->cur_transfer =
+		list_entry(next, struct spi_transfer, transfer_list);
+	return STATE_RUNNING;
+}
+
+/*
+ * This DMA functionality is only compiled in if we have
+ * access to the generic DMA devices/DMA engine.
+ */
+#ifdef CONFIG_DMA_ENGINE
+/* Unmap both DMA scatterlists from their channels' devices and free
+ * the backing SG tables. Counterpart of the setup done in
+ * configure_dma(). */
+static void unmap_free_dma_scatter(struct zx297510_spi *zx297510spi)
+{
+	/* Unmap and free the SG tables */
+	dma_unmap_sg(zx297510spi->dma_tx_channel->device->dev, zx297510spi->sgt_tx.sgl,
+		     zx297510spi->sgt_tx.nents, DMA_TO_DEVICE);
+	dma_unmap_sg(zx297510spi->dma_rx_channel->device->dev, zx297510spi->sgt_rx.sgl,
+		     zx297510spi->sgt_rx.nents, DMA_FROM_DEVICE);
+	sg_free_table(&zx297510spi->sgt_rx);
+	sg_free_table(&zx297510spi->sgt_tx);
+}
+
+/**
+ * dma_callback - completion callback for the RX DMA descriptor
+ * @data: the struct zx297510_spi that owns the finished transfer
+ *
+ * Runs when the RX DMA (which finishes last) completes: tears down the
+ * scatterlists, accounts the transferred bytes, advances to the next
+ * transfer in the message and reschedules the pump_transfers tasklet.
+ *
+ * Fix: the VERBOSE_DEBUG dump referenced zx297510spi->adev, a member
+ * that does not exist in struct zx297510_spi (leftover from the PL022
+ * AMBA driver); use pdev instead so the debug build compiles.
+ */
+static void dma_callback(void *data)
+{
+	struct zx297510_spi *zx297510spi = data;
+	struct spi_message *msg = zx297510spi->cur_msg;
+
+	BUG_ON(!zx297510spi->sgt_rx.sgl);
+
+#ifdef VERBOSE_DEBUG
+	/*
+	 * Optionally dump out buffers to inspect contents, this is
+	 * good if you want to convince yourself that the loopback
+	 * read/write contents are the same, when adopting to a new
+	 * DMA engine.
+	 */
+	{
+		struct scatterlist *sg;
+		unsigned int i;
+
+		dma_sync_sg_for_cpu(&zx297510spi->pdev->dev,
+				    zx297510spi->sgt_rx.sgl,
+				    zx297510spi->sgt_rx.nents,
+				    DMA_FROM_DEVICE);
+
+		for_each_sg(zx297510spi->sgt_rx.sgl, sg, zx297510spi->sgt_rx.nents, i) {
+			dev_dbg(&zx297510spi->pdev->dev, "SPI RX SG ENTRY: %d", i);
+			print_hex_dump(KERN_ERR, "SPI RX: ",
+				       DUMP_PREFIX_OFFSET,
+				       16,
+				       1,
+				       sg_virt(sg),
+				       sg_dma_len(sg),
+				       1);
+		}
+		for_each_sg(zx297510spi->sgt_tx.sgl, sg, zx297510spi->sgt_tx.nents, i) {
+			dev_dbg(&zx297510spi->pdev->dev, "SPI TX SG ENTRY: %d", i);
+			print_hex_dump(KERN_ERR, "SPI TX: ",
+				       DUMP_PREFIX_OFFSET,
+				       16,
+				       1,
+				       sg_virt(sg),
+				       sg_dma_len(sg),
+				       1);
+		}
+	}
+#endif
+
+	unmap_free_dma_scatter(zx297510spi);
+
+	/* Update total bytes transferred */
+	msg->actual_length += zx297510spi->cur_transfer->len;
+	/*if (zx297510spi->cur_transfer->cs_change)
+		zx297510spi->cur_chip->
+			cs_control(SSP_CHIP_DESELECT);*/
+
+	/* Move to next transfer */
+	msg->state = next_transfer(zx297510spi);
+	tasklet_schedule(&zx297510spi->pump_transfers);
+}
+
+/**
+ * setup_dma_scatter - populate a scatter table for one transfer buffer
+ * @zx297510spi: driver private data structure
+ * @buffer: the transfer buffer, or NULL for a direction with no data
+ * @length: number of bytes to cover
+ * @sgtab: the (already allocated) SG table to fill
+ *
+ * Splits @buffer page by page into @sgtab entries. When @buffer is
+ * NULL every entry is pointed at the shared dummy page so the bus can
+ * still be clocked without real data.
+ */
+static void setup_dma_scatter(struct zx297510_spi *zx297510spi,
+			      void *buffer,
+			      unsigned int length,
+			      struct sg_table *sgtab)
+{
+	struct scatterlist *sg;
+	int bytesleft = length;
+	void *bufp = buffer;
+	int mapbytes;
+	int i;
+
+	if (buffer) {
+		for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
+			/*
+			 * If there are less bytes left than what fits
+			 * in the current page (plus page alignment offset)
+			 * we just feed in this, else we stuff in as much
+			 * as we can.
+			 */
+			if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
+				mapbytes = bytesleft;
+			else
+				mapbytes = PAGE_SIZE - offset_in_page(bufp);
+			sg_set_page(sg, virt_to_page(bufp),
+				    mapbytes, offset_in_page(bufp));
+			bufp += mapbytes;
+			bytesleft -= mapbytes;
+			dev_dbg(&zx297510spi->pdev->dev,
+				"set RX/TX target page @ %p, %d bytes, %d left\n",
+				bufp, mapbytes, bytesleft);
+		}
+	} else {
+		/* Map the dummy buffer on every page */
+		for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
+			if (bytesleft < PAGE_SIZE)
+				mapbytes = bytesleft;
+			else
+				mapbytes = PAGE_SIZE;
+			sg_set_page(sg, virt_to_page(zx297510spi->dummypage),
+				    mapbytes, 0);
+			bytesleft -= mapbytes;
+			dev_dbg(&zx297510spi->pdev->dev,
+				"set RX/TX to dummy page %d bytes, %d left\n",
+				mapbytes, bytesleft);
+
+		}
+	}
+	/* The SG table must cover the buffer exactly. */
+	BUG_ON(bytesleft);
+}
+
+/**
+ * configure_dma - configure both DMA channels for the next transfer
+ * @zx297510spi: driver's private data structure
+ *
+ * Sets up slave configs (burst size from the FIFO trigger levels,
+ * bus width from the transfer's read/write width), builds and maps
+ * RX/TX scatterlists covering the current transfer, preps both
+ * descriptors, puts the completion callback on RX (it finishes last)
+ * and fires both channels.
+ *
+ * Returns 0 on success, -ENODEV if a channel is missing, -ENOMEM on
+ * any allocation/mapping/prep failure (everything acquired so far is
+ * released on the error path).
+ *
+ * Fix: the err_tx_sgmap unwind path unmapped the RX scatterlist with
+ * sgt_tx.nents instead of sgt_rx.nents (copy-paste slip, same bug was
+ * fixed in the upstream pl022 driver).
+ */
+static int configure_dma(struct zx297510_spi *zx297510spi)
+{
+	struct dma_slave_config rx_conf = {
+		.src_addr = SPI_DR(zx297510spi->phybase),
+		.direction = DMA_DEV_TO_MEM,
+		.device_fc = false,
+	};
+	struct dma_slave_config tx_conf = {
+		.dst_addr = SPI_DR(zx297510spi->phybase),
+		.direction = DMA_MEM_TO_DEV,
+		.device_fc = false,
+	};
+	unsigned int pages;
+	int ret;
+	int rx_sglen, tx_sglen;
+	struct dma_chan *rxchan = zx297510spi->dma_rx_channel;
+	struct dma_chan *txchan = zx297510spi->dma_tx_channel;
+	struct dma_async_tx_descriptor *rxdesc;
+	struct dma_async_tx_descriptor *txdesc;
+
+	/* Check that the channels are available */
+	if (!rxchan || !txchan)
+		return -ENODEV;
+
+	/*
+	 * If supplied, the DMA burstsize should equal the FIFO trigger level.
+	 * Notice that the DMA engine uses one-to-one mapping. Since we can
+	 * not trigger on 2 elements this needs explicit mapping rather than
+	 * calculation.
+	 */
+	switch (zx297510spi->rx_lev_trig) {
+	case SPI_RX_1_OR_MORE_ELEM:
+		rx_conf.src_maxburst = 1;
+		break;
+	case SPI_RX_4_OR_MORE_ELEM:
+		rx_conf.src_maxburst = 4;
+		break;
+	case SPI_RX_8_OR_MORE_ELEM:
+		rx_conf.src_maxburst = 8;
+		break;
+	case SPI_RX_16_OR_MORE_ELEM:
+		rx_conf.src_maxburst = 16;
+		break;
+	case SPI_RX_32_OR_MORE_ELEM:
+		rx_conf.src_maxburst = 32;
+		break;
+	default:
+		rx_conf.src_maxburst = zx297510spi->vendor->fifodepth >> 1;
+		break;
+	}
+
+	switch (zx297510spi->tx_lev_trig) {
+	case SPI_TX_1_OR_MORE_EMPTY_LOC:
+		tx_conf.dst_maxburst = 1;
+		break;
+	case SPI_TX_4_OR_MORE_EMPTY_LOC:
+		tx_conf.dst_maxburst = 4;
+		break;
+	case SPI_TX_8_OR_MORE_EMPTY_LOC:
+		tx_conf.dst_maxburst = 8;
+		break;
+	case SPI_TX_16_OR_MORE_EMPTY_LOC:
+		tx_conf.dst_maxburst = 16;
+		break;
+	case SPI_TX_32_OR_MORE_EMPTY_LOC:
+		tx_conf.dst_maxburst = 32;
+		break;
+	default:
+		tx_conf.dst_maxburst = zx297510spi->vendor->fifodepth >> 1;
+		break;
+	}
+
+	switch (zx297510spi->read) {
+	case READING_NULL:
+		/* Use the same as for writing */
+		rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+		break;
+	case READING_U8:
+		rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+		break;
+	case READING_U16:
+		rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+		break;
+	case READING_U32:
+		rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+		break;
+	}
+
+	switch (zx297510spi->write) {
+	case WRITING_NULL:
+		/* Use the same as for reading */
+		tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+		break;
+	case WRITING_U8:
+		tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+		break;
+	case WRITING_U16:
+		tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+		break;
+	case WRITING_U32:
+		tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+		break;
+	}
+
+	/* SPI pecularity: we need to read and write the same width */
+	if (rx_conf.src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+		rx_conf.src_addr_width = tx_conf.dst_addr_width;
+	if (tx_conf.dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+		tx_conf.dst_addr_width = rx_conf.src_addr_width;
+	BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width);
+
+	dmaengine_slave_config(rxchan, &rx_conf);
+	dmaengine_slave_config(txchan, &tx_conf);
+
+	/* Create sglists for the transfers */
+	pages = DIV_ROUND_UP(zx297510spi->cur_transfer->len, PAGE_SIZE);
+	dev_dbg(&zx297510spi->pdev->dev, "using %d pages for transfer\n", pages);
+
+	ret = sg_alloc_table(&zx297510spi->sgt_rx, pages, GFP_ATOMIC);
+	if (ret)
+		goto err_alloc_rx_sg;
+
+	ret = sg_alloc_table(&zx297510spi->sgt_tx, pages, GFP_ATOMIC);
+	if (ret)
+		goto err_alloc_tx_sg;
+
+	/* Fill in the scatterlists for the RX+TX buffers */
+	setup_dma_scatter(zx297510spi, zx297510spi->rx,
+			  zx297510spi->cur_transfer->len, &zx297510spi->sgt_rx);
+	setup_dma_scatter(zx297510spi, zx297510spi->tx,
+			  zx297510spi->cur_transfer->len, &zx297510spi->sgt_tx);
+
+	/* Map DMA buffers */
+	rx_sglen = dma_map_sg(rxchan->device->dev, zx297510spi->sgt_rx.sgl,
+			   zx297510spi->sgt_rx.nents, DMA_FROM_DEVICE);
+	if (!rx_sglen)
+		goto err_rx_sgmap;
+
+	tx_sglen = dma_map_sg(txchan->device->dev, zx297510spi->sgt_tx.sgl,
+			   zx297510spi->sgt_tx.nents, DMA_TO_DEVICE);
+	if (!tx_sglen)
+		goto err_tx_sgmap;
+
+	/* Send both scatterlists */
+	rxdesc = dmaengine_prep_slave_sg(rxchan,
+				      zx297510spi->sgt_rx.sgl,
+				      rx_sglen,
+				      DMA_DEV_TO_MEM,
+				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!rxdesc)
+		goto err_rxdesc;
+
+	txdesc = dmaengine_prep_slave_sg(txchan,
+				      zx297510spi->sgt_tx.sgl,
+				      tx_sglen,
+				      DMA_MEM_TO_DEV,
+				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!txdesc)
+		goto err_txdesc;
+
+	/* Put the callback on the RX transfer only, that should finish last */
+	rxdesc->callback = dma_callback;
+	rxdesc->callback_param = zx297510spi;
+
+	/* Submit and fire RX and TX with TX last so we're ready to read! */
+	dmaengine_submit(rxdesc);
+	dmaengine_submit(txdesc);
+	dma_async_issue_pending(rxchan);
+	dma_async_issue_pending(txchan);
+	zx297510spi->dma_running = true;
+
+	return 0;
+
+err_txdesc:
+	dmaengine_terminate_all(txchan);
+err_rxdesc:
+	dmaengine_terminate_all(rxchan);
+	dma_unmap_sg(txchan->device->dev, zx297510spi->sgt_tx.sgl,
+		     zx297510spi->sgt_tx.nents, DMA_TO_DEVICE);
+err_tx_sgmap:
+	/* Unmap the RX table with its own nents, not the TX table's. */
+	dma_unmap_sg(rxchan->device->dev, zx297510spi->sgt_rx.sgl,
+		     zx297510spi->sgt_rx.nents, DMA_FROM_DEVICE);
+err_rx_sgmap:
+	sg_free_table(&zx297510spi->sgt_tx);
+err_alloc_tx_sg:
+	sg_free_table(&zx297510spi->sgt_rx);
+err_alloc_rx_sg:
+	return -ENOMEM;
+}
+
+/**
+ * zx297510_dma_probe - acquire DMA channels and the dummy page
+ * @zx297510spi: driver private data structure
+ *
+ * Requests generic dmaengine slave channels for RX and TX via the
+ * board-supplied filter/params and allocates the dummy page used for
+ * one-directional transfers. Both channels are required; on any
+ * failure everything acquired so far is released and -ENODEV is
+ * returned so the driver can fall back to non-DMA operation.
+ */
+static int __devinit zx297510_dma_probe(struct zx297510_spi *zx297510spi)
+{
+	dma_cap_mask_t mask;
+
+	/* Try to acquire a generic DMA engine slave channel */
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+	/*
+	 * We need both RX and TX channels to do DMA, else do none
+	 * of them.
+	 */
+	zx297510spi->dma_rx_channel = dma_request_channel(mask,
+					    zx297510spi->master_info->dma_filter,
+					    zx297510spi->master_info->dma_rx_param);
+	if (!zx297510spi->dma_rx_channel) {
+		dev_dbg(&zx297510spi->pdev->dev, "no RX DMA channel!\n");
+		goto err_no_rxchan;
+	}
+
+	zx297510spi->dma_tx_channel = dma_request_channel(mask,
+					    zx297510spi->master_info->dma_filter,
+					    zx297510spi->master_info->dma_tx_param);
+	if (!zx297510spi->dma_tx_channel) {
+		dev_dbg(&zx297510spi->pdev->dev, "no TX DMA channel!\n");
+		goto err_no_txchan;
+	}
+
+	zx297510spi->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!zx297510spi->dummypage) {
+		dev_dbg(&zx297510spi->pdev->dev, "no DMA dummypage!\n");
+		goto err_no_dummypage;
+	}
+
+	dev_info(&zx297510spi->pdev->dev, "setup for DMA on RX %s, TX %s\n",
+		 dma_chan_name(zx297510spi->dma_rx_channel),
+		 dma_chan_name(zx297510spi->dma_tx_channel));
+
+	return 0;
+
+err_no_dummypage:
+	dma_release_channel(zx297510spi->dma_tx_channel);
+err_no_txchan:
+	dma_release_channel(zx297510spi->dma_rx_channel);
+	zx297510spi->dma_rx_channel = NULL;
+err_no_rxchan:
+	dev_err(&zx297510spi->pdev->dev,
+			"Failed to work in dma mode, work without dma!\n");
+	return -ENODEV;
+}
+
+/* Abort any in-flight DMA on both channels, release the scatterlists
+ * and mark DMA as no longer running. */
+static void terminate_dma(struct zx297510_spi *zx297510spi)
+{
+	struct dma_chan *rxchan = zx297510spi->dma_rx_channel;
+	struct dma_chan *txchan = zx297510spi->dma_tx_channel;
+
+	dmaengine_terminate_all(rxchan);
+	dmaengine_terminate_all(txchan);
+	unmap_free_dma_scatter(zx297510spi);
+	zx297510spi->dma_running = false;
+}
+
+/* Tear down DMA resources on driver removal: stop any running
+ * transfer, release both channels (if acquired) and free the dummy
+ * page (kfree(NULL) is a no-op). */
+static void zx297510_dma_remove(struct zx297510_spi *zx297510spi)
+{
+	if (zx297510spi->dma_running)
+		terminate_dma(zx297510spi);
+	if (zx297510spi->dma_tx_channel)
+		dma_release_channel(zx297510spi->dma_tx_channel);
+	if (zx297510spi->dma_rx_channel)
+		dma_release_channel(zx297510spi->dma_rx_channel);
+	kfree(zx297510spi->dummypage);
+}
+
+#else
/* DMA support not compiled in: always report DMA configuration failure
 * so callers fall back to interrupt/polling mode. */
static inline int configure_dma(struct zx297510_spi *zx297510spi)
{
	return -ENODEV;
}
+
/* DMA support not compiled in: probing DMA is a successful no-op. */
static inline int zx297510_dma_probe(struct zx297510_spi *zx297510spi)
{
	return 0;
}
+
/* DMA support not compiled in: nothing to tear down. */
static inline void zx297510_dma_remove(struct zx297510_spi *zx297510spi)
{
}
+#endif
+
/**
 * zx297510_interrupt_handler - Interrupt handler for the SPI controller
 *
 * This function handles interrupts generated for an interrupt based transfer.
 * If a receive overrun (ROR) or transmit underrun interrupt occurs, the
 * interrupts are disabled, the SPI block is switched off, the current
 * message's state is flagged as STATE_ERROR and the tasklet pump_transfers
 * is scheduled, which will do the postprocessing of the current message by
 * calling giveback(). Otherwise it reads data from the RX FIFO until there
 * is no more data, and writes data into the TX FIFO until it is full. When
 * the transfer completes it moves to the next transfer and schedules the
 * tasklet.
 */
static irqreturn_t zx297510_interrupt_handler(int irq, void *dev_id)
{
	struct zx297510_spi *zx297510spi = dev_id;
	struct spi_message *msg = zx297510spi->cur_msg;
	u32 irq_status = 0;
	/*
	 * NOTE(review): 'flag' is function-local, so it is 0 on every
	 * interrupt entry and the (flag == 0) test below is always true;
	 * the flag = 1 assignment never persists across interrupts.
	 * Looks like it was meant to be per-transfer state - confirm.
	 */
	u16 flag = 0;

	dev_dbg(&zx297510spi->pdev->dev,"in function %s \n", __FUNCTION__);

	/* A spurious interrupt with no message in flight: ack and ignore */
	if (unlikely(!msg)) {
		dev_err(&zx297510spi->pdev->dev,
			"bad message state in interrupt handler");
		/* Never fail */
		return IRQ_HANDLED;
	}

	/* Read the Interrupt Status Register */
	irq_status = readl(SPI_INTR_SR_SCLR(zx297510spi->virtbase));
	/* clear all Interrupt */
	writel(CLEAR_ALL_INTERRUPTS, SPI_INTR_SR_SCLR(zx297510spi->virtbase));

	dev_dbg(&zx297510spi->pdev->dev, "irq status 0x%X", irq_status);

	/* Not our interrupt if no status bit is set */
	if (unlikely(!irq_status))
		return IRQ_NONE;

	/*
	 * This handles the FIFO interrupts, the timeout
	 * interrupts are flatly ignored, they cannot be
	 * trusted.
	 */
	if ( unlikely(irq_status & SPI_INTR_SR_SCLR_MASK_RX_OVERRUN_INTR)
		|| unlikely(irq_status & SPI_INTR_SR_SCLR_MASK_TX_UNDERRUN_INTR) ) {
		/*
		 * Overrun interrupt - bail out since our Data has been
		 * corrupted
		 */
		if ( unlikely(irq_status & SPI_INTR_SR_SCLR_MASK_RX_OVERRUN_INTR) )
			dev_err(&zx297510spi->pdev->dev, "RXFIFO is OVERRUN \n");
		if ( unlikely(irq_status & SPI_INTR_SR_SCLR_MASK_TX_UNDERRUN_INTR))
			dev_err(&zx297510spi->pdev->dev, "TXFIFO is UNDERRUN \n");

		/*
		 * Disable and clear interrupts, disable SSP,
		 * mark message with bad status so it can be
		 * retried.
		 */
		writel(DISABLE_ALL_INTERRUPTS, SPI_INTR_EN(zx297510spi->virtbase));
		writel(CLEAR_ALL_INTERRUPTS, SPI_INTR_SR_SCLR(zx297510spi->virtbase));
		writel((readl(SPI_COM_CTRL(zx297510spi->virtbase)) & (~SPI_COM_CTRL_MASK_SSPE)),
			SPI_COM_CTRL(zx297510spi->virtbase));
		msg->state = STATE_ERROR;

		/* Schedule message queue handler */
		tasklet_schedule(&zx297510spi->pump_transfers);
		return IRQ_HANDLED;
	}

	/* Service the FIFOs: full-duplex when an RX buffer exists,
	 * otherwise TX-only */
	if (zx297510spi->rx != NULL )
		readwriter(zx297510spi);
	else
		write(zx297510spi);

	dev_dbg( &zx297510spi->pdev->dev, "%s tx %p tx_end %p rx %p rx_end %p\n", __FUNCTION__,
		zx297510spi->tx,
		zx297510spi->tx_end,
		zx297510spi->rx,
		zx297510spi->rx_end);

	/* All TX data queued: switch the interrupt mask over to the RX side */
	if ((zx297510spi->tx == zx297510spi->tx_end) && (flag == 0)) {
		u32 irq_flag = SPI_INTR_EN_MASK_RX_FULL_IE|SPI_INTR_EN_MASK_RX_OVERRUN_IE|SPI_INTR_EN_MASK_RX_THRES_IE;
		flag = 1;
		/* Disable Transmit interrupt, enable receive interrupt */
		/*writel((readl(SPI_INTR_EN(zx297510spi->virtbase)) &
			~SSP_CR1_MASK_TIE) | SSP_CR1_MASK_RIE,
			SSP_CR1(zx297502ssp->virtbase));*/
		writel(irq_flag, SPI_INTR_EN(zx297510spi ->virtbase));
	}

	/*
	 * Since all transactions must write as much as shall be read,
	 * we can conclude the entire transaction once RX is complete.
	 * At this point, all TX will always be finished.
	 */
	if (zx297510spi->rx >= zx297510spi->rx_end) {
		/*writew(DISABLE_ALL_INTERRUPTS,
			SSP_IMSC(pl022->virtbase));*/
		writel(DISABLE_ALL_INTERRUPTS, SPI_INTR_EN(zx297510spi->virtbase));
		writel(CLEAR_ALL_INTERRUPTS, SPI_INTR_SR_SCLR(zx297510spi->virtbase));
		if (unlikely(zx297510spi->rx > zx297510spi->rx_end)) {
			dev_warn(&zx297510spi->pdev->dev, "read %u surplus "
				"bytes (did you request an odd "
				"number of bytes on a 16bit bus?)\n",
				(u32) (zx297510spi->rx - zx297510spi->rx_end));
		}
		/* Update total bytes transferred */
		msg->actual_length += zx297510spi->cur_transfer->len;
//		if (zx297502ssp->cur_transfer->cs_change)
//			zx297502ssp->cur_chip->cs_control(SSP_CHIP_DESELECT);
		/* Move to next transfer */
		msg->state = next_transfer(zx297510spi);
		tasklet_schedule(&zx297510spi->pump_transfers);
		return IRQ_HANDLED;
	}
	return IRQ_HANDLED;
}
+
+/**
+ * This sets up the pointers to memory for the next message to
+ * send out on the SPI bus.
+ */
static int set_up_next_transfer(struct zx297510_spi *zx297510spi,
				struct spi_transfer *transfer)
{
	int residue;

	/* Sanity check the message for this bus width */
	residue = zx297510spi->cur_transfer->len % zx297510spi->cur_chip->n_bytes;
	if (unlikely(residue != 0)) {
		dev_err(&zx297510spi->pdev->dev,
			"message of %u bytes to transmit but the current "
			"chip bus has a data width of %u bytes!\n",
			zx297510spi->cur_transfer->len,
			zx297510spi->cur_chip->n_bytes);
		dev_err(&zx297510spi->pdev->dev, "skipping this message\n");
		return -EIO;
	}
	/*
	 * NOTE(review): when a transfer supplies only one of tx_buf/rx_buf,
	 * the other pointer (and its matching _end) keeps whatever value the
	 * previous transfer left behind; only the write/read selectors below
	 * are refreshed from the current buffers. Confirm this is intended
	 * for half-duplex transfers.
	 */
	if((void *)transfer->tx_buf != NULL){
		zx297510spi->tx = (void *)transfer->tx_buf;
		zx297510spi->tx_end = zx297510spi->tx + zx297510spi->cur_transfer->len;
	}
	if((void *)transfer->rx_buf != NULL){
		zx297510spi->rx = (void *)transfer->rx_buf;
		zx297510spi->rx_end = zx297510spi->rx + zx297510spi->cur_transfer->len;
	}
	/* Select the FIFO access width used by the PIO helpers */
	zx297510spi->write =
		zx297510spi->tx ? zx297510spi->cur_chip->write : WRITING_NULL;
	zx297510spi->read = zx297510spi->rx ? zx297510spi->cur_chip->read : READING_NULL;
	return 0;
}
+
+/**
+ * pump_transfers - Tasklet function which schedules next transfer
+ * when running in interrupt or DMA transfer mode.
+ * @data: SSP driver private data structure
+ *
+ */
static void pump_transfers(unsigned long data)
{
	struct zx297510_spi *zx297510spi = (struct zx297510_spi *) data;
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;

	dev_dbg(&zx297510spi->pdev->dev,"in function %s\n", __FUNCTION__);

	/* Get current state information */
	message = zx297510spi->cur_msg;
	transfer = zx297510spi->cur_transfer;

	/* Handle for abort */
	if (message->state == STATE_ERROR) {
		message->status = -EIO;
		giveback(zx297510spi);
		return;
	}

	/* Handle end of message */
	if (message->state == STATE_DONE) {
		message->status = 0;
		giveback(zx297510spi);
		return;
	}

	/* Delay if requested at end of transfer before CS change */
	if (message->state == STATE_RUNNING) {
		previous = list_entry(transfer->transfer_list.prev,
					struct spi_transfer,
					transfer_list);
		if (previous->delay_usecs)
			/*
			 * FIXME: This runs in interrupt context.
			 * Is this really smart?
			 */
			udelay(previous->delay_usecs);

		/* Reselect chip select only if cs_change was requested */
//		if (previous->cs_change)
//			zx297510spi->cur_chip->cs_control(SSP_CHIP_SELECT);
	} else {
		/* STATE_START */
		message->state = STATE_RUNNING;
	}

	/* Program tx/rx pointers for this transfer; abort the message on error */
	if (set_up_next_transfer(zx297510spi, transfer)) {
		message->state = STATE_ERROR;
		message->status = -EIO;
		giveback(zx297510spi);
		return;
	}
	/* Flush the FIFOs and let's go! */
	flush(zx297510spi);

	/* In DMA mode the DMA callback drives completion, so return early */
	if (zx297510spi->cur_chip->enable_dma) {
		if (configure_dma(zx297510spi)) {
			dev_dbg(&zx297510spi->pdev->dev,
				"configuration of DMA failed, fall back to interrupt mode\n");
			goto err_config_dma;
		}
		return;
	}

err_config_dma:
	/*
	 * Reached when DMA is disabled for this chip (natural fall-through)
	 * or when DMA configuration failed above; either way the transfer
	 * proceeds in interrupt (PIO) mode.
	 */
	/* enable all interrupts except RX */
	writel( (SPI_INTR_EN_MASK_TX_UNDERRUN_IE | SPI_INTR_EN_MASK_TX_THRES_IE | SPI_INTR_EN_MASK_TX_EMPTY_IE),
		SPI_INTR_EN(zx297510spi->virtbase) );
	// writew((readl(SSP_CR1(zx297502ssp->virtbase))|SSP_CR1_MASK_TIE|SSP_CR1_MASK_RORIE)&(~SSP_CR1_MASK_RIE),
	//	SSP_CR1(zx297502ssp->virtbase));
}
+
+static void do_interrupt_dma_transfer(struct zx297510_spi *zx297510spi)
+{
+ /*
+ * Default is to enable all interrupts except RX -
+ * this will be enabled once TX is complete
+ */
+ u32 irqflags = ENABLE_ALL_INTERRUPTS;
+
+ dev_dbg(&zx297510spi->pdev->dev,"in function %s\n", __FUNCTION__);
+
+ /* Enable target chip, if not already active */
+ //if (!zx297502ssp->next_msg_cs_active)
+ // zx297502ssp->cur_chip->cs_control(SSP_CHIP_SELECT);
+
+ if (set_up_next_transfer(zx297510spi, zx297510spi->cur_transfer)) {
+ /* Error path */
+ zx297510spi->cur_msg->state = STATE_ERROR;
+ zx297510spi->cur_msg->status = -EIO;
+ giveback(zx297510spi);
+ return;
+ }
+ /* If we're using DMA, set up DMA here */
+ if (zx297510spi->cur_chip->enable_dma) {
+ /* Configure DMA transfer */
+ if (configure_dma(zx297510spi)) {
+ dev_dbg(&zx297510spi->pdev->dev,
+ "configuration of DMA failed, fall back to interrupt mode\n");
+ goto err_config_dma;
+ }
+ /* Disable interrupts in DMA mode, IRQ from DMA controller */
+ irqflags = DISABLE_ALL_INTERRUPTS;
+ }
+
+ if(zx297510spi ->tx != NULL && zx297510spi ->rx != NULL){
+ /* enable all interrupts */
+ irqflags = ENABLE_ALL_INTERRUPTS;
+ }else if(zx297510spi->tx != NULL){
+ /*enable tx interrupts*/
+ irqflags = SPI_INTR_EN_MASK_TX_EMPTY_IE
+ |SPI_INTR_EN_MASK_TX_THRES_IE
+ |SPI_INTR_EN_MASK_TX_UNDERRUN_IE;
+ }
+err_config_dma:
+ /* Enable SSP, turn on interrupts */
+ writel(readl(SPI_COM_CTRL(zx297510spi->virtbase)) | SPI_COM_CTRL_MASK_SSPE,
+ SPI_COM_CTRL(zx297510spi->virtbase));
+
+ /* config interrupts */
+ writel(irqflags, SPI_INTR_EN(zx297510spi->virtbase));
+
+ /*writew(irqflags, SSP_IMSC(zx297502ssp->virtbase));*/
+}
+
static void do_polling_transfer(struct zx297510_spi *zx297510spi)
{
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;
	struct chip_data *chip;
	unsigned long time, timeout;

	chip = zx297510spi->cur_chip;
	message = zx297510spi->cur_msg;

	/* Walk every transfer of the message in busy-wait (PIO) mode */
	while (message->state != STATE_DONE) {
		/* Handle for abort */
		if (message->state == STATE_ERROR)
			break;
		transfer = zx297510spi->cur_transfer;

		/* Delay if requested at end of transfer */
		if (message->state == STATE_RUNNING) {
			previous =
				list_entry(transfer->transfer_list.prev,
					struct spi_transfer, transfer_list);
			if (previous->delay_usecs)
				udelay(previous->delay_usecs);
			//if (previous->cs_change)
			//	zx297502ssp->cur_chip->cs_control(SSP_CHIP_SELECT);
		} else {
			/* STATE_START */
			message->state = STATE_RUNNING;
			//if (!zx297502ssp->next_msg_cs_active)
			//	zx297502ssp->cur_chip->cs_control(SSP_CHIP_SELECT);
		}

		/* Configuration Changing Per Transfer */
		if (set_up_next_transfer(zx297510spi, transfer)) {
			/* Error path */
			message->state = STATE_ERROR;
			break;
		}
		/* Flush FIFOs and enable SSP */
		flush(zx297510spi);
		//writel((readl(SSP_CR1(zx297502ssp->virtbase)) | SSP_CR1_MASK_SSE),
		//       SSP_CR1(zx297502ssp->virtbase));
		writel(readl(SPI_COM_CTRL(zx297510spi->virtbase)) | SPI_COM_CTRL_MASK_SSPE,
			SPI_COM_CTRL(zx297510spi->virtbase));

		dev_dbg(&zx297510spi->pdev->dev, "polling transfer ongoing ...\n");

		timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT);

		/*
		 * NOTE(review): only full-duplex (tx && rx) and tx-only
		 * transfers are polled below; an rx-only transfer takes
		 * neither branch and completes without moving data - confirm
		 * whether rx-only is expected here.
		 */
		if(zx297510spi->tx != NULL && zx297510spi->rx != NULL )
		{	/*read and write*/
			while (zx297510spi->tx < zx297510spi->tx_end || zx297510spi->rx < zx297510spi->rx_end) {
				time = jiffies;
				readwriter(zx297510spi);
				if (time_after(time, timeout)) {
					dev_warn(&zx297510spi->pdev->dev,
					"%s: timeout!\n", __func__);
					message->state = STATE_ERROR;
					goto out;
				}
				cpu_relax();
			}
		}
		else if (zx297510spi->tx != NULL )
		{/* only write */
			while (zx297510spi->tx < zx297510spi->tx_end ) {
				time = jiffies;
				write(zx297510spi);
				if (time_after(time, timeout)) {
					dev_warn(&zx297510spi->pdev->dev,
					"%s: timeout!\n", __func__);
					message->state = STATE_ERROR;
					goto out;
				}
				cpu_relax();
			}
		}
		/* Update total byte transferred */
		message->actual_length += zx297510spi->cur_transfer->len;
//		if (zx297510spi->cur_transfer->cs_change)
//			zx297510spi->cur_chip->cs_control(SSP_CHIP_DESELECT);
		/* Move to next transfer */
		message->state = next_transfer(zx297510spi);
	}
out:
	/* Handle end of message */
	if (message->state == STATE_DONE)
		message->status = 0;
	else
		message->status = -EIO;

	giveback(zx297510spi);
	return;
}
+
+static int zx297510_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct zx297510_spi *zx297510spi = spi_master_get_devdata(master);
+
+ //printk(KERN_INFO "ssp:in function %s \n", __FUNCTION__);
+
+ /* Initial message state */
+ zx297510spi->cur_msg = msg;
+ msg->state = STATE_START;
+
+ zx297510spi->cur_transfer = list_entry(msg->transfers.next,
+ struct spi_transfer, transfer_list);
+
+ /* Setup the SPI using the per chip configuration */
+ zx297510spi->cur_chip = spi_get_ctldata(msg->spi);
+
+ restore_state(zx297510spi);
+ flush(zx297510spi);
+
+ if (zx297510spi->cur_chip->xfer_type == POLLING_TRANSFER)
+ do_polling_transfer(zx297510spi);
+ else
+ do_interrupt_dma_transfer(zx297510spi);
+
+ return 0;
+}
+
/*
 * prepare_transfer_hardware hook: nothing needs powering up on this
 * controller (runtime PM syncing is not used here), so simply report
 * that the hardware is ready.
 */
static int zx297510_prepare_transfer_hardware(struct spi_master *master)
{
	return 0;
}
+
+static int zx297510_unprepare_transfer_hardware(struct spi_master *master)
+{
+ struct zx297510_spi *zx297510spi = spi_master_get_devdata(master);
+
+ //dev_warn(&zx297502ssp->pdev->dev,"in function %s\n", __FUNCTION__);
+
+ /* nothing more to do - disable spi/ssp and power off */
+ writel(readl(SPI_COM_CTRL(zx297510spi->virtbase)) & ~ SPI_COM_CTRL_MASK_SSPE,
+ SPI_COM_CTRL(zx297510spi->virtbase));
+ #if 0
+ if (pl022->master_info->autosuspend_delay > 0) {
+ pm_runtime_mark_last_busy(&pl022->adev->dev);
+ pm_runtime_put_autosuspend(&pl022->adev->dev);
+ } else {
+ pm_runtime_put(&pl022->adev->dev);
+ }
+ #endif
+ return 0;
+}
+
+static int verify_controller_parameters(struct zx297510_spi *zx297510spi,
+ struct spi_config_chip const *chip_info)
+{
+ if ((chip_info->iface < SPI_INTERFACE_MOTOROLA_SPI)
+ || (chip_info->iface > SPI_INTERFACE_TI_SYNC_SERIAL)) {
+ dev_err(&zx297510spi->pdev->dev,
+ "interface is configured incorrectly\n");
+ return -EINVAL;
+ }
+
+ if ((chip_info->hierarchy != SPI_MASTER)
+ && (chip_info->hierarchy != SPI_SLAVE)) {
+ dev_err(&zx297510spi->pdev->dev,
+ "hierarchy is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if ((chip_info->com_mode != INTERRUPT_TRANSFER)
+ && (chip_info->com_mode != DMA_TRANSFER)
+ && (chip_info->com_mode != POLLING_TRANSFER)) {
+ dev_err(&zx297510spi->pdev->dev,
+ "Communication mode is configured incorrectly\n");
+ return -EINVAL;
+ }
+ switch (chip_info->rx_lev_trig) {
+ case SPI_RX_1_OR_MORE_ELEM:
+ case SPI_RX_4_OR_MORE_ELEM:
+ case SPI_RX_8_OR_MORE_ELEM:
+ /* These are always OK, all variants can handle this */
+ break;
+ case SPI_RX_16_OR_MORE_ELEM:
+ if (zx297510spi->vendor->fifodepth < 16) {
+ dev_err(&zx297510spi->pdev->dev,
+ "RX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ break;
+ case SPI_RX_32_OR_MORE_ELEM:
+ if (zx297510spi->vendor->fifodepth < 32) {
+ dev_err(&zx297510spi->pdev->dev,
+ "RX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_err(&zx297510spi->pdev->dev,
+ "RX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ break;
+ }
+ switch (chip_info->tx_lev_trig) {
+ case SPI_TX_1_OR_MORE_EMPTY_LOC:
+ case SPI_TX_4_OR_MORE_EMPTY_LOC:
+ case SPI_TX_8_OR_MORE_EMPTY_LOC:
+ /* These are always OK, all variants can handle this */
+ break;
+ case SPI_TX_16_OR_MORE_EMPTY_LOC:
+ if (zx297510spi->vendor->fifodepth < 16) {
+ dev_err(&zx297510spi->pdev->dev,
+ "TX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ break;
+ case SPI_TX_32_OR_MORE_EMPTY_LOC:
+ if (zx297510spi->vendor->fifodepth < 32) {
+ dev_err(&zx297510spi->pdev->dev,
+ "TX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_err(&zx297510spi->pdev->dev,
+ "TX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ break;
+ }
+
+ return 0;
+}
+
+static inline u32 spi_rate(u32 rate, u16 cpsdvsr, u16 scr)
+{
+ return rate / (cpsdvsr * (1 + scr));
+}
+
+static int calculate_effective_freq(struct zx297510_spi *zx297510spi, u32 freq, u8* div)
+{
+ u8 clk_div;
+ /*div from src clk 104M*/
+ /* f(ssp_clk) = 2*f(ssp_sclk_out) */
+ clk_div = zx297510spi->clkfreq /( freq *2);
+ if( clk_div < DIV_MIN+1 || clk_div > DIV_MAX+1 )
+ {
+ dev_err(&zx297510spi->pdev->dev, "error!!! speed is %d Hz out of rang",freq );
+ return -ENOTSUPP;
+ }
+ *div = clk_div;
+ return 0;
+}
+
/* Capabilities of this controller variant. */
static struct vendor_data vendor_arm = {
	.fifodepth = 16,	/* TX/RX FIFO depth in elements */
	.max_bpw = 32,		/* maximum bits per word */
	.loopback = true,	/* loopback mode is available */
};
/* Board GPIO lines used by the SPI controller; requested in probe(). */
static struct resource spi_gpio_resources[] ={
	[0]={
		.start = GPIO_AP_SPI_TXD,
		.end = GPIO_AP_SPI_TXD,
		.name = "txd",	/* transmit data */
		.flags = IORESOURCE_IO,
	},
	[1]={
		.start = GPIO_AP_SPI_CLK,
		.end = GPIO_AP_SPI_CLK,
		.name = "clk",	/* serial clock */
		.flags = IORESOURCE_IO,
	},
	[2]={
		.start = GPIO_AP_SPI_CS,
		.end = GPIO_AP_SPI_CS,
		.name = "cs",	/* chip select */
		.flags = IORESOURCE_IO,
	},
#if 0
	[3]={
		.start = GPIO_AP_SPI_RXD,
		.end = GPIO_AP_SPI_RXD,
		.name = "rxd",	/* receive data - currently not claimed */
		.flags = IORESOURCE_IO,
	}
#endif
};
+/*
+ * A piece of default chip info unless the platform
+ * supplies it.
+ */
static const struct spi_config_chip spi_default_chip_info = {
	.com_mode = POLLING_TRANSFER,		/* simplest mode, no IRQ/DMA needed */
	.iface = SPI_INTERFACE_MOTOROLA_SPI,
	.hierarchy = SPI_MASTER,
	.slave_tx_disable = DO_NOT_DRIVE_TX,
	.rx_lev_trig = SPI_RX_8_OR_MORE_ELEM,	/* half of the 16-entry FIFO */
	.tx_lev_trig = SPI_TX_8_OR_MORE_EMPTY_LOC,
//	.ctrl_len = SSP_BITS_8,
//	.wait_state = SSP_MWIRE_WAIT_ZERO,
//	.duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
	.cs_control = null_cs_control,
};
+
/*
 * SPI in GPIO (bit-bang) mode, used to read the LCD's ID - Begin
 */
+#define SPI_GPIO_FUNCTION 1
+#define SPI_GPIO_GPIO 0
+
+#define SPI_GPIO_HIGH 1
+#define SPI_GPIO_LOW 0
+
/* Switch the SPI pads (CS/CLK/TXD) to their SPI controller function. */
static void spi_set_gpio_function(void)
{
	/* set the pads to alternate-function mode */
	zx29_gpio1v8_function_sel(GPIO_AP_SPI_CS, SPI_GPIO_FUNCTION);
	zx29_gpio1v8_function_sel(GPIO_AP_SPI_CLK,SPI_GPIO_FUNCTION);
//	zx29_gpio1v8_function_sel(GPIO_AP_SPI_RXD,SPI_GPIO_FUNCTION);
	zx29_gpio1v8_function_sel(GPIO_AP_SPI_TXD,SPI_GPIO_FUNCTION);
}
/* Switch the SPI pads (CS/CLK/TXD) to plain GPIO mode for bit-banging. */
static void spi_set_gpio_gpio(void)
{
	/* set the pads to GPIO mode */
	zx29_gpio1v8_function_sel(GPIO_AP_SPI_CS, SPI_GPIO_GPIO);
	zx29_gpio1v8_function_sel(GPIO_AP_SPI_CLK,SPI_GPIO_GPIO);
//	zx29_gpio1v8_function_sel(GPIO_AP_SPI_RXD,SPI_GPIO_GPIO);
	zx29_gpio1v8_function_sel(GPIO_AP_SPI_TXD,SPI_GPIO_GPIO);
}
/* Drive @gpio_num as an output at level @val (0 or 1). */
static void spi_set_gpio_val(int gpio_num, int val)
{
	gpio_direction_output(gpio_num, val);
}
+
/* Turn @gpio_num into an input and return its current level. */
static int spi_get_gpio_val(int gpio_num)
{
	gpio_direction_input(gpio_num);

	return gpio_get_value(gpio_num);
}
+
/* Busy-wait for @delay microseconds (bit-bang clock timing). */
static void spi_time_delay(int delay/*us*/)
{
	udelay(delay);
}
+
/*
 * Put the SPI pads into GPIO mode and drive them to their idle levels
 * (CS high = deselected, CLK low) ready for bit-banged access.
 */
void spi_gpio_mode_start(void)
{
	/* set clk tx rx cs to gpio */
	spi_set_gpio_gpio();

	spi_set_gpio_val(GPIO_AP_SPI_CS, SPI_GPIO_HIGH);/* CS is active low */
	spi_set_gpio_val(GPIO_AP_SPI_CLK, SPI_GPIO_LOW);/* CLK idles low */
	/*
	 * NOTE(review): CLK is written low three times in a row; the two
	 * writes below look like copy-paste and may have been meant for
	 * the TXD/RXD pads - confirm against the board bring-up sequence.
	 */
	spi_set_gpio_val(GPIO_AP_SPI_CLK, SPI_GPIO_LOW);
	spi_set_gpio_val(GPIO_AP_SPI_CLK, SPI_GPIO_LOW);
}
+EXPORT_SYMBOL(spi_gpio_mode_start);
/* Restore the SPI pads to their controller (function) mode. */
void spi_gpio_mode_stop(void)
{
	/* set clk tx rx cs to function */
	spi_set_gpio_function();
}
+EXPORT_SYMBOL(spi_gpio_mode_stop);
+/*******************************************************************************
+ * Function:
+ * Description:
+ * Parameters:
+ * Input:
+ *
+ * Output:
+ *
+ * Returns:
+ *
+ *
+ * Others:
+ ********************************************************************************/
+void spi_gpio_write_single8(unsigned char data)
+{
+ int i;
+
+ spi_set_gpio_val(GPIO_AP_SPI_CS, SPI_GPIO_LOW);/* CSµÍÓÐЧ */
+
+ for( i=7; i>=0; i-- )
+ {
+ spi_set_gpio_val(GPIO_AP_SPI_CLK, SPI_GPIO_LOW);
+ if ((data >> i) & 0x1)
+ {
+ spi_set_gpio_val(GPIO_AP_SPI_TXD, SPI_GPIO_HIGH);
+ }
+ else
+ {
+ spi_set_gpio_val(GPIO_AP_SPI_TXD, SPI_GPIO_LOW);
+ }
+ spi_time_delay(50);
+ spi_set_gpio_val(GPIO_AP_SPI_CLK, SPI_GPIO_HIGH);
+ spi_time_delay(50);
+ }
+
+}
+EXPORT_SYMBOL(spi_gpio_write_single8);
+/*******************************************************************************
+ * Function:
+ * Description:
+ * Parameters:
+ * Input:
+ *
+ * Output:
+ *
+ * Returns:
+ *
+ *
+ * Others:
+ ********************************************************************************/
+unsigned char spi_gpio_read_single8(void)
+{
+ int i;
+ unsigned char readData = 0;
+
+ spi_set_gpio_val(GPIO_AP_SPI_CS, SPI_GPIO_LOW);/* CSµÍÓÐЧ */
+
+ for( i=7; i>=0; i-- )
+ {
+ spi_set_gpio_val(GPIO_AP_SPI_CLK, SPI_GPIO_LOW);
+ spi_time_delay(50);
+ spi_set_gpio_val(GPIO_AP_SPI_CLK, SPI_GPIO_HIGH);
+ if( spi_get_gpio_val(GPIO_AP_SPI_TXD) )/* lcd ¸´ÓÃtx rx */
+ {
+ readData |= (1 << i);
+ }
+ spi_time_delay(50);
+ }
+
+ return readData;
+}
+EXPORT_SYMBOL(spi_gpio_read_single8);
+
/*
 * SPI in GPIO (bit-bang) mode, used to read the LCD's ID - End
 */
+
/**
 * zx297510_setup - setup function registered to SPI master framework
 * @spi: spi device which is requesting setup
 *
 * This function is registered to the SPI framework for this SPI master
 * controller. If it is the first time when setup is called by this device,
 * this function will initialize the runtime state for this chip and save
 * the same in the device structure. Else it will update the runtime info
 * with the updated chip info. Nothing is really being written to the
 * controller hardware here, that is not done until the actual transfer
 * commence.
 */
static int zx297510_setup(struct spi_device *spi)
{
	struct spi_config_chip const *chip_info;
	struct chip_data *chip;
	u8 clk_div = 0;
	int status = 0;
	struct zx297510_spi *zx297510spi = spi_master_get_devdata(spi->master);
	unsigned int bits = spi->bits_per_word;
	u32 tmp;


	/* A usable clock rate is mandatory */
	if (!spi->max_speed_hz)
		return -EINVAL;

	/* Get controller_state if one is supplied */
	chip = spi_get_ctldata(spi);

	if (chip == NULL) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip) {
			dev_err(&spi->dev,
				"cannot allocate controller state\n");
			return -ENOMEM;
		}
		dev_dbg(&spi->dev,
			"allocated memory for controller's runtime state\n");
	}

	/* Get controller data if one is supplied */
	chip_info = spi->controller_data;

	if (chip_info == NULL) {
		chip_info = &spi_default_chip_info;
		/* spi_board_info.controller_data not is supplied */
		dev_dbg(&spi->dev,
			"using default controller_data settings\n");
	} else
		dev_dbg(&spi->dev,
			"using user supplied controller_data settings\n");

	/*
	 * We can override with custom divisors, else we use the board
	 * frequency setting
	 */

	status = calculate_effective_freq(zx297510spi,
					spi->max_speed_hz,
					&clk_div);
	if (status < 0)
		goto err_config_params;

	chip ->clk_div = clk_div;

	dev_dbg(&spi->dev, "clk dividor is %d\n", clk_div);

	/* enable ssp clock source */
	clk_enable(zx297510spi->spi_clk);

	/* set spi clock source at 104MHz/1 */
	// zx297510spi->spi_clk->ops->set_division(zx297510spi->spi_clk,chip ->clk_div-1);
	//writel(chip ->clk_div-1, M0_SSP_CLKDIV_REG_VA);
	clk_set_rate(zx297510spi->spi_clk, spi->max_speed_hz*2); /* f(ssp_clk) = 2*f(ssp_sclk_out) */

	status = verify_controller_parameters(zx297510spi, chip_info);
	if (status) {
		dev_err(&spi->dev, "controller data is incorrect");
		goto err_config_params;
	}

	zx297510spi->rx_lev_trig = chip_info->rx_lev_trig;
	zx297510spi->tx_lev_trig = chip_info->tx_lev_trig;

	/* Now set controller state based on controller data */
	chip->xfer_type = chip_info->com_mode;
	/*
	if (!chip_info->cs_control) {
		chip->cs_control = null_cs_control;
		dev_warn(&spi->dev,
			 "chip select function is NULL for this chip\n");
	} else
		chip->cs_control = chip_info->cs_control;*/

	/* Check bits per word with vendor specific range */
	/* Pick the FIFO element width (1/2/4 bytes) matching bits_per_word */
	if ((bits <= 3) || (bits > zx297510spi->vendor->max_bpw)) {
		status = -ENOTSUPP;
		dev_err(&spi->dev, "illegal data size for this controller!\n");
		dev_err(&spi->dev, "This controller can only handle 4 <= n <= %d bit words\n",
			zx297510spi->vendor->max_bpw);
		goto err_config_params;
	} else if (bits <= 8) {
		dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n");
		chip->n_bytes = 1;
		chip->read = READING_U8;
		chip->write = WRITING_U8;
	} else if (bits <= 16) {
		dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n");
		chip->n_bytes = 2;
		chip->read = READING_U16;
		chip->write = WRITING_U16;
	} else {
		dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
		chip->n_bytes = 4;
		chip->read = READING_U32;
		chip->write = WRITING_U32;
	}

	/* Now Initialize all register settings required for this chip */
	chip->com_ctrl = 0;
	chip->fmt_ctrl = 0;
	chip->fifo_ctrl = 0;

	/* DMA is used only when the mode asks for it AND the board enables it */
	if ((chip_info->com_mode == DMA_TRANSFER)
	    && ((zx297510spi->master_info)->enable_dma)) {
		chip->enable_dma = true;
		dev_dbg(&spi->dev, "DMA mode set in controller state\n");
		SPI_WRITE_BITS(chip->fifo_ctrl, SPI_DMA_ENABLED,
			       SPI_FIFO_CTRL_MASK_RX_DMA_EN, 2);
		SPI_WRITE_BITS(chip->fifo_ctrl,
			       SPI_DMA_ENABLED, SPI_FIFO_CTRL_MASK_TX_DMA_EN, 3);
	} else {
		chip->enable_dma = false;
		dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n");
		SPI_WRITE_BITS(chip->fifo_ctrl,
			       SPI_DMA_DISABLED, SPI_FIFO_CTRL_MASK_RX_DMA_EN, 2);
		SPI_WRITE_BITS(chip->fifo_ctrl,
			       SPI_DMA_DISABLED, SPI_FIFO_CTRL_MASK_TX_DMA_EN, 3);
	}


	/* FIFO trigger thresholds */
	SPI_WRITE_BITS(chip->fifo_ctrl,
		       SPI_FIFO_THRES_8, SPI_FIFO_CTRL_MASK_RX_FIFO_THRES, 4);
	SPI_WRITE_BITS(chip->fifo_ctrl,
		       SPI_FIFO_THRES_8, SPI_FIFO_CTRL_MASK_TX_FIFO_THRES, 8);

	/* Data size (encoded as bits-1) and frame format */
	SPI_WRITE_BITS(chip->fmt_ctrl, bits - 1,SPI_FMT_CTRL_MASK_DSS, 4);
	SPI_WRITE_BITS(chip->fmt_ctrl, chip_info->iface, SPI_FMT_CTRL_MASK_FRF, 0);

	/* Stuff that is common for all versions */
	if (spi->mode & SPI_CPOL)
		tmp = SPI_CLK_POL_IDLE_HIGH;
	else
		tmp = SPI_CLK_POL_IDLE_LOW;
	SPI_WRITE_BITS(chip->fmt_ctrl, tmp, SPI_FMT_CTRL_MASK_POL,2);

	if (spi->mode & SPI_CPHA)
		tmp = SPI_CLK_SECOND_EDGE;
	else
		tmp = SPI_CLK_FIRST_EDGE;

	SPI_WRITE_BITS(chip->fmt_ctrl, tmp, SPI_FMT_CTRL_MASK_PHA, 3);

	/* Loopback is available on all versions except PL023 */
	if (zx297510spi->vendor->loopback) {
		if (spi->mode & SPI_LOOP)
			tmp = LOOPBACK_ENABLED;
		else
			tmp = LOOPBACK_DISABLED;
		SPI_WRITE_BITS(chip->com_ctrl, tmp, SPI_COM_CTRL_MASK_LBM, 0);
	}
	SPI_WRITE_BITS(chip->com_ctrl, SPI_ENABLED, SPI_COM_CTRL_MASK_SSPE, 1);
	SPI_WRITE_BITS(chip->com_ctrl, chip_info->hierarchy, SPI_COM_CTRL_MASK_MS, 2);
	SPI_WRITE_BITS(chip->com_ctrl, chip_info->slave_tx_disable, SPI_COM_CTRL_MASK_SOD, 3);

	/* Save controller_state */
	spi_set_ctldata(spi, chip);
	return status;
	/* On error the chip state is dropped and freed, even when it was a
	 * previously saved one */
 err_config_params:
	spi_set_ctldata(spi, NULL);
	kfree(chip);
	return status;
}
+
/**
 * zx297510_cleanup - cleanup function registered to SPI master framework
 * @spi: spi device which is requesting cleanup
 *
 * This function is registered to the SPI framework for this SPI master
 * controller. It will free the runtime state of chip.
 */
+static void zx297510_cleanup(struct spi_device *spi)
+{
+ struct chip_data *chip = spi_get_ctldata(spi);
+
+ spi_set_ctldata(spi, NULL);
+ kfree(chip);
+}
+
/* Platform probe: allocate the master, map registers, claim GPIOs,
 * clocks, IRQ and (optionally) DMA, then register with the SPI core. */
static int __devinit zx297510_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct zx297510_spi_controller *platform_info = pdev->dev.platform_data;
	struct spi_master *master;
	struct zx297510_spi *zx297510spi = NULL;	/*Data for this driver */
	struct resource * regs = NULL;
	struct resource * gpio = NULL;
	struct resource * irq = NULL;
	int status = 0, i;

	printk(KERN_INFO "spi:zx297510_spi_probe \n");


	if (platform_info == NULL) {
		dev_err(&pdev->dev, "probe - no platform data supplied\n");
		status = -ENODEV;
		goto err_no_pdata;
	}

	/* Allocate master with space for data */
	master = spi_alloc_master(dev, sizeof(struct zx297510_spi));
	if (master == NULL) {
		dev_err(&pdev->dev, "probe - cannot alloc SPI master\n");
		status = -ENOMEM;
		goto err_no_master;
	}

	zx297510spi = spi_master_get_devdata(master);
	zx297510spi->master = master;
	zx297510spi->master_info = platform_info;
	zx297510spi->pdev = pdev;
	zx297510spi->vendor = &vendor_arm;

	dev_set_drvdata(&pdev->dev, zx297510spi);
	/*
	 * Bus Number Which has been Assigned to this SSP controller
	 * on this board
	 */
	master->bus_num = platform_info->bus_id;
	master->num_chipselect = platform_info->num_chipselect;
	master->cleanup = zx297510_cleanup;
	master->setup = zx297510_setup;
	master->prepare_transfer_hardware = zx297510_prepare_transfer_hardware;
	master->transfer_one_message = zx297510_transfer_one_message;
	master->unprepare_transfer_hardware = zx297510_unprepare_transfer_hardware;
	master->rt = platform_info->rt;

	/*
	 * Supports mode 0-3, loopback, and active low CS. Transfers are
	 * always MS bit first on the original pl022.
	 */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_NO_CS;

	dev_dbg(&pdev->dev, "BUSNO: %d\n", master->bus_num);

	/* registers */
	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if ( regs == NULL ){
		dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
		status = -ENOENT;
		goto err_no_registers;
	}

	zx297510spi->phybase = regs->start;
	zx297510spi->virtbase = ioremap(regs->start, resource_size(regs));

	if (zx297510spi->virtbase == NULL) {
		status = -ENOMEM;
		goto err_no_ioremap;
	}
	dev_dbg( &pdev->dev," mapped registers from 0x%08x to 0x%p\n",
		regs->start, zx297510spi->virtbase);

	/* gpios txd rxd sclk cs */
	/*
	 * NOTE(review): successfully requested GPIOs are never
	 * gpio_free()d on the error paths below - confirm whether that
	 * leak is acceptable for this platform.
	 */
	for(i = 0; i < ARRAY_SIZE(spi_gpio_resources); i++){
		//gpio = platform_get_resource(pdev, IORESOURCE_IO, i);
		gpio = &spi_gpio_resources[i];
		if( gpio == NULL )
		{
			dev_err(&pdev->dev, "Cannot get IORESOURCE_IO\n");
			status = -ENOENT;
			goto err_gpios;
		}
		dev_dbg(&pdev->dev, "used gpio num %d as %s \n", gpio->start, gpio ->name);

		status = gpio_request(gpio->start,gpio->name);
		if( status < 0 )
			goto err_gpios;
		//zte_gpio_config(gpio->start, SET_FUNCTION);
		zx29_gpio1v8_function_sel(gpio->start,1);
	}

	/* work clock */
	zx297510spi->spi_clk = clk_get(&pdev->dev, "work_clk");
	if (IS_ERR(zx297510spi->spi_clk)) {
		status = PTR_ERR(zx297510spi->spi_clk);
		dev_err(&pdev->dev, "could not retrieve SPI work clock\n");
		goto err_no_clk;
	}
	/* enable spiclk at function zx297510_setup */

	zx297510spi->clkfreq = SPI_SPICLK_FREQ_104M;


	/* apb clock */
	zx297510spi->pclk = clk_get(&pdev->dev, "apb_clk");
	if (IS_ERR(zx297510spi->pclk)) {
		status = PTR_ERR(zx297510spi->pclk);
		/* NOTE(review): message says "work clock" but this is the
		 * apb clock - copy-paste; also the work clk is not put on
		 * this path before the shared err_no_clk label runs. */
		dev_err(&pdev->dev, "could not retrieve SPI work clock\n");
		goto err_no_clk;
	}
	/* enable ssp clock source */
	clk_enable(zx297510spi->pclk);

	/* Initialize transfer pump */
	tasklet_init(&zx297510spi->pump_transfers, pump_transfers,
		     (unsigned long)zx297510spi);

	/* Disable SPI */
	writel((readl(SPI_COM_CTRL(zx297510spi->virtbase)) & (~SPI_COM_CTRL_MASK_SSPE)),
		SPI_COM_CTRL(zx297510spi->virtbase));

	load_spi_default_config(zx297510spi);

	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if( irq == NULL ){
		dev_err(&pdev->dev, "Cannot get IORESOURCE_IRQ\n");
		status = -ENOENT;
		goto err_no_irq;
	}

	dev_dbg(&pdev->dev, "used interrupt num is %d\n", irq->start);

	status = request_irq(irq->start, zx297510_interrupt_handler, 0, "zx297510_spi",
			     zx297510spi);
	if (status < 0) {
		dev_err(&pdev->dev, "probe - cannot get IRQ (%d)\n", status);
		goto err_no_irq;
	}

	/* Get DMA channels */
	/* DMA is best-effort: fall back to PIO when channels are missing */
	if (platform_info->enable_dma) {
		status = zx297510_dma_probe(zx297510spi);
		if (status != 0)
			platform_info->enable_dma = 0;
	}

	status = spi_register_master(master);
	if (status != 0) {
		dev_err(&pdev->dev,
			"probe - problem registering spi master\n");
		goto err_spi_register;
	}
	dev_dbg(&pdev->dev," probe succeeded\n");

	/* let runtime pm put suspend */
	if (platform_info->autosuspend_delay > 0) {
		dev_info(&pdev->dev,
			"will use autosuspend for runtime pm, delay %dms\n",
			platform_info->autosuspend_delay);
		pm_runtime_set_autosuspend_delay(dev,
			platform_info->autosuspend_delay);
		pm_runtime_use_autosuspend(dev);
		pm_runtime_put_autosuspend(dev);
	} else {
		pm_runtime_put(dev);
	}
	return 0;

 err_spi_register:
	if (platform_info->enable_dma)
		zx297510_dma_remove(zx297510spi);

	free_irq(irq->start, zx297510spi);
 err_no_irq:
	clk_disable(zx297510spi->spi_clk);
// err_no_clk_en:
	//clk_unprepare(pl022->clk);
	//err_clk_prep:
	clk_put(zx297510spi->spi_clk);
 err_no_clk:
	iounmap(zx297510spi->virtbase);
 err_gpios:
	/* add */
 err_no_ioremap:
 err_no_registers:
	spi_master_put(master);
 err_no_master:
 err_no_pdata:
	return status;
}
+
+/*
+ * zx297510_spi_remove - unbind/unload teardown for the zx297510 SPI controller
+ * @pdev: platform device previously bound by the probe routine
+ *
+ * Releases everything acquired at probe time, in reverse order where it
+ * matters: runtime-PM reference, controller registers (reset to defaults),
+ * DMA channels, IRQ, clocks, GPIO pins, register mapping, transfer tasklet,
+ * and finally the SPI master itself.  Always returns 0.
+ *
+ * NOTE(review): this function is marked __exit and hooked up via __exit_p()
+ * in the driver struct.  When the driver is built into the kernel, __exit
+ * code is discarded and .remove becomes NULL, so unbinding the device at
+ * runtime (e.g. via sysfs) would leak all of these resources — confirm the
+ * device is never unbound, or drop the __exit/__exit_p markings.
+ */
+static int __exit zx297510_spi_remove(struct platform_device *pdev)
+{
+	struct zx297510_spi *zx297510spi = dev_get_drvdata(&pdev->dev);
+	struct resource * gpio = NULL;
+	struct resource * irq = NULL;
+	int i;
+
+	/* Nothing to do if probe never completed / drvdata was cleared. */
+	if (!zx297510spi)
+		return 0;
+
+	/*
+	 * undo pm_runtime_put() in probe. I assume that we're not
+	 * accessing the primecell here.
+	 */
+	pm_runtime_get_noresume(&pdev->dev);
+
+	/* Quiesce the controller by restoring its power-on register state. */
+	load_spi_default_config(zx297510spi);
+	if (zx297510spi->master_info->enable_dma)
+		zx297510_dma_remove(zx297510spi);
+
+	/* Release the IRQ only if the platform actually provided one. */
+	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if( irq != NULL )
+	{
+		free_irq(irq->start, zx297510spi);
+	}
+
+	/* Work (SPI bit) clock: enabled in setup, disabled here. */
+	clk_disable(zx297510spi->spi_clk);
+	clk_put(zx297510spi->spi_clk);
+
+	/* APB (register interface) clock: enabled in probe. */
+	clk_disable(zx297510spi->pclk);
+	clk_put(zx297510spi->pclk);
+
+	/* gpios txd rxd sclk sfr */
+	for(i = 0; i < ARRAY_SIZE(spi_gpio_resources); i++){
+		//gpio = platform_get_resource(pdev, IORESOURCE_IO, i);
+		gpio = &spi_gpio_resources[i];
+
+		/* Always non-NULL (address of a static array element);
+		 * the check mirrors the probe path's defensive style. */
+		if( gpio != NULL )
+		{
+			gpio_free(gpio->start);
+		}
+	}
+
+	/* Unmap controller registers mapped during probe. */
+	iounmap(zx297510spi->virtbase);
+	//amba_release_regions(adev);
+	/* Stop the transfer pump before the master (and its queue) goes away. */
+	tasklet_disable(&zx297510spi->pump_transfers);
+	spi_unregister_master(zx297510spi->master);
+	spi_master_put(zx297510spi->master);
+	//amba_set_drvdata(adev, NULL);
+	/* Clear drvdata so a repeated remove sees NULL and bails out above. */
+	dev_set_drvdata(&pdev->dev, NULL);
+	return 0;
+}
+
+/*
+ * Platform driver glue.  The .name must match the platform device
+ * registered by the board/machine code ("zx297510_ssp").
+ *
+ * NOTE(review): __exit_p() evaluates to NULL for built-in kernels, so a
+ * built-in driver cannot be unbound cleanly (remove is never called).
+ * If runtime unbind is required, the remove callback should lose its
+ * __exit marking and be referenced directly — confirm with the platform
+ * integration before changing.
+ */
+static struct platform_driver zx297510_spi_driver = {
+	.driver = {
+		.name	= "zx297510_ssp",
+		.owner = THIS_MODULE,
+	},
+	.probe = zx297510_spi_probe,
+	.remove = __exit_p(zx297510_spi_remove),
+};
+
+/*
+ * Module entry point: register the zx297510 SSP platform driver with the
+ * driver core.  Returns 0 on success or the negative error code from
+ * platform_driver_register().
+ */
+static int __init zx297510_spi_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&zx297510_spi_driver);
+
+	return ret;
+}
+
+/*
+ * Module exit point: unregister the zx297510 SSP platform driver, which
+ * triggers remove() for any still-bound devices (module builds only).
+ */
+static void __exit zx297510_spi_exit(void)
+{
+	struct platform_driver *drv = &zx297510_spi_driver;
+
+	platform_driver_unregister(drv);
+}
+
+module_init(zx297510_spi_init);
+module_exit(zx297510_spi_exit);
+
+MODULE_DESCRIPTION("zx297510 spi controller driver");
+MODULE_AUTHOR("ZTER");
+/*
+ * The file header declares GPL v2.  "ZTE" is not a license ident known to
+ * the kernel's module loader; it would taint the kernel and deny this
+ * module access to GPL-only exported symbols.  Use the recognized "GPL"
+ * string, consistent with the license text at the top of the file.
+ */
+MODULE_LICENSE("GPL");
+