blob: fd49fd6586705e1b1ef3bb12ed12ced00d48eaa7 [file] [log] [blame]
/*******************************************************************************
* Copyright (C) 2016-2021, ZTE Corporation.
*
* File Name:spi-zx29.c
* File Mark:
* Description:
* Others:
* Version: 1.0
* Author: ZTE
* Date:
* modify
********************************************************************************/
/****************************************************************************
* Include files
****************************************************************************/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/pm_runtime.h>
#include <linux/semaphore.h>
//#include <linux/wakelock.h> //qhf
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/dma/zx-dma.h>
#include <linux/dma-direct.h>
#include <asm/memory.h>
#include <linux/debugfs.h>
#include <linux/spi/spi.h>
//#include <linux/soc/zte/pm/drv_idle.h>
#include "spi-zx29.h"
#include "pub_debug_info.h"
/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck start */
#include <linux/wait.h>
#include <linux/suspend.h>
/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck end */
/*
 * Match data attached to each of_device_id entry: selects whether the
 * controller instance operates as SPI master or SPI slave (consumed via
 * zx29spi->mode in restore_state() and load_spi_default_config()).
 */
struct zx29_ssp_device_of_data {
	enum zx29_ssp_device_mode mode;
};
static const struct of_device_id zx29_spi_of_match[];
/****************************************************************************
* Local Macros
****************************************************************************/
#define CONFIG_SPI_DMA_ENGINE
#define SPI_PSM_CONTROL (0) //(1)//qhf
/*
* This macro is used to define some register default values.
* reg is masked with mask, the OR:ed with an (again masked)
* val shifted sb steps to the left.
*/
#define SPI_WRITE_BITS(reg, val, mask, sb) \
((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask))))
/*
* This macro is also used to define some default values.
* It will just shift val by sb steps to the left and mask
* the result with mask.
*/
#define GEN_MASK_BITS(val, mask, sb) \
(((val)<<(sb)) & (mask))
#define SPI_GPIO_HIGH 1
#define SPI_GPIO_LOW 0
#define ZX29_CS_ACTIVE 1 /* normally nCS, active low */
#define ZX29_CS_INACTIVE 0
#define DRIVE_TX 0
#define DO_NOT_DRIVE_TX 1
#define DO_NOT_QUEUE_DMA 0
#define QUEUE_DMA 1
#define RX_TRANSFER BIT(0)
#define TX_TRANSFER BIT(1)
/* registers */
#define SPI_VER_REG_OFFSET (0x00)
#define SPI_COM_CTRL_OFFSET (0x04)
#define SPI_FMT_CTRL_OFFSET (0x08)
#define SPI_DR_OFFSET (0x0C)
#define SPI_FIFO_CTRL_OFFSET (0x10)
#define SPI_FIFO_SR_OFFSET (0x14)
#define SPI_INTR_EN_OFFSET (0x18)
#define SPI_INTR_SR_OFFSET (0x1C)
#define SPI_TIMING_OFFSET (0x20)
/*
* SPI Version Register - SPI_VER_REG
*/
#define SPI_VER_REG_MASK_Y (0xFFUL << 16)
#define SPI_VER_REG_MASK_X (0xFFUL << 24)
/*
* SPI Common Control Register - SPI_COM_CTRL
*/
#define SPI_COM_CTRL_MASK_LBM (0x1UL << 0)
#define SPI_COM_CTRL_MASK_SSPE (0x1UL << 1)
#define SPI_COM_CTRL_MASK_MS (0x1UL << 2)
#define SPI_COM_CTRL_MASK_SOD (0x1UL << 3)
#define SPI_COM_CTRL_MASK_SSPE_BACK (0x1UL << 4)
/*
* SPI Format Control Register - SPI_FMT_CTRL
*/
#define SPI_FMT_CTRL_MASK_FRF (0x3UL << 0)
#define SPI_FMT_CTRL_MASK_POL (0x1UL << 2)
#define SPI_FMT_CTRL_MASK_PHA (0x1UL << 3)
#define SPI_FMT_CTRL_MASK_DSS (0x1FUL << 4)
/*
* SPI FIFO Control Register - SPI_FIFO_CTRL
*/
#define SPI_FIFO_CTRL_MASK_RX_DMA_EN (0x1UL << 2)
#define SPI_FIFO_CTRL_MASK_TX_DMA_EN (0x1UL << 3)
#define SPI_FIFO_CTRL_MASK_RX_FIFO_THRES (0xFUL << 4)
#define SPI_FIFO_CTRL_MASK_TX_FIFO_THRES (0xFUL << 8)
/*
* SPI FIFO Status Register - SPI_FIFO_SR
*/
#define SPI_FIFO_SR_MASK_RX_BEYOND_THRES (0x1UL << 0)
#define SPI_FIFO_SR_MASK_TX_BEYOND_THRES (0x1UL << 1)
#define SPI_FIFO_SR_MASK_RX_FIFO_FULL (0x1UL << 2)
#define SPI_FIFO_SR_MASK_TX_FIFO_EMPTY (0x1UL << 3)
#define SPI_FIFO_SR_MASK_BUSY (0x1UL << 4)
#define SPI_FIFO_SR_SHIFT_RX_CNT 5
#define SPI_FIFO_SR_MASK_RX_FIFO_CNTR (0x1fUL << SPI_FIFO_SR_SHIFT_RX_CNT)
#define SPI_FIFO_SR_SHIFT_TX_CNT 10
#define SPI_FIFO_SR_MASK_TX_FIFO_CNTR (0x1fUL << SPI_FIFO_SR_SHIFT_TX_CNT)
/*
* SPI Interrupt Enable Register - SPI_INTR_EN
*/
#define SPI_INTR_EN_MASK_RX_OVERRUN_IE (0x1UL << 0)
#define SPI_INTR_EN_MASK_TX_UNDERRUN_IE (0x1UL << 1)
#define SPI_INTR_EN_MASK_RX_FULL_IE (0x1UL << 2)
#define SPI_INTR_EN_MASK_TX_EMPTY_IE (0x1UL << 3)
#define SPI_INTR_EN_MASK_RX_THRES_IE (0x1UL << 4)
#define SPI_INTR_EN_MASK_TX_THRES_IE (0x1UL << 5)
/*
* SPI Interrupt Status Register OR Interrupt Clear Register - SPI_INTR_SR_SCLR
*/
#define SPI_INTR_SR_SCLR_MASK_RX_OVERRUN_INTR (0x1UL << 0)
#define SPI_INTR_SR_SCLR_MASK_TX_UNDERRUN_INTR (0x1UL << 1)
#define SPI_INTR_SR_SCLR_MASK_RX_FULL_INTR (0x1UL << 2)
#define SPI_INTR_SR_SCLR_MASK_TX_EMPTY_INTR (0x1UL << 3)
#define SPI_INTR_SR_SCLR_MASK_RX_THRES_INTR (0x1UL << 4)
#define SPI_INTR_SR_SCLR_MASK_TX_THRES_INTR (0x1UL << 5)
/*
* SPI TIMING Register
*/
#define SPI_TIMING_MASK_T_CS_DESEL 0xFUL
/* SPI WCLK Freqency */
#define SPI_SPICLK_FREQ_26M (26*1000*1000)
#define SPI_SPICLK_FREQ_104M (104*1000*1000)
#define SPI_SPICLK_FREQ_156M (156*1000*1000)
#define CLEAR_ALL_INTERRUPTS 0x3FUL
#define ENABLE_ALL_INTERRUPTS 0x3FUL
#define ENABLE_INTERRUPTS 0x03UL
#define DISABLE_ALL_INTERRUPTS 0x0UL
/*
 * Message State
 * we use the spi_message.state (void *) pointer to
 * hold a single state value, that's why all this
 * (void *) casting is done here.
 */
enum zx29_spi_state {
	STATE_START,	/* message accepted, transfer not yet begun */
	STATE_RUNNING,	/* a transfer of this message is in progress */
	STATE_DONE,	/* message finished */
	STATE_ERROR	/* message aborted */
};
/*
* SPI State - Whether Enabled or Disabled
*/
#define SPI_DISABLED (0)
#define SPI_ENABLED (1)
/*
* SPI DMA State - Whether DMA Enabled or Disabled
*/
#define SPI_DMA_DISABLED (0)
#define SPI_DMA_ENABLED (1)
/*
* SPI SOD State - Whether SOD Enabled or Disabled
*/
#define SPI_SOD_DISABLED (1)
#define SPI_SOD_ENABLED (0)
#define SPI_SLAVE_MODE (1)
#define SPI_MASTER_MODE (0)
/*
 * SPI TRANSFER DELAY CFG
 * DELAY TIME ~= (1 / Baud_Rate) * 8 + (SPI_TIMING_T_CS_DESEL + 1) / Baud_Rate;
 */
/*
 * RX/TX FIFO trigger thresholds, in FIFO entries (1..16), programmed into
 * the THRES fields of SPI_FIFO_CTRL (value 0 means threshold of 1 entry).
 */
enum spi_fifo_threshold_level {
	SPI_FIFO_THRES_1,
	SPI_FIFO_THRES_2,
	SPI_FIFO_THRES_3,
	SPI_FIFO_THRES_4,
	SPI_FIFO_THRES_5,
	SPI_FIFO_THRES_6,
	SPI_FIFO_THRES_7,
	SPI_FIFO_THRES_8,
	SPI_FIFO_THRES_9,
	SPI_FIFO_THRES_10,
	SPI_FIFO_THRES_11,
	SPI_FIFO_THRES_12,
	SPI_FIFO_THRES_13,
	SPI_FIFO_THRES_14,
	SPI_FIFO_THRES_15,
	SPI_FIFO_THRES_16
};
/*
* SPI Clock Parameter ranges
*/
#define DIV_MIN 0x00
#define DIV_MAX 0x0F
#define SPI_POLLING_TIMEOUT 1000
/*
 * The type of reading going on on this chip: element width used when
 * copying RX FIFO entries into the rx buffer (see reader()/readwriter()).
 */
enum spi_reading {
	READING_NULL,	/* no rx buffer: data is read and discarded */
	READING_U8,
	READING_U16,
	READING_U32
};
/*
 * The type of writing going on on this chip: element width used when
 * pushing tx buffer contents into the TX FIFO (see writer()/readwriter()).
 */
enum spi_writing {
	WRITING_NULL,	/* no tx buffer: zeros are clocked out */
	WRITING_U8,
	WRITING_U16,
	WRITING_U32
};
/**
 * struct vendor_data - vendor-specific config parameters
 * for PL022 derivates
 * @fifodepth: depth of FIFOs (both)
 * @max_bpw: maximum number of bits per word
 * @loopback: whether the IP block supports loopback mode
 */
struct vendor_data {
	int fifodepth;	/* depth of the RX/TX FIFOs, in entries */
	int max_bpw;	/* maximum supported bits per word */
	bool loopback;	/* IP block supports internal loopback mode */
};
/*
 * Pinctrl/GPIO bookkeeping for one SSP controller instance.
 * The pinctrl states (names per devicetree) cover muxing the pins between
 * the SSP function and plain GPIO, plus dedicated states for chip select.
 */
struct zx29_ssp_pins
{
	struct device *dev;
	struct pinctrl *pctrl;
	struct pinctrl_state *pfunc;		/* pins muxed to SSP function */
	struct pinctrl_state *pgpio;		/* pins muxed to GPIO */
	struct pinctrl_state *pcs_gpio_active;	/* CS as GPIO, active state */
	struct pinctrl_state *pcs_func;		/* CS muxed to SSP function */
	struct pinctrl_state *pcs_gpio_sleep;	/* CS as GPIO, sleep state */
	int gpio_cs;
	int gpio_clk;
	int gpio_tx;
	int gpio_rx;
};
/* One entry per controller, indexed by device id (see default_cs_control()). */
struct zx29_ssp_pins ssp_pins[4];
/**
 * struct zx29_spi - This is the private SSP driver data structure
 * @name: human-readable controller name
 * @pdev: platform device model hookup
 * @vendor: vendor data for the IP block
 * @phybase: the physical memory where the SSP device resides
 * @virtbase: the virtual memory where the SSP is mapped
 * @irq: controller interrupt number
 * @pclk: SPI controller work clock
 * @spi_clk: outgoing clock "SPICLK" for the SPI bus
 * @clkfreq: requested SPICLK frequency
 * @master: SPI framework hookup
 * @master_info: controller-specific data from machine setup
 * @pump_transfers: Tasklet used in Interrupt Transfer mode
 * @cur_msg: Pointer to current spi_message being processed
 * @cur_transfer: Pointer to current spi_transfer
 * @cur_chip: pointer to current clients chip(assigned from controller_state)
 * @next_msg_cs_active: the next message in the queue has been examined
 *  and it was found that it uses the same chip select as the previous
 *  message, so we left it active after the previous transfer, and it's
 *  active already.
 * @tx: current position in TX buffer to be read
 * @tx_end: end position in TX buffer to be read
 * @rx: current position in RX buffer to be written
 * @rx_end: end position in RX buffer to be written
 * @read: the type of read currently going on
 * @write: the type of write currently going on
 * @exp_fifo_level: expected FIFO level
 * @rx_lev_trig: RX FIFO DMA/interrupt trigger level
 * @tx_lev_trig: TX FIFO DMA/interrupt trigger level
 * @dma_rx_channel: optional channel for RX DMA
 * @dma_tx_channel: optional channel for TX DMA
 * @sgt_rx: scattertable for the RX transfer
 * @sgt_tx: scattertable for the TX transfer
 * @dummypage: a dummy page used for driving data on the bus with DMA
 * @sema_dma: completion semaphore for master-mode DMA transfers
 * @wait: slave-mode DMA completion waitqueue (T106BUG-616)
 * @trans_done: slave-mode DMA completion flag (T106BUG-616)
 * @mode: whether this instance runs as SPI master or slave
 */
struct zx29_spi {
	char name[16];				/* controller instance name */
	struct platform_device *pdev;
	struct vendor_data *vendor;
	resource_size_t phybase;		/* physical base of the register block */
	void __iomem *virtbase;			/* ioremapped register base */
	unsigned int irq;
	struct clk *pclk;/* spi controller work clock */
	struct clk *spi_clk;/* spi clk line clock */
	u32 clkfreq;
	struct spi_master *master;
	struct zx29_spi_controller *master_info;
	/* Message per-transfer pump */
	struct tasklet_struct pump_transfers;
	struct spi_message *cur_msg;
	struct spi_transfer *cur_transfer;
	struct chip_data *cur_chip;
	bool next_msg_cs_active;
	void *tx;				/* cursor into current TX buffer */
	void *tx_end;				/* end of current TX buffer */
	void *rx;				/* cursor into current RX buffer */
	void *rx_end;				/* end of current RX buffer */
	enum spi_reading read;
	enum spi_writing write;
	u32 exp_fifo_level;			/* software mirror of FIFO occupancy */
	enum spi_rx_level_trig rx_lev_trig;
	enum spi_tx_level_trig tx_lev_trig;
	/* DMA settings */
#ifdef CONFIG_SPI_DMA_ENGINE
	struct dma_chan *dma_rx_channel;
	struct dma_chan *dma_tx_channel;
	struct sg_table sgt_rx;
	struct sg_table sgt_tx;
	char *dummypage;
	unsigned int dma_running;		/* RX_TRANSFER/TX_TRANSFER bits in flight */
//	struct mutex spi_lock;
#endif
#if defined(CONFIG_DEBUG_FS)
	struct dentry * spi_root;
	struct debugfs_regset32 spi_regset;
	u32 spi_poll_cnt;			/* debug counter: polled transfers */
	u32 spi_dma_cnt;			/* debug counter: DMA transfers */
#endif
#if SPI_PSM_CONTROL
	struct wake_lock psm_lock;
#endif
	struct semaphore sema_dma;		/* master mode: posted by dma_callback() */
	/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck start */
	wait_queue_head_t wait;			/* slave mode: DMA completion waitqueue */
	int trans_done;				/* slave mode: DMA completion flag */
	/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck end */
	u8 iface_mode;				/* one of the SPI_*_FORMAT values below */
#define SPI_MOTO_FORMAT 0x00
#define SPI_TI_FORMAT 0x01
#define SPI_ISI_FORMAT 0x02
	enum zx29_ssp_device_mode mode;		/* master or slave instance */
	int (*zx29_flush_rxfifo) (struct zx29_spi *zx29spi,void *buf);
};
/**
 * struct chip_data - To maintain runtime state of SSP for each client chip
 * @ver_reg: cached value of the SPI version register
 * @com_ctrl: value of the common control register (SPI_COM_CTRL)
 * @fmt_ctrl: value of the format control register (SPI_FMT_CTRL)
 * @fifo_ctrl: value of the FIFO control register (SPI_FIFO_CTRL)
 * @timing: value of the timing register (SPI_TIMING)
 * @n_bytes: how many bytes(power of 2) reqd for a given data width of client
 * @clk_div: SPI clock divider
 * @enable_dma: Whether to enable DMA or not
 * @enable_trans_gap: whether to wait for bus idle between transmitted words
 * @read: the type of read to be used when doing xfer for this chip
 * @write: the type of write to be used when doing xfer for this chip
 * @cs_control: chip select callback provided by chip
 * @xfer_type: polling/interrupt/DMA
 *
 * Runtime state of the SSP controller, maintained per chip,
 * This would be set according to the current message that would be served
 */
struct chip_data {
	u32 ver_reg;		/* cached SPI_VER_REG value */
	u32 com_ctrl;		/* SPI_COM_CTRL shadow */
	u32 fmt_ctrl;		/* SPI_FMT_CTRL shadow */
	u32 fifo_ctrl;		/* SPI_FIFO_CTRL shadow */
	u32 timing;		/* SPI_TIMING shadow */
//	u32 intr_en;
	u8 n_bytes;		/* bytes per FIFO element for current data width */
	u8 clk_div;/* spi clk divider */
	bool enable_dma;
	bool enable_trans_gap;	/* wait for bus idle after each word (see readwriter()) */
	enum spi_reading read;
	enum spi_writing write;
	void (*cs_control) (int dev_id,u32 command);
	int xfer_type;		/* polling/interrupt/DMA */
};
//struct semaphore g_SpiTransferSemaphore;
/* Per-instance driver state, indexed by device id (up to 4 controllers). */
struct zx29_spi *g_zx29_spi[4];
#if SPI_PSM_CONTROL
static volatile unsigned int spi_active_count = 0;
/*
 * zx29_spi_set_active - block CPU idle and hold a wake lock while SPI is busy
 * @lock: per-controller wake lock to take
 *
 * The first user marks the SPI idle-flag busy via zx_cpuidle_set_busy();
 * spi_active_count tracks nesting. Interrupts are disabled around the
 * count/flag update to keep it atomic on this CPU. Compiled in only when
 * SPI_PSM_CONTROL is non-zero (currently 0, so this code is disabled).
 */
static void zx29_spi_set_active(struct wake_lock *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	if(spi_active_count == 0)
	{
		zx_cpuidle_set_busy(IDLE_FLAG_SPI);
	}
	spi_active_count++;
	local_irq_restore(flags);
	wake_lock(lock);
}
/*
 * zx29_spi_set_idle - release the wake lock taken by zx29_spi_set_active()
 * @lock: per-controller wake lock to release
 *
 * NOTE(review): the use-count decrement / zx_cpuidle_set_free() path is
 * compiled out (#if 0 below, "qhf" marker), so the idle flag is never
 * returned to "free" here and spi_active_count only grows — confirm this
 * is intentional before re-enabling.
 */
static void zx29_spi_set_idle(struct wake_lock *lock)
{
	unsigned long flags;

	local_irq_save(flags);
#if 0 //qhf
	spi_active_count--;
	if(spi_active_count == 0)
	{
		zx_cpuidle_set_free(IDLE_FLAG_SPI);
	}
#endif
	local_irq_restore(flags);
	wake_unlock(lock);
}
#endif
static int zx29_do_interrupt_dma_transfer(struct zx29_spi *zx29spi);
/**
 * default_cs_control - fallback chip select handler
 * @dev_id: controller index into ssp_pins[]
 * @command: ZX29_CS_ACTIVE or ZX29_CS_INACTIVE
 *
 * Used when the client supplies no cs_control callback. The chip select
 * line is active-low, so asserting the chip drives the GPIO low and
 * deasserting drives it high.
 */
static void default_cs_control(int dev_id, u32 command)
{
	int level = command ? SPI_GPIO_LOW : SPI_GPIO_HIGH;

	gpio_set_value(ssp_pins[dev_id].gpio_cs, level);
}
/**
 * flush - drain the RX FIFO and wait for the controller to go idle
 * @zx29spi: SSP driver private data structure
 *
 * Repeatedly reads the data register until the RX FIFO count reports
 * empty, re-checking while the BUSY flag is still set, bounded by a loop
 * budget derived from loops_per_jiffy. The "140a000.ssp" instance keeps a
 * 5-bit RX count in SPI_FIFO_SR_MASK_RX_FIFO_CNTR; other instances expose
 * a 7-bit count at the same shift.
 *
 * Returns the remaining loop budget (0 means the wait expired with the
 * controller still busy). NOTE(review): 'limit' is unsigned long but the
 * return type is int — harmless for small budgets, worth confirming.
 */
static int flush(struct zx29_spi *zx29spi)
{
	unsigned long limit = loops_per_jiffy << 1;
	uint32_t rx_fifo_cnt_msk = SPI_FIFO_SR_MASK_RX_FIFO_CNTR;

	dev_dbg(&zx29spi->pdev->dev, "flush\n");
	/* Flushing FIFO by software cannot clear RX DMA Request. */
	do {
		if(0 == strcmp(zx29spi->pdev->name,"140a000.ssp")) {
			while (readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & rx_fifo_cnt_msk)
				readl((SPI_DR_OFFSET+zx29spi->virtbase));
		}else {
			while ((readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase))>>SPI_FIFO_SR_SHIFT_RX_CNT)&0x7F)
				readl((SPI_DR_OFFSET+zx29spi->virtbase));
		}
	} while ((readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY) && limit--);
	/* FIFO drained: reset the software mirror of its fill level. */
	zx29spi->exp_fifo_level = 0;
	return limit;
}
/**
 * restore_state - load the cached per-chip register configuration
 * @zx29spi: SSP driver private data structure
 *
 * Writes the chip_data register shadows back into the controller. Note
 * that ENABLE_INTERRUPTS (0x3) leaves the RX-overrun and TX-underrun
 * interrupt enables set (the old "disable all interrupts" comment was
 * wrong); all pending interrupt status is cleared. In slave mode the MS
 * and SSPE bits are forced on in the com_ctrl shadow before it is written,
 * so a slave instance is always left enabled.
 */
static void restore_state(struct zx29_spi *zx29spi)
{
	struct chip_data *chip = zx29spi->cur_chip;

	/* keep only RX-overrun/TX-underrun interrupts enabled, clear all status */
	writel(ENABLE_INTERRUPTS, (SPI_INTR_EN_OFFSET+zx29spi->virtbase));
	writel(CLEAR_ALL_INTERRUPTS, (SPI_INTR_SR_OFFSET+zx29spi->virtbase));
	writel(chip->fmt_ctrl, (SPI_FMT_CTRL_OFFSET+zx29spi->virtbase));
	writel(chip->fifo_ctrl, (SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
//	writel(chip->intr_en, SPI_INTR_EN(zx297520v2spi->virtbase));
	if(zx29spi->mode == ZX29_SSP_SLAVE_TYPE)
		chip->com_ctrl |= GEN_MASK_BITS(SPI_SLAVE, SPI_COM_CTRL_MASK_MS, 2)|GEN_MASK_BITS(SPI_ENABLED, SPI_COM_CTRL_MASK_SSPE, 1);
	writel(chip->com_ctrl, (SPI_COM_CTRL_OFFSET + zx29spi->virtbase));
	writel(chip->timing, (SPI_TIMING_OFFSET + zx29spi->virtbase));
}
/*
* Default spi Register Values
*/
#define DEFAULT_SPI_COM_CTRL ( \
GEN_MASK_BITS(LOOPBACK_DISABLED, SPI_COM_CTRL_MASK_LBM, 0) | \
GEN_MASK_BITS(SPI_DISABLED, SPI_COM_CTRL_MASK_SSPE, 1) | \
GEN_MASK_BITS(SPI_MASTER, SPI_COM_CTRL_MASK_MS, 2) \
)
/*
* Default spi Register Values
*/
#define DEFAULT_SPI_SLAVE_COM_CTRL ( \
GEN_MASK_BITS(LOOPBACK_DISABLED, SPI_COM_CTRL_MASK_LBM, 0) | \
GEN_MASK_BITS(SPI_DISABLED, SPI_COM_CTRL_MASK_SSPE, 1) | \
GEN_MASK_BITS(SPI_SLAVE, SPI_COM_CTRL_MASK_MS, 2) \
)
#define DEFAULT_SPI_FMT_CTRL ( \
GEN_MASK_BITS(SPI_INTERFACE_MOTOROLA_SPI, SPI_FMT_CTRL_MASK_FRF, 0) | \
GEN_MASK_BITS(SPI_CLK_POL_IDLE_LOW, SPI_FMT_CTRL_MASK_POL, 2) | \
GEN_MASK_BITS(SPI_CLK_FIRST_EDGE, SPI_FMT_CTRL_MASK_PHA, 3) | \
GEN_MASK_BITS(SPI_DATA_BITS_8, SPI_FMT_CTRL_MASK_DSS, 4) \
)
#define DEFAULT_SPI_FIFO_CTRL ( \
GEN_MASK_BITS(SPI_DMA_DISABLED, SPI_FIFO_CTRL_MASK_RX_DMA_EN, 2) | \
GEN_MASK_BITS(SPI_DMA_DISABLED, SPI_FIFO_CTRL_MASK_TX_DMA_EN, 3) | \
GEN_MASK_BITS(SPI_FIFO_THRES_8, SPI_FIFO_CTRL_MASK_RX_FIFO_THRES, 4) | \
GEN_MASK_BITS(SPI_FIFO_THRES_8, SPI_FIFO_CTRL_MASK_TX_FIFO_THRES, 8) \
)
/**
 * load_spi_default_config - program the power-on default SPI configuration
 * @zx29spi: SSP driver private data structure
 *
 * Clears all pending interrupt status, enables the RX-overrun/TX-underrun
 * interrupts (ENABLE_INTERRUPTS = 0x3), and writes the default format and
 * FIFO settings. The common-control default depends on whether this
 * instance probes as master or slave (MS bit differs; SSPE left disabled).
 */
static void load_spi_default_config(struct zx29_spi *zx29spi)
{
	writel(CLEAR_ALL_INTERRUPTS, (SPI_INTR_SR_OFFSET+zx29spi->virtbase));
	writel(ENABLE_INTERRUPTS, (SPI_INTR_EN_OFFSET+zx29spi->virtbase));
	writel(DEFAULT_SPI_FMT_CTRL, (SPI_FMT_CTRL_OFFSET+zx29spi->virtbase));
	writel(DEFAULT_SPI_FIFO_CTRL, (SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
	if(zx29spi->mode == ZX29_SSP_MASTER_TYPE) {
		writel(DEFAULT_SPI_COM_CTRL, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
	}
	else {
		writel(DEFAULT_SPI_SLAVE_COM_CTRL, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
	}
}
/**
 * reader - drain available RX FIFO entries into the current rx buffer
 * @zx29spi: SSP driver private data structure
 *
 * Reads at most the number of entries the RX count field reports,
 * stopping early when the rx buffer is full. Element width follows
 * zx29spi->read; the buffer cursor advances by n_bytes per element and
 * exp_fifo_level is decremented to mirror the hardware FIFO occupancy.
 *
 * Returns the number of bytes copied out of the FIFO.
 */
static unsigned reader(struct zx29_spi *zx29spi)
{
	uint32_t fifo_sr = 0,rd_max = 0;
	unsigned len = 0;
	uint32_t rx_fifo_cnt_msk = SPI_FIFO_SR_MASK_RX_FIFO_CNTR;

	/*
	 * The RX count field layout differs between instances: "140a000.ssp"
	 * uses the 5-bit SPI_FIFO_SR_MASK_RX_FIFO_CNTR field, while other
	 * instances expose a 7-bit count at the same shift.
	 */
	fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase));
	if(!strcmp(zx29spi->pdev->name,"140a000.ssp")) {
		rd_max = (fifo_sr & rx_fifo_cnt_msk) >> SPI_FIFO_SR_SHIFT_RX_CNT;
	}else {
		rd_max = (fifo_sr>>SPI_FIFO_SR_SHIFT_RX_CNT)&0x7f;
	}
	//read rx fifo to empty first
	while ((zx29spi->rx < zx29spi->rx_end) && rd_max--) {
		switch (zx29spi->read) {
		case READING_NULL:
			/* no rx buffer for this element: read and discard */
			readw((SPI_DR_OFFSET+zx29spi->virtbase));
			break;
		case READING_U8:
			*(u8 *) (zx29spi->rx) =
				readw((SPI_DR_OFFSET+zx29spi->virtbase)) & 0xFFU;
			break;
		case READING_U16:
			*(u16 *) (zx29spi->rx) =
				(u16) readw((SPI_DR_OFFSET+zx29spi->virtbase));
			break;
		case READING_U32:
			*(u32 *) (zx29spi->rx) =
				readl((SPI_DR_OFFSET+zx29spi->virtbase));
			break;
		}
		len += zx29spi->cur_chip->n_bytes;
		zx29spi->rx += (zx29spi->cur_chip->n_bytes);
		zx29spi->exp_fifo_level--;
	}
	return len;
}
/**
 * writer - push tx buffer contents into the TX FIFO
 * @zx29spi: SSP driver private data structure
 *
 * Writes up to the limit derived from the TX count field of SPI_FIFO_SR,
 * stopping early when the tx buffer is exhausted. One slot is held back
 * while the controller is busy (guard against overfilling mid-shift).
 * Element width follows zx29spi->write; the cursor advances by n_bytes
 * per element and exp_fifo_level is incremented to mirror FIFO occupancy.
 *
 * Returns the number of bytes pushed into the FIFO.
 */
static unsigned writer(struct zx29_spi *zx29spi)
{
	uint32_t fifo_sr;
	uint32_t wr_max;
	uint32_t tx_fifo_cnt_msk = SPI_FIFO_SR_MASK_TX_FIFO_CNTR;
	uint32_t tx_fifo_cnt_pos = SPI_FIFO_SR_SHIFT_TX_CNT;
	unsigned len = 0;

	/*
	 * TX count field layout differs between instances: "140a000.ssp"
	 * uses SPI_FIFO_SR_MASK_TX_FIFO_CNTR at bit 10; other instances use
	 * a 5-bit field at bit 12.
	 */
	fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase));
	if(!strcmp(zx29spi->pdev->name,"140a000.ssp")) {
		wr_max = (fifo_sr & tx_fifo_cnt_msk) >> tx_fifo_cnt_pos;
	}else {
		wr_max = (fifo_sr>>12)&0x1f;
	}
	/* while busy, keep one slot in reserve (never drops wr_max below 0) */
	if ((fifo_sr & SPI_FIFO_SR_MASK_BUSY) && wr_max) {
		wr_max--;
	}
	while ((zx29spi->tx < zx29spi->tx_end) && wr_max--) {
		switch (zx29spi->write) {
		case WRITING_NULL:
			/* no tx buffer: clock out zeros */
			writew(0x0, (SPI_DR_OFFSET+zx29spi->virtbase));
			break;
		case WRITING_U8:
			writew(*(u8 *) (zx29spi->tx), (SPI_DR_OFFSET+zx29spi->virtbase));
			break;
		case WRITING_U16:
			writew((*(u16 *) (zx29spi->tx)), (SPI_DR_OFFSET+zx29spi->virtbase));
			break;
		case WRITING_U32:
			writel(*(u32 *) (zx29spi->tx), (SPI_DR_OFFSET+zx29spi->virtbase));
			break;
		}
		len += zx29spi->cur_chip->n_bytes;
		zx29spi->tx += (zx29spi->cur_chip->n_bytes);
		zx29spi->exp_fifo_level++;
	}
	return len;
}
/**
 * readwriter - one polled RX-drain / TX-fill pass over the FIFOs
 * @zx29spi: SSP driver private data structure
 *
 * Drains the RX FIFO first (up to the reported RX count), then fills the
 * TX FIFO (up to the reported TX count, holding one slot back while the
 * controller is busy). If the current chip enables enable_trans_gap, a
 * bus-idle wait (up to 10 ms) is inserted after every written word,
 * spacing the words on the wire; otherwise a single bus-idle wait (up to
 * 100 ms) is done after the whole burst. Either wait logs if the bus is
 * still busy when the timeout expires.
 */
static void readwriter(struct zx29_spi *zx29spi)
{
	uint32_t fifo_sr;
	uint32_t rd_max, wr_max;
	uint32_t rx_fifo_cnt_msk;
	uint32_t tx_fifo_cnt_msk;
	uint32_t tx_fifo_cnt_pos;
	ktime_t k_time_start = 0;
	ktime_t diff_ns = 0;

	rx_fifo_cnt_msk = SPI_FIFO_SR_MASK_RX_FIFO_CNTR;
	tx_fifo_cnt_msk = SPI_FIFO_SR_MASK_TX_FIFO_CNTR;
	tx_fifo_cnt_pos = SPI_FIFO_SR_SHIFT_TX_CNT;
	/*
	 * FIFO count field layout differs between instances: "140a000.ssp"
	 * uses the masked 5-bit fields, other instances use a 7-bit RX count
	 * at bit 5 and a 5-bit TX count at bit 12.
	 */
	fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase));
	if(!strcmp(zx29spi->pdev->name,"140a000.ssp")) {
		rd_max = (fifo_sr & rx_fifo_cnt_msk) >> SPI_FIFO_SR_SHIFT_RX_CNT;
		wr_max = (fifo_sr & tx_fifo_cnt_msk) >> tx_fifo_cnt_pos;
	}else {
		rd_max = (fifo_sr>>SPI_FIFO_SR_SHIFT_RX_CNT)&0x7f;
		wr_max = (fifo_sr>>12)&0x1f;
	}
	/* while busy, keep one TX slot in reserve */
	if ((fifo_sr & SPI_FIFO_SR_MASK_BUSY) && wr_max) {
		wr_max--;
	}
	//read rx fifo to empty first
	while ((zx29spi->rx < zx29spi->rx_end) && rd_max--) {
		switch (zx29spi->read) {
		case READING_NULL:
			/* no rx buffer: read and discard */
			readw((SPI_DR_OFFSET+zx29spi->virtbase));
			break;
		case READING_U8:
			*(u8 *) (zx29spi->rx) =
				readw((SPI_DR_OFFSET+zx29spi->virtbase)) & 0xFFU;
			break;
		case READING_U16:
			*(u16 *) (zx29spi->rx) =
				(u16) readw((SPI_DR_OFFSET+zx29spi->virtbase));
			break;
		case READING_U32:
			*(u32 *) (zx29spi->rx) =
				readl((SPI_DR_OFFSET+zx29spi->virtbase));
			break;
		}
		zx29spi->rx += (zx29spi->cur_chip->n_bytes);
		zx29spi->exp_fifo_level--;
	}
	//write
	while ((zx29spi->tx < zx29spi->tx_end) && wr_max--) {
		switch (zx29spi->write) {
		case WRITING_NULL:
			/* no tx buffer: clock out zeros */
			writew(0x0, (SPI_DR_OFFSET+zx29spi->virtbase));
			break;
		case WRITING_U8:
			writew(*(u8 *) (zx29spi->tx), (SPI_DR_OFFSET+zx29spi->virtbase));
			break;
		case WRITING_U16:
			writew((*(u16 *) (zx29spi->tx)), (SPI_DR_OFFSET+zx29spi->virtbase));
			break;
		case WRITING_U32:
			writel(*(u32 *) (zx29spi->tx), (SPI_DR_OFFSET+zx29spi->virtbase));
			break;
		}
		zx29spi->tx += (zx29spi->cur_chip->n_bytes);
		zx29spi->exp_fifo_level++;
		/* per-word gap: wait (<=10ms) for the word to fully shift out */
		if(zx29spi->cur_chip->enable_trans_gap) {
			k_time_start = ktime_get();
			do {
				diff_ns = ktime_sub(ktime_get(),k_time_start);
				fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
				cpu_relax();
			}
			while (fifo_sr && diff_ns < 10000000); //10ms
			if(diff_ns >= 10000000) {
				fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
				if(fifo_sr)
					dev_info(&zx29spi->pdev->dev, "bus busy time_start=%lld diff_ns=%lld \n",k_time_start,diff_ns);
			}
		}
	}
	/* no per-word gap: one bus-idle wait (<=100ms) after the whole burst */
	if(!zx29spi->cur_chip->enable_trans_gap) {
		k_time_start = ktime_get();
		do {
			diff_ns = ktime_sub(ktime_get(),k_time_start);
			fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
			cpu_relax();
		}while (fifo_sr && diff_ns < 100000000); //100ms
		if(diff_ns >= 100000000) {
			fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
			if(fifo_sr)
				dev_info(&zx29spi->pdev->dev, "bus busy.. time_start=%lld diff_ns=%lld \n",k_time_start,diff_ns);
		}
	}
	/*
	 * When we exit here the TX FIFO should be full and the RX FIFO
	 * should be empty
	 */
}
/*
* This DMA functionality is only compiled in if we have
* access to the generic DMA devices/DMA engine.
*/
#ifdef CONFIG_SPI_DMA_ENGINE
/*
 * zx29_fill_txfifo - clock out dummy zero words to drive an RX-only DMA
 * @zx29spi: SSP driver private data structure
 *
 * Used on the master-mode RX-without-TX path of configure_dma(): the RX
 * DMA only sees data if something is shifted out, so this busy-loops
 * writing zeros until cur_transfer->len bytes have been pushed. Each pass
 * re-reads SPI_FIFO_SR, reserves one slot while busy, and additionally
 * subtracts the current RX occupancy (rd_max) so the RX FIFO cannot
 * overflow while the DMA drains it.
 */
static void zx29_fill_txfifo(struct zx29_spi *zx29spi)
{
	uint32_t fifo_sr;
	int32_t rd_max, wr_max;
	uint32_t rx_fifo_cnt_msk;
	uint32_t tx_fifo_cnt_msk;
	uint32_t tx_fifo_cnt_pos;
	unsigned cur_transfer_len;

	rx_fifo_cnt_msk = SPI_FIFO_SR_MASK_RX_FIFO_CNTR;
	tx_fifo_cnt_msk = SPI_FIFO_SR_MASK_TX_FIFO_CNTR;
	tx_fifo_cnt_pos = SPI_FIFO_SR_SHIFT_TX_CNT;
	if(!zx29spi) {
		printk("zx29spi err! \r\n");
		return;
	}
	cur_transfer_len = zx29spi->cur_transfer->len;
	while (zx29spi->tx < zx29spi->tx_end && cur_transfer_len) {
		fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase));
#if 0
		rd_max = (fifo_sr & rx_fifo_cnt_msk) >> SPI_FIFO_SR_SHIFT_RX_CNT;
		wr_max = (fifo_sr & tx_fifo_cnt_msk) >> tx_fifo_cnt_pos;
#else
		/* count field layout differs between controller instances */
		if(!strcmp(zx29spi->pdev->name,"140a000.ssp")) {
			rd_max = (fifo_sr & rx_fifo_cnt_msk) >> SPI_FIFO_SR_SHIFT_RX_CNT;
			wr_max = (fifo_sr & tx_fifo_cnt_msk) >> tx_fifo_cnt_pos;
		}else {
			rd_max = (fifo_sr>>SPI_FIFO_SR_SHIFT_RX_CNT)&0x7f;
			wr_max = (fifo_sr>>12)&0x1f;
		}
#endif
		if (fifo_sr & SPI_FIFO_SR_MASK_BUSY) {
			wr_max--;
		}
		/* leave headroom for what is already sitting in the RX FIFO */
		wr_max -= rd_max;
		wr_max = (wr_max > 0) ? wr_max : 0;
		//write
		while ((zx29spi->tx < zx29spi->tx_end) && wr_max--) {
			writew(0x0, (SPI_DR_OFFSET+zx29spi->virtbase));
			zx29spi->tx += (zx29spi->cur_chip->n_bytes);
			cur_transfer_len -= zx29spi->cur_chip->n_bytes;
		}
		cpu_relax();
	}
}
/**
 * dma_callback - DMA engine completion callback (runs in tasklet context)
 * @data: the zx29_spi instance the finished descriptor belongs to
 *
 * Master mode: posts sema_dma, which the transfer path is blocked on.
 *
 * Slave mode (T106BUG-616 path): signals the waitqueue. The completion
 * flag MUST be set before wake_up() — with the old order (wake first,
 * then set trans_done) a waiter woken between the two statements re-checks
 * its condition, still sees trans_done == false, goes back to sleep, and
 * never receives another wakeup, reintroducing the very hang this path
 * was added to fix.
 */
static void dma_callback(void *data)
{
	struct zx29_spi *zx29spi = (struct zx29_spi *)data;

	/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck start */
	if (zx29spi->master->slave == true) {
		/* order matters: condition first, then wake the waiter */
		zx29spi->trans_done = true;
		wake_up(&zx29spi->wait);
	} else {
		up(&zx29spi->sema_dma);
	}
	/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck end */
}
/*
static void dma_callback_tx(void *data)
{
struct zx29_spi *zx29spi = (struct zx29_spi *)data;
// printk(KERN_INFO "spi:dma transfer complete tx\n");
printk("%s",__func__);
printk("COM=0x%x,FMT=0x%x,FIFO_CTL=0x%x,FIFO_SR=0x%x\n",readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)),readl((SPI_FMT_CTRL_OFFSET+zx29spi->virtbase)),readl((SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase)),readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)));
//up(&g_SpiTransferSemaphore);
}
*/
/**
 * configure_dma - configures the channels for the next transfer
 * @zx29spi: SSP driver's private data structure
 *
 * Builds RX and TX channel configurations (addresses, direction, burst
 * length/size from the FIFO trigger levels and element widths), applies
 * them via dmaengine_slave_config(), then prepares and submits the
 * descriptors. For a transfer with an rx buffer the callback is attached
 * to the RX descriptor (it finishes last); TX DMA is only submitted when
 * transfer->tx_dma is set, otherwise in master mode the TX FIFO is driven
 * with zeros by zx29_fill_txfifo(). Returns 0 on success, -ENODEV when a
 * channel is missing.
 *
 * NOTE(review): zx29spi->rx/tx are cast to unsigned int for the DMA
 * addresses — this truncates on a 64-bit build; presumably these hold
 * 32-bit DMA addresses here, confirm against the mapping code.
 * NOTE(review): rxdesc/txdesc from device_prep_interleaved_dma() are
 * dereferenced without a NULL check.
 */
static int configure_dma(struct zx29_spi *zx29spi)
{
//	unsigned int pages;
//	int ret;
//	int rx_sglen, tx_sglen;
	dma_channel_def rx_conf;
	dma_channel_def tx_conf;
	struct dma_chan *rxchan = zx29spi->dma_rx_channel;
	struct dma_chan *txchan = zx29spi->dma_tx_channel;
	struct dma_async_tx_descriptor *rxdesc;
	struct dma_async_tx_descriptor *txdesc;
	struct spi_transfer *transfer = zx29spi->cur_transfer;

	/* RX: peripheral data register -> memory */
	rx_conf.src_addr = (SPI_DR_OFFSET+zx29spi->phybase);
	rx_conf.dest_addr = (unsigned int)zx29spi->rx;
	rx_conf.dma_control.tran_mode = TRAN_PERI_TO_MEM;
	rx_conf.dma_control.irq_mode = DMA_ALL_IRQ_ENABLE;
	rx_conf.link_addr = 0;
	/* TX: memory -> peripheral data register */
	tx_conf.src_addr = (unsigned int)zx29spi->tx;
	tx_conf.dest_addr = (SPI_DR_OFFSET+zx29spi->phybase);
	tx_conf.dma_control.tran_mode = TRAN_MEM_TO_PERI;
	tx_conf.dma_control.irq_mode = DMA_ALL_IRQ_ENABLE;
	tx_conf.link_addr = 0;
	/* Check that the channels are available */
	if (!rxchan || !txchan)
		return -ENODEV;
	/*
	 * If supplied, the DMA burstsize should equal the FIFO trigger level.
	 * Notice that the DMA engine uses one-to-one mapping. Since we can
	 * not trigger on 2 elements this needs explicit mapping rather than
	 * calculation.
	 */
	switch (zx29spi->rx_lev_trig) {
	case SPI_RX_1_OR_MORE_ELEM:
		rx_conf.dma_control.src_burst_len = DMA_BURST_LEN_1;
		rx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_1;
		break;
	case SPI_RX_4_OR_MORE_ELEM:
		rx_conf.dma_control.src_burst_len = DMA_BURST_LEN_4;
		rx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_4;
		break;
	case SPI_RX_8_OR_MORE_ELEM:
		rx_conf.dma_control.src_burst_len = DMA_BURST_LEN_8;
		rx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_8;
		break;
	case SPI_RX_16_OR_MORE_ELEM:
		rx_conf.dma_control.src_burst_len = DMA_BURST_LEN_16;
		rx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_16;
		break;
	case SPI_RX_32_OR_MORE_ELEM:
		rx_conf.dma_control.src_burst_len = DMA_BURST_LEN_ALL;
		rx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_ALL;
		break;
	default:
		/* unknown trigger: fall back to half the FIFO depth */
		rx_conf.dma_control.src_burst_len = zx29spi->vendor->fifodepth >> 1;
		rx_conf.dma_control.dest_burst_len = zx29spi->vendor->fifodepth >> 1;
		break;
	}
	switch (zx29spi->tx_lev_trig) {
	case SPI_TX_1_OR_MORE_EMPTY_LOC:
		tx_conf.dma_control.src_burst_len = DMA_BURST_LEN_1;
		tx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_1;
		break;
	case SPI_TX_4_OR_MORE_EMPTY_LOC:
		tx_conf.dma_control.src_burst_len = DMA_BURST_LEN_4;
		tx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_4;
		break;
	case SPI_TX_8_OR_MORE_EMPTY_LOC:
		tx_conf.dma_control.src_burst_len = DMA_BURST_LEN_8;
		tx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_8;
		break;
	case SPI_TX_16_OR_MORE_EMPTY_LOC:
		tx_conf.dma_control.src_burst_len = DMA_BURST_LEN_16;
		tx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_16;
		break;
	case SPI_TX_32_OR_MORE_EMPTY_LOC:
		tx_conf.dma_control.src_burst_len = DMA_BURST_LEN_ALL;
		tx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_ALL;
		break;
	default:
		/* unknown trigger: fall back to half the FIFO depth */
		tx_conf.dma_control.src_burst_len = zx29spi->vendor->fifodepth >> 1;
		tx_conf.dma_control.dest_burst_len = zx29spi->vendor->fifodepth >> 1;
		break;
	}
	/* burst element size and total count follow the transfer data width */
	switch (zx29spi->read) {
	case READING_NULL:
		/* Use the same as for writing */
		rx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
		rx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
		rx_conf.count = zx29spi->cur_transfer->len;
		break;
	case READING_U8:
		rx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
		rx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
		rx_conf.count = zx29spi->cur_transfer->len;
		break;
	case READING_U16:
		rx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_16BIT;
		rx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_16BIT;
		rx_conf.count = zx29spi->cur_transfer->len;
		break;
	case READING_U32:
		rx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_32BIT;
		rx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_32BIT;
		rx_conf.count = zx29spi->cur_transfer->len;
		break;
	}
	switch (zx29spi->write) {
	case WRITING_NULL:
		/* Use the same as for reading */
		tx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
		tx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
		tx_conf.count = zx29spi->cur_transfer->len;
		break;
	case WRITING_U8:
		tx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
		tx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
		tx_conf.count = zx29spi->cur_transfer->len;
		break;
	case WRITING_U16:
		tx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_16BIT;
		tx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_16BIT;
		tx_conf.count = zx29spi->cur_transfer->len;
		break;
	case WRITING_U32:
		tx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_32BIT;
		tx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_32BIT;
		tx_conf.count = zx29spi->cur_transfer->len;
		break;
	}
	/* platform dmaengine accepts its own config struct through this cast */
	dmaengine_slave_config(rxchan,(struct dma_slave_config*)&rx_conf);
	dmaengine_slave_config(txchan,(struct dma_slave_config*)&tx_conf);
	/* Submit and fire RX and TX with TX last so we're ready to read! */
	if (zx29spi->rx) {
		rxdesc= rxchan->device->device_prep_interleaved_dma(rxchan,NULL,0);
		txdesc= txchan->device->device_prep_interleaved_dma(txchan,NULL,0);
		/* Put the callback on the RX transfer only, that should finish last */
		rxdesc->callback = dma_callback;
		rxdesc->callback_param = zx29spi;
//		txdesc->callback = dma_callback_tx;
//		txdesc->callback_param = zx29spi;
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(rxchan);
		if (transfer->tx_dma) {
			/* SPI RX buffer may overflow in DMA busy situation. */
			dmaengine_submit(txdesc);
			dma_async_issue_pending(txchan);
			zx29spi->dma_running = TX_TRANSFER | RX_TRANSFER;
			enable_irq(zx29spi->irq); /* detect overflow through interrupt */
		} else {
			/* RX-only: in master mode drive the bus with zeros ourselves */
			if(zx29spi->mode == ZX29_SSP_MASTER_TYPE)
				zx29_fill_txfifo(zx29spi);
			zx29spi->dma_running = RX_TRANSFER;
		}
	}
	else if (zx29spi->tx){
		/* TX-only transfer: callback rides on the TX descriptor */
		txdesc = txchan->device->device_prep_interleaved_dma(txchan,NULL,0);
		txdesc->callback = dma_callback;
		txdesc->callback_param = zx29spi;
		dmaengine_submit(txdesc);
		dma_async_issue_pending(txchan);
		zx29spi->dma_running = TX_TRANSFER;
	}
	return 0;
}
#if 0
static bool zx29_dma_filter_fn(struct dma_chan *chan, void *param)
{
dma_peripheral_id peri_id = (dma_peripheral_id) param;
#if 0
if ((chan->chan_id == (unsigned int)peri_id) && \
(strcmp(dev_name(chan->device->dev), "a1200000.dma") == 0))
return true;
chan->private = param;
return false;
#endif
if (chan->chan_id == (unsigned int)peri_id)
return true;
chan->private = param;
return false;
}
#endif
extern bool zx29_dma_filter_fn(struct dma_chan *chan, void *param);
/*
 * zx29_dma_probe - acquire the RX/TX DMA channels and a dummy bounce page.
 * @zx29spi: driver state; channel pointers and dummypage are filled in here.
 *
 * Both channels must be available or DMA mode is abandoned entirely.
 * Returns 0 on success, -ENODEV on any failure (the caller falls back to
 * non-DMA operation).
 */
static int zx29_dma_probe(struct zx29_spi *zx29spi)
{
	dma_cap_mask_t mask;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	/*
	 * We need both RX and TX channels to do DMA, else do none
	 * of them.
	 */
	zx29spi->dma_rx_channel = dma_request_channel(mask,
			zx29_dma_filter_fn,
			zx29spi->master_info->dma_rx_param);
	if (!zx29spi->dma_rx_channel) {
		dev_dbg(&zx29spi->pdev->dev, "no RX DMA channel!\n");
		/* BUGFIX: dma_rx_param is pointer-sized; "%d" was a format/arg
		 * mismatch (UB on 64-bit). Print it with %p instead. */
		dev_err(&zx29spi->pdev->dev, "no RX DMA channel!,dma_rx_param=:%p\n",
			zx29spi->master_info->dma_rx_param);
		goto err_no_rxchan;
	}
	zx29spi->dma_tx_channel = dma_request_channel(mask,
			zx29_dma_filter_fn,
			zx29spi->master_info->dma_tx_param);
	if (!zx29spi->dma_tx_channel) {
		dev_dbg(&zx29spi->pdev->dev, "no TX DMA channel!\n");
		dev_err(&zx29spi->pdev->dev, "no TX DMA channel!\n");
		goto err_no_txchan;
	}
	/* Dummy page used as a bounce buffer for one-directional transfers. */
	zx29spi->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!zx29spi->dummypage) {
		dev_dbg(&zx29spi->pdev->dev, "no DMA dummypage!\n");
		dev_err(&zx29spi->pdev->dev, "no DMA dummypage!\n");
		goto err_no_dummypage;
	}
	dev_info(&zx29spi->pdev->dev, "setup for DMA on RX %s, TX %s\n",
		 dma_chan_name(zx29spi->dma_rx_channel),
		 dma_chan_name(zx29spi->dma_tx_channel));
	return 0;

err_no_dummypage:
	dma_release_channel(zx29spi->dma_tx_channel);
	/* BUGFIX: clear the stale pointer so later code (e.g. zx29_dma_remove)
	 * cannot double-release the channel. */
	zx29spi->dma_tx_channel = NULL;
err_no_txchan:
	dma_release_channel(zx29spi->dma_rx_channel);
	zx29spi->dma_rx_channel = NULL;
err_no_rxchan:
	dev_err(&zx29spi->pdev->dev,
		"Failed to work in dma mode, work without dma!\n");
	dev_dbg(&zx29spi->pdev->dev,
		"Failed to work in dma mode, work without dma!\n");
	return -ENODEV;
}
/*
 * terminate_dma - abort any in-flight DMA on both directions and mark the
 * controller idle. Safe to call from error paths.
 */
static void terminate_dma(struct zx29_spi *zx29spi)
{
	dmaengine_terminate_all(zx29spi->dma_rx_channel);
	dmaengine_terminate_all(zx29spi->dma_tx_channel);
	zx29spi->dma_running = 0;
}
/*
 * zx29_dma_remove - undo zx29_dma_probe(): stop any running transfer,
 * release both DMA channels and free the dummy bounce page.
 */
static void zx29_dma_remove(struct zx29_spi *zx29spi)
{
	struct dma_chan *tx = zx29spi->dma_tx_channel;
	struct dma_chan *rx = zx29spi->dma_rx_channel;

	if (zx29spi->dma_running)
		terminate_dma(zx29spi);
	if (tx)
		dma_release_channel(tx);
	if (rx)
		dma_release_channel(rx);
	kfree(zx29spi->dummypage);
}
#endif
/*
 * zx29_spi_irq - SPI interrupt handler.
 *
 * Masks further interrupts (they are re-enabled by the transfer path) and
 * releases the thread blocked on sema_dma in the DMA transfer routine.
 */
static irqreturn_t zx29_spi_irq(int irqno, void *dev_id)
{
	struct zx29_spi *drv_data = dev_id;

	disable_irq_nosync(drv_data->irq);
	up(&drv_data->sema_dma);
	return IRQ_HANDLED;
}
/*
 * print_info_data - hex-dump a buffer to the kernel log, 8 bytes per line.
 * @data: buffer to dump (NULL tolerated: prints nothing).
 * @len:  number of bytes to dump.
 */
static void print_info_data(void *data, int len)
{
	int i = 0;
	unsigned char *p = data;

	if (!p)
		return;
	for (i = 0; i <= (len - 8); i += 8) {
		printk("%02x %02x %02x %02x %02x %02x %02x %02x \r\n",
		       p[i], p[i+1], p[i+2], p[i+3],
		       p[i+4], p[i+5], p[i+6], p[i+7]);
	}
	/* BUGFIX: the original loop silently dropped the trailing
	 * (len % 8) bytes; dump them as a final partial line. */
	for (; i < len; i++)
		printk("%02x ", p[i]);
	printk("\n");
}
static int zx29_flush_rxfifo(struct zx29_spi *zx29spi,void *buf)
{
int ret = 0;
struct spi_transfer transfer;
unsigned char data[64] = {0};
uint32_t fifo_sr = 0;
uint32_t rd_max = 0;
uint32_t rx_fifo_cnt_msk= SPI_FIFO_SR_MASK_RX_FIFO_CNTR;
transfer.tx_buf = 0;
transfer.rx_buf = data;
transfer.len = 0;
fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase));
if(!strcmp(zx29spi->pdev->name,"140a000.ssp")) {
rd_max = (fifo_sr & rx_fifo_cnt_msk) >> SPI_FIFO_SR_SHIFT_RX_CNT;
}else {
rd_max = (fifo_sr>>SPI_FIFO_SR_SHIFT_RX_CNT)&0x7f;
}
while(rd_max--) {
*(u8 *) transfer.rx_buf =
readw((SPI_DR_OFFSET+zx29spi->virtbase)) & 0xFFU;
transfer.rx_buf++;
transfer.len++;
}
memcpy(buf,data,transfer.len);
//dev_info(&zx29spi->pdev->dev,"spi_fifo_sr = %d transfer.len=%d \n",fifo_sr,transfer.len);
//print_info_data(data,transfer.len);
return transfer.len;
}
/*
 * get_spi_rx_fifo - public helper: drain the controller RX FIFO into @buf.
 * @spi: SPI device whose master owns the FIFO.
 * @buf: destination buffer (caller must provide at least 64 bytes).
 *
 * Returns the number of bytes read, or 0 on invalid arguments / missing
 * flush hook.
 */
int get_spi_rx_fifo(struct spi_device *spi, unsigned char *buf)
{
	struct zx29_spi *zx29spi;

	/* BUGFIX: the original dereferenced spi->master before the NULL
	 * check on spi; validate the arguments first. */
	if (!spi || !buf)
		return 0;
	zx29spi = spi_master_get_devdata(spi->master);
	if (!zx29spi || !zx29spi->zx29_flush_rxfifo)
		return 0;
	return zx29spi->zx29_flush_rxfifo(zx29spi, buf);
}
void set_spi_timing(struct spi_device *spi,unsigned int param)
{
struct zx29_spi *zx29spi = spi_master_get_devdata(spi->master);
printk("val set before: 0x%x \n",readl((SPI_TIMING_OFFSET+zx29spi->virtbase)));
writel(param, (SPI_TIMING_OFFSET+zx29spi->virtbase));
printk("val set after: 0x%x \n",readl((SPI_TIMING_OFFSET+zx29spi->virtbase)));
}
/*
 * slave_mode_set - reprogram the SPI clock polarity/phase (mode 0..3).
 * @spi:  target SPI device.
 * @mode: standard SPI mode number; bit0 = CPHA, bit1 = CPOL.
 *
 * The controller must be disabled (SSPE cleared) while the format register
 * is changed; it is re-enabled afterwards and we poll (up to 100 ms) for
 * the enable to take effect.
 */
void slave_mode_set(struct spi_device *spi, unsigned int mode)
{
	unsigned int regval = 0;
	ktime_t k_time_start = 0;
	ktime_t diff_ns = 0;
	struct zx29_spi *zx29spi = spi_master_get_devdata(spi->master);

	/* Disable the controller before touching the format register. */
	regval = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & (~SPI_COM_CTRL_MASK_SSPE);
	writel(regval, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
	regval = readl((SPI_FMT_CTRL_OFFSET+zx29spi->virtbase)) & (~(SPI_FMT_CTRL_MASK_POL|SPI_FMT_CTRL_MASK_PHA));
	printk("val set before: 0x%x \n", regval);
	switch (mode) {
	case 0:
		break;
	case 1:
		regval |= SPI_FMT_CTRL_MASK_PHA;
		break;
	case 2:
		regval |= SPI_FMT_CTRL_MASK_POL;
		break;
	case 3:
		regval |= (SPI_FMT_CTRL_MASK_POL|SPI_FMT_CTRL_MASK_PHA);
		break;
	default:
		break;
	}
	writel(regval, (SPI_FMT_CTRL_OFFSET+zx29spi->virtbase));
	/* Re-enable and wait for the SSPE readback bit (bit 4). */
	writel(readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) | SPI_COM_CTRL_MASK_SSPE, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
	k_time_start = ktime_get();
	do {
		diff_ns = ktime_sub(ktime_get(), k_time_start);
		regval = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase))>> 4 & 0x1;
		cpu_relax();
	} while (!regval && diff_ns < 100000000); //100ms
	if (diff_ns >= 100000000)
		/* BUGFIX: diff_ns is a 64-bit ktime_t; "%x" was a format/arg
		 * mismatch — print it with %lld. */
		dev_info(&zx29spi->pdev->dev, "wait sspe timeout, slave_mode_set failed! diff_ns= %lld \n", diff_ns);
	else
		printk("val set after: 0x%x \n", readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)));
	return;
}
/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck start */
#define SSP0_PARA_BASE_ADDR 0x1400030
#define SSP1_PARA_BASE_ADDR 0x1400048
#define SSP_MASK_SW_WRST (0x1L << 9)
#define SSP_MASK_SW_PRST (0x1L << 8)
/*
 * zx29_slave_ctrl_reset - pulse the SSP controller's soft-reset bits.
 *
 * Maps the per-controller parameter register block (SSP1 for the
 * "1410000.ssp" instance, SSP0 otherwise), clears SW_WRST/SW_PRST to
 * assert reset, polls (up to 100 ms) for bit 1 of the COM_CTRL register
 * to drop, then sets the bits again to release reset.
 *
 * Always returns 0; a timeout is only logged, not propagated.
 * NOTE(review): the write to *addr uses a plain dereference rather than
 * writel() — presumably intentional on this SoC, but worth confirming.
 */
static int zx29_slave_ctrl_reset(struct zx29_spi *zx29spi)
{
	void __iomem *addr = NULL;
	ktime_t k_time_start = 0;
	ktime_t diff_ns = 0;
	volatile unsigned int val = 0;

	/* Select the parameter block for this controller instance. */
	if(!strcmp(zx29spi->pdev->name,"1410000.ssp")) {
		addr = ioremap(SSP1_PARA_BASE_ADDR, 0x1000);
	}else{
		addr = ioremap(SSP0_PARA_BASE_ADDR, 0x1000);
	}
	if(addr){
		/* Assert soft reset: clear the write/APB reset bits. */
		val = *(volatile unsigned int *)addr;
		//dev_info(&zx29spi->pdev->dev, "val = 0x%x 0x%x\n",val,(~(SSP_MASK_SW_WRST|SSP_MASK_SW_PRST)));
		*(volatile unsigned int *)addr = val & (~(SSP_MASK_SW_WRST|SSP_MASK_SW_PRST));
		/* Poll COM_CTRL bit 1 until it clears (100 ms budget). */
		k_time_start = ktime_get();
		do {
			diff_ns = ktime_sub(ktime_get(),k_time_start);
			val = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase))>> 1 & 0x1;
			cpu_relax();
		}while(val && diff_ns < 100000000); //100ms
		if(diff_ns >= 100000000)
			dev_info(&zx29spi->pdev->dev, "zx29_slave_assert_ctrl failed!!! \n");
		else
			dev_info(&zx29spi->pdev->dev, "zx29_slave_assert_ctrl success! \n");
		/* Release reset and give the block time to settle. */
		val = *(volatile unsigned int *)addr;
		*(volatile unsigned int *)addr = val|(SSP_MASK_SW_WRST|SSP_MASK_SW_PRST);
		udelay(500);
		iounmap(addr);
	}
	return 0;
}
/*
 * zx29_slave_ctrl_reinit - fully re-initialize the controller after a
 * stuck slave transfer: soft-reset the block, reload the default register
 * configuration, clear the timing register, and (for the "1410000.ssp"
 * instance) drop the camera-mode bit before re-enabling the controller.
 *
 * Polls up to 100 ms for the SSPE readback bit; a timeout is logged but
 * the function still returns 0.
 */
static int zx29_slave_ctrl_reinit(struct zx29_spi *zx29spi)
{
	volatile unsigned int regval;
	ktime_t k_time_start = 0;
	ktime_t diff_ns = 0;

	zx29_slave_ctrl_reset(zx29spi);
	/* Disable SPI */
	regval = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & (~SPI_COM_CTRL_MASK_SSPE);
	writel(regval, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
	load_spi_default_config(zx29spi);
	writel(0, (SPI_TIMING_OFFSET + zx29spi->virtbase));
	if (!strcmp(zx29spi->pdev->name, "1410000.ssp")) {
		regval = readl((SPI_FMT_CTRL_OFFSET+zx29spi->virtbase)) & (~(0x1<<12));
		writel(regval, (SPI_FMT_CTRL_OFFSET+zx29spi->virtbase));
		dev_info(&zx29spi->pdev->dev, " %s set non-camera mode regval:0x%x \n", zx29spi->pdev->name, regval);
	}
	/* Re-enable and wait for the SSPE readback bit (bit 4). */
	writel(readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) | SPI_COM_CTRL_MASK_SSPE, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
	k_time_start = ktime_get();
	do {
		diff_ns = ktime_sub(ktime_get(), k_time_start);
		regval = ((readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase))>> 4)&0x1);
		cpu_relax();
	} while ((regval == 0) && diff_ns < 100000000);
	if (diff_ns >= 100000000)
		dev_info(&zx29spi->pdev->dev, "wait sspen timeout!!! \n");
	else
		/* BUGFIX: the original passed regval with no matching format
		 * specifier; include it in the format string. */
		dev_info(&zx29spi->pdev->dev, "ssp enabled regval:0x%x \n", regval);
	return 0;
}
/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck end */
/*
 * zx29_slave_do_interrupt_dma_transfer - run one slave-mode transfer via DMA.
 *
 * Sets up the tx/rx cursors from the pre-mapped DMA addresses (using the RX
 * buffer as a dummy TX source when no TX buffer is supplied), configures the
 * DMA channels, notifies the remote side that DMA is armed, then waits
 * (freezably, so suspend can interrupt it) for the completion callback to
 * set trans_done. On an interrupted wait the hardware is torn down and
 * re-initialized via zx29_slave_ctrl_reinit(). Afterwards it polls up to
 * 100 ms for the FIFO busy flag to clear and checks for RX overrun when
 * both directions were running.
 *
 * Returns 0 on success, a negative errno from configure_dma()/the wait, or
 * -EIO on RX FIFO overflow.
 */
static int zx29_slave_do_interrupt_dma_transfer(struct zx29_spi *zx29spi)
{
	struct spi_transfer *transfer = zx29spi->cur_transfer;
	int ret = 0;
	ktime_t k_time_start = 0;
	ktime_t diff_ns = 0;
	unsigned int fifo_sr = 0;

	/* Establish tx/rx windows from the DMA-mapped addresses. */
	if((void *)transfer->tx_dma != NULL){
		zx29spi->tx = (void *)transfer->tx_dma;
		zx29spi->tx_end = zx29spi->tx + zx29spi->cur_transfer->len;
	}
	if((void *)transfer->rx_dma != NULL){
		zx29spi->rx = (void *)transfer->rx_dma;
		zx29spi->rx_end = zx29spi->rx + zx29spi->cur_transfer->len;
		/*if tx is null, use rx buffer as a dummy tx buffer.*/
		if((void *)transfer->tx_dma == NULL){
			zx29spi->tx = (void *)transfer->rx_dma;
			zx29spi->tx_end = zx29spi->tx + zx29spi->cur_transfer->len;
		}
	}
	zx29spi->write = zx29spi->tx ? zx29spi->cur_chip->write : WRITING_NULL;
	zx29spi->read = zx29spi->rx ? zx29spi->cur_chip->read : READING_NULL;
	/* If we're using DMA, set up DMA here */
	if (zx29spi->cur_chip->enable_dma) {
		/* Configure DMA transfer */
		zx29spi->trans_done = false; //yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck
		ret = configure_dma(zx29spi);
		if (ret) {
			dev_err(&zx29spi->pdev->dev, "configuration of DMA failed, fall back to interrupt mode\n");
			goto err_config_dma;
		}
	}
	if (zx29spi->cur_chip->enable_dma)
	{
		extern void spi_dev_send_dma_cfg_down(struct spi_device *spi);
		struct spi_device *spi = zx29spi->cur_msg->spi;
		/* Tell the remote master that our DMA is armed. */
		spi_dev_send_dma_cfg_down(spi);
		/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck start */
		//down(&zx29spi->sema_dma);
		/* Freezable wait so system suspend can interrupt a stuck read. */
		ret = wait_event_freezable(zx29spi->wait, zx29spi->trans_done);
		if(ret){
			/* Interrupted: abort DMA and re-init the controller. */
			terminate_dma(zx29spi);
			disable_irq_nosync(zx29spi->irq);
			zx29spi->dma_running = 0;
			zx29_slave_ctrl_reinit(zx29spi);
			goto err_config_dma;
		}
		/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck end */
		/* Wait (100 ms budget) for the FIFO busy flag to drop. */
		k_time_start = ktime_get();
		do {
			diff_ns = ktime_sub(ktime_get(),k_time_start);
			fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
			cpu_relax();
		}
		while (fifo_sr && diff_ns < 100000000); //100ms
		if(diff_ns >= 100000000) {
			fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
			if(fifo_sr)
				dev_info(&zx29spi->pdev->dev, "bus busy... time_start=%lld diff_ns=%lld \n",k_time_start,diff_ns);
		}
		/* Both directions ran: check for RX overrun before unmasking. */
		if (zx29spi->dma_running == (TX_TRANSFER | RX_TRANSFER)) {
			u32 intr_status;
			intr_status = readl((SPI_INTR_SR_OFFSET+zx29spi->virtbase));
			if (intr_status & SPI_INTR_SR_SCLR_MASK_RX_OVERRUN_INTR) {
				terminate_dma(zx29spi);
				dev_err(&zx29spi->cur_msg->spi->dev, "spi rx fifo overflow status = %X!!\n", intr_status);
				ret = -EIO;
			} else
				disable_irq_nosync(zx29spi->irq);
		}
		zx29spi->dma_running = 0;
	}
err_config_dma:
	if(ret)
	{
		dev_err(&zx29spi->pdev->dev, "down_interruptible, ret=%d\n",ret);
	}
	return ret;
}
/*
 * zx29_do_interrupt_dma_transfer - run one master-mode transfer via DMA.
 *
 * Mirrors zx29_slave_do_interrupt_dma_transfer() but waits on sema_dma
 * (released by the DMA completion callback / IRQ) with a 1.5 s timeout
 * instead of a freezable waitqueue. After completion it polls up to 100 ms
 * for the FIFO busy flag and checks for RX overrun when both directions
 * were active.
 *
 * Returns 0 on success, negative errno from configure_dma()/down_timeout(),
 * or -EIO on RX FIFO overflow.
 */
static int zx29_do_interrupt_dma_transfer(struct zx29_spi *zx29spi)
{
	u32 irqflags = ENABLE_ALL_INTERRUPTS;
	struct spi_transfer *transfer = zx29spi->cur_transfer;
	int ret = 0;
	static int sc_debug_info_record_cnt[4] ={0};
	ktime_t k_time_start = 0;
	ktime_t diff_ns = 0;
	unsigned int fifo_sr = 0;

	/* Establish tx/rx windows from the DMA-mapped addresses. */
	if((void *)transfer->tx_dma != NULL){
		zx29spi->tx = (void *)transfer->tx_dma;
		zx29spi->tx_end = zx29spi->tx + zx29spi->cur_transfer->len;
	}
	if((void *)transfer->rx_dma != NULL){
		zx29spi->rx = (void *)transfer->rx_dma;
		zx29spi->rx_end = zx29spi->rx + zx29spi->cur_transfer->len;
		/*if tx is null, use rx buffer as a dummy tx buffer.*/
		if((void *)transfer->tx_dma == NULL){
			zx29spi->tx = (void *)transfer->rx_dma;
			zx29spi->tx_end = zx29spi->tx + zx29spi->cur_transfer->len;
		}
	}
	zx29spi->write = zx29spi->tx ? zx29spi->cur_chip->write : WRITING_NULL;
	zx29spi->read = zx29spi->rx ? zx29spi->cur_chip->read : READING_NULL;
	/* If we're using DMA, set up DMA here */
	if (zx29spi->cur_chip->enable_dma) {
		/* Configure DMA transfer */
		ret = configure_dma(zx29spi);
		if (ret) {
			dev_err(&zx29spi->pdev->dev, "configuration of DMA failed, fall back to interrupt mode\n");
			goto err_config_dma;
		}
		/* Disable interrupts in DMA mode, IRQ from DMA controller */
		irqflags = DISABLE_ALL_INTERRUPTS;
	}
	/* config interrupts */
	/* writel(irqflags, (SPI_INTR_EN_OFFSET+zx29spi->virtbase)); //spi interrupt mode is not supported. */
	if (zx29spi->cur_chip->enable_dma)
	{
		/* Wait for the completion IRQ/DMA callback, 1.5 s budget. */
		ret = down_timeout(&zx29spi->sema_dma, msecs_to_jiffies(1500));
		if (ret < 0) {
			/* NOTE(review): panic() never returns, so the
			 * sc_debug_info_record branch below is unreachable —
			 * confirm whether the panic was meant as temporary
			 * debugging. */
			panic("spi transfer timeout,times(%d)\n",sc_debug_info_record_cnt[zx29spi->pdev->id]);
			if(sc_debug_info_record_cnt[zx29spi->pdev->id] < 5) {
				sc_debug_info_record(MODULE_ID_CAP_SPI, "%s transfer timeout:0x%x 0x%x 0x%x \n",zx29spi->pdev->name,readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)),
					readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)),readl((SPI_INTR_SR_OFFSET+zx29spi->virtbase)));
			}
			sc_debug_info_record_cnt[zx29spi->pdev->id]++;
		}
		/* Wait (100 ms budget) for the FIFO busy flag to drop. */
		k_time_start = ktime_get();
		do {
			diff_ns = ktime_sub(ktime_get(),k_time_start);
			fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
			cpu_relax();
		}
		while (fifo_sr && diff_ns < 100000000); //100ms
		if(diff_ns >= 100000000) {
			fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
			if(fifo_sr)
				dev_info(&zx29spi->pdev->dev, "bus busy.... time_start=%lld diff_ns=%lld \n",k_time_start,diff_ns);
		}
		/* Both directions ran: check for RX overrun before unmasking. */
		if (zx29spi->dma_running == (TX_TRANSFER | RX_TRANSFER)) {
			u32 intr_status;
			intr_status = readl((SPI_INTR_SR_OFFSET+zx29spi->virtbase));
			if (intr_status & SPI_INTR_SR_SCLR_MASK_RX_OVERRUN_INTR) {
				terminate_dma(zx29spi);
				dev_err(&zx29spi->cur_msg->spi->dev, "spi rx fifo overflow status = %X!!\n", intr_status);
				ret = -EIO;
			} else
				disable_irq_nosync(zx29spi->irq);
		}
		zx29spi->dma_running = 0;
	}
err_config_dma:
	if(ret)
	{
		dev_err(&zx29spi->pdev->dev, "down_interruptible, ret=%d\n",ret);
	}
	return ret;
}
/*
 * zx29_do_polling_transfer - run the current transfer by busy-polling.
 *
 * Repeatedly calls readwriter() to shuttle bytes through the FIFOs until
 * both cursors reach their end pointers, with a 1 s overall timeout, then
 * waits up to 100 ms for the FIFO busy flag to clear.
 *
 * Returns 0 on success, -EIO on transfer or busy-flag timeout.
 */
static int zx29_do_polling_transfer(struct zx29_spi *zx29spi)
{
	struct spi_transfer *transfer = zx29spi->cur_transfer;
	int ret = 0;
	unsigned int fifo_sr = 0;
	ktime_t k_time_start = 0;
	ktime_t diff_ns = 0;

	dev_dbg(&zx29spi->pdev->dev, "polling transfer ongoing ...\n");
	if (!zx29spi->tx && !zx29spi->rx) {
		return ret;
	}
	k_time_start = ktime_get();
	/*read and write*/
	while ((zx29spi->tx < zx29spi->tx_end) || (zx29spi->rx < zx29spi->rx_end)) {
		readwriter(zx29spi);
		diff_ns = ktime_sub(ktime_get(), k_time_start);
		if (diff_ns >= 1000000000) /*1s*/ {
			/* BUGFIX: tx/rx cursors are pointers; "0x%x" was a
			 * format/arg mismatch on 64-bit — print them with %p. */
			dev_info(&zx29spi->pdev->dev, "do_polling time out,diff_ns=%lld len=0x%x tx=%p tx_end=%p rx=%p rx_end=%p \n",
				 diff_ns, zx29spi->cur_transfer->len, zx29spi->tx, zx29spi->tx_end, zx29spi->rx, zx29spi->rx_end);
			ret = -EIO;
			break;
		}
	}
	/* Wait (100 ms budget) for the FIFO busy flag to drop. */
	k_time_start = ktime_get();
	do {
		diff_ns = ktime_sub(ktime_get(), k_time_start);
		fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
		cpu_relax();
	} while (fifo_sr && diff_ns < 100000000); //100ms
	if (diff_ns >= 100000000) {
		fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
		if (fifo_sr) {
			dev_info(&zx29spi->pdev->dev, "bus busy.. time_start=%lld diff_ns=%lld \n", k_time_start, diff_ns);
			ret = -EIO;
		}
	}
	return ret;
}
/*
 * zx29_spi_map_mssg - DMA-map every transfer buffer in @msg.
 *
 * Skipped entirely when the message is pre-mapped, the device has not
 * enabled DMA use, or the controller has DMA disabled. For each transfer
 * that is not already mapped, maps tx_buf (DMA_TO_DEVICE) and rx_buf
 * (DMA_FROM_DEVICE). On an rx mapping failure the already-mapped tx buffer
 * of the same transfer is unmapped again so no half-mapped transfer is
 * left behind.
 *
 * Returns 0 on success; on any mapping failure the error is folded into
 * ret via bitwise OR (NOTE(review): OR-ing negative errnos produces a
 * non-standard error value — callers only test for non-zero).
 */
static int zx29_spi_map_mssg(struct zx29_spi *zx29spi,
		struct spi_message *msg)
{
	struct device *dev;
	struct spi_transfer *transfer;
	int ret = 0;
	/* Per-controller counters limiting sc_debug_info_record spam to 5. */
	static int sc_debug_info_record_tx_cnt[4] ={0};
	static int sc_debug_info_record_rx_cnt[4] ={0};

	if(!zx29spi || !msg)
		return -EFAULT;
	if (msg->is_dma_mapped || !msg->spi->dma_used || !zx29spi->master_info->enable_dma) {
		return 0;
	}
	dev = &zx29spi->pdev->dev;
	/* Map until end or first fail */
	list_for_each_entry(transfer, &msg->transfers, transfer_list) {
		/* Already mapped by the caller: leave it alone. */
		if (/*transfer->len <= zx29spi->vendor->fifodepth ||*/ transfer->tx_dma || transfer->rx_dma )
			continue;
		if (transfer->tx_buf != NULL) {
			transfer->tx_dma = dma_map_single(dev,(void *)transfer->tx_buf, transfer->len, DMA_TO_DEVICE);
			if (dma_mapping_error(dev, transfer->tx_dma)) {
				dev_err(dev, "dma_map_single spi Tx failed,times(%d)\n",sc_debug_info_record_tx_cnt[zx29spi->pdev->id]);
				if(sc_debug_info_record_tx_cnt[zx29spi->pdev->id] < 5)
					sc_debug_info_record(MODULE_ID_CAP_SPI, "%s tx_dma_map failed \n",zx29spi->pdev->name);
				transfer->tx_dma = 0;
				ret |= -ENOMEM;
				sc_debug_info_record_tx_cnt[zx29spi->pdev->id]++;
			}
		}
		if (transfer->rx_buf != NULL) {
			transfer->rx_dma = dma_map_single(dev, transfer->rx_buf, transfer->len, DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, transfer->rx_dma)) {
				dev_err(dev, "dma_map_single spi Rx failed,times(%d)\n",sc_debug_info_record_rx_cnt[zx29spi->pdev->id]);
				if(sc_debug_info_record_rx_cnt[zx29spi->pdev->id] < 5)
					sc_debug_info_record(MODULE_ID_CAP_SPI, "%s rx_dma_map failed \n",zx29spi->pdev->name);
				transfer->rx_dma = 0;
				ret |= -ENOMEM;
				sc_debug_info_record_rx_cnt[zx29spi->pdev->id]++;
			}
			/* rx failed after tx succeeded: roll back the tx mapping. */
			if (!transfer->rx_dma && transfer->tx_dma && transfer->tx_buf) {
				dma_unmap_single(dev, transfer->tx_dma, transfer->len, DMA_TO_DEVICE);
				transfer->tx_dma = 0;
			}
		}
	}
	return ret;
}
/*
 * zx29_spi_unmap_mssg - undo zx29_spi_map_mssg() for every transfer in @msg.
 *
 * No-op under the same conditions that made mapping a no-op. Transfers
 * whose dma handle is set but whose virtual buffer is NULL are skipped —
 * NOTE(review): presumably these are caller-provided/pre-mapped handles
 * this driver does not own; confirm against the mapping path.
 */
static void zx29_spi_unmap_mssg(struct zx29_spi *zx29spi,
		struct spi_message *msg)
{
	struct device *dev = &zx29spi->pdev->dev;
	struct spi_transfer *transfer;

	if (msg->is_dma_mapped || !msg->spi->dma_used || !zx29spi->master_info->enable_dma)
		return;
	list_for_each_entry(transfer, &msg->transfers, transfer_list) {
		/* Skip handles this driver did not create (no matching buf). */
		if ( (!transfer->tx_buf && transfer->tx_dma) || (! transfer->rx_buf && transfer->rx_dma) )
			continue;
		if (transfer->rx_buf != NULL && transfer->rx_dma)
			dma_unmap_single(dev, transfer->rx_dma, transfer->len, DMA_FROM_DEVICE);
		if (transfer->tx_buf != NULL && transfer->tx_dma)
			dma_unmap_single(dev, transfer->tx_dma, transfer->len, DMA_TO_DEVICE);
	}
}
/*
 * zx29_slave_transfer_one_message - spi_master transfer_one_message hook
 * for slave mode.
 *
 * DMA-maps the message, then walks its transfers: sets up tx/rx cursors,
 * flushes the RX FIFO for read transfers, clears pending interrupts, and
 * dispatches to either PIO (writer()/reader()) when polling is requested
 * or no DMA mapping exists, or to zx29_slave_do_interrupt_dma_transfer()
 * with the FIFO DMA-enable bits set around the call. Accumulates
 * actual_length and finalizes the message with the resulting status.
 *
 * Returns the status also stored in msg->status (0 or negative errno).
 */
static int zx29_slave_transfer_one_message(struct spi_master *master,
		struct spi_message *msg)
{
	struct zx29_spi *zx29spi = spi_master_get_devdata(master);
	struct spi_device *spi = msg->spi;   /* unused here; kept for symmetry */
	struct spi_transfer *transfer;
	unsigned cs_change = 1;              /* unused in slave mode */
	const int nsecs = 100;               /* unused in slave mode */
	int ret = 0;

	zx29spi->cur_msg = msg;
	/* Setup the SPI using the per chip configuration */
	zx29spi->cur_chip = spi_get_ctldata(msg->spi);
	ret = zx29_spi_map_mssg(zx29spi, msg);
	/* continue with polling mode */
	if(ret){
		dev_info(&zx29spi->pdev->dev, "ret = %d\n",ret);
		goto out;
	}
	//restore_state(zx29spi);
	list_for_each_entry(transfer, &msg->transfers, transfer_list) {
		zx29spi->cur_transfer = transfer;
		/* Set up the tx window, or clear it for rx-only transfers. */
		if((void *)transfer->tx_buf != NULL){
			zx29spi->tx = (void *)transfer->tx_buf;
			zx29spi->tx_end = zx29spi->tx + zx29spi->cur_transfer->len;
		}
		else
			zx29spi->tx = zx29spi->tx_end = NULL;
		if((void *)transfer->rx_buf != NULL){
			zx29spi->rx = (void *)transfer->rx_buf;
			zx29spi->rx_end = zx29spi->rx + zx29spi->cur_transfer->len;
#if 0
			/*if tx is null, use rx buffer as a dummy tx buffer.*/
			if((void *)transfer->tx_buf == NULL){
				zx29spi->tx = (void *)transfer->rx_buf;
				zx29spi->tx_end = zx29spi->tx + zx29spi->cur_transfer->len;
			}
#endif
		}
		else
			zx29spi->rx = zx29spi->rx_end = NULL;
		zx29spi->write = zx29spi->tx ? zx29spi->cur_chip->write : WRITING_NULL;
		zx29spi->read = zx29spi->rx ? zx29spi->cur_chip->read : READING_NULL;
		/* Drop stale RX FIFO contents before a read transfer. */
		if (/*transfer->rx_buf || */transfer->rx_dma)
			flush(zx29spi);
		writel(CLEAR_ALL_INTERRUPTS, (SPI_INTR_SR_OFFSET+zx29spi->virtbase));
		/* PIO path: explicit polling requested or no DMA mapping. */
		if (zx29spi->cur_chip->xfer_type == POLLING_TRANSFER || (!transfer->tx_dma && !transfer->rx_dma)) {
			if (zx29spi->tx < zx29spi->tx_end)
				zx29spi->cur_transfer->len = writer(zx29spi);
			if(zx29spi->rx < zx29spi->rx_end)
				zx29spi->cur_transfer->len = reader(zx29spi);
#if defined(CONFIG_DEBUG_FS)
			zx29spi->spi_poll_cnt ++;
#endif
		} else {
			struct chip_data *chip = zx29spi->cur_chip;
			/* Enable the FIFO DMA request lines for this transfer. */
			if (transfer->rx_buf || transfer->rx_dma) {
				writel((chip->fifo_ctrl | (SPI_FIFO_CTRL_MASK_RX_DMA_EN | SPI_FIFO_CTRL_MASK_TX_DMA_EN)),
					(SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
			} else {
				writel((chip->fifo_ctrl | SPI_FIFO_CTRL_MASK_TX_DMA_EN), (SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
			}
			ret = zx29_slave_do_interrupt_dma_transfer(zx29spi);
#if defined(CONFIG_DEBUG_FS)
			zx29spi->spi_dma_cnt ++;
#endif
			/* clear TX/RX DMA Enable */
			writel(chip->fifo_ctrl, (SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
		}
		if (ret) {
			pr_info("ssp:transfer error,transfer=%p\n", transfer);
			break;
		}
		/* Update total byte transferred */
		msg->actual_length += zx29spi->cur_transfer->len;
		if (transfer->delay_usecs)
			udelay(transfer->delay_usecs);
	}
out:
	zx29_spi_unmap_mssg(zx29spi, msg);
	msg->status = ret;
	spi_finalize_current_message(master);
#if SPI_PSM_CONTROL
	zx29_spi_set_idle(&zx29spi->psm_lock);
#endif
	return ret;
}
/*
 * zx29_transfer_one_message - spi_master transfer_one_message hook for
 * master mode.
 *
 * Holds a wakeup reference for the duration of the message, programs the
 * bus clock (controller runs at clk/2), restores saved register state,
 * DMA-maps the message, waits for the SSPE readback bit to clear, enables
 * the controller, then walks the transfers: per-transfer tx/rx cursor
 * setup (rx buffer doubles as a dummy tx source), RX FIFO flush, chip
 * select handling honoring cs_change, and dispatch to polling or DMA
 * transfer. Finally waits for the SSPE readback, disables the controller,
 * unmaps buffers and finalizes the message.
 *
 * Returns the status also stored in msg->status (0 or negative errno).
 */
static int zx29_transfer_one_message(struct spi_master *master,
		struct spi_message *msg)
{
	struct zx29_spi *zx29spi = spi_master_get_devdata(master);
	struct spi_device *spi = msg->spi;
	struct spi_transfer *transfer;
	unsigned cs_change = 1;   /* assert CS before the first transfer */
	const int nsecs = 100;    /* CS deassert settle time */
	int ret = 0;
	ktime_t k_time_start = 0;
	ktime_t diff_ns = 0;
	unsigned int reg_val = 0;

	/* Keep the system awake while the message is in flight. */
	pm_stay_awake(&zx29spi->pdev->dev);
#if SPI_PSM_CONTROL
	zx29_spi_set_active(&zx29spi->psm_lock);
#endif
	/* Initial message state */
	zx29spi->cur_msg = msg;
	/* Setup the SPI using the per chip configuration */
	zx29spi->cur_chip = spi_get_ctldata(msg->spi);
	/* The bus clock is the controller clock divided by 2. */
	if ((clk_get_rate(zx29spi->spi_clk) / 2) != spi->max_speed_hz) {
		clk_set_rate(zx29spi->spi_clk, spi->max_speed_hz * 2);
	}
	restore_state(zx29spi);
	ret = zx29_spi_map_mssg(zx29spi, msg);
	/* continue with polling mode */
	if(ret){
		dev_info(&zx29spi->pdev->dev, "ret = %d\n",ret);
		goto out;
	}
	/* Wait (100 ms budget) for the SSPE readback bit to clear. */
	k_time_start = ktime_get();
	do {
		diff_ns = ktime_sub(ktime_get(),k_time_start);
		reg_val = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & SPI_COM_CTRL_MASK_SSPE_BACK;
		cpu_relax();
	}
	while (reg_val && diff_ns < 100000000); //100ms
	if(diff_ns >= 100000000) {
		reg_val = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & SPI_COM_CTRL_MASK_SSPE_BACK;
		if(reg_val) {
			dev_info(&zx29spi->pdev->dev, "wait sspe back time_out diff_ns=%lld \n",diff_ns);
			goto out;
		}
	}
	/* Enable the controller for this message. */
	writel(readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) | SPI_COM_CTRL_MASK_SSPE, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
	list_for_each_entry(transfer, &msg->transfers, transfer_list) {
		zx29spi->cur_transfer = transfer;
		/* Set up the tx window, or clear it for rx-only transfers. */
		if((void *)transfer->tx_buf != NULL){
			zx29spi->tx = (void *)transfer->tx_buf;
			zx29spi->tx_end = zx29spi->tx + zx29spi->cur_transfer->len;
		}
		else
			zx29spi->tx = zx29spi->tx_end = NULL;
		if((void *)transfer->rx_buf != NULL){
			zx29spi->rx = (void *)transfer->rx_buf;
			zx29spi->rx_end = zx29spi->rx + zx29spi->cur_transfer->len;
			/*if tx is null, use rx buffer as a dummy tx buffer.*/
			if((void *)transfer->tx_buf == NULL){
				zx29spi->tx = (void *)transfer->rx_buf;
				zx29spi->tx_end = zx29spi->tx + zx29spi->cur_transfer->len;
			}
		}
		else
			zx29spi->rx = zx29spi->rx_end = NULL;
		zx29spi->write = zx29spi->tx ? zx29spi->cur_chip->write : WRITING_NULL;
		zx29spi->read = zx29spi->rx ? zx29spi->cur_chip->read : READING_NULL;
		/* Drop stale RX FIFO contents before a read transfer. */
		if (transfer->rx_buf || transfer->rx_dma)
			flush(zx29spi);
		writel(CLEAR_ALL_INTERRUPTS, (SPI_INTR_SR_OFFSET+zx29spi->virtbase));
		/* Assert chip select when the previous transfer released it. */
		if (cs_change) {
			zx29spi->cur_chip->cs_control(zx29spi->pdev->id,ZX29_CS_ACTIVE);
		}
		cs_change = transfer->cs_change;
		/* PIO path: explicit polling requested or no DMA mapping. */
		if (zx29spi->cur_chip->xfer_type == POLLING_TRANSFER || (!transfer->tx_dma && !transfer->rx_dma)) {
			ret = zx29_do_polling_transfer(zx29spi);
#if defined(CONFIG_DEBUG_FS)
			zx29spi->spi_poll_cnt ++;
#endif
		} else {
			struct chip_data *chip = zx29spi->cur_chip;
			/* Enable the FIFO DMA request lines for this transfer. */
			if (transfer->rx_buf || transfer->rx_dma) {
				writel((chip->fifo_ctrl | (SPI_FIFO_CTRL_MASK_RX_DMA_EN | SPI_FIFO_CTRL_MASK_TX_DMA_EN)),
					(SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
			} else {
				writel((chip->fifo_ctrl | SPI_FIFO_CTRL_MASK_TX_DMA_EN), (SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
			}
			ret = zx29_do_interrupt_dma_transfer(zx29spi);
#if defined(CONFIG_DEBUG_FS)
			zx29spi->spi_dma_cnt ++;
#endif
			/* clear TX/RX DMA Enable */
			writel(chip->fifo_ctrl, (SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
		}
		if (ret) {
			pr_info("ssp:transfer error,transfer=%p\n", transfer);
			break;
		}
		/* Update total byte transferred */
		msg->actual_length += zx29spi->cur_transfer->len;
		if (transfer->delay_usecs)
			udelay(transfer->delay_usecs);
		if (cs_change) {
			zx29spi->cur_chip->cs_control(zx29spi->pdev->id,ZX29_CS_INACTIVE);
			ndelay(nsecs);
		}
	}
	/* Make sure CS is released on error or when the last transfer
	 * did not request a change. */
	if (ret || !cs_change) {
		zx29spi->cur_chip->cs_control(zx29spi->pdev->id,ZX29_CS_INACTIVE);
	}
	/* Wait (100 ms budget) for the SSPE readback bit to assert. */
	k_time_start = ktime_get();
	do {
		diff_ns = ktime_sub(ktime_get(),k_time_start);
		reg_val = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & SPI_COM_CTRL_MASK_SSPE_BACK;
		cpu_relax();
	}
	while (!reg_val && diff_ns < 100000000); //100ms
	if(diff_ns >= 100000000) {
		reg_val = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & SPI_COM_CTRL_MASK_SSPE_BACK;
		if(!reg_val) {
			dev_info(&zx29spi->pdev->dev, "wait sspe back time_out diff_ns=%lld \n",diff_ns);
			goto out;
		}
	}
	/* Disable the controller until the next message. */
	writel(readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & ~ SPI_COM_CTRL_MASK_SSPE, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
out:
	zx29_spi_unmap_mssg(zx29spi, msg);
	msg->status = ret;
	spi_finalize_current_message(master);
#if SPI_PSM_CONTROL
	zx29_spi_set_idle(&zx29spi->psm_lock);
#endif
	pm_relax(&zx29spi->pdev->dev);
	return ret;
}
/* yu.dong@20240715 [T106BUG-641] SPI packet loss problem, merged into ZXW patch start */
/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme start */
#define SPI_SLAVE_RX_BUFF_SIZE 4096
#define SPI_SLAVE_RX_MAX_PACK_NUM 15
#define SPI_SLAVE_RX_PACK_LEN 146
#define SPI_SLAVE_RX_LIST_BUFF_LEN (SPI_SLAVE_RX_MAX_PACK_NUM*SPI_SLAVE_RX_PACK_LEN)
static dma_channel_def slave_rx_conf[SPI_SLAVE_RX_MAX_PACK_NUM] = {0};
//yu.dong@20240617 [T106BUG-641] SPI packet loss issue, increase kernel buffer and read all cached data away, no data loss.
#define SPI_MAGIC 0x55555555
/*
 * rxbuf_is_free_space - check whether the circular receive buffer can hold
 * one more packet of SPI_SLAVE_RX_PACK_LEN bytes.
 *
 * recv_pos is the write cursor, rd_pos the read cursor; when the writer is
 * behind the reader the free span is the gap between them, otherwise it
 * wraps around the end of the buffer.
 */
static bool rxbuf_is_free_space(struct spi_device *spi)
{
	unsigned int avail;

	if (spi->recv_pos < spi->rd_pos)
		avail = spi->rd_pos - spi->recv_pos;
	else
		avail = SPI_SLAVE_RX_BUFF_SIZE - spi->recv_pos + spi->rd_pos;
	return avail > SPI_SLAVE_RX_PACK_LEN;
}
/* yu.dong@20240617 [T106BUG-641] SPI packet loss issue, increase kernel buffer and read all cached data away, no data loss start */
/*
 * dma_cyclic_callback - per-period completion callback for the cyclic RX DMA.
 *
 * Each SPI_SLAVE_RX_PACK_LEN slot in the cyclic buffer carries an
 * end-of-slot magic word (SPI_MAGIC) in its last 4 bytes; DMA overwrites
 * it when the slot is filled. The callback drains every filled slot into
 * the larger circular rx_buf (wrapping the copy at the buffer end), then
 * re-stamps the magic so the slot can be detected as filled again, and
 * wakes any reader blocked in the read path. If all slots are filled
 * without seeing the magic, the cyclic buffer has been overrun.
 *
 * NOTE(review): spi_poll_cnt/spi_dma_cnt are reused here as callback/
 * packet counters — confirm this intentional overlap with the debugfs
 * counters used by the transfer paths.
 */
static void dma_cyclic_callback(void *data)
{
	struct spi_device *spi = (struct spi_device *)data;
	struct zx29_spi *zx29spi = NULL;
	int index = 0;
	unsigned int end = 0;

	zx29spi = spi_master_get_devdata(spi->master);
	zx29spi->spi_poll_cnt++;
	/* Magic word of the current slot: overwritten => slot holds data. */
	end = *(volatile unsigned int *)(spi->cyc_buf +spi->cyc_index * SPI_SLAVE_RX_PACK_LEN + SPI_SLAVE_RX_PACK_LEN - 4);
	while((end != SPI_MAGIC) && index < SPI_SLAVE_RX_MAX_PACK_NUM) {
		if(!rxbuf_is_free_space(spi)) {
			printk("rx_buff not enough space!!!!!");
			zx29spi->spi_dma_cnt++;
			break;
		}else {
			/* Copy one packet, wrapping at the end of rx_buf. */
			if((spi->recv_pos + SPI_SLAVE_RX_PACK_LEN) <= SPI_SLAVE_RX_BUFF_SIZE) {
				memcpy(spi->rx_buf + spi->recv_pos,spi->cyc_buf + spi->cyc_index * SPI_SLAVE_RX_PACK_LEN,SPI_SLAVE_RX_PACK_LEN);
			}else {
				memcpy(spi->rx_buf + spi->recv_pos,spi->cyc_buf + spi->cyc_index * SPI_SLAVE_RX_PACK_LEN,SPI_SLAVE_RX_BUFF_SIZE - spi->recv_pos);
				memcpy(spi->rx_buf,spi->cyc_buf + spi->cyc_index * SPI_SLAVE_RX_PACK_LEN + (SPI_SLAVE_RX_BUFF_SIZE - spi->recv_pos),SPI_SLAVE_RX_PACK_LEN-(SPI_SLAVE_RX_BUFF_SIZE-spi->recv_pos));
			}
			/* Re-stamp the magic so the slot reads as empty again. */
			*(volatile unsigned int *)(spi->cyc_buf +spi->cyc_index * SPI_SLAVE_RX_PACK_LEN + SPI_SLAVE_RX_PACK_LEN - 4) = SPI_MAGIC;
			spi->recv_pos = (spi->recv_pos + SPI_SLAVE_RX_PACK_LEN)%SPI_SLAVE_RX_BUFF_SIZE;
			spi->cyc_index = (spi->cyc_index + 1)%SPI_SLAVE_RX_MAX_PACK_NUM;
			zx29spi->spi_dma_cnt++;
			index++;
			end = *(volatile unsigned int *)(spi->cyc_buf +spi->cyc_index * SPI_SLAVE_RX_PACK_LEN + SPI_SLAVE_RX_PACK_LEN - 4);
		}
		/* Wake a blocked reader once per batch. */
		if(spi->is_rd_waiting == true && spi->recv_done == 0) {
			wake_up(&spi->rd_wait);
			spi->recv_done = 1;
		}
	}
	if((end != SPI_MAGIC) && index == SPI_SLAVE_RX_MAX_PACK_NUM)
		printk("cyc_buf be covered!!!!!");
	return;
}
/* yu.dong@20240617 [T106BUG-641] SPI packet loss issue, increase kernel buffer and read all cached data away, no data loss end */
/*
 * zx29_slave_config_dma - arm the cyclic RX DMA used by the slave read path.
 *
 * Builds SPI_SLAVE_RX_MAX_PACK_NUM linked descriptors, one per
 * SPI_SLAVE_RX_PACK_LEN slot of the coherent cyclic buffer, with burst
 * length chosen from the controller's RX trigger level and burst size from
 * the configured element width. Each slot's last word is pre-stamped with
 * SPI_MAGIC so dma_cyclic_callback() can tell filled slots from empty
 * ones. Finally submits a cyclic descriptor with dma_cyclic_callback()
 * attached and marks RX DMA as running.
 *
 * Returns 0 on success, -ENODEV without an RX channel, -EBUSY when the
 * cyclic descriptor cannot be prepared.
 */
static int zx29_slave_config_dma(struct zx29_spi *zx29spi,struct spi_device *spi)
{
	struct chip_data *chip = NULL;
	struct dma_chan *rxchan = NULL;
	struct dma_async_tx_descriptor *rxdesc;
	unsigned short transfer_len = SPI_SLAVE_RX_PACK_LEN;
	int i;

	chip = zx29spi->cur_chip = spi->controller_state;
	/* Drop stale RX FIFO contents before arming DMA. */
	if (spi->rx_dma)
		flush(zx29spi);
	writel(CLEAR_ALL_INTERRUPTS, (SPI_INTR_SR_OFFSET+zx29spi->virtbase));
	writel((chip->fifo_ctrl | SPI_FIFO_CTRL_MASK_RX_DMA_EN), (SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
	zx29spi->write = zx29spi->tx ? zx29spi->cur_chip->write : WRITING_NULL;
	zx29spi->read = zx29spi->rx ? zx29spi->cur_chip->read : READING_NULL;
	rxchan = zx29spi->dma_rx_channel;
	/* Check that the channels are available */
	if (!rxchan)
		return -ENODEV;
	/*
	 * If supplied, the DMA burstsize should equal the FIFO trigger level.
	 * Notice that the DMA engine uses one-to-one mapping. Since we can
	 * not trigger on 2 elements this needs explicit mapping rather than
	 * calculation.
	 */
	for(i = 0;i < SPI_SLAVE_RX_MAX_PACK_NUM;i++) {
		/* Burst length follows the RX FIFO trigger level. */
		switch (zx29spi->rx_lev_trig) {
		case SPI_RX_1_OR_MORE_ELEM:
			slave_rx_conf[i].dma_control.src_burst_len = DMA_BURST_LEN_1;
			slave_rx_conf[i].dma_control.dest_burst_len = DMA_BURST_LEN_1;
			break;
		case SPI_RX_4_OR_MORE_ELEM:
			slave_rx_conf[i].dma_control.src_burst_len = DMA_BURST_LEN_4;
			slave_rx_conf[i].dma_control.dest_burst_len = DMA_BURST_LEN_4;
			break;
		case SPI_RX_8_OR_MORE_ELEM:
			slave_rx_conf[i].dma_control.src_burst_len = DMA_BURST_LEN_8;
			slave_rx_conf[i].dma_control.dest_burst_len = DMA_BURST_LEN_8;
			break;
		case SPI_RX_16_OR_MORE_ELEM:
			slave_rx_conf[i].dma_control.src_burst_len = DMA_BURST_LEN_16;
			slave_rx_conf[i].dma_control.dest_burst_len = DMA_BURST_LEN_16;
			break;
		case SPI_RX_32_OR_MORE_ELEM:
			slave_rx_conf[i].dma_control.src_burst_len = DMA_BURST_LEN_ALL;
			slave_rx_conf[i].dma_control.dest_burst_len = DMA_BURST_LEN_ALL;
			break;
		default:
			slave_rx_conf[i].dma_control.src_burst_len = zx29spi->vendor->fifodepth >> 1;
			slave_rx_conf[i].dma_control.dest_burst_len = zx29spi->vendor->fifodepth >> 1;
			break;
		}
		/* Burst size follows the configured element width. */
		switch (zx29spi->read) {
		case READING_NULL:
			/* Use the same as for writing */
			slave_rx_conf[i].dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
			slave_rx_conf[i].dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
			slave_rx_conf[i].count = transfer_len;
			break;
		case READING_U8:
			slave_rx_conf[i].dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
			slave_rx_conf[i].dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
			slave_rx_conf[i].count = transfer_len;
			break;
		case READING_U16:
			slave_rx_conf[i].dma_control.src_burst_size = DMA_BURST_SIZE_16BIT;
			slave_rx_conf[i].dma_control.dest_burst_size = DMA_BURST_SIZE_16BIT;
			slave_rx_conf[i].count = transfer_len;
			break;
		case READING_U32:
			slave_rx_conf[i].dma_control.src_burst_size = DMA_BURST_SIZE_32BIT;
			slave_rx_conf[i].dma_control.dest_burst_size = DMA_BURST_SIZE_32BIT;
			slave_rx_conf[i].count = transfer_len;
			break;
		}
		/* Peripheral-to-memory: data register -> slot i of cyc_buf. */
		slave_rx_conf[i].src_addr = (SPI_DR_OFFSET+zx29spi->phybase);
		slave_rx_conf[i].dma_control.tran_mode = TRAN_PERI_TO_MEM;
		slave_rx_conf[i].dma_control.irq_mode = DMA_ALL_IRQ_ENABLE;
		slave_rx_conf[i].dest_addr = (unsigned int)spi->rx_dma + transfer_len*i;
		slave_rx_conf[i].link_addr = 1;
		/* Pre-stamp the slot's end-of-slot magic word (see
		 * dma_cyclic_callback for how it is consumed). */
		*(volatile unsigned int *)(spi->cyc_buf + transfer_len*i + transfer_len -4) = SPI_MAGIC;
	}
	dmaengine_slave_config(rxchan,(struct dma_slave_config*)&slave_rx_conf[0]);
	/* Submit and fire RX and TX with TX last so we're ready to read! */
	if (spi->rx_dma) {
		rxdesc = rxchan->device->device_prep_dma_cyclic(rxchan,NULL,SPI_SLAVE_RX_MAX_PACK_NUM * SPI_SLAVE_RX_PACK_LEN, SPI_SLAVE_RX_PACK_LEN,0,0);
		if (!rxdesc) {
			printk(KERN_INFO "!!ERROR DESC !!![%s][%d]\n",__func__,__LINE__);
			dmaengine_terminate_all(rxchan);
			return -EBUSY;
		}
		/* Put the callback on the RX transfer only, that should finish last */
		rxdesc->callback = dma_cyclic_callback;
		rxdesc->callback_param = spi;
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(rxchan);
		zx29spi->dma_running = RX_TRANSFER;
	}
	return 0;
}
/**
 * zx29_slave_rd_start - start slave-mode cyclic DMA reception
 * @spi: SPI slave device to start reading for
 *
 * Allocates the coherent ring buffer used by the cyclic RX DMA, resets
 * the ring read/receive positions, initializes the reader wait queue
 * exactly once (it must survive stop/start cycles), then arms the
 * slave RX DMA via zx29_slave_config_dma().
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int zx29_slave_rd_start(struct spi_device *spi)
{
	struct zx29_spi *zx29spi = NULL;
	struct device *dev;
	int status = 0;
	/* one-shot guard: wait queue is initialized on the first start only */
	static int wd_wait_queue_init = 0;

	printk("zx29_slave_rd_start...\r\n");
	zx29spi = spi_master_get_devdata(spi->master);
	if (!zx29spi)
		return -EINVAL;
	dev = &zx29spi->pdev->dev;
	spi->cyc_index = 0;
	spi->rd_pos = spi->recv_pos = 0;
	spi->cyc_buf = dma_alloc_coherent(dev, SPI_SLAVE_RX_BUFF_SIZE, &spi->rx_dma, GFP_KERNEL);
	/*
	 * dma_alloc_coherent() reports failure by returning NULL;
	 * dma_mapping_error() is only meaningful for streaming mappings
	 * created with dma_map_single() and friends, so checking the
	 * handle here could miss a failed allocation.
	 */
	if (!spi->cyc_buf) {
		dev_err(dev, "dma_alloc_coherent spi rx failed\n");
		return -ENOMEM;
	}
	if (wd_wait_queue_init == 0) {
		init_waitqueue_head(&spi->rd_wait);
		spi->recv_done = false;
		spi->is_rd_waiting = false;
		wd_wait_queue_init = 1;
	}
	status = zx29_slave_config_dma(zx29spi, spi);
	return status;
}
/*
 * zx29_slave_rd_stop - tear down slave-mode cyclic DMA reception.
 *
 * Restores the FIFO control register from the per-chip state, cancels
 * the RX DMA transfer if one was armed, releases the coherent ring
 * buffer and clears all ring-buffer bookkeeping. Returns 0.
 */
static int zx29_slave_rd_stop(struct spi_device *spi)
{
	struct zx29_spi *drv = spi_master_get_devdata(spi->master);
	struct dma_chan *rx_chan;
	struct chip_data *chip;
	struct device *dev;

	if (!drv)
		return -EINVAL;
	dev = &drv->pdev->dev;
	/* cache the chip state on the controller as well */
	chip = drv->cur_chip = spi->controller_state;
	writel(chip->fifo_ctrl, (SPI_FIFO_CTRL_OFFSET + drv->virtbase));
	rx_chan = drv->dma_rx_channel;
	if (spi->rx_dma) {
		/* stop the cyclic transfer before releasing its buffer */
		dmaengine_terminate_all(rx_chan);
		drv->dma_running = 0;
		if (spi->cyc_buf != NULL) {
			dma_free_coherent(dev, SPI_SLAVE_RX_BUFF_SIZE, spi->cyc_buf, spi->rx_dma);
			spi->cyc_buf = NULL;
		}
	}
	spi->cyc_index = 0;
	spi->rd_pos = spi->recv_pos = 0;
	spi->recv_done = false;
	spi->is_rd_waiting = false;
	printk("zx29_slave_rd_stop...\r\n");
	return 0;
}
/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme end */
/* yu.dong@20240715 [T106BUG-641] SPI packet loss problem, merged into ZXW patch end*/
/* Nothing to prepare before pumping a message; the hardware stays powered. */
static int zx29_prepare_transfer_hardware(struct spi_master *master)
{
	return 0;
}
/*
 * Counterpart of zx29_prepare_transfer_hardware(). The controller is
 * intentionally left enabled between messages, so this is a no-op.
 */
static int zx29_unprepare_transfer_hardware(struct spi_master *master)
{
	return 0;
}
/**
 * verify_controller_parameters - sanity-check a chip configuration
 * @zx29spi: controller state (supplies the vendor FIFO depth limit)
 * @chip_info: configuration from board data or spi_default_chip_info
 *
 * Validates the interface type, master/slave hierarchy, transfer mode
 * and both FIFO trigger levels against what this controller supports.
 * The 16- and 32-element trigger levels additionally require a deep
 * enough FIFO.
 *
 * Returns 0 when every field is in range, -EINVAL (after logging the
 * offending field) otherwise.
 */
static int verify_controller_parameters(struct zx29_spi *zx29spi,
					struct spi_config_chip const *chip_info)
{
	if ((chip_info->iface < SPI_INTERFACE_MOTOROLA_SPI)
	    || (chip_info->iface > SPI_INTERFACE_ISI_SPI)) {
		dev_err(&zx29spi->pdev->dev,
			"interface is configured incorrectly\n");
		return -EINVAL;
	}
	if ((chip_info->hierarchy != SPI_MASTER)
	    && (chip_info->hierarchy != SPI_SLAVE)) {
		dev_err(&zx29spi->pdev->dev,
			"hierarchy is configured incorrectly\n");
		return -EINVAL;
	}
	if ((chip_info->com_mode != INTERRUPT_TRANSFER)
	    && (chip_info->com_mode != DMA_TRANSFER)
	    && (chip_info->com_mode != POLLING_TRANSFER)) {
		dev_err(&zx29spi->pdev->dev,
			"Communication mode is configured incorrectly\n");
		return -EINVAL;
	}
	switch (chip_info->rx_lev_trig) {
	case SPI_RX_1_OR_MORE_ELEM:
	case SPI_RX_4_OR_MORE_ELEM:
	case SPI_RX_8_OR_MORE_ELEM:
		/* These are always OK, all variants can handle this */
		break;
	case SPI_RX_16_OR_MORE_ELEM:
		if (zx29spi->vendor->fifodepth < 16) {
			dev_err(&zx29spi->pdev->dev,
				"RX FIFO Trigger Level is configured incorrectly\n");
			return -EINVAL;
		}
		break;
	case SPI_RX_32_OR_MORE_ELEM:
		if (zx29spi->vendor->fifodepth < 32) {
			dev_err(&zx29spi->pdev->dev,
				"RX FIFO Trigger Level is configured incorrectly\n");
			return -EINVAL;
		}
		break;
	default:
		dev_err(&zx29spi->pdev->dev,
			"RX FIFO Trigger Level is configured incorrectly\n");
		/* unreachable 'break' after 'return' dropped */
		return -EINVAL;
	}
	switch (chip_info->tx_lev_trig) {
	case SPI_TX_1_OR_MORE_EMPTY_LOC:
	case SPI_TX_4_OR_MORE_EMPTY_LOC:
	case SPI_TX_8_OR_MORE_EMPTY_LOC:
		/* These are always OK, all variants can handle this */
		break;
	case SPI_TX_16_OR_MORE_EMPTY_LOC:
		if (zx29spi->vendor->fifodepth < 16) {
			dev_err(&zx29spi->pdev->dev,
				"TX FIFO Trigger Level is configured incorrectly\n");
			return -EINVAL;
		}
		break;
	case SPI_TX_32_OR_MORE_EMPTY_LOC:
		if (zx29spi->vendor->fifodepth < 32) {
			dev_err(&zx29spi->pdev->dev,
				"TX FIFO Trigger Level is configured incorrectly\n");
			return -EINVAL;
		}
		break;
	default:
		dev_err(&zx29spi->pdev->dev,
			"TX FIFO Trigger Level is configured incorrectly\n");
		/* unreachable 'break' after 'return' dropped */
		return -EINVAL;
	}
	return 0;
}
/*
 * Capabilities of this SSP implementation: 16-entry FIFOs, up to 32
 * bits per word, loopback mode available (consumed by zx29_setup()).
 */
static struct vendor_data vendor_arm = {
	.fifodepth = 16,
	.max_bpw = 32,
	.loopback = true,
};
/*
 * A piece of default chip info unless the platform supplies it:
 * DMA transfers, Motorola SPI frame format, master hierarchy,
 * RX trigger at 8 elements and TX trigger at 4 free locations.
 */
static const struct spi_config_chip spi_default_chip_info = {
	.com_mode = DMA_TRANSFER,//INTERRUPT_TRANSFER,//POLLING_TRANSFER,
	.iface = SPI_INTERFACE_MOTOROLA_SPI,
	.hierarchy = SPI_MASTER,
	.slave_tx_disable = DO_NOT_DRIVE_TX,
	.rx_lev_trig = SPI_RX_8_OR_MORE_ELEM,
	.tx_lev_trig = SPI_TX_4_OR_MORE_EMPTY_LOC,
	// .ctrl_len = SSP_BITS_8,
	// .wait_state = SSP_MWIRE_WAIT_ZERO,
	// .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
	// .cs_control = default_cs_control,
};
/*
*/
/* Switch the SSP pads of controller @dev_id to their SPI function state. */
static void spi_set_gpio_function(int dev_id)
{
	int ret;

	ret = pinctrl_select_state(ssp_pins[dev_id].pctrl, ssp_pins[dev_id].pfunc);
	if (ret < 0)
		printk("spi%d setting spi pin ctrl failed\n", dev_id);
}
/* Switch the SSP pads of controller @dev_id to their GPIO pinctrl state. */
static void spi_set_gpio_gpio(int dev_id)
{
	int ret;

	ret = pinctrl_select_state(ssp_pins[dev_id].pctrl, ssp_pins[dev_id].pgpio);
	if (ret < 0)
		printk("spi%d setting spi pin ctrl failed\n", dev_id);
}
/* Drive GPIO @gpio_num to logic level @val. */
static void spi_set_gpio_val(int gpio_num, int val)
{
	gpio_set_value(gpio_num, val);
}
/* Read and return the current logic level of GPIO @gpio_num. */
static int spi_get_gpio_val(int gpio_num)
{
	return gpio_get_value(gpio_num);
}
/* Busy-wait for @delay microseconds; timing helper for the bit-bang paths. */
static void spi_time_delay(int delay/*us*/)
{
	udelay(delay);
}
/* Leave SPI function mode by parking the pads in their GPIO pinctrl state. */
void spi_fun_mode_stop(int dev_id)
{
	spi_set_gpio_gpio(dev_id);
}
/*
 * spi_gpio_mode_start - claim the SSP pads as plain GPIOs for bit-banging.
 *
 * Parks CS high (inactive) and CLK low, drives TX low and configures RX
 * as an input. The pinctrl state itself is expected to be switched by
 * the caller / pin setup.
 */
void spi_gpio_mode_start(int dev_id)
{
	gpio_direction_output(ssp_pins[dev_id].gpio_cs, SPI_GPIO_HIGH);
	gpio_direction_output(ssp_pins[dev_id].gpio_clk, SPI_GPIO_LOW);
	gpio_direction_output(ssp_pins[dev_id].gpio_tx, 0);
	gpio_direction_input(ssp_pins[dev_id].gpio_rx);
}
EXPORT_SYMBOL(spi_gpio_mode_start);
/* spi_gpio_mode_stop - hand the pads back to the SSP function block. */
void spi_gpio_mode_stop(int dev_id)
{
	spi_set_gpio_function(dev_id);
}
EXPORT_SYMBOL(spi_gpio_mode_stop);
/*
 * spi_gpio_write_single8 - bit-bang one byte out, MSB first.
 *
 * CS is asserted (low) for the duration of the byte; data is set up
 * while CLK is low and clocked out on the rising edge, ~1us per phase.
 */
void spi_gpio_write_single8(int dev_id, unsigned char data)
{
	unsigned char mask;

	spi_set_gpio_val(ssp_pins[dev_id].gpio_clk, SPI_GPIO_HIGH);
	spi_set_gpio_val(ssp_pins[dev_id].gpio_cs, SPI_GPIO_LOW);
	for (mask = 0x80; mask != 0; mask >>= 1) {
		spi_set_gpio_val(ssp_pins[dev_id].gpio_clk, SPI_GPIO_LOW);
		spi_set_gpio_val(ssp_pins[dev_id].gpio_tx,
				 (data & mask) ? SPI_GPIO_HIGH : SPI_GPIO_LOW);
		spi_time_delay(1);
		spi_set_gpio_val(ssp_pins[dev_id].gpio_clk, SPI_GPIO_HIGH);
		spi_time_delay(1);
	}
	spi_set_gpio_val(ssp_pins[dev_id].gpio_cs, SPI_GPIO_HIGH);
	spi_set_gpio_val(ssp_pins[dev_id].gpio_tx, SPI_GPIO_LOW);
}
EXPORT_SYMBOL(spi_gpio_write_single8);
/*
 * spi_gpio_read_single8 - bit-bang one byte in, MSB first.
 *
 * CS is asserted (low) for the duration of the byte; the RX line is
 * sampled right after each rising CLK edge, ~1us per phase.
 *
 * Returns the byte read from the RX line.
 */
unsigned char spi_gpio_read_single8(int dev_id)
{
	unsigned char value = 0;
	unsigned char mask;

	spi_set_gpio_val(ssp_pins[dev_id].gpio_clk, SPI_GPIO_HIGH);
	spi_set_gpio_val(ssp_pins[dev_id].gpio_cs, SPI_GPIO_LOW);
	for (mask = 0x80; mask != 0; mask >>= 1) {
		spi_set_gpio_val(ssp_pins[dev_id].gpio_clk, SPI_GPIO_LOW);
		spi_time_delay(1);
		spi_set_gpio_val(ssp_pins[dev_id].gpio_clk, SPI_GPIO_HIGH);
		if (spi_get_gpio_val(ssp_pins[dev_id].gpio_rx))
			value |= mask;
		spi_time_delay(1);
	}
	spi_set_gpio_val(ssp_pins[dev_id].gpio_cs, SPI_GPIO_HIGH);
	return value;
}
EXPORT_SYMBOL(spi_gpio_read_single8);
/**
 * @brief spi gpio mode, cs control
 *
 * Used for the LCD 3-wire SPI mode. Asserting CS (level == 0) takes the
 * CS/CLK/TX pads as GPIO outputs driven low; deasserting CS (non-zero)
 * pulls CS high and releases the shared data line as an input.
 *
 * @param level 0: cs line pull down, non-zero: cs line pull up.
 *
 * @retval none
 */
void spi_gpio_3wire_cs(int dev_id, unsigned char level)
{
	if (!level) {
		/* assert CS: take over the pads and drive everything low */
		gpio_direction_output(ssp_pins[dev_id].gpio_cs, SPI_GPIO_LOW);
		gpio_direction_output(ssp_pins[dev_id].gpio_clk, SPI_GPIO_LOW);
		gpio_direction_output(ssp_pins[dev_id].gpio_tx, SPI_GPIO_LOW);
		spi_set_gpio_val(ssp_pins[dev_id].gpio_clk, SPI_GPIO_LOW);
		spi_set_gpio_val(ssp_pins[dev_id].gpio_cs, SPI_GPIO_LOW);
	} else {
		/* deassert CS and release the bidirectional data line */
		spi_set_gpio_val(ssp_pins[dev_id].gpio_cs, SPI_GPIO_HIGH);
		gpio_direction_input(ssp_pins[dev_id].gpio_tx);
	}
}
EXPORT_SYMBOL(spi_gpio_3wire_cs);
/**
 * @brief spi gpio mode, one byte write.
 *
 * Used for the LCD 3-wire SPI mode: the TXD line carries data in both
 * directions at different times. Bits go out MSB first; data is set up
 * while CLK is low and latched on the rising edge, ~50us per phase.
 *
 * @param reg one byte write data.
 *
 * @retval none
 */
void spi_gpio_3wire_write8(int dev_id, unsigned char reg)
{
	unsigned char mask;

	spi_time_delay(50);
	for (mask = 0x80; mask != 0; mask >>= 1) {
		gpio_set_value(ssp_pins[dev_id].gpio_clk, SPI_GPIO_LOW);
		spi_time_delay(50);
		gpio_set_value(ssp_pins[dev_id].gpio_tx,
			       (reg & mask) ? SPI_GPIO_HIGH : SPI_GPIO_LOW);
		spi_time_delay(50);
		gpio_set_value(ssp_pins[dev_id].gpio_clk, SPI_GPIO_HIGH);
		spi_time_delay(50);
	}
}
EXPORT_SYMBOL(spi_gpio_3wire_write8);
/**
 * @brief spi gpio mode, one byte read.
 *
 * Used for the LCD 3-wire SPI mode: the TXD line carries data in both
 * directions at different times, so it is first switched to input.
 * Bits come in MSB first, sampled while CLK is low, ~50us per phase.
 *
 * @param none.
 *
 * @retval one byte readed data.
 */
unsigned char spi_gpio_3wire_read8(int dev_id)
{
	unsigned char value = 0;
	unsigned char mask;

	gpio_direction_input(ssp_pins[dev_id].gpio_tx);
	spi_time_delay(50);
	for (mask = 0x80; mask != 0; mask >>= 1) {
		gpio_set_value(ssp_pins[dev_id].gpio_clk, SPI_GPIO_LOW);
		spi_time_delay(50);
		if (SPI_GPIO_HIGH == gpio_get_value(ssp_pins[dev_id].gpio_tx))
			value |= mask;
		gpio_set_value(ssp_pins[dev_id].gpio_clk, SPI_GPIO_HIGH);
		spi_time_delay(50);
	}
	return value;
}
EXPORT_SYMBOL(spi_gpio_3wire_read8);
/*
 * zx29_setup_to_regs - flush the cached chip configuration to the SSP
 * registers: disable the controller, program the format and common
 * control registers, re-enable it and poll (up to 10 ms) until the
 * enable bit reads back.
 */
static void zx29_setup_to_regs(struct chip_data *chip,struct zx29_spi *zx29spi)
{
	unsigned int regval = 0;
	ktime_t k_time_start = 0;
	ktime_t k_time_end = 0;	/* unused; kept as-is */
	ktime_t diff_ns = 0;
	/* yu.dong@20240715 [T106BUG-641] SPI packet loss problem, merged into ZXW patch start */
	/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme start */
	/* master mode holds a wakeup source across the register sequence */
	if(false == zx29spi->master->slave)
		pm_stay_awake(&zx29spi->pdev->dev);
	/* the controller must be disabled (SSPE cleared) before reprogramming */
	regval = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & (~SPI_COM_CTRL_MASK_SSPE);
	writel(regval, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
	writel(chip->fmt_ctrl, (SPI_FMT_CTRL_OFFSET+zx29spi->virtbase));
	//writel(chip->fifo_ctrl, (SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
	/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme end */
	writel(chip->com_ctrl, (SPI_COM_CTRL_OFFSET + zx29spi->virtbase));
	//writel(chip->timing, (SPI_TIMING_OFFSET + zx29spi->virtbase));
	/* re-enable and wait for the SSPE read-back bit, 10 ms timeout */
	writel(readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) | SPI_COM_CTRL_MASK_SSPE, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
	//while(((readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase))>> 4)&0x1) == 0);
	k_time_start = ktime_get();
	do {
		regval = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & SPI_COM_CTRL_MASK_SSPE_BACK;
		diff_ns = ktime_sub(ktime_get(),k_time_start);
		cpu_relax();
	}
	while (!regval && diff_ns < 10000000);
	if(diff_ns >= 10000000) {
		/* timed out waiting for the enable bit; log and carry on */
		dev_info(&zx29spi->pdev->dev, " zx29_setup_to_regs failed! diff_ns=%lld \n",diff_ns);
	}
	if(false == zx29spi->master->slave)
		pm_relax(&zx29spi->pdev->dev);
	/* yu.dong@20240715 [T106BUG-641] SPI packet loss problem, merged into ZXW patch end */
}
/**
* zx29_setup - setup function registered to SPI master framework
* @spi: spi device which is requesting setup
*
* This function is registered to the SPI framework for this SPI master
* controller. If it is the first time when setup is called by this device,
* this function will initialize the runtime state for this chip and save
* the same in the device structure. Else it will update the runtime info
* with the updated chip info. Nothing is really being written to the
* controller hardware here, that is not done until the actual transfer
* commence.
*/
static int zx29_setup(struct spi_device *spi)
{
	struct spi_config_chip const *chip_info;
	struct chip_data *chip;
	unsigned speed_hz;
	int status = 0;
	struct zx29_spi *zx29spi = NULL;
	unsigned int bits =0;
	u8 iface = 0;
	u32 tmp;
	if (!spi)
		return -EINVAL;
	bits = spi->bits_per_word;
	zx29spi = spi_master_get_devdata(spi->master);
	if (!zx29spi)
		return -EINVAL;
	iface = zx29spi->iface_mode;
	/* Get controller_state if one is supplied */
	chip = spi_get_ctldata(spi);
	if (chip == NULL) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip) {
			dev_err(&spi->dev, "cannot allocate controller state\n");
			return -ENOMEM;
		}
		dev_dbg(&spi->dev, "allocated memory for controller's runtime state\n");
	}
	/* Get controller data if one is supplied */
	chip_info = spi->controller_data;
	if (chip_info == NULL) {
		chip_info = &spi_default_chip_info;
		/* spi_board_info.controller_data not is supplied */
		dev_dbg(&spi->dev, "using default controller_data settings\n");
	} else
		dev_dbg(&spi->dev, "using user supplied controller_data settings\n");
	/*
	 * We can override with custom divisors, else we use the board
	 * frequency setting
	 */
	/* set spi clock source at 104MHz/1 */
	//writel(chip ->clk_div-1, M0_SSP_CLKDIV_REG_VA);
	speed_hz = spi->max_speed_hz;
	/* f(ssp_clk) = 2*f(ssp_sclk_out): round the requested rate to what
	 * the clock tree can really produce and report it back to the core */
	// clk_set_rate(zx29spi->spi_clk, speed_hz * 2);
	spi->max_speed_hz = clk_round_rate(zx29spi->spi_clk, speed_hz * 2) / 2;
	if (spi->max_speed_hz != speed_hz)
		dev_dbg(&spi->dev, "round speed %dHz differs from requested %dHz.", spi->max_speed_hz, speed_hz);
	status = verify_controller_parameters(zx29spi, chip_info);
	if (status) {
		dev_err(&spi->dev, "controller data is incorrect");
		goto err_config_params;
	}
	zx29spi->rx_lev_trig = chip_info->rx_lev_trig;
	zx29spi->tx_lev_trig = chip_info->tx_lev_trig;
	/* Now set controller state based on controller data */
	/* per-device dma_used flag overrides the chip_info com_mode */
	//chip->xfer_type = chip_info->com_mode;
	chip->xfer_type = spi->dma_used ? DMA_TRANSFER : POLLING_TRANSFER;
	dev_dbg(&spi->dev, "chip->xfer_type = 0x%x \n",chip->xfer_type);
	if (!chip_info->cs_control) {
		chip->cs_control = default_cs_control;
		if (spi->master->num_chipselect != 1)
			dev_err(&spi->dev, "chip select function is NULL!\n");
	} else
		chip->cs_control = chip_info->cs_control;
	/* Check bits per word with vendor specific range */
	if ((bits <= 3) || (bits > zx29spi->vendor->max_bpw)) {
		status = -ENOTSUPP;
		dev_err(&spi->dev, "illegal data size for this controller!\n");
		dev_err(&spi->dev, "This controller can only handle 4 <= n <= %d bit words\n",
			zx29spi->vendor->max_bpw);
		goto err_config_params;
	} else if (bits <= 8) {
		dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n");
		chip->n_bytes = 1;
		chip->read = READING_U8;
		chip->write = WRITING_U8;
	} else if (bits <= 16) {
		dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n");
		chip->n_bytes = 2;
		chip->read = READING_U16;
		chip->write = WRITING_U16;
	} else {
		dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
		chip->n_bytes = 4;
		chip->read = READING_U32;
		chip->write = WRITING_U32;
	}
	/* Now Initialize all register settings required for this chip */
	chip->com_ctrl = 0;
	chip->fmt_ctrl = 0;
	chip->fifo_ctrl = 0;
	chip->timing = 0;
	if ((chip->xfer_type == DMA_TRANSFER)
	    && ((zx29spi->master_info)->enable_dma)) {
		chip->enable_dma = true;
		dev_dbg(&spi->dev, "DMA mode set in controller state\n");
	} else {
		chip->enable_dma = false;
		dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n");
	}
	/* DMA requests start disabled; the transfer path enables them */
	SPI_WRITE_BITS(chip->fifo_ctrl, SPI_DMA_DISABLED, SPI_FIFO_CTRL_MASK_RX_DMA_EN, 2);
	SPI_WRITE_BITS(chip->fifo_ctrl, SPI_DMA_DISABLED, SPI_FIFO_CTRL_MASK_TX_DMA_EN, 3);
	if (zx29spi->rx_lev_trig == SPI_RX_8_OR_MORE_ELEM)
		SPI_WRITE_BITS(chip->fifo_ctrl, SPI_FIFO_THRES_8, SPI_FIFO_CTRL_MASK_RX_FIFO_THRES, 4);
	else
		SPI_WRITE_BITS(chip->fifo_ctrl, SPI_FIFO_THRES_4, SPI_FIFO_CTRL_MASK_RX_FIFO_THRES, 4);
	if (zx29spi->tx_lev_trig == SPI_TX_8_OR_MORE_EMPTY_LOC)
		SPI_WRITE_BITS(chip->fifo_ctrl, SPI_FIFO_THRES_8, SPI_FIFO_CTRL_MASK_TX_FIFO_THRES, 8);
	else
		SPI_WRITE_BITS(chip->fifo_ctrl, SPI_FIFO_THRES_4, SPI_FIFO_CTRL_MASK_TX_FIFO_THRES, 8);
	/* data size select is encoded as (bits - 1) */
	SPI_WRITE_BITS(chip->fmt_ctrl, bits - 1, SPI_FMT_CTRL_MASK_DSS, 4);
	SPI_WRITE_BITS(chip->fmt_ctrl, chip_info->iface, SPI_FMT_CTRL_MASK_FRF, 0);
	/* the controller-level iface_mode overrides chip_info for TI/ISI */
	if((iface== SPI_TI_FORMAT)||(iface== SPI_ISI_FORMAT)){
		printk("qhf %s set iface = %d\n",__func__,iface);
		SPI_WRITE_BITS(chip->fmt_ctrl, iface, SPI_FMT_CTRL_MASK_FRF, 0);
	}
	/* Stuff that is common for all versions */
	if (spi->mode & SPI_CPOL)
		tmp = SPI_CLK_POL_IDLE_HIGH;
	else
		tmp = SPI_CLK_POL_IDLE_LOW;
	SPI_WRITE_BITS(chip->fmt_ctrl, tmp, SPI_FMT_CTRL_MASK_POL, 2);
	if (spi->mode & SPI_CPHA)
		tmp = SPI_CLK_SECOND_EDGE;
	else
		tmp = SPI_CLK_FIRST_EDGE;
	SPI_WRITE_BITS(chip->fmt_ctrl, tmp, SPI_FMT_CTRL_MASK_PHA, 3);
	/* Loopback is available on all versions except PL023 */
	if (zx29spi->vendor->loopback) {
		if (spi->mode & SPI_LOOP)
			tmp = LOOPBACK_ENABLED;
		else
			tmp = LOOPBACK_DISABLED;
		SPI_WRITE_BITS(chip->com_ctrl, tmp, SPI_COM_CTRL_MASK_LBM, 0);
	}
	// SPI_WRITE_BITS(chip->com_ctrl, SPI_ENABLED, SPI_COM_CTRL_MASK_SSPE, 1);
	SPI_WRITE_BITS(chip->com_ctrl, chip_info->hierarchy, SPI_COM_CTRL_MASK_MS, 2);
	// SPI_WRITE_BITS(chip->com_ctrl, chip_info->slave_tx_disable, SPI_COM_CTRL_MASK_SOD, 3);
	if(spi->trans_gaped) {
		chip->enable_trans_gap = true;
	}
	/* inter-transfer CS deselect gap, in SSP clock units */
	SPI_WRITE_BITS(chip->timing, spi->trans_gap_num, SPI_TIMING_MASK_T_CS_DESEL, 0);
	/* Save controller_state */
	spi_set_ctldata(spi, chip);
	/* slave controllers are forced to slave mode and programmed now */
	if(zx29spi->mode == ZX29_SSP_SLAVE_TYPE) {
		SPI_WRITE_BITS(chip->com_ctrl, SPI_SLAVE_MODE, SPI_COM_CTRL_MASK_MS, 2);
		zx29_setup_to_regs(chip,zx29spi);
	}
	if(zx29spi->mode == ZX29_SSP_MASTER_TYPE) {
		if(spi->setup_immediately == 1)
			zx29_setup_to_regs(chip,zx29spi);
	}
	return status;
err_config_params:
	/*
	 * NOTE(review): if 'chip' was a pre-existing controller_state this
	 * also frees it and clears the ctldata — confirm callers re-setup
	 * after a failed zx29_setup().
	 */
	spi_set_ctldata(spi, NULL);
	kfree(chip);
	return status;
}
/**
 * zx29_cleanup - cleanup function registered to SPI master framework
 * @spi: spi device which is requesting cleanup
 *
 * Detaches and frees the per-chip runtime state allocated by
 * zx29_setup().
 */
static void zx29_cleanup(struct spi_device *spi)
{
	struct chip_data *state = spi_get_ctldata(spi);

	spi_set_ctldata(spi, NULL);
	kfree(state);
}
/**
 * zx29_spi_clock_init - acquire and enable the SSP work and APB clocks
 * @zx29spi: controller state; spi_clk/pclk/clkfreq are filled in
 *
 * The work clock rate comes from the "clock-frequency" property and
 * falls back to 26 MHz. Returns 0 on success or a negative errno.
 */
static int zx29_spi_clock_init(struct zx29_spi *zx29spi)
{
	int status = 0;
	struct platform_device *pdev = zx29spi->pdev;

	/* work clock */
	zx29spi->spi_clk = devm_clk_get(&pdev->dev, "work_clk");
	if (IS_ERR(zx29spi->spi_clk)) {
		status = PTR_ERR(zx29spi->spi_clk);
		dev_err(&pdev->dev, "could not retrieve SPI work clock\n");
		return status;
	}
	/* enable spiclk at function zx29_setup */
	if (device_property_read_u32(&pdev->dev, "clock-frequency", &zx29spi->clkfreq))
		zx29spi->clkfreq = SPI_SPICLK_FREQ_26M;
	status = clk_set_rate(zx29spi->spi_clk, zx29spi->clkfreq);
	if (status) {
		dev_err(&pdev->dev, "clc_set_rate err status=%d \n", status);
		return status;
	}
	/* enable ssp clock source; clk_prepare_enable() can fail */
	status = clk_prepare_enable(zx29spi->spi_clk);
	if (status) {
		dev_err(&pdev->dev, "could not enable SPI work clock\n");
		return status;
	}
	/* apb clock */
	zx29spi->pclk = devm_clk_get(&pdev->dev, "apb_clk");
	if (IS_ERR(zx29spi->pclk)) {
		status = PTR_ERR(zx29spi->pclk);
		/* message previously said "work clock" by copy-paste */
		dev_err(&pdev->dev, "could not retrieve SPI apb clock\n");
		return status;
	}
	status = clk_prepare_enable(zx29spi->pclk);
	if (status)
		dev_err(&pdev->dev, "could not enable SPI apb clock\n");
	return status;
}
/* Ungate (is_enable != 0) or gate both SSP clocks. */
static void spicc_clkgate_ctrl(struct zx29_spi *zx29spi, unsigned char is_enable)
{
	if (!is_enable) {
		clk_disable(zx29spi->spi_clk);
		clk_disable(zx29spi->pclk);
		return;
	}
	clk_enable(zx29spi->spi_clk);
	clk_enable(zx29spi->pclk);
}
/**
 * zx29_spi_slave_clock_init - acquire and enable clocks for slave mode
 * @zx29spi: controller state; spi_clk/pclk/clkfreq are filled in
 *
 * Slave mode always runs the work clock at 156 MHz (the DT
 * "clock-frequency" property is deliberately ignored) and finishes by
 * ungating both clocks via spicc_clkgate_ctrl().
 *
 * Returns 0 on success or a negative errno.
 */
static int zx29_spi_slave_clock_init(struct zx29_spi *zx29spi)
{
	int status = 0;
	struct platform_device *pdev = zx29spi->pdev;

	/* work clock */
	zx29spi->spi_clk = devm_clk_get(&pdev->dev, "work_clk");
	if (IS_ERR(zx29spi->spi_clk)) {
		status = PTR_ERR(zx29spi->spi_clk);
		dev_err(&pdev->dev, "could not retrieve SPI work clock\n");
		return status;
	}
	/* fixed rate for slave operation */
	zx29spi->clkfreq = SPI_SPICLK_FREQ_156M;
	status = clk_set_rate(zx29spi->spi_clk, zx29spi->clkfreq);
	if (status) {
		dev_err(&pdev->dev, "clc_set_rate err status=%d \n", status);
		return status;
	}
	/* enable ssp clock source; clk_prepare_enable() can fail */
	status = clk_prepare_enable(zx29spi->spi_clk);
	if (status) {
		dev_err(&pdev->dev, "could not enable SPI work clock\n");
		return status;
	}
	/* apb clock */
	zx29spi->pclk = devm_clk_get(&pdev->dev, "apb_clk");
	if (IS_ERR(zx29spi->pclk)) {
		status = PTR_ERR(zx29spi->pclk);
		/* message previously said "work clock" by copy-paste */
		dev_err(&pdev->dev, "could not retrieve SPI apb clock\n");
		return status;
	}
	status = clk_prepare_enable(zx29spi->pclk);
	if (status) {
		dev_err(&pdev->dev, "could not enable SPI apb clock\n");
		return status;
	}
	spicc_clkgate_ctrl(zx29spi, true);
	return status;
}
/**
 * zx29_spi_init_pinctrl - look up pinctrl states and CS/CLK/TX/RX GPIOs
 * @pdev: the SSP platform device (drvdata must already be set)
 *
 * Fills the per-controller ssp_pins[] slot: pinctrl handle, the
 * cs_gpio_active/cs_gpio_sleep/cs_func states, and the four pad GPIOs
 * from the device tree. Master controllers select the GPIO chip-select
 * state and park CS high; slave controllers select the CS function
 * state. Missing states/GPIOs are logged but not fatal.
 *
 * Returns 0 (also on recoverable lookup failures), -1 when @pdev is NULL.
 */
static int zx29_spi_init_pinctrl(struct platform_device *pdev)
{
	struct pinctrl *pctrl;
	enum of_gpio_flags flags;
	struct zx29_spi *zx29spi;

	if (!pdev) {
		printk("pdev not exist \n");
		return -1;
	}
	/*
	 * platform_get_drvdata() returns NULL (never an ERR_PTR) when no
	 * driver data was set, so the check must be against NULL; the old
	 * IS_ERR() test let a NULL pointer through to the dereference of
	 * zx29spi->master below.
	 */
	zx29spi = (struct zx29_spi *)platform_get_drvdata(pdev);
	if (!zx29spi) {
		dev_warn(&pdev->dev, "Failed to get zx29->ssp%d pins", pdev->id);
		return 0;
	}
	pctrl = devm_pinctrl_get(&pdev->dev);
	if (IS_ERR(pctrl)) {
		dev_warn(&pdev->dev, "Failed to get zx29->ssp%d pins", pdev->id);
		return 0;
	}
	ssp_pins[pdev->id].pctrl = pctrl;
	ssp_pins[pdev->id].pcs_gpio_active = pinctrl_lookup_state(pctrl, "cs_gpio_active");
	if (IS_ERR(ssp_pins[pdev->id].pcs_gpio_active)) {
		dev_err(&pdev->dev, "missing cs_gpio_active \n");
	}
	ssp_pins[pdev->id].pcs_gpio_sleep = pinctrl_lookup_state(pctrl, "cs_gpio_sleep");
	if (IS_ERR(ssp_pins[pdev->id].pcs_gpio_sleep)) {
		dev_err(&pdev->dev, "missing cs_gpio_sleep \n");
	}
	ssp_pins[pdev->id].pcs_func = pinctrl_lookup_state(ssp_pins[pdev->id].pctrl, "cs_func");
	if (IS_ERR(ssp_pins[pdev->id].pcs_func)) {
		dev_err(&pdev->dev, "missing cs_func \n");
	}
	if (zx29spi->master->slave == false) {
		if (pinctrl_select_state(ssp_pins[pdev->id].pctrl, ssp_pins[pdev->id].pcs_gpio_active) < 0) {
			printk("spi%d setting cs_gpio pin ctrl failed\n", pdev->id);
		}
	} else {
		if (pinctrl_select_state(ssp_pins[pdev->id].pctrl, ssp_pins[pdev->id].pcs_func) < 0) {
			printk("spi%d setting cs_func pin ctrl failed\n", pdev->id);
		}
	}
	/* DT gpios property order: 0=CS, 1=CLK, 2=TX, 3=RX */
	ssp_pins[pdev->id].gpio_cs = of_get_gpio_flags(pdev->dev.of_node, 0, &flags);
	if (!gpio_is_valid(ssp_pins[pdev->id].gpio_cs)) {
		pr_info("ssp%d gpio_cs no found\n", pdev->id);
	}
	ssp_pins[pdev->id].gpio_clk = of_get_gpio_flags(pdev->dev.of_node, 1, &flags);
	if (!gpio_is_valid(ssp_pins[pdev->id].gpio_clk)) {
		pr_info("ssp%d gpio_clk no found\n", pdev->id);
	}
	ssp_pins[pdev->id].gpio_tx = of_get_gpio_flags(pdev->dev.of_node, 2, &flags);
	if (!gpio_is_valid(ssp_pins[pdev->id].gpio_tx)) {
		pr_info("ssp%d gpio_tx no found\n", pdev->id);
	}
	ssp_pins[pdev->id].gpio_rx = of_get_gpio_flags(pdev->dev.of_node, 3, &flags);
	if (!gpio_is_valid(ssp_pins[pdev->id].gpio_rx)) {
		pr_info("ssp%d gpio_rx no found\n", pdev->id);
	}
	if (zx29spi->master->slave == false)
		gpio_direction_output(ssp_pins[pdev->id].gpio_cs, SPI_GPIO_HIGH);
	return 0;
}
/**
 * zx29_spi_get_platformInfo - populate controller data from firmware properties
 * @pdev: the SSP platform device
 * @platform_info: controller data to fill (enable_dma is preset by the caller)
 *
 * Reads bus_id, num_chipselect, autosuspend_delay and the DMA request
 * identifiers; every property falls back to a defined default when
 * absent, so no field is ever left uninitialized.
 */
static void zx29_spi_get_platformInfo(struct platform_device *pdev,struct zx29_spi_controller *platform_info)
{
	struct device *dev = &pdev->dev;
	/* default to 0 so a failed property read cannot leave garbage
	 * in the dma_*_param handles (previously uninitialized) */
	u32 dma_tx = 0, dma_rx = 0;

	if (device_property_read_u16(dev, "bus_id", &platform_info->bus_id)) {
		platform_info->bus_id = pdev->id;
	}
	if (device_property_read_u8(dev, "num_chipselect", &platform_info->num_chipselect)) {
		platform_info->num_chipselect = 1;
	}
	if (device_property_read_u32(dev, "autosuspend_delay", &platform_info->autosuspend_delay))
		platform_info->autosuspend_delay = 0;
	if (device_property_read_u32(dev, "dma_rx", &dma_rx)) {
		dev_err(&pdev->dev, "dma_rx get failed");
	}
	/* go through unsigned long so the u32->pointer cast is 64-bit safe */
	platform_info->dma_rx_param = (void *)(unsigned long)dma_rx;
	/* check the TX channel too, symmetric with dma_rx above */
	if (device_property_read_u32(dev, "dma_tx", &dma_tx)) {
		dev_err(&pdev->dev, "dma_tx get failed");
	}
	platform_info->dma_tx_param = (void *)(unsigned long)dma_tx;
	dev_dbg(&pdev->dev, "get dma_rx=0x%x dma_tx=0x%x enable_dma=0x%x", dma_rx, dma_tx, platform_info->enable_dma);
}
#if defined(CONFIG_DEBUG_FS)
/*
 * dump_register - build one debugfs_reg32 entry from a register name,
 * pairing the stringified name with its SPI_<reg>_OFFSET constant.
 */
#define dump_register(reg) \
{ \
	.name = __stringify(reg), \
	.offset = SPI_ ##reg##_OFFSET, \
}
/* Register set exposed through the debugfs "spi_regs" file. */
static const struct debugfs_reg32 spi_regs[] = {
	dump_register(VER_REG),
	dump_register(COM_CTRL),
	dump_register(FMT_CTRL),
	dump_register(DR),
	dump_register(FIFO_CTRL),
	dump_register(FIFO_SR),
	dump_register(INTR_EN),
	dump_register(INTR_SR),
	dump_register(TIMING),
};
//#define Strcat(x, fmt, ...) sprintf(x, "%s" #fmt, x, __VA_ARGS__)
/**
 * debugfs_spi_init - create the per-controller debugfs directory
 * @zx29spi: controller state (supplies pdev id, regset base and counters)
 *
 * Creates spi<id>_zx29/ with a register dump ("spi_regs") and the
 * poll/dma transfer counters. Failures are logged and otherwise ignored
 * (debugfs is best-effort).
 */
static void debugfs_spi_init(struct zx29_spi *zx29spi)
{
	struct dentry *root;
	char tmp[32];

	if (!zx29spi)
		return;
	/* snprintf: bounded, unlike the former sprintf */
	snprintf(tmp, sizeof(tmp), "spi%d_zx29", zx29spi->pdev->id);
	/*
	 * Modern debugfs_create_dir() returns an ERR_PTR on failure rather
	 * than NULL, so check both forms.
	 */
	root = debugfs_create_dir(tmp, NULL);
	if (IS_ERR_OR_NULL(root)) {
		dev_err(&zx29spi->pdev->dev, "debugfs_create_dir %s err\n", tmp);
		dev_err(&zx29spi->pdev->dev, "debugfs_spi_init err\n");
		return;
	}
	/* register dump */
	zx29spi->spi_regset.regs = (struct debugfs_reg32 *)spi_regs;
	zx29spi->spi_regset.nregs = ARRAY_SIZE(spi_regs);
	zx29spi->spi_regset.base = zx29spi->virtbase;
	debugfs_create_regset32("spi_regs", S_IRUGO, root, &zx29spi->spi_regset);
	/* transfer-mode counters */
	debugfs_create_u32("poll_cnt", S_IRUGO, root, &zx29spi->spi_poll_cnt);
	debugfs_create_u32("dma_cnt", S_IRUGO, root, &zx29spi->spi_dma_cnt);
	zx29spi->spi_root = (void *)root;
}
#endif
/**
 * zx29_spi_init_irq - fetch and request the controller interrupt
 * @pdev: the SSP platform device
 * @zx29spi: controller state; the irq number is stored in it
 *
 * Requests the IRQ with high-level trigger, then leaves it disabled
 * until a transfer needs it.
 *
 * Returns 0 on success or a negative errno.
 */
static int zx29_spi_init_irq(struct platform_device *pdev, struct zx29_spi *zx29spi)
{
	int irq, ret;

	if (!zx29spi || !pdev)
		return -ENOENT;
	/*
	 * platform_get_irq() returns a negative errno on failure — the old
	 * "irq == NULL" test compared an int against a pointer and could
	 * never catch the error.
	 */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "Cannot get IORESOURCE_IRQ\n");
		return irq;
	}
	zx29spi->irq = irq;
	dev_dbg(&pdev->dev, "used interrupt num is %d\n", zx29spi->irq);
	ret = devm_request_irq(&pdev->dev, zx29spi->irq, zx29_spi_irq,
			       IRQF_TRIGGER_HIGH | IRQF_NO_THREAD | IRQF_ONESHOT,
			       dev_name(&pdev->dev), zx29spi);
	if (ret < 0) {
		dev_err(&pdev->dev, "probe - cannot get IRQ (%d)\n", ret);
		return ret;
	}
	/* keep it off until the transfer path enables it */
	disable_irq_nosync(zx29spi->irq);
	return ret;
}
/*
 * zx29_spi_probe_of_master - probe one SSP controller in SPI master mode:
 * allocate the spi_master and driver state, read platform properties,
 * set up pinctrl/regs/clocks/IRQ/DMA, register with the SPI core and
 * drop the runtime-PM reference.
 */
static int zx29_spi_probe_of_master(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct zx29_spi_controller *platform_info = NULL;
	struct spi_master *master;
	struct zx29_spi *zx29spi = NULL; /*Data for this driver */
	struct resource *regs = NULL;
	struct resource *gpio = NULL;
	struct resource *irq = NULL;
	struct device_node *np = pdev->dev.of_node;
	int status = 0, i, ret;
	u32 regval = 0;
	platform_info = devm_kzalloc(&pdev->dev, sizeof(struct zx29_spi_controller), GFP_KERNEL);
	if (platform_info == NULL)
		/* NOTE(review): returns 0 on OOM, so probe reports success — confirm intended */
		return 0;
	/* defaults before DT overrides (comma-chained on purpose) */
	platform_info->bus_id = 0,
	platform_info->num_chipselect = 1,
	platform_info->enable_dma = 1,
	platform_info->autosuspend_delay = 0,
	/* Allocate master with space for data */
	master = spi_alloc_master(dev, sizeof(struct zx29_spi));
	if (master == NULL) {
		dev_err(&pdev->dev, "probe - cannot alloc SPI master\n");
		status = -ENOMEM;
		goto err_no_master;
	}
	zx29spi = spi_master_get_devdata(master);
	memset(zx29spi, 0, sizeof(struct zx29_spi));
	/* controller index comes from the "spi" alias in the device tree */
	pdev->id = of_alias_get_id(np, "spi");
	if (pdev->id < 0) {
		printk("zx29_ssp of_alias_get_id fail ret:%d\n", pdev->id);
		status = -ENOMEM;
		/* NOTE(review): this path skips spi_master_put() — possible leak */
		goto err_no_master;
	}
	snprintf(zx29spi->name, sizeof(zx29spi->name), "zx29-spi%d", pdev->id);
	zx29_spi_get_platformInfo(pdev, platform_info);
	//mutex_init(&zx29spi->spi_lock);
	g_zx29_spi[pdev->id] = zx29spi;
	zx29spi->master = master;
	zx29spi->master_info = platform_info;
	zx29spi->pdev = pdev;
	zx29spi->vendor = &vendor_arm;
	zx29spi->mode = ZX29_SSP_MASTER_TYPE;
	zx29spi->zx29_flush_rxfifo = zx29_flush_rxfifo;
	/* DMA completion semaphore starts unavailable */
	sema_init(&zx29spi->sema_dma, 0);
	/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck start */
	init_waitqueue_head(&zx29spi->wait);
	zx29spi->trans_done = false;
	/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck end */
	dev_set_drvdata(&pdev->dev, zx29spi);
	device_init_wakeup(&pdev->dev, true);
	/*
	 * Bus Number Which has been Assigned to this SSP controller
	 * on this board
	 */
	master->bus_num = platform_info->bus_id;
	master->num_chipselect = platform_info->num_chipselect;
	master->cleanup = zx29_cleanup;
	master->setup = zx29_setup;
	master->prepare_transfer_hardware = zx29_prepare_transfer_hardware;
	master->transfer_one_message = zx29_transfer_one_message;
	master->unprepare_transfer_hardware = zx29_unprepare_transfer_hardware;
	//master->rt = platform_info->rt;
	/*
	 * Supports mode 0-3, loopback, and active low CS..
	 */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_NO_CS | SPI_LOOP;
	dev_dbg(&pdev->dev, "BUSNO: %d\n", master->bus_num);
	zx29_spi_init_pinctrl(pdev);
	/* registers */
	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (regs == NULL) {
		dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
		status = -ENOENT;
		goto err_no_registers;
	}
	zx29spi->phybase = regs->start;
	zx29spi->virtbase = devm_platform_ioremap_resource(pdev, 0);
	if (zx29spi->virtbase == NULL) {
		status = -ENOMEM;
		goto err_no_ioremap;
	}
	dev_dbg(&pdev->dev, " mapped registers from 0x%08x to 0x%p\n",
		regs->start, zx29spi->virtbase);
#if defined(CONFIG_DEBUG_FS)
	debugfs_spi_init(zx29spi);
#endif
	/*clock init*/
	status = zx29_spi_clock_init(zx29spi);
	if (status)
		goto err_no_clk;
	/* Initialize transfer pump */
	//tasklet_init(&zx29spi->pump_transfers, pump_transfers,(unsigned long)zx29spi);
	/* Disable SPI */
	regval = readl((SPI_COM_CTRL_OFFSET + zx29spi->virtbase)) & (~SPI_COM_CTRL_MASK_SSPE);
	writel(regval, (SPI_COM_CTRL_OFFSET + zx29spi->virtbase));
	load_spi_default_config(zx29spi);
	writel(0, (SPI_TIMING_OFFSET + zx29spi->virtbase));
	/* this particular instance is not wired to a camera sensor */
	if (!strcmp(pdev->name, "1410000.ssp")) {
		regval = readl((SPI_FMT_CTRL_OFFSET + zx29spi->virtbase)) & (~(0x1 << 12));
		writel(regval, (SPI_FMT_CTRL_OFFSET + zx29spi->virtbase));
		dev_info(&pdev->dev, "%s set non-camera mode regval:0x%x \n", pdev->name, regval);
	}
	status = zx29_spi_init_irq(pdev, zx29spi);
	if (status != 0) {
		dev_err(&pdev->dev, "zx29_spi_init_irq err!!! \n");
		goto err_no_irq;
	}
	/* Get DMA channels; fall back to PIO when unavailable */
	if (platform_info->enable_dma) {
		status = zx29_dma_probe(zx29spi);
		if (status != 0) {
			platform_info->enable_dma = 0;
			sc_debug_info_record(MODULE_ID_CAP_SPI, "%s dma probe failed \n", pdev->name);
		}
	}
#if SPI_PSM_CONTROL
	wake_lock_init(&zx29spi->psm_lock, WAKE_LOCK_SUSPEND, zx29spi->name);
#endif
	master->dev.of_node = pdev->dev.of_node;
	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret < 0) {
		dev_err(&pdev->dev, "can not register to master err %d\n", ret);
		goto err_spi_register;
	}
	dev_info(&pdev->dev, " probe succeeded\n");
	/* let runtime pm put suspend */
	if (platform_info->autosuspend_delay > 0) {
		dev_info(&pdev->dev, "will use autosuspend for runtime pm, delay %dms\n", platform_info->autosuspend_delay);
		pm_runtime_set_autosuspend_delay(dev, platform_info->autosuspend_delay);
		pm_runtime_use_autosuspend(dev);
		pm_runtime_put_autosuspend(dev);
	} else {
		pm_runtime_put(dev);
	}
	return 0;
err_spi_register:
#if SPI_PSM_CONTROL
	wake_lock_destroy(&zx29spi->psm_lock);
#endif
	if (platform_info->enable_dma)
		zx29_dma_remove(zx29spi);
err_no_irq:
	clk_disable(zx29spi->spi_clk);
	// err_no_clk_en:
	//clk_unprepare(pl022->clk);
	//err_clk_prep:
	/* NOTE(review): spi_clk came from devm_clk_get(); clk_put() here double-releases — confirm */
	clk_put(zx29spi->spi_clk);
err_no_clk:
	// iounmap(zx29spi->virtbase);
err_gpios:
	/* add */
err_no_ioremap:
err_no_registers:
	spi_master_put(master);
err_no_master:
err_no_pdata:
	return status;
}
/*
 * zx29_spi_probe_of_slave - probe an SSP controller in slave mode
 * ("zte,zx29_ssp_slave" compatible).
 *
 * Allocates and fills the spi_master, maps the controller registers,
 * sets up clock/IRQ/DMA resources, enables the SSP block and registers
 * the controller with the SPI core.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int zx29_spi_probe_of_slave(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct zx29_spi_controller *platform_info = NULL;
	struct spi_master *master;
	struct zx29_spi *zx29spi = NULL;	/* Data for this driver */
	struct resource *regs = NULL;
	struct device_node *np = pdev->dev.of_node;
	int status = 0, ret;
	u32 regval = 0;
	u32 enable_timeout = 100000;	/* bound for the SSPE readback poll below */

	platform_info = devm_kzalloc(&pdev->dev, sizeof(struct zx29_spi_controller), GFP_KERNEL);
	if (platform_info == NULL)
		return -ENOMEM;	/* was "return 0": OOM must not look like success */

	/* Defaults for a slave-mode controller (DT may refine them below). */
	platform_info->bus_id = 0;
	platform_info->num_chipselect = 1;
	platform_info->enable_dma = 1;
	platform_info->autosuspend_delay = 0;

	/* Allocate master with space for data */
	master = spi_alloc_master(dev, sizeof(struct zx29_spi));
	if (master == NULL) {
		dev_err(&pdev->dev, "probe - cannot alloc SPI master\n");
		status = -ENOMEM;
		goto err_no_master;
	}
	master->slave = true;

	zx29spi = spi_master_get_devdata(master);
	memset(zx29spi, 0, sizeof(struct zx29_spi));

	pdev->id = of_alias_get_id(np, "spi");
	if (pdev->id < 0) {
		printk("zx29_ssp of_alias_get_id fail ret:%d\n", pdev->id);
		/* propagate the error and drop the master we already hold
		 * (the old "goto err_no_master" leaked it and returned 0) */
		status = pdev->id;
		goto err_no_registers;
	}
	snprintf(zx29spi->name, sizeof(zx29spi->name), "zx29-spi%d", pdev->id);
	zx29_spi_get_platformInfo(pdev, platform_info);

	g_zx29_spi[pdev->id] = zx29spi;
	zx29spi->master = master;
	zx29spi->master_info = platform_info;
	zx29spi->pdev = pdev;
	zx29spi->vendor = &vendor_arm;
	zx29spi->mode = ZX29_SSP_SLAVE_TYPE;
	zx29spi->zx29_flush_rxfifo = zx29_flush_rxfifo;
	sema_init(&zx29spi->sema_dma, 0);
	/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck start */
	init_waitqueue_head(&zx29spi->wait);
	zx29spi->trans_done = false;
	/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck end */
	dev_set_drvdata(&pdev->dev, zx29spi);

	/*
	 * Bus Number Which has been Assigned to this SSP controller
	 * on this board
	 */
	master->bus_num = platform_info->bus_id;
	master->num_chipselect = platform_info->num_chipselect;
	master->cleanup = zx29_cleanup;
	master->setup = zx29_setup;
	master->prepare_transfer_hardware = zx29_prepare_transfer_hardware;
	master->transfer_one_message = zx29_slave_transfer_one_message;
	master->unprepare_transfer_hardware = zx29_unprepare_transfer_hardware;
	/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme start */
	master->spi_slave_rd_start = zx29_slave_rd_start;
	master->spi_slave_rd_stop = zx29_slave_rd_stop;
	/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme end */
	/*
	 * Supports mode 0-3, loopback, and active low CS..
	 */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_NO_CS | SPI_LOOP;
	dev_dbg(&pdev->dev, "BUSNO: %d\n", master->bus_num);

	zx29_spi_init_pinctrl(pdev);

	/* registers */
	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (regs == NULL) {
		dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
		status = -ENOENT;
		goto err_no_registers;
	}
	zx29spi->phybase = regs->start;
	zx29spi->virtbase = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(zx29spi->virtbase)) {
		/* devm_platform_ioremap_resource() returns ERR_PTR, never NULL,
		 * so the old "== NULL" test could never catch a failure */
		status = PTR_ERR(zx29spi->virtbase);
		goto err_no_ioremap;
	}
	dev_dbg(&pdev->dev, " mapped registers from 0x%08x to 0x%p\n",
		regs->start, zx29spi->virtbase);
#if defined(CONFIG_DEBUG_FS)
	debugfs_spi_init(zx29spi);
#endif
	/*clock init*/
	status = zx29_spi_slave_clock_init(zx29spi);
	if (status)
		goto err_no_clk;

	/* Disable SPI while the default configuration is loaded. */
	regval = readl((SPI_COM_CTRL_OFFSET + zx29spi->virtbase)) & (~SPI_COM_CTRL_MASK_SSPE);
	writel(regval, (SPI_COM_CTRL_OFFSET + zx29spi->virtbase));
	load_spi_default_config(zx29spi);
	writel(0, (SPI_TIMING_OFFSET + zx29spi->virtbase));
	if (!strcmp(pdev->name, "1410000.ssp")) {
		regval = readl((SPI_FMT_CTRL_OFFSET + zx29spi->virtbase)) & (~(0x1 << 12));
		writel(regval, (SPI_FMT_CTRL_OFFSET + zx29spi->virtbase));
		dev_info(&pdev->dev, " %s set non-camera mode regval:0x%x \n", pdev->name, regval);
	}

	/* Re-enable the SSP and wait - bounded - for the enable to latch. */
	writel(readl((SPI_COM_CTRL_OFFSET + zx29spi->virtbase)) | SPI_COM_CTRL_MASK_SSPE,
	       (SPI_COM_CTRL_OFFSET + zx29spi->virtbase));
	while ((((readl((SPI_COM_CTRL_OFFSET + zx29spi->virtbase)) >> 4) & 0x1) == 0) &&
	       --enable_timeout)
		cpu_relax();	/* was an unbounded busy-wait that could hang probe */
	if (enable_timeout == 0)
		dev_warn(&pdev->dev, "timeout waiting for SSP enable\n");
	dev_info(&pdev->dev, "ssp enabled \n");	/* dropped stray arg without specifier */

	/* irq*/
	status = zx29_spi_init_irq(pdev, zx29spi);
	if (status != 0) {
		dev_err(&pdev->dev, "zx29_spi_init_irq err!!! \n");
		goto err_no_irq;
	}
	/* Get DMA channels; fall back to PIO mode if the DMA probe fails. */
	if (platform_info->enable_dma) {
		status = zx29_dma_probe(zx29spi);
		if (status != 0) {
			platform_info->enable_dma = 0;
			sc_debug_info_record(MODULE_ID_CAP_SPI, "%s dma probe failed", pdev->name);
		}
	}
#if SPI_PSM_CONTROL
	wake_lock_init(&zx29spi->psm_lock, WAKE_LOCK_SUSPEND, zx29spi->name);
#endif
	master->dev.of_node = pdev->dev.of_node;
	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret < 0) {
		dev_err(&pdev->dev, "can not register to master err %d\n", ret);
		status = ret;	/* was: error path returned stale "status" (0) */
		goto err_spi_register;
	}
	dev_info(&pdev->dev, " probe succeeded\n");

	/* let runtime pm put suspend */
	if (platform_info->autosuspend_delay > 0) {
		dev_info(&pdev->dev, "will use autosuspend for runtime pm, delay %dms\n",
			 platform_info->autosuspend_delay);
		pm_runtime_set_autosuspend_delay(dev, platform_info->autosuspend_delay);
		pm_runtime_use_autosuspend(dev);
		pm_runtime_put_autosuspend(dev);
	} else {
		pm_runtime_put(dev);
	}
	return 0;

	/* Tear down in reverse acquisition order (kernel goto-cleanup idiom). */
err_spi_register:
#if SPI_PSM_CONTROL
	wake_lock_destroy(&zx29spi->psm_lock);
#endif
	if (platform_info->enable_dma)
		zx29_dma_remove(zx29spi);
err_no_irq:
	clk_disable(zx29spi->spi_clk);
	clk_put(zx29spi->spi_clk);
err_no_clk:
err_no_ioremap:
err_no_registers:
	spi_master_put(master);
err_no_master:
	return status;
}
/*
 * zx29_spi_probe - common probe entry point.
 *
 * Looks up the matched OF compatible's data blob and dispatches to the
 * master- or slave-mode probe routine.  Returns -EINVAL when the device
 * did not match or carries an unknown mode.
 */
static int zx29_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	const struct zx29_ssp_device_of_data *data;
	enum zx29_ssp_device_mode mode;
	int status = -EINVAL;

	match = of_match_device(zx29_spi_of_match, dev);
	if (!match)
		return -EINVAL;

	data = match->data;	/* no cast needed: match->data is const void * */
	mode = data->mode;
	/* use the enum constants instead of magic 0/1 for the log decode */
	dev_info(&pdev->dev, "%s mode \n",
		 (mode == ZX29_SSP_MASTER_TYPE) ? "MASTER" :
		 (mode == ZX29_SSP_SLAVE_TYPE) ? "SLAVE" : "UNKNOWN");

	if (mode == ZX29_SSP_MASTER_TYPE)
		status = zx29_spi_probe_of_master(pdev);
	else if (mode == ZX29_SSP_SLAVE_TYPE)
		status = zx29_spi_probe_of_slave(pdev);

	return status;
}
/*
 * zx29_spi_remove - tear the controller down in reverse probe order:
 * unregister from the SPI core, restore default register config,
 * release DMA/IRQ/clock resources and debugfs entries.
 *
 * Removed unused locals (gpio/i) and dead commented-out code from the
 * original; the teardown sequence itself is unchanged.
 */
static int __exit zx29_spi_remove(struct platform_device *pdev)
{
	struct zx29_spi *zx29spi = dev_get_drvdata(&pdev->dev);

	if (!zx29spi)
		return 0;

	/*
	 * Undo pm_runtime_put() in probe.  I assume that we're not
	 * accessing the primecell here.
	 */
	pm_runtime_get_noresume(&pdev->dev);

	/*
	 * NOTE(review): the master was registered with
	 * devm_spi_register_master(), which also unregisters on device
	 * teardown; the explicit unregister/put pair below mirrors the
	 * original code but should be double-checked for a double release.
	 */
	spi_unregister_master(zx29spi->master);
	load_spi_default_config(zx29spi);

	if (zx29spi->master_info->enable_dma)
		zx29_dma_remove(zx29spi);

	devm_free_irq(&pdev->dev, zx29spi->irq, zx29spi);

	clk_disable(zx29spi->spi_clk);
	clk_put(zx29spi->spi_clk);
	clk_disable(zx29spi->pclk);
	clk_put(zx29spi->pclk);

#if defined(CONFIG_DEBUG_FS)
	if (zx29spi->spi_root) {
		printk(KERN_INFO "spi:debugfs_remove_recursive \n");
		debugfs_remove_recursive(zx29spi->spi_root);
	}
#endif

	spi_master_put(zx29spi->master);
	dev_set_drvdata(&pdev->dev, NULL);
#if SPI_PSM_CONTROL
	wake_lock_destroy(&zx29spi->psm_lock);
#endif
	return 0;
}
/* Match-data blobs: tell the common probe which mode to bring up. */
static const struct zx29_ssp_device_of_data zx29_ssp_master_of_data = {
	.mode = ZX29_SSP_MASTER_TYPE,
};
static const struct zx29_ssp_device_of_data zx29_ssp_slave_of_data = {
	.mode = ZX29_SSP_SLAVE_TYPE,
};
/*
 * OF match table: two distinct compatibles select master vs slave mode
 * via their .data blob (decoded in zx29_spi_probe()).
 */
static const struct of_device_id zx29_spi_of_match[] = {
	{
		.compatible = "zte,zx29_ssp",
		.data = &zx29_ssp_master_of_data,
	},
	{
		.compatible = "zte,zx29_ssp_slave",
		.data = &zx29_ssp_slave_of_data,
	},
	{},	/* sentinel */
};
MODULE_DEVICE_TABLE(of, zx29_spi_of_match);
#ifdef CONFIG_PM
/*
 * zx29_spi_suspend - legacy platform suspend hook.
 *
 * Moves the bus pins to their sleep state and parks the chip-select pin
 * as a sleeping GPIO for both slave and master controllers (the two
 * branches differ only in their log messages).
 *
 * Returns 0 on success, -1 on failure (legacy convention kept for
 * existing callers).
 */
static int zx29_spi_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct zx29_spi *zx29spi = NULL;

	if (pdev == NULL)
		return -1;

	/* "&pdev->dev" is never NULL, so the old extra guard was dead code */
	pinctrl_pm_select_sleep_state(&pdev->dev);

	zx29spi = (struct zx29_spi *)platform_get_drvdata(pdev);
	if (zx29spi && (zx29spi->master->slave == true)) {
		if (pinctrl_select_state(ssp_pins[pdev->id].pctrl,
					 ssp_pins[pdev->id].pcs_gpio_sleep) < 0) {
			printk("spi%d setting cs_gpio pin ctrl failed\n", pdev->id);
			return -1;
		}
	}
	if (zx29spi && (zx29spi->master->slave == false)) {
		if (pinctrl_select_state(ssp_pins[pdev->id].pctrl,
					 ssp_pins[pdev->id].pcs_gpio_sleep) < 0) {
			printk("spi%d setting cs_gpio_sleep pin ctrl failed\n", pdev->id);
			return -1;
		}
		printk("spi%d setting cs_gpio_sleep pin ctrl\n", pdev->id);
	}
	return 0;
}
/*
 * zx29_spi_resume - legacy platform resume hook.
 *
 * Restores the default pin state.  Slave mode: switch chip-select back
 * to its peripheral function.  Master mode: reclaim chip-select as a
 * GPIO and drive it high (inactive) until the next transfer.
 *
 * Returns 0 on success, -1 on failure (legacy convention kept).
 */
static int zx29_spi_resume(struct platform_device *pdev)
{
	struct zx29_spi *zx29spi = NULL;

	if (pdev == NULL)
		return -1;

	/* "&pdev->dev" is never NULL, so the old extra guard was dead code */
	pinctrl_pm_select_default_state(&pdev->dev);

	zx29spi = (struct zx29_spi *)platform_get_drvdata(pdev);
	if (zx29spi && (zx29spi->master->slave == true)) {
		if (pinctrl_select_state(ssp_pins[pdev->id].pctrl,
					 ssp_pins[pdev->id].pcs_func) < 0) {
			printk("spi%d setting cs_func pin ctrl failed\n", pdev->id);
			return -1;
		}
	}
	if (zx29spi && (zx29spi->master->slave == false)) {
		if (pinctrl_select_state(ssp_pins[pdev->id].pctrl,
					 ssp_pins[pdev->id].pcs_gpio_active) < 0) {
			printk("spi%d setting cs_gpio_active pin ctrl failed\n", pdev->id);
			return -1;
		}
		printk("spi%d setting cs_gpio_active pin ctrl\n", pdev->id);
		/* park chip-select high (inactive) while the bus is idle */
		gpio_direction_output(ssp_pins[pdev->id].gpio_cs, SPI_GPIO_HIGH);
	}
	return 0;
}
#endif
/* Platform driver glue: legacy suspend/resume callbacks, OF matching. */
static struct platform_driver zx29_spi_driver = {
	.driver = {
		.name = "zx29_ssp",
		.of_match_table = of_match_ptr(zx29_spi_of_match),
		.owner = THIS_MODULE, /* redundant: platform_driver_register() sets this */
	},
	.probe = zx29_spi_probe,
#ifdef CONFIG_PM
	.suspend = zx29_spi_suspend,
	.resume = zx29_spi_resume,
#endif
	/*
	 * NOTE(review): wrapping .remove in __exit_p() makes it NULL when the
	 * driver is built in, yet a bound device can still be unbound at
	 * runtime via sysfs.  Remove callbacks should normally not be __exit;
	 * fixing this requires dropping __exit from zx29_spi_remove() as well
	 * -- confirm and change both together.
	 */
	.remove = __exit_p(zx29_spi_remove),
};
/*
 * Driver registration.  module_platform_driver() expands to exactly the
 * __init/__exit register/unregister boilerplate the driver previously
 * spelled out by hand.
 */
module_platform_driver(zx29_spi_driver);

MODULE_DESCRIPTION("zx29 spi controller driver");
MODULE_AUTHOR("zte");
MODULE_LICENSE("GPL");