[Feature][T106_eSDK]T106-V2.01.01.02P56U06.AP.15.05_CAP.15.05(SDK4.4)diff_15.11
Only Configure: No
Affected branch: master
Affected module: unknown
Is it affected on both ZXIC and MTK: only ZXIC
Self-test: Yes
Doc Update: No
Change-Id: Ief12bb341bd859dd73c7a8c5fa3d25e5ba7e1c6d
diff --git a/upstream/linux-5.10/drivers/tty/serial/zx29_uart.c b/upstream/linux-5.10/drivers/tty/serial/zx29_uart.c
new file mode 100755
index 0000000..60629b6
--- /dev/null
+++ b/upstream/linux-5.10/drivers/tty/serial/zx29_uart.c
@@ -0,0 +1,4389 @@
+/****************************************************************************/
+/*
+ * zx29_uart.c sanchips
+ *
+ * (C) Copyright 2003-2007, gaowei
+ * (C) Copyright 2003-2007, sanchips
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/printk.h>
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/ioport.h>
+#include <linux/sched/clock.h>
+#include <linux/soc/zte/spinlock.h>
+
+#if 0
+#include <mach/gpio_def.h>
+#include <mach/irqs.h>
+#include <mach/board.h>
+#include <mach/gpio.h>
+#include <mach/debug.h>
+#include <mach/iomap.h>
+#include <mach/dma.h>
+#include <mach/dma_cfg.h>
+#endif
+//#include <linux/wakelock.h>
+#include <linux/kthread.h>
+#include <linux/semaphore.h>
+
+#include <linux/dma/zx-dma.h>
+//#include "../../dma/zte/zx298501_dma.h"
+
+#include "zx29_uart.h"
+#include <linux/soc/zte/rpmsg.h>
+#include <linux/soc/sc/drv_idle.h>
+#include "pub_debug_info.h"
+//#include <linux/soc/zte/pm/drv_idle.h>
+//#include <mach/pcu.h>
+//#define DEBUG_UART
+
+#ifdef DEBUG_UART
+#pragma GCC optimize("O0")
+#endif
+
+#define UART_WCLK_NAME "uartclk"
+#define UART_APBCLK_NAME "apb_pclk"
+
+#define CONFIG_SERIAL_ZX29_DMA 1
+
+
+extern bool xp2xp_Ap2CpIsApWakeup(void);
+extern int xp2xp_enable_4line(void);
+
+extern signed int zx29_dma_stop(unsigned int channel_id);
+extern signed int zx29_dma_get_transfer_num(unsigned int channel_id);
+
+
+
+/* Platform-device name table, indexed by uart id (0-4); used to match ports. */
+char uart_names[5][12] = {
+ "zx29_uart.0",
+ "zx29_uart.1",
+ "zx29_uart.2",
+ "zx29_uart.3",
+ "zx29_uart.4"
+};
+
+#if CONFIG_SERIAL_ZX29_DMA
+/* Size of each DMA bounce buffer (one page). */
+#define ZX29_DMA_BUFFER_SIZE PAGE_SIZE
+#define UART_DMA_RX_MAX_COUNT 2
+//#define RX_DMA_TIMEOUT (HZ / 10)//60
+/* NOTE(review): this is a jiffies value (HZ/100 == 10ms) but uart_mod_timer()
+ * passes it through msecs_to_jiffies() — confirm the intended units. */
+#define RX_DMA_TIMEOUT (HZ / 100)
+#define RX_DMA_WORK 1
+/* One DMA bounce buffer: coherent page wrapped in a single-entry scatterlist. */
+struct zx29_sgbuf {
+ struct scatterlist sg;
+ dma_addr_t dma_addr;
+ char *buf;
+};
+
+/* Per-port RX DMA state; double-buffered via sgbuf_a / sgbuf_b. */
+struct zx29_dmarx_data {
+ struct dma_chan *chan;
+ struct completion complete;
+ dma_channel_def rx_def[UART_DMA_RX_MAX_COUNT];
+ u32 rx_index;
+ bool use_buf_b;
+ struct zx29_sgbuf sgbuf_a;
+ struct zx29_sgbuf sgbuf_b;
+ dma_cookie_t cookie;
+ bool running;
+ atomic_t count;
+ bool used;
+};
+
+/* Per-port TX DMA state. */
+struct zx29_dmatx_data {
+ struct dma_chan *chan;
+ struct completion complete;
+ dma_channel_def tx_def;
+ struct scatterlist sg;
+ char *buf;
+ bool queued;
+ atomic_t count;
+};
+#define UART_DMA_CYCLE_RX_CONFIG_COUNT 5
+/* Bookkeeping for cyclic RX DMA: a ring of buffers plus error/counter flags
+ * shared between the DMA callback and the draining thread. */
+struct zx29_dma_cycle_data{
+ int id;
+ int flg_enter_th; /* ring index the drain thread will consume next */
+ int flg_enter_to;
+ char flg_overrun;
+ char flg_pe; /* parity error seen */
+ char flg_be; /* break error seen */
+ char flg_fe; /* framing error seen */
+ char from_resume;
+ unsigned long cnt_callback_total;
+ unsigned long cnt_th_total;
+ int cnt_callback;
+ int cnt_th;
+ struct zx29_sgbuf sgbuf[UART_DMA_CYCLE_RX_CONFIG_COUNT];
+ dma_channel_def rxdef[UART_DMA_CYCLE_RX_CONFIG_COUNT];
+};
+struct zx29_dma_cycle_data uart_dma_cycle[5];
+#endif
+
+
+
+/* Number of physical UART ports this driver handles. */
+#define UART_NUM 5
+/* Per-port overrun counters, indexed by uart id. */
+int g_uart_overrun[5];
+/* Interval used when re-arming the RX DMA hrtimer. */
+ktime_t g_hr_interval;
+
+
+/* Console uart id parsed from the kernel command line. */
+int g_cons_id_cmdline;
+EXPORT_SYMBOL(g_cons_id_cmdline);
+
+#ifdef DEBUG_CONSOLE
+#undef DEBUG_CONSOLE
+#endif
+#define DEBUG_CONSOLE g_cons_id_cmdline
+/****************************************************************************/
+
+/* Use device name ttyS, major 4, minor 64-68. This is the usual serial port
+ * name, but it is legally reserved for the 8250 driver. */
+#define SERIAL_zx29_MAJOR TTY_MAJOR
+#define SERIAL_MINOR_START 64
+
+#define UART_PORT_AUTOBAUD_ON 1
+#define UART_PORT_AUTOBAUD_OFF 0
+#define UART_PORT_AUTOBAUD_BYTE 2
+#define UART_AT_SENDOK_NUM 6
+#define UART_AUTOBAUD_LEVEL 5
+#define UART_AUTOBAUD_CHECKBYTE 4
+#define UART_AUTOBAUD_RATE 115200
+#define UART1_AUTOBAUD_RATE 921600
+
+
+/* Autobaud scratch state: last received probe bytes and detection flags. */
+unsigned char uart_port_autobaud_buffer[UART_PORT_AUTOBAUD_BYTE] = {0};
+unsigned char uart_port_autobaud_gtflag = 0 ;
+unsigned char uart_port_autobaud_suflag = 0 ;
+/* 1 = console input enabled (see console_input sysfs attribute). */
+unsigned char g_console_open_flag = 1;
+
+
+/* Canned "\r\nOK\r\n" reply sent after autobaud detection. */
+unsigned char UART_AT_send_ok[UART_AT_SENDOK_NUM] =
+ {
+ 0x0d,0x0a,0x4F,0x4B,0x0d,0x0a
+ };
+
+/* Byte patterns of "AT"/"at" as seen when sampled at each candidate rate. */
+unsigned char UART_baud_check[UART_AUTOBAUD_LEVEL][UART_AUTOBAUD_CHECKBYTE]=
+ {
+ {0x61,0x74,0x41,0x54},{0x06,0x9e,0x06,0x98},{0x1c,0x80,0x1c,0x00},
+ {0xe0,0x00,0xe0,0x00},{0x00,0x00,0x00,0x00},
+ };
+unsigned int UART_baud[UART_AUTOBAUD_LEVEL] =
+ {
+ 115200,57600,38400,19200,9600
+ };
+/* termios cflag constants matching UART_baud[], same order. */
+unsigned int UART_termios_cflag[UART_AUTOBAUD_LEVEL] =
+ {
+ B115200,B57600,B38400,B19200,B9600
+ };
+
+#ifdef CONFIG_SERIAL_CORE_CONSOLE
+#define uart_console(port) ((port)->cons && (port)->cons->index == (port)->line)
+#else
+#define uart_console(port) (0)
+#endif
+
+/****************************************************************************/
+/*
+ * Local per-uart structure.
+ */
+/* Per-port driver state wrapping the generic uart_port. */
+struct zx29_uart_port
+{
+ struct uart_port port;
+ unsigned int sigs; /* Local copy of line sigs */
+ unsigned int old_status;
+ unsigned char imr; /* Local interrupt mask reg mirror */
+#if CONFIG_SERIAL_ZX29_DMA
+ unsigned char dmacr; /* DMA reg*/
+#endif
+ bool rts_state;
+ bool autorts; /* hardware flow control */
+ struct clk *wclk; /* uart work clock */
+ struct clk *busclk; /* uart apb clock */
+ bool autobaud;
+ bool autobaud_state;
+ unsigned int baudrate;
+ bool uartwake;
+
+ int irq;
+ int irq_state;
+ int rxd_irq; /* wakeup irq on the RXD pad */
+ struct tasklet_struct write_wakeup;
+ bool rxd_wakeup;
+ int rxd_int_depth;
+ bool enter_suspend;
+#if CONFIG_SERIAL_ZX29_DMA
+ /* DMA stuff */
+ bool using_tx_dma;
+ bool using_rx_dma;
+ struct zx29_dmarx_data dmarx;
+ struct zx29_dmatx_data dmatx;
+ struct timer_list rx_dma_timer;
+ struct hrtimer rx_dma_hrtimer;
+ struct task_struct *dma_compl_th; /* drain thread, see dma_complete_thread() */
+ struct semaphore sema;
+ struct semaphore sema_cyclic;
+ bool port_close;
+ bool work_state;
+ size_t pre_pending;
+ struct zx29_sgbuf *sg2tty; /* buffer handed from DMA callback to drain thread */
+ size_t sg2tty_len;
+ struct zx29_sgbuf *curr_sg;
+ int enable_ctsrts;
+ int enable_wakeup;
+
+ struct notifier_block wakeup_notifier;
+
+#endif
+ //means application decide close and release DMA &wakelock
+ int app_ctrl;
+ int sleep_state;
+ //if app_ctrl is set or using kernel control sleep,set this flag
+ int uart_power_mode;
+};
+
+
+
+static struct zx29_uart_port zx29_uart_ports[UART_NUM];
+
+/* NOTE(review): this macro is redefined identically further down — harmless
+ * (identical redefinition is legal C) but one copy could be dropped. */
+#define zx29_MAXPORTS ARRAY_SIZE(zx29_uart_ports)
+/* One snapshot in the debug trace ring captured by test_uart_static(). */
+typedef struct __UART_STATIC{
+ int cnt;
+ char head[16]; /* first bytes of the traced buffer (not NUL-terminated) */
+ unsigned long long s_time; /* local_clock() timestamp */
+ int func_step; /* caller-supplied step marker */
+ unsigned int fr; /* UART flag register at capture time */
+ unsigned int ris; /* UART raw interrupt status at capture time */
+}uart_static;
+#define STATIC_UART_ID 0
+uart_static g_uart_static[256] = {0};
+int g_uart_static_cnt = 0;
+/*
+ * Record a debug snapshot (first bytes of @buf, count, timestamp, FR/RIS
+ * registers) into the g_uart_static ring for uart STATIC_UART_ID only.
+ * NOTE(review): for cnt >= 16 strncpy copies exactly 16 bytes, so head[]
+ * is not NUL-terminated — callers must treat it as raw bytes, not a string.
+ */
+void test_uart_static(int uart_id, char *buf, int cnt, int steps)
+{
+ if(uart_id != STATIC_UART_ID)
+ return;
+ if(buf){
+ if(cnt >= 16){
+ strncpy(g_uart_static[g_uart_static_cnt].head, buf, 16);
+ }else{
+ memcpy(g_uart_static[g_uart_static_cnt].head, buf, cnt);
+ }
+ }
+ g_uart_static[g_uart_static_cnt].cnt = cnt;
+ g_uart_static[g_uart_static_cnt].s_time = local_clock();
+ g_uart_static[g_uart_static_cnt].func_step = steps;
+ g_uart_static[g_uart_static_cnt].fr = UART_GET_FR(&zx29_uart_ports[uart_id].port);
+ g_uart_static[g_uart_static_cnt].ris = UART_GET_RIS(&zx29_uart_ports[uart_id].port);
+
+ /* wrap the ring index */
+ if(++g_uart_static_cnt >= 256)
+ g_uart_static_cnt = 0;
+}
+
+
+
+
+#define zx29_MAXPORTS ARRAY_SIZE(zx29_uart_ports)
+void zx29_uart_stop_rx(struct uart_port *port);
+
+#if CONFIG_SERIAL_ZX29_DMA
+static inline bool zx29_dma_tx_start(struct zx29_uart_port *zup);
+static inline void zx29_dma_tx_stop(struct zx29_uart_port *zup);
+static bool zx29_dma_tx_irq(struct zx29_uart_port *zup);
+static int zx29_uart_dma_tx_chars(struct zx29_uart_port *zup);
+void uart_dma_rx_callback(void *data);
+void uart_dma_rx_callback_use_dma_cyclic(void * data);
+static void zx29_uart_dma_rx_chars(struct zx29_uart_port *zup,
+ //u32 pending, bool use_buf_b,
+ u32 pending, struct zx29_sgbuf *sgbuf,
+ bool readfifo, unsigned long *flags);
+static inline void zx29_dma_rx_stop(struct zx29_uart_port *zup);
+static inline bool zx29_dma_rx_available(struct zx29_uart_port *zup);
+static inline bool zx29_dma_rx_running(struct zx29_uart_port *zup);
+static int zx29_dma_rx_trigger_dma(struct zx29_uart_port *zup);
+static int zx29_dma_rx_trigger_dma_use_dma_cyclic(struct zx29_uart_port *zup);
+
+static void zx29_uart_rx_dma_chars(struct zx29_uart_port *zup, unsigned long *flags);
+dma_peripheral_id uart_get_rx_dma_peripheral_id(struct zx29_uart_port *zup);
+
+#if RX_DMA_WORK
+static void zx29_uart_rx_timeout_chars(struct zx29_uart_port *zup, unsigned long *flags);
+static inline bool zx29_dma_rx_work_scheduled(struct zx29_uart_port *zup);
+
+static void zx29_uart_rt_dma(struct zx29_uart_port *zup, unsigned long *flags);
+static void uart_dma_cycle_deinit(struct zx29_uart_port *zup);
+#endif
+#endif
+
+
+
+/*******************************************************************************
+* Function: uart_wakeup_callback.
+* Description: uart_wakeup_callback.
+* Parameters:
+* Input:val:means wakeup or sleep notify to other device
+*
+* Output:v:means devices been called return result
+*
+* Returns:
+*
+* Others:
+********************************************************************************/
+/*******************************************************************************
+* Function: uart_wakeup_callback.
+* Description: PM notifier callback.  On wakeup (val == 1) clear sleep_state,
+*              hold a wakeup source and drain pending RX DMA data; on sleep
+*              set sleep_state, stop RX and release the wakeup source.
+*              Runs under the port spinlock.
+* Parameters:
+* Input:val:means wakeup or sleep notify to other device
+*
+* Output:v:means devices been called return result
+*
+* Returns: always 0.
+*
+* Others: pdev declaration hoisted above the first statement (kernel style
+*         forbids declarations after statements); behavior unchanged.
+********************************************************************************/
+int uart_wakeup_callback(struct notifier_block * nb, unsigned long val, void * v)
+{
+ int *call_result = (int *)v;
+ unsigned long flags = 0;
+ struct zx29_uart_port *zup = container_of(nb, struct zx29_uart_port, wakeup_notifier);
+ struct platform_device *pdev;
+
+ if(!zup || zup->port_close){
+ *call_result |= 0;
+ return 0;
+ }
+ pdev = zup->port.private_data;
+ raw_spin_lock_irqsave(&zup->port.lock, flags);
+ if(val == 1){//wakeup
+ zup->sleep_state = 0;
+ pm_stay_awake(&pdev->dev);
+ zx29_uart_rx_dma_chars(zup, &flags);
+
+ }else{//sleep
+ zup->sleep_state = 1;
+ zx29_uart_stop_rx(&zup->port);
+ pm_relax(&pdev->dev);
+
+ }
+ *call_result |= 0;
+ raw_spin_unlock_irqrestore(&zup->port.lock, flags);
+ return 0;
+}
+
+/* Return the cached sleep state for uart 0..2, or -1 for an invalid index. */
+int zx29_get_sleep_state(int uart_index)
+{
+ if (uart_index >= 0 && uart_index <= 2)
+ return zx29_uart_ports[uart_index].sleep_state;
+
+ printk("invalid uart index\n");
+ return -1;
+}
+EXPORT_SYMBOL_GPL(zx29_get_sleep_state);
+
+/* Record the sleep state (normalised to 0/1) for uart 0..2. */
+void zx29_set_sleep_state(int state, int uart_index)
+{
+ if (uart_index >= 0 && uart_index <= 2) {
+ printk(" uart %d, state change to:%d\n", uart_index, state);
+ zx29_uart_ports[uart_index].sleep_state = state ? 1 : 0;
+ } else {
+ printk("invalid uart index\n");
+ }
+}
+EXPORT_SYMBOL_GPL(zx29_set_sleep_state);
+
+/* sysfs show handler: report the port's current sleep_state flag. */
+static ssize_t sleep_state_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev);
+ //struct zx29_uart_platdata *pdata = pdev->dev.platform_data;
+
+ return sprintf(buf, "\n wakeup_enable = %d\n",zx29_uart_ports[pdev->id].sleep_state);
+}
+
+/*
+ * sysfs store handler: parse a hex flag and set the port's sleep_state to
+ * 0 or 1.  Fix: removed the unused platform-data local variable.
+ */
+static ssize_t sleep_state_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ uint32_t flag = 0;
+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev);
+
+ flag = simple_strtoul(buf, NULL, 16);
+ zx29_uart_ports[pdev->id].sleep_state = (flag ? 1: 0);
+ return count;
+}
+
+DEVICE_ATTR(sleep_state, S_IRUGO | S_IWUSR, sleep_state_show,
+ sleep_state_store);
+//bool uart_dma_filter_fn (struct dma_chan *chan, void *param)
+//{
+// dma_peripheral_id peri_id = (dma_peripheral_id) param;
+// if (chan->chan_id == (unsigned int)peri_id){
+// printk("uart_dma_filter_fn, peri_id:%d, ok\n", peri_id);
+// return true;
+// }
+// chan->private = param;
+//
+// return false;
+//}
+static void zx29_uart_console_putc(struct uart_port *port, int c);
+void zx29_uart_putc(struct uart_port *port, int c);
+
+#if CONFIG_SERIAL_ZX29_DMA
+/*
+ * Re-arm the RX DMA poll timer.  The port lock is dropped around mod_timer()
+ * and re-taken before returning, so the caller's critical section is briefly
+ * open here.
+ * NOTE(review): RX_DMA_TIMEOUT is already a jiffies value (HZ/100), yet it
+ * is passed through msecs_to_jiffies() as if it were milliseconds — confirm
+ * the intended delay.
+ */
+void uart_mod_timer(struct zx29_uart_port *zup, unsigned long *flags)
+{
+ unsigned long t_delay = 0;
+ t_delay = msecs_to_jiffies(RX_DMA_TIMEOUT);
+ spin_unlock_irqrestore(&zup->port.lock, *flags);
+ //printk("uart_mod_timer, delay %d jiffies\n", t_delay);
+ mod_timer(&(zup->rx_dma_timer), jiffies + t_delay);
+
+ spin_lock_irqsave(&zup->port.lock, *flags);
+}
+/**
+* Show the console_input attribute.
+*/
+/**
+* Show the console_input attribute.
+*/
+static ssize_t console_input_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "\n console_input = %d\n",g_console_open_flag);
+}
+
+/**
+ * Store the console_input attribure.
+ * 0: disable console input function,only out put log
+ * 1: able console input, can input commands
+ */
+static ssize_t console_input_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ g_console_open_flag = simple_strtoul(buf, NULL, 16);
+
+ return count;
+}
+
+DEVICE_ATTR(console_input, S_IRUGO | S_IWUSR, console_input_show,
+ console_input_store);
+
+/* sysfs show handler: report the per-port CTS/RTS flow-control flag. */
+static ssize_t ctsrts_input_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev);
+
+ if(pdev->id < 0 || pdev->id >= UART_NUM){
+ printk("ctsrts_input_store, invalid uart id, return error\n");
+ return 0;
+ }
+ return sprintf(buf, "\n uart %d ctsrts_input = %d\n", pdev->id, zx29_uart_ports[pdev->id].enable_ctsrts);
+}
+
+/*
+ * sysfs store handler: set the per-port CTS/RTS flow-control flag (hex).
+ * Fix: bound-check pdev->id before indexing zx29_uart_ports[] (the show
+ * handler already did); an out-of-range id previously caused an
+ * out-of-bounds write.
+ */
+static ssize_t ctsrts_input_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ uint32_t flag = 0;
+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev);
+
+ if(pdev->id < 0 || pdev->id >= UART_NUM){
+ printk("ctsrts_input_store, invalid uart id, return error\n");
+ return count;
+ }
+ if(pdev->id != 0){
+ printk("ctsrts_input_store, invalid uart id, only uart support hardware control\n");
+ }
+ flag = simple_strtoul(buf, NULL, 16);
+ zx29_uart_ports[pdev->id].enable_ctsrts = flag;
+
+ return count;
+}
+
+DEVICE_ATTR(ctsrts_input, S_IRUGO | S_IWUSR, ctsrts_input_show,
+ ctsrts_input_store);
+
+/* sysfs show handler: wakeup is always reported as enabled. */
+static ssize_t wakeup_enable_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev);
+
+ return sprintf(buf, "\n wakeup_enable = %d\n",1);
+}
+
+/*
+ * sysfs store handler: set the per-port wakeup enable flag (hex).
+ * Fix: bound-check pdev->id before indexing zx29_uart_ports[]; an
+ * out-of-range id previously caused an out-of-bounds write.
+ */
+static ssize_t wakeup_enable_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ uint32_t flag = 0;
+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev);
+
+ if(pdev->id < 0 || pdev->id >= UART_NUM){
+ printk("\nwakeup_enable_store, invalid uart id, return error\n");
+ return count;
+ }
+ if(pdev->id != 4){
+ printk("\nctsrts_input_store, invalid uart id, only lp_uart(uart 4) support wakeup\n");
+ }
+ flag = simple_strtoul(buf, NULL, 16);
+ zx29_uart_ports[pdev->id].enable_wakeup = flag;
+
+ return count;
+}
+
+DEVICE_ATTR(wakeup_enable, S_IRUGO | S_IWUSR, wakeup_enable_show,
+ wakeup_enable_store);
+
+/* sysfs show handler: report whether the application controls DMA/wakelock. */
+static ssize_t app_ctrl_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev);
+ //struct zx29_uart_platdata *pdata = pdev->dev.platform_data;
+
+ return sprintf(buf, "%d\n",zx29_uart_ports[pdev->id].app_ctrl);
+}
+
+/* sysfs store handler: set the app_ctrl flag (any non-zero hex value -> 1). */
+static ssize_t app_ctrl_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ uint32_t flag = 0;
+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev);
+ //struct zx29_uart_platdata *pdata = pdev->dev.platform_data;
+ flag = simple_strtoul(buf, NULL, 16);
+ // pdata->uart_wakeup_enable = flag;
+ zx29_uart_ports[pdev->id].app_ctrl = (flag == 0) ? 0 : 1;
+
+ return count;
+}
+DEVICE_ATTR(app_ctrl, S_IRUGO | S_IWUSR, app_ctrl_show,
+ app_ctrl_store);
+
+/* Count of RXD-pad wakeup events (incremented elsewhere in this driver). */
+int rxd_wake_cnt = 0;
+/* sysfs show handler: dump the port's rx/tx/error counters and wakeup count. */
+static ssize_t statics_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev);
+ //struct zx29_uart_platdata *pdata = pdev->dev.platform_data;
+
+ return sprintf(buf, "\n RX:%u,TX:%u,OE:%u,brk:%u,FE:%u,PE:%u ,rxd_wake_cnt:%d\n",
+ zx29_uart_ports[pdev->id].port.icount.rx,
+ zx29_uart_ports[pdev->id].port.icount.tx,
+ zx29_uart_ports[pdev->id].port.icount.overrun,
+ zx29_uart_ports[pdev->id].port.icount.brk,
+ zx29_uart_ports[pdev->id].port.icount.frame,
+ zx29_uart_ports[pdev->id].port.icount.parity,
+ rxd_wake_cnt
+ );
+}
+DEVICE_ATTR(statics, S_IRUGO, statics_show, NULL);
+#define VEHICLE_USE_ONE_UART_LOG 1
+#if VEHICLE_USE_ONE_UART_LOG
+#define ICP_CORE_ID_PS CORE_PS0
+#define ICP_CORE_ID_CAP 1
+#define ICP_CHANNEL_CONSOLE_UART 7
+#define ICP_MSG_LEN_CONSOLE_UART 2
+#define ICP_BUFFERSIZE_CONSOLE_TOGGLE 16
+#define SYMB_PS_CORE_ID ICP_CORE_ID_PS
+#define SYMB_CAP_CORE_ID ICP_CORE_ID_CAP
+#define SYMB_WHAT_CORE_ID 3
+#define ENABLE_CURRENT_CONSOLE_UART 1
+#define DISABLE_CURRENT_CONSOLE_UART 0
+#define ENABLE_TOGGLE 1
+#define DISABLE_TOGGLE 0
+unsigned char g_core_id_occupy_uart = 0;
+unsigned char g_cap_uart_toggle = 0;
+static irqreturn_t zx29_uart_interrupt(int irq, void *dev_id);
+/*
+ * Give the console uart back to this (CAP) core: re-enable its interrupt,
+ * mark CAP as the owner and kick the write-wakeup tasklet under the port
+ * lock so queued TX resumes.
+ */
+static void restart_current_cons_uart(void)
+{
+ struct zx29_uart_port *zup = &zx29_uart_ports[DEBUG_CONSOLE];
+ struct uart_port *port = &zup->port;
+ enable_irq(port->irq);
+ g_core_id_occupy_uart = SYMB_CAP_CORE_ID;
+ spin_lock(&zup->port.lock);
+ tasklet_schedule(&zup->write_wakeup);
+ spin_unlock(&zup->port.lock);
+}
+
+/* Hand the console uart to the PS core: disable our irq and record the owner. */
+static void forbid_current_cons_uart(void)
+{
+ struct zx29_uart_port *zup = &zx29_uart_ports[DEBUG_CONSOLE];
+ struct uart_port *port = &zup->port;
+ disable_irq(port->irq);
+ g_core_id_occupy_uart = SYMB_PS_CORE_ID;
+}
+/*
+ * Dispatch a 2-byte PS->CAP console-uart control message:
+ * arr[0] selects the target (CAP command or ownership query result),
+ * arr[1] carries the action / core id.  Unknown combinations are logged.
+ */
+static void process_ps2cap_rpmsg(char *arr)
+{
+ char target = arr[0];
+ char action = arr[1];
+
+ if (target == SYMB_CAP_CORE_ID) {
+ if (action == ENABLE_CURRENT_CONSOLE_UART) {
+ restart_current_cons_uart();
+ } else if (action == DISABLE_CURRENT_CONSOLE_UART) {
+ printk("current console uart not enable.\n");
+ g_core_id_occupy_uart = SYMB_CAP_CORE_ID;
+ } else {
+ printk("%s error!!\n",__func__);
+ }
+ } else if (target == SYMB_WHAT_CORE_ID) {
+ if (action == SYMB_PS_CORE_ID) {
+ g_core_id_occupy_uart = SYMB_PS_CORE_ID;
+ forbid_current_cons_uart();
+ } else if (action == SYMB_CAP_CORE_ID) {
+ g_core_id_occupy_uart = SYMB_CAP_CORE_ID;
+ } else {
+ printk("%s error!!\n",__func__);
+ }
+ } else {
+ printk("%s error!!\n",__func__);
+ }
+}
+/* RPMSG callback for the PS->CAP console-uart channel; ignores empty messages. */
+static void icp_callback_ps2cap(void *buf, unsigned int len)
+{
+ if (len == 0) {
+ printk("%s empty.\n", __func__);
+ return ;
+ }
+ process_ps2cap_rpmsg((char *)buf);
+}
+/*
+ * Ask the PS core to enable (val == ENABLE_TOGGLE) or disable the console
+ * uart on its side; on successful send, mirror the new owner in
+ * g_core_id_occupy_uart.  Fixes: declarations hoisted above the first
+ * statement (kernel C style) and typo in the failure message ("ohter").
+ */
+static void echo_to_change_other_uart(uint32_t val)
+{
+ int ret;
+ char arr[2] = {0};
+ T_RpMsg_Msg icp_msg;
+
+ if(val > ENABLE_TOGGLE)
+ {
+ printk("echo para error!!!\n");
+ return;
+ }
+ arr[0] = SYMB_PS_CORE_ID;
+ arr[1] = val;
+ icp_msg.coreID = CORE_PS0;
+ icp_msg.chID = ICP_CHANNEL_CONSOLE_UART;
+ icp_msg.flag = RPMSG_WRITE_INT; /* 1- means send an icp interrupt> */
+ icp_msg.buf = arr;
+ icp_msg.len = ICP_MSG_LEN_CONSOLE_UART;
+ ret = rpmsgWrite(&icp_msg);
+ if(ret == 0){
+ if(val == ENABLE_TOGGLE)
+ g_core_id_occupy_uart = SYMB_PS_CORE_ID;
+ else if(val == DISABLE_TOGGLE)
+ g_core_id_occupy_uart = SYMB_CAP_CORE_ID;
+ }else
+ printk("echo_to_change_other_uart fail.\n");
+}
+/* sysfs show handler: report whether the console uart is toggled to PS. */
+static ssize_t console_uart_toggle_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "\n console_uart_toggle_show %d. \n", g_cap_uart_toggle);
+}
+/*
+ * sysfs store handler: 1 hands the console uart to the PS core (disable our
+ * irq, notify PS), 0 takes it back locally.  Other values are ignored.
+ */
+static ssize_t console_uart_toggle_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ uint32_t flag = 0;
+ flag = simple_strtoul(buf, NULL, 16);
+ if(flag == ENABLE_TOGGLE){
+ g_cap_uart_toggle = 1;
+ forbid_current_cons_uart();
+ echo_to_change_other_uart(flag);
+ }else if(flag == DISABLE_TOGGLE){
+ g_cap_uart_toggle = 0;
+ g_core_id_occupy_uart = SYMB_CAP_CORE_ID;
+ }
+ return count;
+}
+DEVICE_ATTR(console_uart_toggle, S_IRUGO | S_IWUSR, console_uart_toggle_show,
+ console_uart_toggle_store);
+/*
+ * Broadcast the current console-uart owner (g_core_id_occupy_uart) to the AP
+ * core over RPMSG.  Best effort: the rpmsgWrite() result is intentionally
+ * ignored.  Fix: declarations hoisted above the first statement (kernel C
+ * style); behavior unchanged.
+ */
+static void notify_occupy_uart_coreid_to_other(void)
+{
+ char arr[2] = {0};
+ T_RpMsg_Msg icp_msg;
+
+ arr[0] = SYMB_WHAT_CORE_ID;
+ arr[1] = g_core_id_occupy_uart;
+ icp_msg.coreID = CORE_AP;
+ icp_msg.chID = ICP_CHANNEL_CONSOLE_UART;
+ icp_msg.flag = RPMSG_WRITE_INT; /* 1- means send an icp interrupt> */
+ icp_msg.buf = arr;
+ icp_msg.len = ICP_MSG_LEN_CONSOLE_UART;
+ rpmsgWrite(&icp_msg);
+}
+/* sysfs show handler: report which core currently owns the console uart. */
+static ssize_t coreid_occupy_uart_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "\n core %d occupy cons uart now! \n",g_core_id_occupy_uart);
+}
+/*
+ * sysfs store handler: force the console-uart owner and sync the toggle flag.
+ * Fix: the second branch tested the constant SYMB_PS_CORE_ID (always true)
+ * instead of comparing it against the written value.
+ */
+static ssize_t coreid_occupy_uart_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ uint32_t flag = 0;
+ flag = simple_strtoul(buf, NULL, 16);
+ g_core_id_occupy_uart = flag;
+ if(flag == SYMB_CAP_CORE_ID){
+ g_cap_uart_toggle = 0;
+ }else if(flag == SYMB_PS_CORE_ID){
+ g_cap_uart_toggle = 1;
+ }
+ return count;
+}
+DEVICE_ATTR(coreid_occupy_uart, S_IRUGO | S_IWUSR, coreid_occupy_uart_show,
+ coreid_occupy_uart_store);
+#endif
+
+//extern int (*pm_callback_fn)(void);
+#ifdef CONFIG_CPU_IDLE
+typedef int (*pm_callback_fn)(void);
+extern int zx_pm_register_callback(pm_callback_fn enter_cb, pm_callback_fn exit_cb);
+
+extern void disable_irq_nosync(unsigned int irq);
+extern void enable_irq(unsigned int irq);
+
+/* Bump the RXD-interrupt disable depth for this port (no hardware access). */
+void uart_rxd_int_disable(struct uart_port *port)
+{
+ struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
+ zup->rxd_int_depth++;
+}
+EXPORT_SYMBOL(uart_rxd_int_disable);
+
+/*
+ * CPU-idle enter hook for uart 0: skip when the port irq is off or all
+ * interrupts are masked; otherwise mark the RXD interrupt depth so the
+ * (currently commented-out) wakeup irq would be enabled exactly once.
+ */
+int uart_0_pm_enter(void)
+{
+ struct zx29_uart_port *zup = &zx29_uart_ports[0];
+
+ //zDrvInt_UnmaskIrq(UART0_RXD_INT);
+ if(zup->irq_state == 0 || zup->imr== 0)
+ return 0;
+
+ //pcu_int_clear(PCU_UART0_RXD_INT);
+ if(!zup->rxd_int_depth){
+ //enable_irq(UART0_RXD_INT);
+ zup->rxd_int_depth++;
+ }
+ return 0;
+}
+
+/* CPU-idle exit hook for uart 0: currently a no-op. */
+int uart_0_pm_exit(void)
+{
+
+ return 0;
+}
+#endif
+/****************************************************************************/
+
+/*
+ * Allocate one page-sized coherent DMA bounce buffer for @sg and wrap it in
+ * a single-entry scatterlist.  Returns 0 on success, -ENOMEM on allocation
+ * failure.  Paired with zx29_sgbuf_free().
+ */
+static int zx29_sgbuf_init(struct dma_chan *chan, struct zx29_sgbuf *sg,
+ enum dma_data_direction dir)
+{
+ dma_addr_t dma_addr;
+
+ sg->buf = dma_alloc_coherent(chan->device->dev,
+ ZX29_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
+ if (!sg->buf){
+ printk("zx29_sgbuf_init fail, no mem\n");
+ return -ENOMEM;
+ }
+ sg_init_table(&sg->sg, 1);
+ sg_set_page(&sg->sg, phys_to_page(dma_addr),
+ ZX29_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
+ sg_dma_address(&sg->sg) = dma_addr;
+ sg_dma_len(&sg->sg) = ZX29_DMA_BUFFER_SIZE;
+ sg->dma_addr = dma_addr;
+ return 0;
+}
+
+/*
+ * Free a bounce buffer allocated by zx29_sgbuf_init().
+ * Fixes: dma_addr_t is an integer cookie, so clear it with 0 rather than the
+ * pointer constant NULL; sg->buf is also cleared so a repeated call becomes
+ * a harmless no-op instead of a double free.
+ */
+static void zx29_sgbuf_free(struct dma_chan *chan, struct zx29_sgbuf *sg,
+ enum dma_data_direction dir)
+{
+ if (sg->buf) {
+ dma_free_coherent(chan->device->dev,
+ ZX29_DMA_BUFFER_SIZE, sg->buf,
+ sg_dma_address(&sg->sg));
+ sg->buf = NULL;
+ sg->dma_addr = 0;
+ }
+}
+
+
+/****************************************************************************/
+/* Report TIOCSER_TEMT when the transmitter is idle and the TX FIFO not full. */
+static unsigned int zx29_uart_tx_empty(struct uart_port *port)
+{
+ return (UART_GET_FR(port)&(UART_FR_TXBUSY|UART_FR_TXFF)) ? 0 : TIOCSER_TEMT;
+}
+
+/****************************************************************************/
+/*
+ * Drive DTR/RTS/loopback (and auto-RTS when hardware flow control is active)
+ * in the control register from the TIOCM_* bits in @mctrl; the requested
+ * signals are cached in zup->sigs.
+ */
+static void zx29_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
+ unsigned int cr;
+
+ zup->sigs = mctrl;
+ cr = UART_GET_CR(&zup->port);
+
+ cr = (mctrl & TIOCM_DTR) ? (cr | UART_CR_DTR) : (cr & ~UART_CR_DTR);
+ cr = (mctrl & TIOCM_RTS) ? (cr | UART_CR_RTS) : (cr & ~UART_CR_RTS);
+ cr = (mctrl & TIOCM_LOOP) ? (cr | UART_CR_LBE) : (cr & ~UART_CR_LBE);
+
+ /* We need to disable auto-RTS if we want to turn RTS off */
+ if (zup->autorts)
+ cr = (mctrl & TIOCM_RTS) ? (cr | UART_CR_RTSEN) : (cr & ~UART_CR_RTSEN);
+
+ UART_PUT_CR(port, cr);
+}
+
+/****************************************************************************/
+/*
+ * Build the TIOCM_* modem-signal mask from the flag register (CTS/DCD/DSR/RI)
+ * plus the locally cached RTS request.
+ */
+static unsigned int zx29_uart_get_mctrl(struct uart_port *port)
+{
+ struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
+ unsigned int fr = UART_GET_FR(port);
+ unsigned int sigs = zup->sigs & TIOCM_RTS;
+
+ if (fr & UART_FR_CTS)
+ sigs |= TIOCM_CTS;
+ if (fr & UART_FR_DCD)
+ sigs |= TIOCM_CD;
+ if (fr & UART_FR_DSR)
+ sigs |= TIOCM_DSR;
+ if (fr & UART_FR_RI)
+ sigs |= TIOCM_RI;
+
+ return sigs;
+}
+
+/****************************************************************************/
+/*
+ * Start transmitting.  Console port: drain the circular buffer by polled
+ * writes (or silently discard it when the PS core owns the uart).  Other
+ * ports: opportunistically prefill the TX FIFO, then try DMA; if DMA cannot
+ * start, fall back to TX-interrupt-driven transfer.
+ */
+static void zx29_uart_start_tx(struct uart_port *port)
+{
+ struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
+ unsigned int control = 0;
+ unsigned int reg_bak[10] = {0};
+ struct circ_buf *xmit = &zup->port.state->xmit;
+ int count = 0;
+#if VEHICLE_USE_ONE_UART_LOG
+ if((port->line == DEBUG_CONSOLE))
+ {
+ if(g_core_id_occupy_uart == SYMB_PS_CORE_ID){
+ /* PS core owns the uart: drop the pending bytes instead of sending. */
+ #if 1
+ count = uart_circ_chars_pending(xmit);
+ while(count-- > 0)
+ {
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ if (uart_circ_empty(xmit))
+ break;
+ }
+ #endif
+ return;
+ }
+ /* Polled console output: write every pending byte synchronously. */
+ count = uart_circ_chars_pending(xmit);
+ while(count-- > 0)
+ {
+ zx29_uart_console_putc(&zup->port, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ zup->port.icount.tx++;
+ if (uart_circ_empty(xmit)){
+ break;
+ }
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ {
+ spin_lock(&zup->port.lock);
+ tasklet_schedule(&zup->write_wakeup);
+ spin_unlock(&zup->port.lock);
+ return;
+ }
+ return;
+ }
+else
+#endif
+{
+ /* Non-console: prefill the FIFO while no TX interrupt is pending. */
+ if(!(UART_GET_RIS(port)&UART_TXIS) && (UART_GET_FR(port) & UART_FR_TXFE))
+ {
+ if(!(UART_GET_RIS(port)&UART_TXIS))
+ {
+ count = uart_circ_chars_pending(xmit);
+ if(count >= zup->port.fifosize)
+ count = 15;//sent data more than TX ifls, TXIS will coming soon
+ if(count != 0){
+ do {
+ zx29_uart_putc(&zup->port, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ zup->port.icount.tx++;
+ if (uart_circ_empty(xmit) || (UART_GET_RIS(port)&UART_TXIS))
+ break;
+ } while (--count > 0);
+ }
+ }
+
+ }
+ }
+#if CONFIG_SERIAL_ZX29_DMA
+ if(!uart_console(port))
+ {
+ /* Prefer DMA; fall back to interrupt-driven TX when it cannot start. */
+ if (!zx29_dma_tx_start(zup))
+ {
+ zup->imr |= UART_TXIM;
+ UART_PUT_IMSC(port, zup->imr);
+ if(!(UART_GET_RIS(port)&UART_TXIS)){
+ if((UART_GET_FR(port) & UART_FR_TXFF))
+ return;
+ count = uart_circ_chars_pending(xmit);
+ while (count > 0) {
+ UART_PUT_CHAR(&zup->port, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ zup->port.icount.tx++;
+ if (uart_circ_empty(xmit) || (UART_GET_RIS(port)&UART_TXIS) ||
+ (UART_GET_FR(port) & UART_FR_TXFF))
+ break;
+ }
+ }
+ }
+ }
+ else
+ {
+ zup->imr |= UART_TXIM;
+ UART_PUT_IMSC(port, zup->imr);
+ }
+#else
+ zup->imr |= UART_TXIM;
+ UART_PUT_IMSC(port, zup->imr);
+#endif
+}
+
+/*
+ * Tasklet body: if this run was triggered by an RXD-pad wakeup, hold a 5s
+ * wakeup-source event and re-disable the wake irq; otherwise do the normal
+ * uart_write_wakeup() to unblock writers.
+ */
+static void uart_write_wakeup_task(unsigned long _port)
+{
+ struct uart_port *port = (void *)_port;
+struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
+ struct platform_device *pdev=port->private_data;
+ //printk("wakeup_task,port:%d, rxd_wakeup:%d\n", port->line, zup->rxd_wakeup);
+
+ if(zup->rxd_wakeup){
+ //rxd wake
+ printk("wakeup_task,port:%d, rxd_wakeup:%d\n", port->line, zup->rxd_wakeup);
+ pm_wakeup_dev_event(&pdev->dev, 5000, false);
+ disable_irq(zup->rxd_irq);
+ zup->rxd_wakeup = false;
+ } else {
+ uart_write_wakeup(port);
+ }
+
+}
+
+#if CONFIG_SERIAL_ZX29_DMA
+/*
+ * Drain thread for cyclic RX DMA: woken (sema_cyclic) by the DMA callback
+ * each time a ring buffer fills, it pushes that buffer to the tty layer and
+ * advances the ring index.  Exits when the port closes or the ring buffer
+ * was torn down.
+ * NOTE(review): the 4096 buffer length is hard-coded — presumably it should
+ * track ZX29_DMA_BUFFER_SIZE (PAGE_SIZE); confirm on non-4K-page configs.
+ */
+int dma_complete_thread_use_dma_cyclic(void *ptr)
+{
+ unsigned long flags;
+ struct zx29_uart_port *zup = (struct zx29_uart_port *)ptr;
+ size_t pending;
+ int dma_count = 0;
+ struct device *dev = NULL;
+ dev = zup->dmarx.chan->device->dev;
+ int uart_id = zup->port.line;
+ while(down_interruptible(&zup->sema_cyclic) == 0)
+ {
+ if(zup->port_close || !uart_dma_cycle[uart_id].sgbuf[uart_dma_cycle[uart_id].flg_enter_th].dma_addr)
+ break;
+ spin_lock_irqsave(&zup->port.lock, flags);
+ uart_dma_cycle[uart_id].cnt_th_total++;
+ uart_dma_cycle[uart_id].cnt_th++;
+ zup->sg2tty = &uart_dma_cycle[uart_id].sgbuf[uart_dma_cycle[uart_id].flg_enter_th];
+ zup->sg2tty_len = 4096;
+ pending = zup->sg2tty_len;
+ if(uart_dma_cycle[uart_id].flg_be || uart_dma_cycle[uart_id].flg_fe|| uart_dma_cycle[uart_id].flg_pe){
+ printk("error in uart%d: fe %u ,be %u pe %u.\n",zup->port.line,zup->port.icount.frame,
+ zup->port.icount.brk,zup->port.icount.parity);
+ uart_dma_cycle[uart_id].flg_be = 0;
+ uart_dma_cycle[uart_id].flg_fe = 0;
+ uart_dma_cycle[uart_id].flg_pe = 0;
+ }
+ dma_sync_sg_for_cpu(dev, &zup->sg2tty->sg, 1, DMA_FROM_DEVICE);
+ /* Drop the lock while copying into the (possibly blocking) tty buffer. */
+ spin_unlock_irqrestore(&zup->port.lock, flags);
+ dma_count = tty_insert_flip_string(&zup->port.state->port,
+ zup->sg2tty->buf, pending);
+ tty_flip_buffer_push(&zup->port.state->port);
+ spin_lock_irqsave(&zup->port.lock, flags);
+ dma_sync_sg_for_device(dev, &zup->sg2tty->sg, 1, DMA_FROM_DEVICE);
+ zup->sg2tty = NULL;
+ zup->sg2tty_len = 0;
+ zup->port.icount.rx += dma_count;
+ if (dma_count < pending)
+ dev_info(zup->port.dev,
+ "couldn't insert all characters (TTY is full?)\n");
+ uart_dma_cycle[uart_id].flg_enter_th = (uart_dma_cycle[uart_id].flg_enter_th+1)%UART_DMA_CYCLE_RX_CONFIG_COUNT;
+ uart_dma_cycle[uart_id].cnt_callback--;
+ if(!hrtimer_active(&zup->rx_dma_hrtimer))
+ hrtimer_restart(&zup->rx_dma_hrtimer);
+ spin_unlock_irqrestore(&zup->port.lock, flags);
+ }
+ return 0;
+}
+/*
+ * Drain thread for double-buffered RX DMA: woken (sema) when a DMA transfer
+ * completes, it pushes the buffer recorded in zup->sg2tty to the tty layer.
+ * While DMA keeps running it also re-arms the RX hrtimer.  Exits when the
+ * port closes.
+ */
+int dma_complete_thread(void *ptr)
+{
+ unsigned long flags;
+ struct zx29_uart_port *zup = (struct zx29_uart_port *)ptr;
+
+ size_t pending;
+ struct dma_tx_state state;
+ struct zx29_dmarx_data *dmarx = &zup->dmarx;
+ struct dma_chan *rxchan = dmarx->chan;
+ bool lastbuf;
+ int dma_count = 0;
+ struct zx29_sgbuf *sgbuf = NULL;
+ struct device *dev = NULL;
+ dma_peripheral_id rx_id = uart_get_rx_dma_peripheral_id(zup);
+ dev = zup->dmarx.chan->device->dev;
+
+ while(down_interruptible(&zup->sema) == 0)
+ {
+ if(zup->port_close)
+ break;
+ spin_lock_irqsave(&zup->port.lock, flags);
+ // tty = zup->port.state->port.tty;
+ if(!zup->sg2tty)
+ panic("dma_complete_thread, buffer 2 tty is invalid\n");
+ // dev = zup->dmarx.chan->device->dev;
+ pending = zup->sg2tty_len;
+ if(zx29_dma_rx_running(zup)){
+
+ test_uart_static(zup->port.line, NULL, 0, 10);
+ //uart_mod_timer(zup, &flags);
+ if(!hrtimer_active(&zup->rx_dma_hrtimer))
+ hrtimer_forward_now(&zup->rx_dma_hrtimer, g_hr_interval);
+ }
+ /* Pick everything from the DMA first */
+ if (pending) {
+ /* Sync in buffer */
+ dma_sync_sg_for_cpu(dev, &zup->sg2tty->sg, 1, DMA_FROM_DEVICE);
+ //BUG();
+
+ /* Drop the lock while copying into the tty flip buffer. */
+ spin_unlock_irqrestore(&zup->port.lock, flags);
+ dma_count = tty_insert_flip_string(&zup->port.state->port,
+ zup->sg2tty->buf, pending);
+ test_uart_static(zup->port.line, zup->sg2tty->buf, pending, 11);
+ tty_flip_buffer_push(&zup->port.state->port);
+
+ spin_lock_irqsave(&zup->port.lock, flags);
+ /* Return buffer to device */
+ dma_sync_sg_for_device(dev, &zup->sg2tty->sg, 1, DMA_FROM_DEVICE);
+
+ zup->sg2tty = NULL;
+ zup->sg2tty_len = 0;
+ zup->port.icount.rx += dma_count;
+
+ //if(zup->port.line == 0)
+ //printk("yanming dma_complete_thread, dma2tty:%d\n", dma_count);
+ if (dma_count < pending){
+ sc_debug_info_record(MODULE_ID_CAP_UART, "uart%d couldn't insert all characters \n",zup->port.line);
+ dev_info(zup->port.dev,
+ "couldn't insert all characters (TTY is full?)\n");
+ }
+
+
+ }
+#if 0
+ zup->work_state = false;
+ zup->pre_pending = 0;
+ zup->imr |= UART_RXIM;
+ UART_PUT_IMSC(&zup->port, zup->imr);
+#endif
+ spin_unlock_irqrestore(&zup->port.lock, flags);
+ }
+
+ return 0;
+}
+#endif
+#endif
+
+/****************************************************************************/
/*
 * serial_core .stop_tx hook: mask the TX interrupt, stop any queued TX DMA,
 * and drop the cpuidle busy flag taken for transmission.
 * NOTE(review): callers are expected to hold port.lock per serial_core
 * convention -- confirm for the direct calls inside this file.
 */
static void zx29_uart_stop_tx(struct uart_port *port)
{
	struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
	zup->imr &= ~UART_TXIM;
	UART_PUT_IMSC(port, zup->imr);
#ifdef CONFIG_SERIAL_ZX29_UART_CONSOLE
	/* debug console: defer the write wakeup to tasklet context */
	if((port->line == DEBUG_CONSOLE) && uart_tx_stopped(port))
	{
		//uart_write_wakeup(port);
		tasklet_schedule(&zup->write_wakeup);
	}
#endif

#if CONFIG_SERIAL_ZX29_DMA
	zx29_dma_tx_stop(zup);
#endif

	/* allow the SoC to enter low-power idle again */
	zx_cpuidle_set_free(IDLE_FLAG_UART);

}
+
+/****************************************************************************/
/*
 * serial_core .stop_rx hook: mask the RX, RX-timeout and all RX-error
 * interrupts, and stop the RX DMA channel when DMA support is compiled in.
 */
void zx29_uart_stop_rx(struct uart_port *port)
{
	struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);

	zup->imr &= ~(UART_RXIM|UART_RTIM|UART_FEIM|UART_PEIM|UART_BEIM|UART_OEIM);
	UART_PUT_IMSC(port, zup->imr);
#if CONFIG_SERIAL_ZX29_DMA
	zx29_dma_rx_stop(zup);
#endif
}
+
+/****************************************************************************/
+static void zx29_uart_break_ctl(struct uart_port *port, int break_state)
+{
+ struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
+ unsigned long flags;
+ unsigned int lcr_h;
+ spin_lock_irqsave(&zup->port.lock, flags);
+ lcr_h = UART_GET_LCRH(port);
+ if (break_state == -1)
+ lcr_h |= UART_LCRH_BRK;
+ else
+ lcr_h &= ~UART_LCRH_BRK;
+ UART_PUT_LCRH(port, lcr_h);
+ spin_unlock_irqrestore(&zup->port.lock, flags);
+}
+
+/****************************************************************************/
/*
 * serial_core .enable_ms hook: unmask the modem-status delta interrupts
 * (RI, CTS, DCD, DSR) so changes on those lines are reported.
 */
static void zx29_uart_enable_ms(struct uart_port *port)
{
	struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
	zup->imr |= UART_RIMIM|UART_CTSMIM|UART_DCDMIM|UART_DSRMIM;
	UART_PUT_IMSC(port, zup->imr);
}
+
+/****************************************************************************/
+/*--------------------------------------------------------------------
+ * Reads up to 256 characters from the FIFO or until it's empty and
+ * inserts them into the TTY layer. Returns the number of characters
+ * read from the FIFO.
+ --------------------------------------------------------------------*/
/*
 * Drain the RX FIFO (up to 256 chars per call) into the TTY layer.
 * While autobaud detection is active, bytes are captured into
 * uart_port_autobaud_buffer instead of being delivered.
 * Returns the number of characters taken from the FIFO (only counted
 * in the non-autobaud path).
 */
static int zx29_uart_fifo_to_tty(struct zx29_uart_port *zup)
{
	struct uart_port *port = &zup->port;
	u32 status, ch, i = 0;
	unsigned int flag, max_count = 256;
	int fifotaken = 0;
	/* copy of the first 16 received bytes, handed to test_uart_static() */
	u8 uart_poll_char[16] ={0};

	while (max_count--) {
		status = UART_GET_FR(port);
		/* stop as soon as the hardware reports the RX FIFO empty */
		if (status & UART_FR_RXFE)
			break;

		/* Take chars from the FIFO and update status */
		/* data read carries per-character error bits above the low byte */
		ch = UART_GET_CHAR(port) | UART_DUMMY_DR_RX;

#if 0
		if(g_console_open_flag == 0 &&
			port->line == DEBUG_CONSOLE){
			if((ch&0xff) == 't'){
				memset(uart_poll_char, 0, sizeof(uart_poll_char));
				uart_poll_char[0] = 't';
				i = 0;
				printk("ch = %c i = %d\n",ch,i);
			}else if ((ch&0xff) == 'y' && (i == 1)){
				uart_poll_char[1] = 'y';
				printk("ch = %c i = %d\n",ch,i);
			}else if ((ch&0xff) == 'o' && (i == 2)){
				uart_poll_char[2] = 'o';
				printk("ch = %c i = %d\n",ch,i);
			}else if ((ch&0xff) == 'p' && (i == 3)){
				uart_poll_char[3] = 'p';
				printk("ch = %c i = %d\n",ch,i);

			}else if ((ch&0xff) == 'e' && (i == 4)){
				uart_poll_char[4] = 'e';
				printk("ch = %c i = %d\n",ch,i);

			}else if ((ch&0xff) == 'n' && (i == 5)){
				uart_poll_char[5] = 'n';
				printk("ch = %c i = %d\n",ch,i);
				g_console_open_flag = 1;
				printk("ch = %c i = %d,g_console_open_flag:%d\n",ch,i,g_console_open_flag);
			}else {
				i = 10;
			}
			i++;
		}
#endif
		flag = TTY_NORMAL;
		/* autobaud active: stash the first UART_PORT_AUTOBAUD_BYTE bytes
		 * for rate detection instead of delivering them */
		if(zup->autobaud_state == UART_PORT_AUTOBAUD_ON)
		{
			if(zup->port.icount.rx < UART_PORT_AUTOBAUD_BYTE)
			{
				uart_port_autobaud_buffer[zup->port.icount.rx] = ch;
			}
			else
			{
				uart_port_autobaud_gtflag = 1 ;
			}
		}
		zup->port.icount.rx++;
		if(zup->autobaud_state == UART_PORT_AUTOBAUD_OFF)
		{
			if(fifotaken < 16){
				uart_poll_char[fifotaken] = ch & 0xFF;
			}
			fifotaken++;

			/* classify per-character error bits and update icount */
			if (unlikely(ch & UART_DR_ERROR)) {
				if (ch & UART_DR_BE) {
					/* break also raises FE/PE; suppress them */
					ch &= ~(UART_DR_FE | UART_DR_PE);
					zup->port.icount.brk++;
					if (uart_handle_break(&zup->port))
						continue;
				} else if (ch & UART_DR_PE)
					zup->port.icount.parity++;
				else if (ch & UART_DR_FE)
					zup->port.icount.frame++;
				else if (ch & UART_DR_OE){
					zup->port.icount.overrun++;
					//if(!uart_console(&zup->port))
					//	BUG_ON(1);
				}
				/* keep only the error bits the user asked to see */
				ch &= zup->port.read_status_mask;

				if (ch & UART_DR_BE)
					flag = TTY_BREAK;
				else if (ch & UART_DR_PE)
					flag = TTY_PARITY;
				else if (ch & UART_DR_FE)
					flag = TTY_FRAME;
			}

			if (uart_handle_sysrq_char(&zup->port, ch & 255))
				continue;
			/* the debug console drops input until the console is opened */
			if(g_console_open_flag || port->line != DEBUG_CONSOLE){
				uart_insert_char(&zup->port, ch, UART_DR_OE, ch, flag);
			}
		}
	}

	test_uart_static(zup->port.line, uart_poll_char, fifotaken, 3);

	return fifotaken;
}
+
+/****************************************************************************/
+static void zx29_uart_rx_chars(struct zx29_uart_port *zup)
+{
+ unsigned long flags;
+
+ //struct tty_struct *tty = zup->port.state->port.tty;
+
+ zx29_uart_fifo_to_tty(zup);
+ spin_unlock(&zup->port.lock);
+
+ tty_flip_buffer_push(&zup->port.state->port);
+
+#if CONFIG_SERIAL_ZX29_DMA
+ if(!uart_console(&zup->port)){//console doesn't use dma rcv data
+ if (zx29_dma_rx_available(zup)) {
+ if (zx29_dma_rx_trigger_dma(zup)) {
+ dev_dbg(zup->port.dev, "could not trigger RX DMA job "
+ "fall back to interrupt mode again\n");
+ zup->imr |= UART_RXIM;
+ } else{
+ zup->imr &= ~UART_RXIM;
+ }
+ UART_PUT_IMSC(&zup->port,zup->imr);
+ }
+ }
+#endif
+RX_END:
+ spin_lock(&zup->port.lock);
+
+}
+
+/****************************************************************************/
+static void zx29_uart_tx_chars(struct zx29_uart_port *zup)
+{
+ struct circ_buf *xmit = &zup->port.state->xmit;
+ unsigned long flags;
+ int count;
+
+ if (zup->port.x_char) {
+ UART_PUT_CHAR(&zup->port, zup->port.x_char);
+ zup->port.icount.tx++;
+ zup->port.x_char = 0;
+ return;
+ }
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&zup->port)) {
+ zx29_uart_stop_tx(&zup->port);
+ return;
+ }
+#if CONFIG_SERIAL_ZX29_DMA
+ /* If we are using DMA mode, try to send some characters. */
+ if(!uart_console(&(zup->port)))
+ {
+ if (zx29_dma_tx_irq(zup))
+ return;
+ }
+#endif
+ count = zup->port.fifosize >> 1;
+ do {
+ zx29_uart_putc(&zup->port, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ zup->port.icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ } while (--count > 0);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ {
+ spin_unlock(&zup->port.lock);
+ //uart_write_wakeup(&zup->port);
+ tasklet_schedule(&zup->write_wakeup);
+ spin_lock(&zup->port.lock);
+ }
+
+ if (uart_circ_empty(xmit))
+ zx29_uart_stop_tx(&zup->port);
+}
+
+#if CONFIG_SERIAL_ZX29_DMA
+
+dma_peripheral_id uart_get_rx_dma_peripheral_id(struct zx29_uart_port *zup)
+{
+ struct uart_port *port = &zup->port;
+ if(port->line < UART0 || port->line > UART4){
+ printk("get_rx_dma_peripheral_id,fail, invalid port->line:%d\n", port->line);
+ }
+ if(port->line == UART0){
+ return DMA_CH_UART0_RX;
+ } else if(port->line == UART1){
+ return DMA_CH_UART1_RX;
+ }else if(port->line == UART2){
+ return DMA_CH_UART2_RX;
+ }
+// else if(port->line == UART3){
+// return DMA_CH_UART3_RX;
+// }else if(port->line == UART4){
+// return DMA_CH_UART4_RX;
+// }
+
+ return DMA_CH_NUM;
+}
+
+/*
+ * We received a transmit interrupt without a pending X-char but with
+ * pending characters.
+ * Locking: called with port lock held and IRQs disabled.
+ * Returns:
+ * false if we want to use PIO to transmit
+ * true if we queued a DMA buffer
+ */
static bool zx29_dma_tx_irq(struct zx29_uart_port *zup)
{
	/* PIO-only port: let the caller transmit by interrupt */
	if (!zup->using_tx_dma)
		return false;

	/*
	 * If we already have a TX buffer queued, but received a
	 * TX interrupt, it will be because we've just sent an X-char.
	 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
	 */
	if (zup->dmatx.queued) {
		zup->dmacr |= UART_TXDMAE;
		UART_PUT_DMACR(&zup->port, zup->dmacr);
		zup->imr &= ~UART_TXIM;
		UART_PUT_IMSC(&zup->port,zup->imr);
		return true;
	}

	/*
	 * We don't have a TX buffer queued, so try to queue one.
	 * If we successfully queued a buffer, mask the TX IRQ.
	 */
	if (zx29_uart_dma_tx_chars(zup) > 0) {
		zup->imr &= ~UART_TXIM;
		UART_PUT_IMSC(&zup->port,zup->imr);
		return true;
	}
	return false;
}
+
+
+/*
+ * Stop the DMA transmit (eg, due to received XOFF).
+ * Locking: called with port lock held and IRQs disabled.
+ */
static inline void zx29_dma_tx_stop(struct zx29_uart_port *zup)
{
	/* leave the descriptor queued; just gate the UART's TX DMA request */
	if (zup->dmatx.queued) {
		zup->dmacr &= ~UART_TXDMAE;
		UART_PUT_DMACR(&zup->port, zup->dmacr);
	}
}
+
+
+/*
+ * Try to start a DMA transmit, or in the case of an XON/OFF
+ * character queued for send, try to get that character out ASAP.
+ * Locking: called with port lock held and IRQs disabled.
+ * Returns:
+ * false if we want the TX IRQ to be enabled
+ * true if we have a buffer queued
+ */
static inline bool zx29_dma_tx_start(struct zx29_uart_port *zup)
{
	u16 dmacr;

	if (!zup->using_tx_dma)
		return false;

	if (!zup->port.x_char) {
		/* no X-char, try to push chars out in DMA mode */
		bool ret = true;

		if (!zup->dmatx.queued) {
			/* queue a fresh buffer; on failure fall back to the TX IRQ */
			if (zx29_uart_dma_tx_chars(zup) > 0) {
				zup->imr &= ~UART_TXIM;
				ret = true;
			} else {
				zup->imr |= UART_TXIM;
				ret = false;
			}
			UART_PUT_IMSC(&zup->port,zup->imr);
		} else if (!(zup->dmacr & UART_TXDMAE)) {
			/* buffer already queued but DMA request gated: re-enable it */
			zup->dmacr |= UART_TXDMAE;
			UART_PUT_DMACR(&zup->port, zup->dmacr);
		}
		return ret;
	}

	/*
	 * We have an X-char to send.  Disable DMA to prevent it loading
	 * the TX fifo, and then see if we can stuff it into the FIFO.
	 */
	dmacr = zup->dmacr;
	zup->dmacr &= ~UART_TXDMAE;
	UART_PUT_DMACR(&zup->port, zup->dmacr);

	if (UART_GET_FR(&zup->port) & UART_FR_TXFF) {
		/*
		 * No space in the FIFO, so enable the transmit interrupt
		 * so we know when there is space.  Note that once we've
		 * loaded the character, we should just re-enable DMA.
		 */
		return false;
	}

	UART_PUT_CHAR(&zup->port, zup->port.x_char);
	//writew(uap->port.x_char, uap->port.membase + UART01x_DR);
	zup->port.icount.tx++;
	zup->port.x_char = 0;

	/* Success - restore the DMA state */
	zup->dmacr = dmacr;
	UART_PUT_DMACR(&zup->port, zup->dmacr);
	//writew(dmacr, uap->port.membase + UART011_DMACR);

	return true;
}
+
+/****************************************************************************/
+
+//#if CONFIG_SERIAL_ZX29_DMA
+/*
+ * Flush the transmit buffer.
+ * Locking: called with port lock held and IRQs disabled.
+ */
+static void zx29_dma_flush_buffer(struct uart_port *port)
+{
+ struct zx29_uart_port *zup = (struct zx29_uart_port *)port;
+ if (!zup->using_tx_dma)
+ return;
+
+ /* Avoid deadlock with the DMA engine callback */
+ //dmaengine_terminate_all(zup->dmatx.chan);
+ if (zup->dmatx.queued) {
+
+ //printk(KERN_INFO "zx29_dma_flush_buffer enter[%s][%d] Port[%d]\n",__func__,__LINE__,port->line);
+ dma_unmap_sg(zup->dmatx.chan->device->dev, &zup->dmatx.sg, 1,
+ DMA_TO_DEVICE);
+ zup->dmatx.queued = false;
+ zup->dmacr &= ~UART_TXDMAE;
+ UART_PUT_DMACR(&zup->port, zup->dmacr);
+ }
+}
+
/*
 * Arm a single-shot RX DMA transfer into the current ping/pong buffer
 * (sgbuf_a / sgbuf_b) and enable the UART's RX DMA request, masking the
 * RX/RX-timeout interrupts.
 * Returns 0 on success, -EIO when no channel exists, -EBUSY when no
 * descriptor could be prepared (callers fall back to interrupt RX).
 * NOTE(review): call sites differ in whether port.lock is held -- verify
 * before relying on any locking assumption here.
 */
static int zx29_dma_rx_trigger_dma(struct zx29_uart_port *zup)
{
	struct dma_chan *rxchan = zup->dmarx.chan;
	struct zx29_dmarx_data *dmarx = &zup->dmarx;
	struct dma_async_tx_descriptor *desc;
	struct zx29_sgbuf *sgbuf;

	dma_peripheral_id rx_id = uart_get_rx_dma_peripheral_id(zup);
	if (!rxchan)
	{
		printk("[%s][%d]\n",__func__,__LINE__);
		return -EIO;
	}

	/* Start the RX DMA job */

	sgbuf = zup->dmarx.use_buf_b ?
		&zup->dmarx.sgbuf_b : &zup->dmarx.sgbuf_a;
	/*

	sgbuf = zup->dmarx.use_buf_b ?
		&zup->dmarx.sgbuf_a : &zup->dmarx.sgbuf_b;
	*/
	/* fill in the per-transfer descriptor slot for this trigger */
	zup->dmarx.rx_def[zup->dmarx.rx_index].link_addr=0;
	zup->dmarx.rx_def[zup->dmarx.rx_index].dest_addr=(unsigned int)(sgbuf->dma_addr);
	zup->dmarx.rx_def[zup->dmarx.rx_index].count=ZX29_DMA_BUFFER_SIZE;//fifo or max buffer?
	wmb();

	dmaengine_slave_config(rxchan, (struct dma_slave_config*)&zup->dmarx.rx_def[zup->dmarx.rx_index]);
	desc = rxchan->device->device_prep_interleaved_dma(rxchan,NULL,0);


	/*
	 * If the DMA engine is busy and cannot prepare a
	 * channel, no big deal, the driver will fall back
	 * to interrupt mode as a result of this error code.
	 */
	if (!desc) {
		printk(KERN_INFO "!!ERROR DESC !!![%s][%d]Port:[%d]\n",__func__,__LINE__,zup->port.line);
		sc_debug_info_record(MODULE_ID_CAP_UART, "uart%d ERROR DESC \n",zup->port.line);
		zup->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		//zx29_dma_force_stop(rx_id);
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = uart_dma_rx_callback;
	desc->callback_param = zup;
	zup->curr_sg = sgbuf;
	wmb();

	dmarx->cookie = dmaengine_submit(desc);
	dma_async_issue_pending(rxchan);
	atomic_inc(&zup->dmarx.count);
	zup->dmarx.rx_index = (zup->dmarx.rx_index +1)%UART_DMA_RX_MAX_COUNT;
	/* enable the UART-side RX DMA request and mask interrupt-mode RX */
	zup->dmacr |= UART_RXDMAE;
	UART_PUT_DMACR(&zup->port, zup->dmacr);
	zup->dmarx.running = true;
	zup->dmarx.used = true;
	zup->imr &= ~(UART_RXIM | UART_RTIM);
	UART_PUT_IMSC(&zup->port,zup->imr);


	return 0;
}
/*
 * Arm cyclic RX DMA over a ring of 5 * ZX29_DMA_BUFFER_SIZE bytes with a
 * period of ZX29_DMA_BUFFER_SIZE; uart_dma_rx_callback_use_dma_cyclic()
 * then fires once per completed period.
 * Returns 0 on success, -EIO when no channel, -EBUSY when no descriptor.
 */
static int zx29_dma_rx_trigger_dma_use_dma_cyclic(struct zx29_uart_port *zup)
{
	struct dma_chan *rxchan = zup->dmarx.chan;
	struct zx29_dmarx_data *dmarx = &zup->dmarx;
	struct dma_async_tx_descriptor *desc;
	dma_peripheral_id rx_id = uart_get_rx_dma_peripheral_id(zup);
	int uart_id = zup->port.line;
	if (!rxchan)
	{
		printk("[%s][%d]\n",__func__,__LINE__);
		return -EIO;
	}
	dmaengine_slave_config(rxchan, (struct dma_slave_config*)&uart_dma_cycle[uart_id].rxdef);
	desc = rxchan->device->device_prep_dma_cyclic(rxchan,NULL,(ZX29_DMA_BUFFER_SIZE *5) , ZX29_DMA_BUFFER_SIZE,0,0);
	if (!desc) {
		printk(KERN_INFO "!!ERROR DESC !!![%s][%d]Port:[%d]\n",__func__,__LINE__,zup->port.line);
		zup->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		return -EBUSY;
	}
	desc->callback = uart_dma_rx_callback_use_dma_cyclic;
	desc->callback_param = zup;
	wmb();
	dmarx->cookie = dmaengine_submit(desc);
	dma_async_issue_pending(rxchan);
	/* enable the UART-side RX DMA request and mask interrupt-mode RX */
	zup->dmacr |= UART_RXDMAE;
	UART_PUT_DMACR(&zup->port, zup->dmacr);
	uart_dma_cycle[uart_id].flg_enter_th = 0;
	zup->dmarx.running = true;
	zup->dmarx.used = true;
	zup->imr &= ~(UART_RXIM | UART_RTIM);
	UART_PUT_IMSC(&zup->port,zup->imr);
	return 0;
}
+
/*
 * DMA-engine completion callback for a single-shot RX transfer.
 * Accounts any raw RX error flags, stops the channel, publishes the filled
 * buffer (and its actual length, computed from the residual transfer count)
 * in sg2tty/sg2tty_len, flips the ping/pong buffer, re-arms DMA, and wakes
 * dma_complete_thread() via zup->sema.
 */
void uart_dma_rx_callback(void *data)
{
	unsigned long flags;

	struct zx29_uart_port *zup = (struct zx29_uart_port *)data;
	int uart_id = zup->port.line;
	struct zx29_dmarx_data *dmarx = &zup->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	struct device *dev = NULL;
//	struct dma_tx_state state;
	unsigned int ris_status;

	bool lastbuf;
	int dma_count = 0;
	struct zx29_sgbuf *sgbuf = zup->curr_sg;
	size_t pending;

	spin_lock_irqsave(&zup->port.lock, flags);
	/* fold raw error status into icount, then clear the latched bits */
	ris_status = UART_GET_RIS(&zup->port);
	if(ris_status & (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS)){
		if(ris_status & UART_OEIS){
			zup->port.icount.overrun++;
			g_uart_overrun[uart_id] = 4;
			test_uart_static(zup->port.line, NULL, 0, 20);
			//if(!uart_console(&zup->port))
			//	BUG_ON(1);
		}
		if(ris_status & UART_BEIS)
			zup->port.icount.brk++;
		if(ris_status & UART_PEIS)
			zup->port.icount.parity++;
		if(ris_status & UART_FEIS)
			zup->port.icount.frame++;
		UART_PUT_ICR(&zup->port, (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS));
	}
	dma_peripheral_id rx_id = uart_get_rx_dma_peripheral_id(zup);
	zx29_dma_stop(rx_id);

	dev = zup->dmarx.chan->device->dev;
	zup->dmacr &= ~UART_RXDMAE;
	UART_PUT_DMACR(&zup->port,zup->dmacr);

	//spin_lock_irqsave(&zup->port.lock, flags);
	/* publish the completed buffer for the draining thread */
	zup->sg2tty = sgbuf;
//	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	zup->sg2tty_len = zup->sg2tty->sg.length - zx29_dma_get_transfer_num(rx_id);
	//zx29_dma_force_stop(rx_id);
	//	dmaengine_terminate_all(rxchan);
	dmarx->use_buf_b = ! dmarx->use_buf_b;
	wmb();
	//BUG_ON(pending > ZX29_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	//dmaengine_terminate_all(rxchan);

	zup->dmarx.running = false;
	zup->dmarx.used = false;
	test_uart_static(zup->port.line, NULL, 0, 9);
	/* immediately re-arm DMA into the other buffer; fall back to IRQ RX */
	if (zx29_dma_rx_trigger_dma(zup)) {
		printk("rx_dma_chars RXDMA start fail\n");
		zup->imr |= UART_RXIM;
		UART_PUT_IMSC(&zup->port,zup->imr);
	}else{
		zup->pre_pending = 0;
		zup->dmarx.used = true;
		zup->work_state = true;
	}
	spin_unlock_irqrestore(&zup->port.lock, flags);

	up(&zup->sema);
}
+
/*
 * Cyclic-RX DMA period callback: count the completion, fold any raw RX
 * error flags into icount (and the per-port cycle bookkeeping), clear the
 * latched error bits, then wake the cyclic draining thread via sema_cyclic.
 */
void uart_dma_rx_callback_use_dma_cyclic(void *data)
{
	unsigned long flags;
	struct zx29_uart_port *zup = (struct zx29_uart_port *)data;
	unsigned int ris_status;
	int uart_id = zup->port.line;
	spin_lock_irqsave(&zup->port.lock, flags);
	uart_dma_cycle[uart_id].cnt_callback_total++;
	uart_dma_cycle[uart_id].cnt_callback++;
	ris_status = UART_GET_RIS(&zup->port);
	if(ris_status & (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS)){
		if(ris_status & UART_OEIS){
			zup->port.icount.overrun++;
			uart_dma_cycle[uart_id].flg_overrun = 1;
		}
		if(ris_status & UART_BEIS){
			uart_dma_cycle[uart_id].flg_be = 1;
			zup->port.icount.brk++;
		}
		if(ris_status & UART_PEIS){
			uart_dma_cycle[uart_id].flg_pe = 1;
			zup->port.icount.parity++;
		}
		if(ris_status & UART_FEIS){
			uart_dma_cycle[uart_id].flg_fe = 1;
			zup->port.icount.frame++;
		}
		UART_PUT_ICR(&zup->port, (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS));
	}
	spin_unlock_irqrestore(&zup->port.lock, flags);
	test_uart_static(zup->port.line, NULL, 0, 26);
	up(&zup->sema_cyclic);
}
/*
 * Stop RX DMA: gate the UART-side DMA request first, then stop the DMA
 * channel, and forget the in-flight scatter buffer.
 */
static inline void zx29_dma_rx_stop(struct zx29_uart_port *zup)
{
	dma_peripheral_id rx_id = uart_get_rx_dma_peripheral_id(zup);
	//zx29_dma_force_stop(rx_id);
	//dmaengine_terminate_all(zup->dmarx.chan);
	/* FIXME. Just disable the DMA enable */
	zup->dmacr &= ~UART_RXDMAE;
	UART_PUT_DMACR(&zup->port,zup->dmacr);
	zx29_dma_stop(rx_id);
#if 0
	//do we need check data received?
	if(zup->pre_pending){
		printk("pre_pending :%d\n ", zup->pre_pending);
	}
#endif
	zup->curr_sg = NULL;	/* no scatter buffer in flight any more */
}
+
+static void zx29_dma_remove(struct zx29_uart_port *zup)
+{
+ /* TODO: remove the initcall if it has not yet executed */
+ if (zup->dmatx.chan)
+ dma_release_channel(zup->dmatx.chan);
+ if (zup->dmarx.chan)
+ dma_release_channel(zup->dmarx.chan);
+}
+
+
+static void zx29_dma_shutdown(struct zx29_uart_port *zup)
+{
+ unsigned long flags;
+ dma_peripheral_id rx_id = uart_get_rx_dma_peripheral_id(zup);
+
+ if (!(zup->using_tx_dma || zup->using_rx_dma))
+ return;
+ /* Disable RX and TX DMA */
+ while(UART_GET_FR(&zup->port) & (UART_FR_TXBUSY | UART_FR_TXBUSY))
+ barrier();
+
+ spin_lock_irqsave(&zup->port.lock, flags);
+ //zx29_dma_force_stop(rx_id);
+ // dmaengine_terminate_all(zup->dmarx.chan);
+ zup->dmacr &= ~(UART_DMAONERR | UART_RXDMAE | UART_TXDMAE);
+ UART_PUT_DMACR(&zup->port,zup->dmacr);
+ zx29_dma_stop(rx_id);
+ zup->curr_sg = NULL;
+ spin_unlock_irqrestore(&zup->port.lock, flags);
+ if (zup->using_tx_dma) {
+ /* In theory, this should already be done by zx29_dma_flush_buffer */
+ dmaengine_terminate_all(zup->dmatx.chan);
+ if (zup->dmatx.queued) {
+ dma_unmap_sg(zup->dmatx.chan->device->dev, &zup->dmatx.sg, 1,
+ DMA_TO_DEVICE);
+ zup->dmatx.queued = false;
+ }
+ if(!zup->dmatx.buf)
+ kfree(zup->dmatx.buf);
+ zup->dmatx.buf = NULL;
+ zup->using_tx_dma = false;
+ }
+ if (zup->using_rx_dma) {
+ //dmaengine_terminate_all(zup->dmarx.chan);
+ /* Clean up the RX DMA */
+ if(!zup->uart_power_mode){
+ zx29_sgbuf_free(zup->dmarx.chan, &zup->dmarx.sgbuf_a, DMA_FROM_DEVICE);
+ zx29_sgbuf_free(zup->dmarx.chan, &zup->dmarx.sgbuf_b, DMA_FROM_DEVICE);
+ }else if(zup->uart_power_mode == 1){
+ uart_dma_cycle_deinit(zup);
+ }else
+ printk("uart%d dma shutdown fail.\n",zup->port.line);
+ zup->using_rx_dma = false;
+ zup->dmarx.used = false;
+ zup->dmarx.running = false;
+ zup->dmarx.use_buf_b = false;
+ zup->dmarx.rx_index = 0;
+ }
+ zup->pre_pending = 0;
+ zup->work_state = false;
+
+}
+
/*
 * Clear BREAK and disable the FIFOs on the channel's line-control register.
 * NOTE(review): the lcrh parameter is ignored -- the register is always
 * read back from hardware; confirm whether callers expect it to be used.
 */
static void zx29_shutdown_channel(struct zx29_uart_port *zup,
					unsigned int lcrh)
{
	unsigned long val;

	val = UART_GET_LCRH(&zup->port);
	val &= ~(UART_LCRH_BRK | UART_LCRH_FEN);
	UART_PUT_LCRH(&zup->port, val);
}
+
+
+static inline bool zx29_dma_rx_available(struct zx29_uart_port *zup)
+{
+ return zup->using_rx_dma;
+}
+
+static inline bool zx29_dma_rx_running(struct zx29_uart_port *zup)
+{
+ return zup->using_rx_dma && zup->dmarx.running;
+}
+
+static inline bool zx29_dma_rx_used(struct zx29_uart_port *zup)
+{
+ return zup->using_rx_dma && zup->dmarx.used;
+}
+
+static inline bool zx29_dma_rx_work_scheduled(struct zx29_uart_port *zup)
+{
+ return zup->using_rx_dma && zup->work_state;
+}
+
+
/*
 * DMA-engine completion callback for a finished TX transfer.
 * Unmaps the bounce buffer, gates the UART's TX DMA request, then either
 * queues the next chunk or re-enables the TX interrupt for PIO.
 */
void uart_dma_tx_callback(void *data)
{
	struct zx29_uart_port *zup = data;
	struct zx29_dmatx_data *dmatx = &zup->dmatx;

	unsigned long flags;
	u16 dmacr;
	spin_lock_irqsave(&zup->port.lock, flags);
	if (zup->dmatx.queued)
		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
			     DMA_TO_DEVICE);

	/* snapshot dmacr before clearing TXDMAE; see race note below */
	dmacr = zup->dmacr;
	zup->dmacr = dmacr & ~UART_TXDMAE;
	UART_PUT_DMACR(&zup->port,zup->dmacr);

	/*
	 * If TX DMA was disabled, it means that we've stopped the DMA for
	 * some reason (eg, XOFF received, or we want to send an X-char.)
	 *
	 * Note: we need to be careful here of a potential race between DMA
	 * and the rest of the driver - if the driver disables TX DMA while
	 * a TX buffer completing, we must update the tx queued status to
	 * get further refills (hence we check dmacr).
	 */
	if (!(dmacr & UART_TXDMAE) || uart_tx_stopped(&zup->port) ||
	    uart_circ_empty(&zup->port.state->xmit)) {
		zup->dmatx.queued = false;

		/* nothing more to send: release the cpuidle busy flag */
		zx_cpuidle_set_free(IDLE_FLAG_UART);


		spin_unlock_irqrestore(&zup->port.lock, flags);
		return;
	}

	if (zx29_uart_dma_tx_chars(zup) <= 0) {
		/*
		 * We didn't queue a DMA buffer for some reason, but we
		 * have data pending to be sent.  Re-enable the TX IRQ.
		 */
		zup->imr |= UART_TXIM;
		UART_PUT_IMSC(&zup->port, zup->imr);
	}
	spin_unlock_irqrestore(&zup->port.lock, flags);
}
+
/*
 * Copy the pending circ-buffer bytes into the TX bounce buffer, map it and
 * queue one DMA transfer.
 * Returns 1 when a transfer was queued, 0 when the payload is too small to
 * be worth DMA (caller uses PIO), or -EBUSY on map/descriptor failure.
 */
static int zx29_uart_dma_tx_chars(struct zx29_uart_port *zup)
{
	struct zx29_dmatx_data *dmatx = &zup->dmatx;
	struct dma_chan *tx_chan = dmatx->chan;
	struct dma_device *dma_dev = tx_chan->device;
	struct dma_async_tx_descriptor *desc;
	struct circ_buf *xmit = &zup->port.state->xmit;
	unsigned int count;

	/*
	 * Try to avoid the overhead involved in using DMA if the
	 * transaction fits in the first half of the FIFO, by using
	 * the standard interrupt handling.  This ensures that we
	 * issue a uart_write_wakeup() at the appropriate time.
	 */

	count = uart_circ_chars_pending(xmit);
	/* 16 is presumably the TX FIFO depth -- TODO confirm against spec */
	if (count < (16 >> 1)) {
		zup->dmatx.queued = false;
		return 0;
	}

	/* linearize the circular buffer into the bounce buffer */
	if (xmit->tail < xmit->head)
		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
	else {
		size_t first = UART_XMIT_SIZE - xmit->tail;
		size_t second ;//= xmit->head;

		if (first > count)
			first = count;
		second = count - first;
		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
		if (second)
			memcpy(&dmatx->buf[first], &xmit->buf[0], second);
	}
	dmatx->sg.length = count;

	if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
		zup->dmatx.queued = false;
		dev_dbg(zup->port.dev, "unable to map TX DMA\n");
		return -EBUSY;
	}


	zup->dmatx.tx_def.link_addr=0;
	zup->dmatx.tx_def.src_addr=(unsigned int)(dmatx->sg.dma_address);
	zup->dmatx.tx_def.count=count;
	wmb();
	dmaengine_slave_config(tx_chan, (struct dma_slave_config*)&zup->dmatx.tx_def);
	desc = tx_chan->device->device_prep_interleaved_dma(tx_chan,NULL,0);

	if (!desc) {
		printk(KERN_INFO "!!!!!ERROR TX DESC[%s][%d]\n",__func__,__LINE__);
		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
		zup->dmatx.queued = false;
		/*
		 * If DMA cannot be used right now, we complete this
		 * transaction via IRQ and let the TTY layer retry.
		 */
		dev_dbg(zup->port.dev, "TX DMA busy\n");
		return -EBUSY;
	}
	desc->callback = (dma_async_tx_callback)uart_dma_tx_callback;
	desc->callback_param = (void *)zup;
	dmaengine_submit(desc);
	dma_async_issue_pending(tx_chan);
	atomic_inc(&zup->dmatx.count);
	/* enable the UART-side TX DMA request */
	zup->dmacr |= UART_TXDMAE;
	UART_PUT_DMACR(&zup->port,zup->dmacr);
	zup->dmatx.queued = true;

	/*
	 * Now we know that DMA will fire, so advance the ring buffer
	 * with the stuff we just dispatched.
	 */
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	zup->port.icount.tx += count;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		//uart_write_wakeup(&zup->port);
		tasklet_schedule(&zup->write_wakeup);

	return 1;
}
+
/*
 * Push `pending` bytes from the given DMA scatter buffer into the TTY layer,
 * optionally drain the FIFO afterwards (readfifo), then flip the TTY buffer.
 * Called with port.lock held; *flags is the caller's irqsave cookie and the
 * lock is dropped around tty_insert_flip_string()/tty_flip_buffer_push().
 * NOTE(review): the `tty` local is unused.
 */
static void zx29_uart_dma_rx_chars(struct zx29_uart_port *zup,
			 //u32 pending, bool use_buf_b,
			 u32 pending, struct zx29_sgbuf *sgbuf,
			 bool readfifo, unsigned long *flags)
{
	struct tty_struct *tty = zup->port.state->port.tty;
#if 0
	struct zx29_sgbuf *sgbuf = use_buf_b ?
		&zup->dmarx.sgbuf_b : &zup->dmarx.sgbuf_a;
#endif
	struct device *dev = zup->dmarx.chan->device->dev;
	int dma_count = 0;
	u32 fifotaken = 0; /* only used for vdbg() */
	//unsigned long flags;

	/* Pick everything from the DMA first */
	if (pending) {
		/* Sync in buffer */

		dma_sync_sg_for_cpu(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);

		/*
		 * First take all chars in the DMA pipe, then look in the FIFO.
		 * Note that tty_insert_flip_buf() tries to take as many chars
		 * as it can.
		 */

		spin_unlock_irqrestore(&zup->port.lock, *flags);

		dma_count = tty_insert_flip_string(&zup->port.state->port,
				sgbuf->buf, pending);

		test_uart_static(zup->port.line, sgbuf->buf, pending, 6);
		spin_lock_irqsave(&zup->port.lock, *flags);
		/* Return buffer to device */
		dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);

		zup->port.icount.rx += dma_count;
		if (dma_count < pending)
			dev_info(zup->port.dev,
				"couldn't insert all characters (TTY is full?)\n");
	}

	/*
	 * Only continue with trying to read the FIFO if all DMA chars have
	 * been taken first.
	 */
	//if (dma_count == pending && readfifo) {
	if (readfifo) {
		/* Clear any error flags */
		//UART_PUT_ICR(&zup->port,UART_OEIC | UART_BEIC | UART_PEIC | UART_FEIC);
		/*
		 * If we read all the DMA'd characters, and we had an
		 * incomplete buffer, that could be due to an rx error, or
		 * maybe we just timed out. Read any pending chars and check
		 * the error status.
		 *
		 * Error conditions will only occur in the FIFO, these will
		 * trigger an immediate interrupt and stop the DMA job, so we
		 * will always find the error in the FIFO, never in the DMA
		 * buffer.
		 */
		test_uart_static(zup->port.line, NULL, 0, 7);
		fifotaken = zx29_uart_fifo_to_tty(zup);
	}
	/* hand everything collected in this pass to the line discipline */
	if((pending > 0) || (fifotaken > 0)) {
		spin_unlock(&zup->port.lock);
		tty_flip_buffer_push(&zup->port.state->port);
		spin_lock(&zup->port.lock);
	}
}
/*
 * Cyclic-mode variant: push one DMA period's bytes plus any FIFO-drained
 * bytes (fifo_buf/fifo_len) into the TTY layer.
 * A `pending` of 4096 is skipped -- presumably a full/untouched period
 * sentinel equal to ZX29_DMA_BUFFER_SIZE; TODO confirm.
 * Called with port.lock held; *flags is the caller's irqsave cookie. The
 * lock is dropped around each TTY insert/push and held again on return.
 */
static void zx29_uart_deal_dma_fifo_rx_chars_cyclic(struct zx29_uart_port *zup,
		u32 pending, struct zx29_sgbuf *sgbuf,
		unsigned long *flags, char *fifo_buf, int fifo_len)
{
	struct tty_struct *tty = zup->port.state->port.tty;
	struct device *dev = zup->dmarx.chan->device->dev;
	int dma_count = 0;
	int fifo_count = 0;
	u32 fifotaken = 0; /* only used for vdbg() */
	if ((pending) && (pending != 4096)) {
		dma_sync_sg_for_cpu(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
		spin_unlock_irqrestore(&zup->port.lock, *flags);
		dma_count = tty_insert_flip_string(&zup->port.state->port,
				sgbuf->buf, pending);
		test_uart_static(zup->port.line, sgbuf->buf, pending, 6);
		spin_lock_irqsave(&zup->port.lock, *flags);
		dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
		zup->port.icount.rx += dma_count;
		if (dma_count < pending)
			dev_info(zup->port.dev,
				"couldn't insert all characters (TTY is full?)\n");
	}
	if(fifo_len){
		spin_unlock_irqrestore(&zup->port.lock, *flags);
		fifo_count = tty_insert_flip_string(&zup->port.state->port,
				fifo_buf, fifo_len);
		/* scrub the first bytes of the scratch FIFO buffer after use */
		fifo_buf[0] = '\0';
		fifo_buf[1] = '\0';
		fifo_buf[2] = '\0';
		spin_lock_irqsave(&zup->port.lock, *flags);
	}
	zup->port.icount.rx += fifo_count;
	if(((pending) && (pending != 4096)) || (fifo_len > 0)){
		spin_unlock(&zup->port.lock);
		tty_flip_buffer_push(&zup->port.state->port);
		spin_lock(&zup->port.lock);
	}
}
+
+static void zx29_uart_deal_dma_fifo_rx_chars(struct zx29_uart_port *zup,
+ u32 pending, struct zx29_sgbuf *sgbuf,
+ unsigned long *flags, char *fifo_buf, int fifo_len)
+{
+ struct tty_struct *tty = zup->port.state->port.tty;
+
+ struct device *dev = zup->dmarx.chan->device->dev;
+ int dma_count = 0;
+ int fifo_count = 0;
+ u32 fifotaken = 0; /* only used for vdbg() */
+ if (pending) {
+ dma_sync_sg_for_cpu(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
+ spin_unlock_irqrestore(&zup->port.lock, *flags);
+ dma_count = tty_insert_flip_string(&zup->port.state->port,
+ sgbuf->buf, pending);
+ test_uart_static(zup->port.line, sgbuf->buf, pending, 6);
+ spin_lock_irqsave(&zup->port.lock, *flags);
+ dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
+ spin_unlock_irqrestore(&zup->port.lock, *flags);
+ zup->port.icount.rx += dma_count;
+ if (dma_count < pending)
+ dev_info(zup->port.dev,
+ "couldn't insert all characters (TTY is full?)\n");
+ }
+
+ if(fifo_len){
+ //printk("qq >> fifo len %d.\n",fifo_len);
+ fifo_count = tty_insert_flip_string(&zup->port.state->port,
+ fifo_buf, fifo_len);
+ //printk("qq >>fifo count %d,buf is %x %x %x .\n",fifo_count, fifo_buf[0],fifo_buf[1],fifo_buf[2]);
+ fifo_buf[0] = '\0';
+ fifo_buf[1] = '\0';
+ fifo_buf[2] = '\0';
+ //memset(fifo_buf, '\0', 4);
+ }
+
+ zup->port.icount.rx += fifo_count;
+ test_uart_static(zup->port.line, fifo_buf, fifo_count, 18);
+ if(pending > 0 || (fifo_len > 0)){
+ tty_flip_buffer_push(&zup->port.state->port);
+ spin_lock_irqsave(&zup->port.lock, *flags);
+ }
+}
+
/* NOTE(review): legacy single-shot RX-IRQ path, compiled out via "#if 0";
 * kept for reference only -- superseded by the callback/thread RX path. */
#if 0
static void zx29_dma_rx_irq(struct zx29_uart_port *zup, unsigned long *flags)
{
	struct zx29_dmarx_data *dmarx = &zup->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	struct zx29_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	enum dma_status dmastat;
	dma_peripheral_id rx_id = uart_get_rx_dma_peripheral_id(zup);

	uint32_t ris_status = UART_GET_RIS(&zup->port);
	//printk("rx irq\n");
	if(ris_status & (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS)){
		if(ris_status & UART_OEIS){
			zup->port.icount.overrun++;
			//if(!uart_console(&zup->port))
			//	BUG_ON(1);
		}
		if(ris_status & UART_BEIS)
			zup->port.icount.brk++;
		if(ris_status & UART_PEIS)
			zup->port.icount.parity++;
		if(ris_status & UART_FEIS)
			zup->port.icount.frame++;

		UART_PUT_ICR(&zup->port, (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS));
	}

	if(zx29_dma_rx_running(zup)){
		/*
		 * Pause the transfer so we can trust the current counter,
		 * do this before we pause the block, else we may
		 * overflow the FIFO.
		 */
		//	if(zx29_dma_stop(rx_id))
		//		printk( "uart%d unable to pause DMA transfer\n", zup->port.line);
		//rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
		//zx29_dma_force_stop(rx_id);
		//dmaengine_terminate_all(rxchan);

		//dmastat = zx29_dma_get_status();//Normally,this value is insignificance.

		/* Disable RX DMA - incoming data will wait in the FIFO */
		zup->dmacr &= ~UART_RXDMAE;
		UART_PUT_DMACR(&zup->port,zup->dmacr);
		zx29_dma_stop(rx_id);
		zup->dmarx.running = false;
		zup->dmarx.used = false;
		pending = sgbuf->sg.length - zx29_dma_get_transfer_num(rx_id);//state.residue;
		BUG_ON(pending > ZX29_DMA_BUFFER_SIZE);
		/* Then we terminate the transfer - we now know our residue */
		//dmaengine_terminate_all(rxchan);

		dmarx->use_buf_b = !dmarx->use_buf_b;
		/*
		 * This will take the chars we have so far and insert
		 * into the framework.
		 */
		zx29_uart_dma_rx_chars(zup, pending, sgbuf, false, flags);
	}

	/* Switch buffer & re-trigger DMA job */
	if (zx29_dma_rx_trigger_dma(zup)) {
		printk("uart%d could not retrigger RX DMA job\n",zup->port.line);
		zup->imr |= UART_RXIM;
		UART_PUT_IMSC(&zup->port, zup->imr);
	}
#if RX_DMA_WORK
	//printk("add timer\n");
	else{
		//	mod_timer(&(zup->rx_dma_timer), jiffies + msecs_to_jiffies(RX_DMA_TIMEOUT));
		uart_mod_timer(zup, flags);
		zup->pre_pending = 0;
		zup->work_state = true;
	}
#endif

}
#endif
+/****************************************************************************/
+/*
+ * zx29_uart_rx_dma_chars - arm DMA reception after an RX FIFO interrupt.
+ * @zup:   port being serviced
+ * @flags: caller's saved IRQ flags, passed through to uart_mod_timer()
+ *
+ * If a DMA buffer is available, start an RX DMA transfer.  On failure,
+ * fall back to interrupt-driven reception by unmasking the RX/RT
+ * interrupts; on success, start the poll timer that detects an idle line.
+ *
+ * Called from the interrupt handler with the port lock held.
+ *
+ * Fix: removed the unused local "tty" (only referenced by previously
+ * commented-out code; it triggered an unused-variable warning and a
+ * pointless pointer-chain read).
+ */
+static void zx29_uart_rx_dma_chars(struct zx29_uart_port *zup, unsigned long *flags)
+{
+	if (!zx29_dma_rx_available(zup))
+		return;
+
+	if (zx29_dma_rx_trigger_dma(zup)) {
+		printk("rx_dma_chars RXDMA start fail\n");
+		/* DMA could not start: fall back to PIO interrupts. */
+		zup->imr |= UART_RXIM | UART_RTIM;
+		UART_PUT_IMSC(&zup->port,zup->imr);
+	}
+#if RX_DMA_WORK
+	else{
+		/* DMA armed: poll for line idle instead of the RT interrupt. */
+		uart_mod_timer(zup, flags);
+		zup->pre_pending = 0;
+		zup->work_state = true;
+	}
+#endif
+}
+
+
+/****************************************************************************/
+/*
+ * zx29_uart_rx_timeout_chars - PIO path for a receive-timeout interrupt.
+ * @zup:   port being serviced
+ * @flags: caller's saved IRQ flags (unused here; kept for signature
+ *         symmetry with the other RX handlers)
+ *
+ * Drains the RX FIFO into the tty buffer and, unless this is the debug
+ * console while the console device is closed, pushes the data up to the
+ * line discipline.  The port lock is dropped around the push, presumably
+ * to avoid lock recursion through tty callbacks -- confirm against the
+ * rest of the driver's locking scheme.
+ *
+ * Fix: removed the unused local "fr"; the flag register (FR) on this
+ * PL011-style UART is a read-only status register, so dropping the dead
+ * read does not change hardware state.
+ */
+static void zx29_uart_rx_timeout_chars(struct zx29_uart_port *zup, unsigned long *flags)
+{
+	int rt_cnt = zx29_uart_fifo_to_tty(zup);
+
+	if(rt_cnt){
+		if(g_console_open_flag == 1 || zup->port.line != DEBUG_CONSOLE){
+			spin_unlock(&zup->port.lock);
+			tty_flip_buffer_push(&zup->port.state->port);
+			spin_lock(&zup->port.lock);
+		}
+	}
+}
+
+/*
+ * zx29_uart_rt_dma - receive-timeout interrupt while an RX DMA is active.
+ * @zup:   port being serviced
+ * @flags: caller's saved IRQ flags, forwarded to zx29_uart_dma_rx_chars()
+ *
+ * The line went idle while a DMA transfer was still in flight: fold the
+ * error status into the counters, stop the DMA channel, switch the double
+ * buffer, unmask RX/RT interrupts again and hand the partially filled
+ * buffer to the tty layer.  The trailing #if 0 block is the old in-place
+ * DMA re-trigger, kept for reference; re-arming is now done elsewhere.
+ *
+ * Called from the interrupt handler with the port lock held.
+ */
+static void zx29_uart_rt_dma(struct zx29_uart_port *zup, unsigned long *flags)
+{
+	struct zx29_dmarx_data *dmarx = &zup->dmarx;
+	struct dma_chan *rxchan = dmarx->chan;
+	struct zx29_sgbuf *sgbuf = zup->curr_sg;
+	size_t pending;
+	struct dma_tx_state state;
+	enum dma_status dmastat;
+
+	dma_peripheral_id rx_id = uart_get_rx_dma_peripheral_id(zup);
+	uint32_t ris_status = UART_GET_RIS(&zup->port);
+
+	//rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
+	/* Bytes received so far = buffer length - DMA's remaining count. */
+	pending = sgbuf->sg.length - zx29_dma_get_transfer_num(rx_id);
+	//printk("---zx29_uart_rt_dma, pending:%d, residue:%d\n", pending, state.residue);
+	/* Fold any line-error status into the counters and clear it. */
+	if(ris_status & (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS)){
+		if(ris_status & UART_OEIS){
+			zup->port.icount.overrun++;
+			// if(!uart_console(&zup->port))
+			//BUG_ON(1);
+		}
+		if(ris_status & UART_BEIS)
+			zup->port.icount.brk++;
+		if(ris_status & UART_PEIS)
+			zup->port.icount.parity++;
+		if(ris_status & UART_FEIS)
+			zup->port.icount.frame++;
+
+		UART_PUT_ICR(&zup->port, (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS));
+	}
+
+	if(zx29_dma_rx_running(zup)){
+		/*
+		 * Pause the transfer so we can trust the current counter,
+		 * do this before we pause the block, else we may
+		 * overflow the FIFO.
+		 */
+		zup->dmacr &= ~UART_RXDMAE;
+		UART_PUT_DMACR(&zup->port,zup->dmacr);
+		zx29_dma_stop(rx_id);
+		//rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
+		//printk( "uart%d unable to pause DMA transfer\n", zup->port.line);
+		//dmastat = rxchan->device->device_tx_status(rxchan,
+		//	dmarx->cookie, &state);
+	//	dmastat = zx29_dma_get_status();//Normally,this value is insignificance.
+
+		//zx29_dma_force_stop(rx_id);
+		//dmaengine_terminate_all(rxchan);
+
+		/* Disable RX DMA - incoming data will wait in the FIFO */
+		zup->dmarx.running = false;
+		zup->dmarx.used = false;
+		/* Nothing is queued for the tty worker any more. */
+		zup->curr_sg = zup->sg2tty = NULL;
+		zup->sg2tty_len = 0;
+		/* Fall back to interrupt reception until DMA is re-armed. */
+		zup->imr |= (UART_RTIM|UART_RXIM);
+		UART_PUT_IMSC(&zup->port, zup->imr);
+		/* Re-read the counter now that the channel is stopped. */
+		pending = sgbuf->sg.length - zx29_dma_get_transfer_num(rx_id);//state.residue;
+
+		//printk("---zx29_uart_rt_dma, after stop pending:%d, residue:%d\n", pending, state.residue);
+		BUG_ON(pending > ZX29_DMA_BUFFER_SIZE);
+		/* Then we terminate the transfer - we now know our residue */
+		//dmaengine_terminate_all(rxchan);
+
+		dmarx->use_buf_b = !dmarx->use_buf_b;
+		/* Publish the buffer switch before the data is consumed. */
+		wmb();
+		/*
+		 * This will take the chars we have so far and insert
+		 * into the framework.
+		 */
+		test_uart_static(zup->port.line, NULL, 0, 5);
+		zx29_uart_dma_rx_chars(zup, pending, sgbuf, true, flags);
+	}
+
+#if 0
+//printk("rt dma\n");
+	/* Switch buffer & re-trigger DMA job */
+	if (zx29_dma_rx_trigger_dma(zup)) {
+		printk("zx29_dma_rx_trigger_dma fail,uart:%d\n", zup->port.line);
+		zup->imr |= UART_RXIM;
+		UART_PUT_IMSC(&zup->port, zup->imr);
+	}
+#if RX_DMA_WORK
+	//printk("add timer\n");
+	else{
+		//mod_timer(&(zup->rx_dma_timer), jiffies + msecs_to_jiffies(RX_DMA_TIMEOUT));
+		uart_mod_timer(zup, flags);
+		zup->pre_pending = 0;
+		zup->work_state = true;
+		zup->dmarx.used = true;
+	}
+#endif
+
+#endif
+
+}
+/* Per-port scratch for the (up to 3) bytes drained straight from the RX
+ * FIFO when a DMA transfer is stopped; index is the port line number. */
+char g_fifo_residue_buf[5][4];
+/* Per-port rolling diagnostic log of all drained residue bytes plus the
+ * per-drain count; wraps at 20 entries. */
+char g_fifo_residue_all[5][20];
+/* Per-port write index into g_fifo_residue_all[]. */
+unsigned char g_fifo_cnt[5];
+/*
+ * zx29_uart_rx_dma_timeout - periodic timer that detects an idle RX DMA.
+ * @t: the port's rx_dma_timer.
+ *
+ * Runs every RX_DMA_TIMEOUT jiffies while an RX DMA transfer is active.
+ * When the DMA write counter stops advancing between two ticks and the
+ * receiver line is idle, the transfer is treated as finished for now:
+ * the channel is stopped, FIFO leftovers are drained (power mode), the
+ * data is pushed to the tty layer and DMA is re-armed where appropriate.
+ * Otherwise the current count is remembered and the timer re-arms.
+ */
+static void zx29_uart_rx_dma_timeout(struct timer_list *t)
+{
+	struct zx29_uart_port *zup = from_timer(zup, t, rx_dma_timer);
+
+	struct zx29_dmarx_data *dmarx = &zup->dmarx;
+	static bool dma_timeout_flag = false;	/* unused - kept as-is */
+	size_t pending, tmp_len;
+	uint32_t ris_status = 0;
+	int cancel_timer = 0;	/* unused - kept as-is */
+	int sg_idx = (dmarx->use_buf_b ? 1 : 0);	/* unused - kept as-is */
+
+	unsigned long flags;
+	struct zx29_sgbuf *sgbuf = NULL;
+	int uart_id = zup->port.line;
+	if(!zx29_dma_rx_running(zup))
+		//printk("---uart_rx_dma_timeout enter, dma stopped\n");
+		return;
+	raw_spin_lock_irqsave(&zup->port.lock, flags);
+	if(zup->port_close || (zup->curr_sg == NULL)){
+		raw_spin_unlock_irqrestore(&zup->port.lock, flags);
+		return;
+	}
+	//rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
+	if(zup->sg2tty) {//dma complete now, later check again
+		raw_spin_unlock_irqrestore(&zup->port.lock, flags);
+		test_uart_static(zup->port.line, NULL, 0, 14);
+		mod_timer(&(zup->rx_dma_timer), jiffies + RX_DMA_TIMEOUT);
+		return;
+	}
+	sgbuf = zup->curr_sg;
+	dma_peripheral_id rx_id = uart_get_rx_dma_peripheral_id(zup);
+	/* Bytes received so far = buffer length - DMA's remaining count. */
+	pending = sgbuf->sg.length - zx29_dma_get_transfer_num(rx_id);
+	//pending = sgbuf->sg.length - zx29_dma_get_transfer_num(rx_id);
+	//printk("---uart_rx_dma_timeout enter,sg.length:%d, pending:%d, state.residue:%d\n", sgbuf->sg.length, pending, state.residue);
+	/* Counter unchanged since the last tick: candidate for "line idle". */
+	if(pending == zup->pre_pending){
+		int fr = UART_GET_FR(&zup->port);
+		//if RXBUSY,means data come again
+
+		if((fr & UART_FR_RXBUSY)){
+
+			uart_mod_timer(zup, &flags);
+			test_uart_static(zup->port.line, NULL, 0, 12);
+			goto deal_end;
+
+		}
+
+		ris_status = UART_GET_RIS(&zup->port);
+
+		/* Fold any line-error status into the counters and clear it. */
+		if(ris_status & (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS)){
+			if(ris_status & UART_OEIS){
+				zup->port.icount.overrun++;
+				g_uart_overrun[uart_id] = 1;
+				test_uart_static(zup->port.line, NULL, 0, 19);
+				//if(!uart_console(&zup->port))
+				//	BUG_ON(1);
+			}
+			if(ris_status & UART_BEIS)
+				zup->port.icount.brk++;
+			if(ris_status & UART_PEIS)
+				zup->port.icount.parity++;
+			if(ris_status & UART_FEIS)
+				zup->port.icount.frame++;
+
+			UART_PUT_ICR(&zup->port, (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS));
+		}
+
+		/* Stop RX DMA; further bytes will wait in the FIFO. */
+		zup->dmacr &= ~UART_RXDMAE;
+		UART_PUT_DMACR(&zup->port,zup->dmacr);
+		zx29_dma_stop(rx_id);
+		zup->dmarx.running = false;
+		zup->dmarx.used = false;
+		/* Re-read the counter now that the channel is stopped. */
+		tmp_len = sgbuf->sg.length - zx29_dma_get_transfer_num(rx_id);
+		if(tmp_len != pending){
+			pending = tmp_len;
+		}
+		dmarx->use_buf_b = !dmarx->use_buf_b;
+		wmb();
+		if(zup->uart_power_mode){
+			int i;
+			/* Drain up to 3 bytes the DMA left behind in the FIFO. */
+			for(i= 0;i < 3;i++){
+				fr = UART_GET_FR(&zup->port);
+				if((fr & UART_FR_RXFE) == 0){
+					g_fifo_residue_buf[uart_id][i] = UART_GET_CHAR(&zup->port) | UART_DUMMY_DR_RX;
+					g_fifo_residue_all[uart_id][g_fifo_cnt[uart_id]++] = g_fifo_residue_buf[uart_id][i];
+					if(g_fifo_cnt[uart_id] >= 20) g_fifo_cnt[uart_id] = 0;
+				}
+				else
+					break;
+			}
+			if(i){
+				g_fifo_residue_all[uart_id][g_fifo_cnt[uart_id]++]=i;
+				if(g_fifo_cnt[uart_id] >= 20) g_fifo_cnt[uart_id] = 0;
+			}
+
+			//zup->sg2tty = sgbuf;
+			//when app ctrl sleep ,always start dma receive
+			if(zup->sleep_state == 0){
+				//now start dma again
+				if (zx29_dma_rx_trigger_dma(zup)) {
+					printk("rx_dma_chars RXDMA start fail\n");
+					zup->imr |= UART_RXIM;
+					UART_PUT_IMSC(&zup->port,zup->imr);
+				}else{
+					uart_mod_timer(zup, &flags);
+					zup->pre_pending = 0;
+					zup->dmarx.used = true;
+					zup->work_state = true;
+				}
+			}
+			/* Push DMA'd bytes plus any FIFO residue to the tty. */
+			if(pending || (i > 0)){
+				test_uart_static(zup->port.line, NULL, 0, 13);
+				zx29_uart_deal_dma_fifo_rx_chars(zup, pending, sgbuf, &flags, g_fifo_residue_buf[uart_id],i);
+			}
+
+		}else{
+			//for normal mode, dma start only on rx busy after timeout came
+			if(pending || (( fr & UART_FR_RXFE) == 0)){
+				test_uart_static(zup->port.line, NULL, 0, 13);
+				zx29_uart_dma_rx_chars(zup, pending, sgbuf, true, &flags);
+			}
+			/* Fall back to interrupt reception... */
+			zup->imr |= (UART_RTIM|UART_RXIM);
+			UART_PUT_IMSC(&zup->port, zup->imr);
+			zup->pre_pending = 0;
+			zup->work_state = false;
+			/* ...unless data is already arriving again, in which case
+			 * mask RX and immediately re-arm DMA. */
+			if((UART_GET_RIS(&zup->port) & (UART_RXIS | UART_RTIS)) ||
+				(UART_GET_FR(&zup->port) & UART_FR_RXBUSY)){
+				zup->imr &= ~(UART_RXIM);
+				UART_PUT_IMSC(&zup->port, zup->imr);
+
+				if (zx29_dma_rx_trigger_dma(zup)) {
+					printk("rx_dma_chars RXDMA start fail\n");
+					zup->imr |= (UART_RTIM|UART_RXIM);
+					UART_PUT_IMSC(&zup->port,zup->imr);
+				}else{
+					uart_mod_timer(zup, &flags);
+					zup->pre_pending = 0;
+					zup->dmarx.used = true;
+					zup->work_state = true;
+					UART_PUT_ICR(&zup->port,(UART_RTIS|UART_RXIS));
+				}
+			}
+
+		}
+deal_end:
+
+		raw_spin_unlock_irqrestore(&zup->port.lock, flags);
+	}else{
+		/* Counter moved: DMA is still receiving, sample again later. */
+		raw_spin_unlock_irqrestore(&zup->port.lock, flags);
+		zup->pre_pending = pending;
+		mod_timer(&(zup->rx_dma_timer), jiffies + RX_DMA_TIMEOUT);
+		//uart_mod_timer(zup, &flags);
+	}
+
+
+}
+/*
+ * zx29_uart_rx_dma_hrtimeout - hrtimer poll that detects an idle RX DMA.
+ * @t: the port's rx_dma_hrtimer.
+ *
+ * Fires every g_hr_interval while an RX DMA transfer is in flight.  When
+ * the DMA write counter has not advanced since the previous tick and the
+ * receiver line is idle, the transfer is treated as complete for now:
+ * line errors are folded into the icount statistics, the channel is
+ * stopped, the double buffer is switched, FIFO leftovers are drained in
+ * power mode, the data is handed to the tty layer and DMA is re-armed.
+ *
+ * Returns HRTIMER_RESTART while polling should continue, or
+ * HRTIMER_NORESTART once RX DMA no longer runs or the port is closing.
+ *
+ * Fixes: printk of "pending" (a size_t) used %d, which is a format
+ * mismatch -- changed to %zu; removed the unused locals dma_timeout_flag,
+ * cancel_timer and sg_idx.
+ */
+enum hrtimer_restart zx29_uart_rx_dma_hrtimeout(struct hrtimer *t)
+{
+	/* from_timer() is container_of() underneath, so it also resolves a
+	 * struct hrtimer member; container_of() would be the usual spelling. */
+	struct zx29_uart_port *zup = from_timer(zup, t, rx_dma_hrtimer);
+	struct zx29_dmarx_data *dmarx = &zup->dmarx;
+	size_t pending, tmp_len;
+	uint32_t ris_status = 0;
+	int uart_id = zup->port.line;
+	unsigned long flags;
+	struct zx29_sgbuf *sgbuf = NULL;
+
+	if(!zx29_dma_rx_running(zup))
+		return HRTIMER_NORESTART;
+	raw_spin_lock_irqsave(&zup->port.lock, flags);
+	if(zup->port_close || (zup->curr_sg == NULL)){
+		raw_spin_unlock_irqrestore(&zup->port.lock, flags);
+		return HRTIMER_NORESTART;
+	}
+	/* A completed buffer is still queued for the tty side: retry later. */
+	if(zup->sg2tty) {
+		raw_spin_unlock_irqrestore(&zup->port.lock, flags);
+		test_uart_static(zup->port.line, NULL, 0, 14);
+		hrtimer_forward_now(&zup->rx_dma_hrtimer, g_hr_interval);
+		return HRTIMER_RESTART;
+	}
+	if(zup->enter_suspend){
+		raw_spin_unlock_irqrestore(&zup->port.lock, flags);
+		test_uart_static(zup->port.line, NULL, 0, 15);
+		hrtimer_forward_now(&zup->rx_dma_hrtimer, g_hr_interval);
+		return HRTIMER_RESTART;
+	}
+	sgbuf = zup->curr_sg;
+	dma_peripheral_id rx_id = uart_get_rx_dma_peripheral_id(zup);
+	/* Bytes received so far = buffer length - DMA's remaining count. */
+	pending = sgbuf->sg.length - zx29_dma_get_transfer_num(rx_id);
+	if((pending == zup->pre_pending)) {
+		int fr = UART_GET_FR(&zup->port);
+		/* Receiver busy again: data is flowing, just re-arm the timer. */
+		if((fr & UART_FR_RXBUSY)){
+			hrtimer_forward_now(&zup->rx_dma_hrtimer, g_hr_interval);
+			test_uart_static(zup->port.line, NULL, 0, 12);
+			goto deal_end;
+		}
+		/* Fold any line-error status into the counters and clear it. */
+		ris_status = UART_GET_RIS(&zup->port);
+		if(ris_status & (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS)){
+			if(ris_status & UART_OEIS){
+				zup->port.icount.overrun++;
+				g_uart_overrun[uart_id] = 1;
+				test_uart_static(zup->port.line, NULL, 0, 19);
+			}
+			if(ris_status & UART_BEIS)
+				zup->port.icount.brk++;
+			if(ris_status & UART_PEIS)
+				zup->port.icount.parity++;
+			if(ris_status & UART_FEIS)
+				zup->port.icount.frame++;
+			UART_PUT_ICR(&zup->port, (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS));
+		}
+		/* Stop RX DMA; further bytes will wait in the FIFO. */
+		zup->dmacr &= ~UART_RXDMAE;
+		UART_PUT_DMACR(&zup->port,zup->dmacr);
+		zx29_dma_stop(rx_id);
+		zup->dmarx.running = false;
+		zup->dmarx.used = false;
+		/* Re-read the counter now that the channel is stopped. */
+		tmp_len = sgbuf->sg.length - zx29_dma_get_transfer_num(rx_id);
+		if(tmp_len != pending){
+			pending = tmp_len;
+		}
+		dmarx->use_buf_b = !dmarx->use_buf_b;
+		wmb();
+		if(zup->uart_power_mode){
+			int i;
+			/* Drain up to 3 bytes the DMA left behind in the FIFO. */
+			for(i= 0;i < 3;i++){
+				fr = UART_GET_FR(&zup->port);
+				if((fr & UART_FR_RXFE) == 0){
+					g_fifo_residue_buf[uart_id][i] = UART_GET_CHAR(&zup->port) | UART_DUMMY_DR_RX;
+					g_fifo_residue_all[uart_id][g_fifo_cnt[uart_id]++] = g_fifo_residue_buf[uart_id][i];
+					if(g_fifo_cnt[uart_id] >= 20) g_fifo_cnt[uart_id] = 0;
+				}
+				else
+					break;
+			}
+			if(i){
+				g_fifo_residue_all[uart_id][g_fifo_cnt[uart_id]++]=i;
+				if(g_fifo_cnt[uart_id] >= 20) g_fifo_cnt[uart_id] = 0;
+			}
+			/* When the app controls sleep, always restart DMA RX. */
+			if(zup->sleep_state == 0){
+				if (zx29_dma_rx_trigger_dma(zup)) {
+					printk("rx_dma_chars RXDMA start fail\n");
+					zup->imr |= UART_RXIM;
+					UART_PUT_IMSC(&zup->port,zup->imr);
+				}else{
+					hrtimer_forward_now(&zup->rx_dma_hrtimer, g_hr_interval);
+					zup->pre_pending = 0;
+					zup->dmarx.used = true;
+					zup->work_state = true;
+				}
+			}
+			/* Push DMA'd bytes plus any FIFO residue to the tty. */
+			if(pending || (i > 0)){
+				test_uart_static(zup->port.line, NULL, 0, 13);
+				zx29_uart_deal_dma_fifo_rx_chars(zup, pending, sgbuf, &flags, g_fifo_residue_buf[uart_id],i);
+			}
+		}else{
+			/* Normal mode: hand over the buffer, fall back to irqs. */
+			if(pending || (( fr & UART_FR_RXFE) == 0)){
+				test_uart_static(zup->port.line, NULL, 0, 13);
+				zx29_uart_dma_rx_chars(zup, pending, sgbuf, true, &flags);
+				printk("at pending %zu.\n",pending);	/* %zu: pending is size_t */
+			}
+			zup->imr |= (UART_RTIM|UART_RXIM);
+			UART_PUT_IMSC(&zup->port, zup->imr);
+			zup->pre_pending = 0;
+			zup->work_state = false;
+			/* Data already arriving again: mask RX and re-arm DMA now. */
+			if((UART_GET_RIS(&zup->port) & (UART_RXIS | UART_RTIS)) ||
+				(UART_GET_FR(&zup->port) & UART_FR_RXBUSY)){
+				zup->imr &= ~(UART_RXIM);
+				UART_PUT_IMSC(&zup->port, zup->imr);
+				if (zx29_dma_rx_trigger_dma(zup)) {
+					printk("rx_dma_chars RXDMA start fail\n");
+					zup->imr |= (UART_RTIM|UART_RXIM);
+					UART_PUT_IMSC(&zup->port,zup->imr);
+				}else{
+					hrtimer_forward_now(&zup->rx_dma_hrtimer, g_hr_interval);
+					zup->pre_pending = 0;
+					zup->dmarx.used = true;
+					zup->work_state = true;
+					UART_PUT_ICR(&zup->port,(UART_RTIS|UART_RXIS));
+				}
+			}
+		}
+deal_end:
+		raw_spin_unlock_irqrestore(&zup->port.lock, flags);
+		return HRTIMER_RESTART;
+	}else{
+		/* Counter moved: DMA is still receiving, sample again later. */
+		raw_spin_unlock_irqrestore(&zup->port.lock, flags);
+		zup->pre_pending = pending;
+		hrtimer_forward_now(&zup->rx_dma_hrtimer, g_hr_interval);
+		test_uart_static(zup->port.line, NULL, zup->pre_pending, 22);
+		return HRTIMER_RESTART;
+	}
+}
+/*
+ * zx29_uart_rx_dma_hrtimeout_cyclic - hrtimer poll for cyclic RX DMA mode.
+ * @t: the port's rx_dma_hrtimer.
+ *
+ * Cyclic-mode variant of zx29_uart_rx_dma_hrtimeout(): monitors the ring
+ * slot the DMA is currently writing and, when the write pointer stalls on
+ * a non-empty buffer with the line idle (or on the first tick after
+ * resume), stops the channel, drains FIFO residue, restarts the cyclic
+ * transfer and pushes the received data to the tty layer.
+ */
+enum hrtimer_restart zx29_uart_rx_dma_hrtimeout_cyclic(struct hrtimer *t)
+{
+	struct zx29_uart_port *zup = from_timer(zup, t, rx_dma_hrtimer);
+	struct zx29_dmarx_data *dmarx = &zup->dmarx;
+	struct dma_chan *rxchan = dmarx->chan;
+	size_t pending, tmp_len;
+	uint32_t ris_status = 0;
+	unsigned long flags;
+	struct zx29_sgbuf *sgbuf = NULL;
+	int uart_id = zup->port.line;
+	if(!zx29_dma_rx_running(zup))
+		return HRTIMER_NORESTART;
+	/* A DMA-complete callback is pending; let that path take over. */
+	if(uart_dma_cycle[uart_id].cnt_callback > 0){
+		return HRTIMER_NORESTART;
+	}
+
+	spin_lock_irqsave(&zup->port.lock, flags);
+	sgbuf = &uart_dma_cycle[uart_id].sgbuf[uart_dma_cycle[uart_id].flg_enter_th];
+	if(zup->port_close || (sgbuf == NULL)){
+		spin_unlock_irqrestore(&zup->port.lock, flags);
+		return HRTIMER_RESTART;
+	}
+	/* Completion thread still has queued work on the semaphore. */
+	if(zup->sema_cyclic.count > 0){
+		printk("uart has th not deal.\n");
+		spin_unlock_irqrestore(&zup->port.lock, flags);
+		return HRTIMER_RESTART;
+	}
+	if((zup->sg2tty)){//dma not complete now, later check again
+		printk("dmath_cyclic not end.\n");
+		spin_unlock_irqrestore(&zup->port.lock, flags);
+		test_uart_static(zup->port.line, NULL, 0, 14);
+		hrtimer_forward_now(&zup->rx_dma_hrtimer, g_hr_interval);
+		return HRTIMER_RESTART;
+	}
+	if(zup->enter_suspend){
+		spin_unlock_irqrestore(&zup->port.lock, flags);
+		test_uart_static(zup->port.line, NULL, 0, 15);
+		hrtimer_forward_now(&zup->rx_dma_hrtimer, g_hr_interval);
+		return HRTIMER_RESTART;
+	}
+	dma_peripheral_id rx_id = uart_get_rx_dma_peripheral_id(zup);
+	/* Bytes received so far = buffer length - DMA's remaining count. */
+	pending = sgbuf->sg.length - zx29_dma_get_transfer_num(rx_id);
+	/* Stalled-but-nonempty slot, or first tick after a resume. */
+	if(((pending == zup->pre_pending) && pending) || uart_dma_cycle[uart_id].from_resume){
+		uart_dma_cycle[uart_id].from_resume = 0;
+#if 0
+	/* NOTE(review): disabled lost-interrupt recovery.  The bare "return;"
+	 * below would not compile in a function returning
+	 * enum hrtimer_restart if this block were ever re-enabled. */
+	if(uart_dma_cycle[uart_id].flg_enter_th == 0)
+		uart_dma_cycle[uart_id].flg_enter_to = 4;
+	else
+		uart_dma_cycle[uart_id].flg_enter_to = uart_dma_cycle[uart_id].flg_enter_th - 1;
+	struct zx29_sgbuf *sgbuf_tmp = NULL;
+	sgbuf_tmp = &uart_dma_cycle[uart_id].sgbuf[uart_dma_cycle[uart_id].flg_enter_to];
+	test_uart_static(zup->port.line, NULL, 0, 61);
+	if (sgbuf->sg.dma_address != (zx29_dma_cur_dst(rx_id)&0xfffff000)){
+		if(sgbuf_tmp->sg.dma_address != ((zx29_dma_cur_dst(rx_id)&0xfffff000)-0x1000)){
+			printk("uart lose dma isr enter self resume.\n");
+			up(&zup->sema_cyclic);
+			spin_unlock_irqrestore(&zup->port.lock, flags);
+			return;
+		}
+	}
+	#endif
+		int fr = UART_GET_FR(&zup->port);
+		/* Receiver busy again: data is flowing, just re-arm. */
+		if((fr & UART_FR_RXBUSY)){
+			hrtimer_forward_now(&zup->rx_dma_hrtimer, g_hr_interval);
+			test_uart_static(zup->port.line, NULL, 0, 12);
+			goto deal_end;
+		}
+		/* Fold any line-error status into the counters and clear it. */
+		ris_status = UART_GET_RIS(&zup->port);
+		if(ris_status & (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS)){
+			if(ris_status & UART_OEIS){
+				zup->port.icount.overrun++;
+				uart_dma_cycle[uart_id].flg_overrun = 1;
+			}
+			if(ris_status & UART_BEIS)
+				zup->port.icount.brk++;
+			if(ris_status & UART_PEIS)
+				zup->port.icount.parity++;
+			if(ris_status & UART_FEIS)
+				zup->port.icount.frame++;
+			UART_PUT_ICR(&zup->port, (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS));
+			printk("error in uart%d: fe %u ,be %u pe %u.\n",zup->port.line,zup->port.icount.frame,
+				zup->port.icount.brk,zup->port.icount.parity);
+		}
+		/* Stop the cyclic transfer; bytes now queue up in the FIFO. */
+		zup->dmacr &= ~UART_RXDMAE;
+		UART_PUT_DMACR(&zup->port,zup->dmacr);
+		dmaengine_terminate_all(rxchan);
+		test_uart_static(zup->port.line, NULL, 0, 60);
+		zup->dmarx.running = false;
+		zup->dmarx.used = false;
+		wmb();
+		int i = 0;
+		/* Drain up to 3 residual bytes left in the RX FIFO. */
+		for(i= 0;i < 3;i++){
+			fr = UART_GET_FR(&zup->port);
+			if((fr & UART_FR_RXFE) == 0){
+				g_fifo_residue_buf[uart_id][i] = UART_GET_CHAR(&zup->port) | UART_DUMMY_DR_RX;
+				g_fifo_residue_all[uart_id][g_fifo_cnt[uart_id]++] = g_fifo_residue_buf[uart_id][i];
+				if(g_fifo_cnt[uart_id] >= 20) g_fifo_cnt[uart_id] = 0;
+			}
+			else
+				break;
+		}
+		if(i){
+			g_fifo_residue_all[uart_id][g_fifo_cnt[uart_id]++]=i;
+			if(g_fifo_cnt[uart_id] >= 20) g_fifo_cnt[uart_id] = 0;
+		}
+		if (zx29_dma_rx_trigger_dma_use_dma_cyclic(zup)) {
+			printk("rx_dma_chars RXDMA start fail\n");
+			zup->imr |= UART_RXIM;
+			UART_PUT_IMSC(&zup->port,zup->imr);
+		}else{
+			hrtimer_forward_now(&zup->rx_dma_hrtimer, g_hr_interval);
+			zup->pre_pending = 0;
+			zup->dmarx.used = true;
+			zup->work_state = true;
+		}
+		/* pending == 4096 presumably means a completely full slot that
+		 * the DMA completion path owns -- TODO confirm against
+		 * ZX29_DMA_BUFFER_SIZE. */
+		if((pending && (pending != 4096)) || (i > 0)){
+			zx29_uart_deal_dma_fifo_rx_chars_cyclic(zup, pending, sgbuf, &flags, g_fifo_residue_buf[uart_id],i);
+		}
+		uart_dma_cycle[uart_id].cnt_th = 0;
+		uart_dma_cycle[uart_id].cnt_callback=0;
+deal_end:
+		spin_unlock_irqrestore(&zup->port.lock, flags);
+		return HRTIMER_RESTART;
+	}else{
+		/* Counter moved (or slot empty): sample again later. */
+		spin_unlock_irqrestore(&zup->port.lock, flags);
+		zup->pre_pending = pending;
+		hrtimer_forward_now(&zup->rx_dma_hrtimer, g_hr_interval);
+		test_uart_static(zup->port.line, NULL, zup->pre_pending, 22);
+		return HRTIMER_RESTART;
+	}
+}
+#endif
+
+
+/*
+ * zx29_uart_modem_status - propagate modem-control line transitions.
+ *
+ * Samples the DCD/DSR/CTS bits from the flag register, diffs them against
+ * the previously seen state, notifies the serial core about every line
+ * that toggled, and finally wakes any task sleeping on modem-status
+ * changes (TIOCMIWAIT).
+ */
+static void zx29_uart_modem_status(struct zx29_uart_port *zup)
+{
+	unsigned int fr_now, changed;
+
+	fr_now = UART_GET_FR(&zup->port) & UART_FR_MODEM_ANY;
+	changed = fr_now ^ zup->old_status;
+	zup->old_status = fr_now;
+
+	if (changed == 0)
+		return;
+
+	if (changed & UART_FR_DCD)
+		uart_handle_dcd_change(&zup->port, fr_now & UART_FR_DCD);
+	if (changed & UART_FR_DSR)
+		zup->port.icount.dsr++;
+	if (changed & UART_FR_CTS)
+		uart_handle_cts_change(&zup->port, fr_now & UART_FR_CTS);
+
+	wake_up_interruptible(&zup->port.state->port.delta_msr_wait);
+}
+
+/****************************************************************************/
+/*
+ * zx29_uart_interrupt - main UART interrupt service routine.
+ * @irq:    interrupt number (unused)
+ * @dev_id: the struct uart_port registered at request_irq() time
+ *
+ * Dispatches on the masked interrupt status (MIS & imr): the console port
+ * takes the plain PIO receive path, other ports route RX and
+ * receive-timeout interrupts into the DMA machinery (when
+ * CONFIG_SERIAL_ZX29_DMA is set), and modem-status / TX interrupts are
+ * handled for all ports.  Loops until the status clears, bounded by
+ * pass_counter to cap the work done in one invocation.
+ */
+static irqreturn_t zx29_uart_interrupt(int irq, void *dev_id)
+{
+	struct uart_port *port = (struct uart_port *)dev_id;
+	struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
+	unsigned long flags;
+	unsigned int status,ris, pass_counter = 256;
+	int handled = 0;
+	int uart_id = zup->port.line;
+	spin_lock_irqsave(&zup->port.lock, flags);
+	status = UART_GET_MIS(port) & zup->imr;
+	ris = UART_GET_RIS(port);
+	if (status) {
+		do {
+			/* Ack everything except TX/RX/RT, which are cleared by
+			 * their individual handlers below. */
+			UART_PUT_ICR(port,(status & ~(UART_TXIS|UART_RTIS|UART_RXIS)));
+			if(uart_console(&zup->port)){
+				/* Console: simple PIO receive path. */
+				if (status & (UART_RTIS|UART_RXIS))
+					zx29_uart_rx_chars(zup);
+			}else{
+#ifdef CONFIG_CPU_IDLE
+				zup->rxd_int_depth = 0;
+#endif
+				if (status & (UART_RXIS)){
+#if CONFIG_SERIAL_ZX29_DMA
+					if(ris & UART_OEIS){
+						zup->port.icount.overrun++;
+						g_uart_overrun[uart_id] = 8;
+						test_uart_static(zup->port.line, NULL, 0, 21);
+						//if(!uart_console(&zup->port))
+						//	BUG_ON(1);
+					}
+					if (zx29_dma_rx_used(zup)){
+						/* DMA already owns RX: just ack, make
+						 * sure RT is unmasked, and let the
+						 * poll timer collect the data. */
+						UART_PUT_ICR(port,UART_RXIS);
+						if(!(zup->imr & UART_RTIM)){
+							zup->imr |= UART_RTIM;
+							UART_PUT_IMSC(port,zup->imr);
+						}
+
+						test_uart_static(port->line, NULL, 0, 8);
+						uart_mod_timer(zup, &flags);
+
+					}else{
+						test_uart_static(port->line, NULL, 0, 1);
+						/* Hand RX over to DMA and mask the RX
+						 * interrupt while it runs. */
+						zup->imr &= ~UART_RXIM;
+						UART_PUT_IMSC(&zup->port,zup->imr);
+						zx29_uart_rx_dma_chars(zup, &flags);
+
+						zup->dmarx.used = true;
+						//when RX&RT comes both, we trigger dma and add timer,so clear RT,waiting the timer
+						if(status & (UART_RTIS))
+							status &= ~UART_RTIS;
+					}
+#else
+					zx29_uart_rx_chars(zup);
+#endif
+				}
+
+				if (status & (UART_RTIS)){
+#if CONFIG_SERIAL_ZX29_DMA
+					if(!zx29_dma_rx_running(zup)){
+						/* No DMA in flight: drain by PIO. */
+						test_uart_static(port->line, NULL, 0, 2);
+						zx29_uart_rx_timeout_chars(zup, &flags);
+					}else{
+						/* Line idle with DMA active: harvest
+						 * the partial DMA buffer. */
+						UART_PUT_ICR(port, UART_RTIS);
+						test_uart_static(port->line, NULL, 0, 4);
+						zx29_uart_rt_dma(zup, &flags);
+					}
+#else
+					zx29_uart_rx_chars(zup);
+#endif
+				}
+			}
+
+			if (status & (UART_DSRMIS|UART_DCDMIS|UART_CTSMIS|UART_RIMIS))
+				zx29_uart_modem_status(zup);
+
+			if (status & UART_TXIS)
+				zx29_uart_tx_chars(zup);
+
+			if (pass_counter-- == 0)
+				break;
+
+			status = UART_GET_MIS(port);
+		} while (status != 0);
+		handled = IRQ_HANDLED;
+	}
+	spin_unlock_irqrestore(&zup->port.lock, flags);
+
+	return IRQ_RETVAL(handled);
+}
+
+#if CONFIG_SERIAL_ZX29_DMA
+extern bool zx29_dma_filter_fn(struct dma_chan *chan, void *param);
+/*
+ * uart_dma_init - request the TX/RX DMA channels for one port and pre-fill
+ * its static transfer descriptors.
+ *
+ * Resolves the port's data-register address and DMA request lines once,
+ * configures the TX descriptor (memory -> UART DR) and every RX descriptor
+ * (UART DR -> memory), then requests the two dmaengine channels.  If a
+ * channel request fails the function logs and returns; the driver then
+ * keeps running in interrupt (PIO) mode.
+ */
+static void uart_dma_init(struct zx29_uart_port *zup)
+{
+	struct dma_chan *chan = NULL;
+	dma_cap_mask_t mask;
+	unsigned int dr_addr = 0;
+	void *tx_req = NULL;
+	void *rx_req = NULL;
+	int i;
+
+	atomic_set(&zup->dmarx.count, 1);
+	atomic_set(&zup->dmatx.count, 1);
+
+	/* Per-port data-register address and DMA request identifiers. */
+	switch (zup->port.line) {
+	case UART0:
+		dr_addr = (unsigned int)(ZX_UART0_BASE + zx29_UART_DR);
+		tx_req = (void *)DMA_CH_UART0_TX;
+		rx_req = (void *)DMA_CH_UART0_RX;
+		break;
+	case UART1:
+		dr_addr = (unsigned int)(ZX_UART1_BASE + zx29_UART_DR);
+		tx_req = (void *)DMA_CH_UART1_TX;
+		rx_req = (void *)DMA_CH_UART1_RX;
+		break;
+	case UART2:
+		dr_addr = (unsigned int)(ZX_UART2_BASE + zx29_UART_DR);
+		tx_req = (void *)DMA_CH_UART2_TX;
+		rx_req = (void *)DMA_CH_UART2_RX;
+		break;
+	default:
+		/* Unknown line: leave addresses unset, requests stay NULL. */
+		break;
+	}
+
+	if (dr_addr)
+		zup->dmatx.tx_def.dest_addr = dr_addr;
+
+	/* TX descriptor: memory -> UART DR, 8-bit beats, bursts of 4. */
+	zup->dmatx.tx_def.dma_control.tran_mode = TRAN_MEM_TO_PERI;
+	zup->dmatx.tx_def.dma_control.irq_mode = DMA_ALL_IRQ_ENABLE;
+	zup->dmatx.tx_def.dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
+	zup->dmatx.tx_def.dma_control.src_burst_len = DMA_BURST_LEN_4;
+	zup->dmatx.tx_def.dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
+	zup->dmatx.tx_def.dma_control.dest_burst_len = DMA_BURST_LEN_4;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	if (tx_req)
+		chan = dma_request_channel(mask, zx29_dma_filter_fn, tx_req);
+	if (!chan) {
+		printk("UART%d DMA TX channel request fail.\n", zup->port.line);
+		return;
+	}
+	zup->dmatx.chan = chan;
+
+	/* RX descriptors: UART DR -> memory, one per receive slot. */
+	for (i = 0; i < UART_DMA_RX_MAX_COUNT; i++) {
+		if (dr_addr)
+			zup->dmarx.rx_def[i].src_addr = dr_addr;
+
+		zup->dmarx.rx_def[i].dma_control.tran_mode = TRAN_PERI_TO_MEM;
+		zup->dmarx.rx_def[i].dma_control.irq_mode = DMA_ALL_IRQ_ENABLE;
+		zup->dmarx.rx_def[i].dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
+		zup->dmarx.rx_def[i].dma_control.src_burst_len = DMA_BURST_LEN_4;
+		zup->dmarx.rx_def[i].dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
+		zup->dmarx.rx_def[i].dma_control.dest_burst_len = DMA_BURST_LEN_4;
+	}
+
+	zup->dmarx.rx_index = 0;
+	chan = NULL;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	if (rx_req)
+		chan = dma_request_channel(mask, zx29_dma_filter_fn, rx_req);
+	if (!chan) {
+		printk("UART%d DMA RX channel request fail.\n", zup->port.line);
+		return;
+	}
+	zup->dmarx.chan = chan;
+}
+
+/*
+ * uart_dma_cycle_init - allocate the cyclic-mode RX DMA ring for one port.
+ *
+ * Creates UART_DMA_CYCLE_RX_CONFIG_COUNT DMA buffers and fills the
+ * matching linked transfer descriptors (UART DR -> buffer).  On any
+ * allocation failure the buffers created so far are released again.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+static int uart_dma_cycle_init(struct zx29_uart_port *zup)
+{
+	int uart_id = zup->port.line;
+	unsigned int dr_addr;
+	int idx, ret;
+
+	uart_dma_cycle[uart_id].id = zup->port.line;
+
+	for (idx = 0; idx < UART_DMA_CYCLE_RX_CONFIG_COUNT; idx++) {
+		ret = zx29_sgbuf_init(zup->dmarx.chan,
+				      &uart_dma_cycle[uart_id].sgbuf[idx],
+				      DMA_FROM_DEVICE);
+		if (ret) {
+			printk( "init uart_dma_cycle sgbuf failed,uart: %d,ret:%d\n", zup->port.line, ret);
+			/* Undo the buffers allocated before the failure. */
+			while (--idx >= 0)
+				zx29_sgbuf_free(zup->dmarx.chan,
+						&uart_dma_cycle[uart_id].sgbuf[idx],
+						DMA_FROM_DEVICE);
+			return -1;
+		}
+	}
+
+	/* Source is always the port's data register; UART2 is the default
+	 * for any other line, matching the original else branch. */
+	switch (zup->port.line) {
+	case UART0:
+		dr_addr = ZX_UART0_BASE + zx29_UART_DR;
+		break;
+	case UART1:
+		dr_addr = ZX_UART1_BASE + zx29_UART_DR;
+		break;
+	default:
+		dr_addr = ZX_UART2_BASE + zx29_UART_DR;
+		break;
+	}
+
+	for (idx = 0; idx < UART_DMA_CYCLE_RX_CONFIG_COUNT; idx++) {
+		uart_dma_cycle[uart_id].rxdef[idx].src_addr = dr_addr;
+		uart_dma_cycle[uart_id].rxdef[idx].dest_addr =
+			(unsigned int)(uart_dma_cycle[uart_id].sgbuf[idx].dma_addr);
+		uart_dma_cycle[uart_id].rxdef[idx].count = ZX29_DMA_BUFFER_SIZE;
+		uart_dma_cycle[uart_id].rxdef[idx].dma_control.tran_mode = TRAN_PERI_TO_MEM;
+		uart_dma_cycle[uart_id].rxdef[idx].dma_control.irq_mode = DMA_ALL_IRQ_ENABLE;
+		uart_dma_cycle[uart_id].rxdef[idx].dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
+		uart_dma_cycle[uart_id].rxdef[idx].dma_control.src_burst_len = DMA_BURST_LEN_4;
+		uart_dma_cycle[uart_id].rxdef[idx].dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
+		uart_dma_cycle[uart_id].rxdef[idx].dma_control.dest_burst_len = DMA_BURST_LEN_4;
+		/* Non-zero link_addr marks the descriptor as ring-linked. */
+		uart_dma_cycle[uart_id].rxdef[idx].link_addr = 1;
+	}
+	return 0;
+}
+/*
+ * uart_dma_cycle_deinit - release a port's cyclic-mode RX DMA ring and
+ * wipe its transfer descriptors.
+ */
+static void uart_dma_cycle_deinit(struct zx29_uart_port *zup)
+{
+	int uart_id = zup->port.line;
+	int idx;
+
+	for (idx = 0; idx < UART_DMA_CYCLE_RX_CONFIG_COUNT; idx++)
+		zx29_sgbuf_free(zup->dmarx.chan,
+				&uart_dma_cycle[uart_id].sgbuf[idx],
+				DMA_FROM_DEVICE);
+
+	memset(uart_dma_cycle[uart_id].rxdef, 0,
+	       sizeof(uart_dma_cycle[uart_id].rxdef));
+}
+/*
+ * uart_dma_startup - allocate DMA buffers and bring up DMA for one port.
+ *
+ * Always prepares the TX DMA bounce buffer.  RX setup then depends on
+ * zup->uart_power_mode:
+ *   0 - double-buffer mode: sgbuf_a/sgbuf_b, the rx_dma_timer/hrtimer
+ *       pollers and the dma_complete_thread kthread;
+ *   1 - cyclic mode: descriptor ring via uart_dma_cycle_init() plus the
+ *       cyclic completion kthread and hrtimer.
+ * Any RX setup failure falls back to interrupt-driven reception.
+ */
+static void uart_dma_startup(struct zx29_uart_port *zup)
+{
+	int ret = 0;
+	if (!zup->dmatx.chan)
+	{
+		printk("tx_chan is error[%s][%d]\n",__func__,__LINE__);
+		return;
+	}
+
+	/* TX bounce buffer, exported below as the tty-visible FIFO. */
+	zup->dmatx.buf = kmalloc(ZX29_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
+	if (!zup->dmatx.buf) {
+		printk("tx_buf is error[%s][%d]\n",__func__,__LINE__);
+		return;
+	}
+
+	sg_init_one(&zup->dmatx.sg, zup->dmatx.buf, ZX29_DMA_BUFFER_SIZE);
+
+	/* The DMA buffer is now the FIFO the TTY subsystem can use */
+	zup->port.fifosize = 16;//ZX29_DMA_BUFFER_SIZE;
+	zup->using_tx_dma = true;
+
+	if(!zup->uart_power_mode)
+	{
+		if (!zup->dmarx.chan)
+		{
+			printk(KERN_INFO "[%s][%d]uart_%d rx_chan is error\n",__func__,__LINE__, zup->port.line);
+			goto skip_rx;
+		}
+
+		/* Allocate and map DMA RX buffers */
+		ret = zx29_sgbuf_init(zup->dmarx.chan, &zup->dmarx.sgbuf_a,
+					DMA_FROM_DEVICE);
+		if (ret) {
+			printk(KERN_INFO "[%s][%d] uart_%d rx_buf_a is error\n",__func__,__LINE__, zup->port.line);
+			//dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
+			//	"RX buffer A", ret);
+			goto skip_rx;
+		}
+
+		ret = zx29_sgbuf_init(zup->dmarx.chan, &zup->dmarx.sgbuf_b,
+					DMA_FROM_DEVICE);
+		if (ret) {
+			printk( "failed to init DMA uart: %d RX buffer B ,ret:%d\n", zup->port.line, ret);
+			/* Buffer B failed: release A and run without RX DMA. */
+			zx29_sgbuf_free(zup->dmarx.chan, &zup->dmarx.sgbuf_a,
+					DMA_FROM_DEVICE);
+			goto skip_rx;
+		}
+
+		zup->using_rx_dma = true;
+		zup->sg2tty = NULL;
+		zup->sg2tty_len = 0;
+		zup->curr_sg = NULL;
+#if RX_DMA_WORK
+		timer_setup(&(zup->rx_dma_timer), zx29_uart_rx_dma_timeout, 0);
+		hrtimer_init(&zup->rx_dma_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		zup->rx_dma_hrtimer.function = zx29_uart_rx_dma_hrtimeout;
+		g_hr_interval = ktime_set(0, 1500000);	/* 1.5 ms poll period */
+		zup->dmarx.running = false;
+		zup->dmarx.used = false;
+		zup->dmarx.use_buf_b = false;
+		zup->dmarx.rx_index = 0;
+
+		zup->pre_pending = 0;
+		zup->work_state = false;
+
+		zup->dma_compl_th = kthread_run(dma_complete_thread, zup, "uart_dma_compl");
+		BUG_ON(IS_ERR(zup->dma_compl_th));
+#endif
+
+skip_rx:
+
+	/* Turn on DMA error (RX/TX will be enabled on demand) */
+	printk("uart_dma_startup, port:%d, ret:%d\n", zup->port.line,ret );
+	zup->dmacr &= ~UART_DMAONERR;
+	//zup->dmacr |= UART_DMAONERR;
+	UART_PUT_DMACR(&zup->port, zup->dmacr);
+	/* NOTE(review): this whole branch only runs when uart_power_mode is
+	 * 0, so the condition below is always false and the initial DMA
+	 * trigger here is dead code -- confirm whether that is intentional
+	 * (normal mode arms DMA from the RX interrupt instead). */
+	if(zup->uart_power_mode){
+		if (zup->using_rx_dma) {
+			//printk(KERN_INFO "[%s][%d]\n",__func__,__LINE__);
+			if (zx29_dma_rx_trigger_dma(zup)){
+				dev_dbg(zup->port.dev, "could not trigger initial "
+					"RX DMA job, fall back to interrupt mode\n");
+			}else{
+				mod_timer(&(zup->rx_dma_timer), jiffies + RX_DMA_TIMEOUT);
+				zup->pre_pending = 0;
+				zup->work_state = true;
+			}
+		}
+	}
+	}
+	else if(zup->uart_power_mode == 1)
+	{
+		ret = uart_dma_cycle_init(zup);
+		if(ret){
+			printk("uart%d dma cycle init failed,ret %d.\n",zup->port.line,ret);
+			return;
+		}
+		zup->using_rx_dma = true;
+		zup->sg2tty = NULL;
+		zup->sg2tty_len = 0;
+		zup->curr_sg = NULL;
+#if RX_DMA_WORK
+		hrtimer_init(&zup->rx_dma_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		zup->rx_dma_hrtimer.function = zx29_uart_rx_dma_hrtimeout_cyclic;
+		g_hr_interval = ktime_set(0, 1500000);	/* 1.5 ms poll period */
+		zup->dmarx.running = false;
+		zup->dmarx.used = false;
+		zup->dmarx.use_buf_b = false;
+		zup->dmarx.rx_index = 0;
+		zup->pre_pending = 0;
+		zup->work_state = false;
+		sema_init(&zup->sema_cyclic, 0);
+		zup->dma_compl_th = kthread_run(dma_complete_thread_use_dma_cyclic, zup, "uart_dma_th_cyc");
+		BUG_ON(IS_ERR(zup->dma_compl_th));
+#endif
+		printk("uart_dma_startup, port:%d, ret:%d\n", zup->port.line,ret );
+		zup->dmacr &= ~UART_DMAONERR;
+		UART_PUT_DMACR(&zup->port, zup->dmacr);
+		if(zup->uart_power_mode){
+			if (zup->using_rx_dma) {
+				if (zx29_dma_rx_trigger_dma_use_dma_cyclic(zup)){
+					dev_dbg(zup->port.dev, "could not trigger initial "
+						"RX DMA job, fall back to interrupt mode\n");
+				}else{
+					hrtimer_start(&zup->rx_dma_hrtimer, g_hr_interval, HRTIMER_MODE_REL);
+					//mod_timer(&(zup->rx_dma_timer), jiffies + RX_DMA_TIMEOUT);
+					zup->pre_pending = 0;
+					zup->work_state = true;
+				}
+			}
+		}
+	}else
+		printk("uart%d power mode set error,dma dont startup.\n",zup->port.line);
+}
+
+
+#endif
+
+/*
+ * zx29_uart_rxd_irq - interrupt on the RXD line (presumably a pad/GPIO
+ * wake source while the UART sleeps -- confirm against board wiring).
+ *
+ * Bumps the global wake statistics counter, marks the port as woken by
+ * RXD activity, schedules the write_wakeup tasklet, and resets the RXD
+ * interrupt nesting depth.
+ */
+static irqreturn_t zx29_uart_rxd_irq(int irq, void *dev_id)
+{
+	struct zx29_uart_port *zup = (struct zx29_uart_port *)dev_id;
+
+	rxd_wake_cnt++;
+	zup->rxd_wakeup = true;
+	tasklet_schedule(&zup->write_wakeup);
+	zup->rxd_int_depth = 0;
+	return IRQ_HANDLED;//IRQ_RETVAL(retval);
+}
+
+/****************************************************************************/
+static int zx29_uart_startup(struct uart_port *port)
+{
+ struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
+ unsigned long flags = 0;
+ unsigned long control = 0;
+ int retval = 0;
+ struct platform_device *pdev=port->private_data;
+// struct zx29_uart_platdata *pdata = pdev->dev.platform_data;
+ int i = 0,j = 0,iflag = 0;
+ struct tty_struct *tty = port->state->port.tty;
+ unsigned int ibrd, fbrd,lcr_h, old_cr;
+ int ret=0;
+ printk("-----zx29_uart_startup, port:%d\n", port->line);
+ #if 0//def CONFIG_ARCH_ZX297520V3_WATCH
+
+ if(port->line == 0)
+ {
+ gpio_free(pdata->uart_txd.gpionum);
+ gpio_free(pdata->uart_rxd.gpionum);
+ //printk("gpio_free err err!\n");
+ }
+
+ wmb();
+
+ #endif
+ if(DEBUG_CONSOLE != pdev->id){
+ char temp_buf[TASK_COMM_LEN]= {0};
+ int th_ctrl = 0;
+ if((strlen(get_task_comm(temp_buf,get_current())) > 0) && (strcmp(get_task_comm(temp_buf,get_current()),"at_ctl") != 0))
+ th_ctrl = 1;
+
+ //app ctrl or kernel ctrl set this
+ int kernel_ctrl = xp2xp_enable_4line();
+ zup->uart_power_mode = (kernel_ctrl | zup->app_ctrl | th_ctrl);
+ printk("zx29_uart%d open task is %s,power_mode is %d.\n",pdev->id, get_task_comm(temp_buf,get_current()),zup->uart_power_mode);
+ if(zup->uart_power_mode){
+ //pm_stay_awake(&pdev->dev);
+ }
+ }
+ //when open, clear last statistic info
+ port->icount.brk = port->icount.buf_overrun = port->icount.frame = 0;
+ port->icount.overrun = port->icount.parity = port->icount.rng = 0;
+ port->icount.rx = port->icount.tx = 0;
+ /*
+ *enable uart clock
+ *if uart is used for console, don't need do these, these was done before
+ */
+ if (DEBUG_CONSOLE != port->line) {
+ /* config uart apb_clk */
+ clk_prepare_enable(zup->busclk);
+ /* enable uart work clock */
+ clk_prepare_enable(zup->wclk);
+ }
+
+ /* Clear all pending error and receive interrupts */
+ UART_PUT_ICR(port, 0xfff);
+
+ /* Allocate the IRQ */
+ retval = request_irq(port->irq, zx29_uart_interrupt, 0, "uart-zx29", zup);
+ if (retval){
+ printk("[UART]unable to attach zx29 UART %d "
+ "interrupt vector=%d\n", port->line, port->irq);
+ return retval;
+ }
+
+ /* set interrupt fifo level RX:1/2 Full, TX:1/2 Full */
+#if 0//CONFIG_SERIAL_ZX29_DMA
+ UART_PUT_IFLS(port, UART_IFLS_RX2_8|UART_IFLS_TX6_8);
+#else
+ UART_PUT_IFLS(port, UART_IFLS_RX2_8|UART_IFLS_TX4_8);
+#endif
+
+#if 0
+ /* Provoke TX FIFO interrupt into asserting. */
+ control = UART_CR_UARTEN | UART_CR_TXE | UART_CR_LBE;
+ UART_PUT_CR(port, control);
+ UART_PUT_FBRD(port, 0);
+ UART_PUT_IBRD(port, 1);
+ UART_PUT_LCRH(port, 0);
+ UART_PUT_CHAR(port, 0);
+ while (UART_GET_FR(port) & UART_FR_TXBUSY)
+ barrier();
+#endif
+ control = UART_CR_UARTEN | UART_CR_RXE | UART_CR_TXE;
+ //console & lp_uart don't need dma
+ if ((DEBUG_CONSOLE != port->line) && (port->line != 4)) {
+#if CONFIG_SERIAL_ZX29_DMA
+ UART_PUT_DMACR(port, UART_TXDMAE | UART_RXDMAE);
+ uart_dma_startup(zup);
+#endif
+ }
+
+ tasklet_init(&zup->write_wakeup, uart_write_wakeup_task, (unsigned long) port);
+ if((pdev->id == 0) && (zup->irq_state == 0) && (zup->uart_power_mode == 0)){
+ ret = request_irq(zup->rxd_irq,
+ zx29_uart_rxd_irq,
+ 0,
+ "uart0_rxd_wake",
+ zup);
+ if(ret<0){
+ panic("request uart0 rxd wake irq fail\n");
+ }
+ printk("--------rxd wake up interrupt ok\n");
+ enable_irq_wake(zup->rxd_irq);
+ zup->irq_state = 1;
+ zup->rxd_int_depth = 1;
+ }
+#if 0
+ /*configure gpio pin to UART*/
+ if((pdata->uart_use)/*&&(port->line == UART0 )*/)
+ {
+ retval=gpio_request(pdata->uart_rxd.gpionum,pdata->uart_rxd.gpioname);
+ if(retval)
+ BUG();
+ retval=gpio_request(pdata->uart_txd.gpionum,pdata->uart_txd.gpioname);
+ if(retval)
+ BUG();
+ /*uart rxd*/
+ zx29_gpio_config(pdata->uart_rxd.gpionum, pdata->uart_rxd.gpiofnc);
+ if(pdata->uart_rxd.gpionum == ZX29_GPIO_121 ) {
+ //pull up gpio121
+ *(volatile unsigned int *)0xf843c82c |= 0xf0;
+ }
+ /*uart txd*/
+ zx29_gpio_config(pdata->uart_txd.gpionum, pdata->uart_txd.gpiofnc);
+#ifdef CONFIG_ARCH_ZX297520V3
+ if((pdev->id != DEBUG_CONSOLE) && (pdata->uart_wakeup_enable == 1) && (zup->irq_state == 0)){
+ zup->irq = platform_get_irq_byname(pdev, "zx29_uart_rxd_wakeup");
+ printk(KERN_INFO"zx29_uart_startup,irq:%d,%s.%d\n",zup->irq,pdata->uart_cts.gpioname,zup->irq_state);
+ if(zup->irq >= 0){
+
+ pcu_int_set_type(PCU_UART0_RXD_INT, IRQF_TRIGGER_FALLING);
+ pcu_int_clear(PCU_UART0_RXD_INT);
+ ret = request_irq(zup->irq, zx29_uart_rxd_irq,
+ IRQF_ONESHOT , "uart_rxd_irq",
+ zup);
+ printk(KERN_INFO"zx29_uart_startup, retval:%d\n",ret);
+ irq_set_irq_wake(zup->irq,1);
+#ifdef CONFIG_CPU_IDLE
+ zup->rxd_int_depth = rxd_wake_cnt = 0;
+ zx_pm_register_callback(uart_0_pm_enter, uart_0_pm_exit);
+ disable_irq_nosync(UART0_RXD_INT);
+#endif
+ zup->irq_state = 1;
+ }else{
+ printk("uart_startup, request wake irq fail:%d\n",zup->irq);
+ }
+ }
+#endif
+ if(pdata->uart_ctsrtsuse)
+ {
+ retval=gpio_request(pdata->uart_cts.gpionum,pdata->uart_cts.gpioname);
+ if(retval)
+ BUG();
+
+ retval=gpio_request(pdata->uart_rts.gpionum,pdata->uart_rts.gpioname);
+ if(retval)
+ BUG();
+/*uart cts*/
+ zx29_gpio_config(pdata->uart_cts.gpionum, pdata->uart_cts.gpiofnc);
+/*uart rts*/
+ zx29_gpio_config(pdata->uart_rts.gpionum, pdata->uart_rts.gpiofnc);
+
+ control |= (UART_CR_RTSEN |UART_CR_CTSEN );
+ control |= UART_CR_RTS; //wl write1 for allow send
+ }
+ zup->autobaud = pdata->uart_abauduse ;
+ }
+#if 0
+ if((pdata->uart_use)&&(port->line == UART1 ))
+ {
+ retval=gpio_request(pdata->uart_rx.gpionum,pdata->uart_rx.gpioname);
+ if(retval)
+ BUG();
+ retval=gpio_request(pdata->uart_txd.gpionum,pdata->uart_tx.gpioname);
+ if(retval)
+ BUG();
+/*uart rxd*/
+ zx29_gpio_config(pdata->uart_rxd.gpionum, pdata->uart_rxd.gpiofnc);
+/*uart txd*/
+ zx29_gpio_config(pdata->uart_txdnum, pdata->uart_txdfnc);
+
+ if(pdata->uart_ctsrtsuse)
+ {
+ retval=gpio_request(pdata->uart_ctsnum,"uart1_cts");
+ if(retval)
+ BUG();
+ retval=gpio_request(pdata->uart_rtsnum,"uart1_rts");
+ if(retval)
+ BUG();
+/*uart cts*/
+ zx29_gpio_config(pdata->uart_ctsnum, pdata->uart_ctsfnc);
+/*uart rts*/
+ zx29_gpio_config(pdata->uart_rtsnum, pdata->uart_rtsfnc);
+
+ control |= (UART_CR_RTSEN |UART_CR_CTSEN );
+ control |= UART_CR_RTS; //wl write1 for allow send
+ }
+ zup->autobaud = pdata->uart_abauduse;
+ }
+ if((pdata->uart_use)&&(port->line == UART2 ))
+ {
+ retval=gpio_request(pdata->uart_rxdnum,"uart2_rxd");
+ if(retval)
+ BUG();
+ retval=gpio_request(pdata->uart_txdnum,"uart2_txd");
+ if(retval)
+ BUG();
+
+/*uart rxd*/
+ zx29_gpio_config(pdata->uart_rxdnum, pdata->uart_rxdfnc);
+ if(pdata->uart_rxdnum == ZX29_GPIO_121 ) {
+ //pull up gpio121
+ *(volatile unsigned int *)0xf843c82c |= 0xf0;
+ }
+/*uart txd*/
+ zx29_gpio_config(pdata->uart_txdnum, pdata->uart_txdfnc);
+
+ if(pdata->uart_ctsrtsuse)
+ {
+ retval=gpio_request(pdata->uart_ctsnum,"uart2_cts");
+ if(retval)
+ BUG();
+
+ retval=gpio_request(pdata->uart_rtsnum,"uart2_rts");
+ if(retval)
+ BUG();
+/*uart cts*/
+ zx29_gpio_config(pdata->uart_ctsnum, pdata->uart_ctsfnc);
+/*uart rts*/
+ zx29_gpio_config(pdata->uart_rtsnum, pdata->uart_rtsfnc);
+
+ control |= (UART_CR_RTSEN |UART_CR_CTSEN );
+ control |= UART_CR_RTS; //wl write1 for allow send
+ }
+ zup->autobaud = pdata->uart_abauduse ;
+ }
+#endif
+#endif
+ zup->autobaud_state = UART_PORT_AUTOBAUD_OFF;
+ UART_PUT_CR(port, control);
+
+ /*
+ * Finally, enable interrupts, only timeouts when using DMA
+ * if initial RX DMA job failed, start in interrupt mode
+ * as well.
+ */
+ spin_lock_irqsave(&zup->port.lock, flags);
+ /* Clear out any spuriously appearing RX interrupts */
+ UART_PUT_ICR(port, (UART_RTIS | UART_RXIS));
+ //when dma not running,set UART_RTIM | UART_RXIM
+ if(!zx29_dma_rx_running(zup)){
+ zup->imr = UART_RTIM | UART_RXIM;
+ UART_PUT_IMSC(port, zup->imr);
+ }
+#if CONFIG_SERIAL_ZX29_DMA
+ zup->port_close = false;
+#endif
+ spin_unlock_irqrestore(&zup->port.lock, flags);
+
+
+ return 0;
+}
+
+/****************************************************************************/
/*
 * zx29_uart_shutdown - uart_ops .shutdown hook
 *
 * Stops DMA, cancels the RX DMA timers, masks and clears all UART
 * interrupts, releases the IRQs, and disables the receiver while
 * keeping UARTEN/TXE set so queued characters can still drain.
 */
static void zx29_uart_shutdown(struct uart_port *port)
{
	printk("zx29_uart%d_shutdown.\n",port->line);
	struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
	unsigned long flags;
	uint32_t val;
	int retval = 0;
	struct platform_device *pdev=port->private_data;
	//struct zx29_uart_platdata *pdata = pdev->dev.platform_data;
#if CONFIG_SERIAL_ZX29_DMA
	/* Tell the DMA completion thread the port is closing and wake it. */
	zup->port_close = true;
	up(&zup->sema);
#endif
	int ret;
	tasklet_kill(&zup->write_wakeup);
#if RX_DMA_WORK
	/* Stop the periodic RX DMA polling (legacy timer and hrtimer). */
	if(zx29_dma_rx_work_scheduled(zup)){
		ret = del_timer_sync(&(zup->rx_dma_timer));
		ret = hrtimer_cancel(&zup->rx_dma_hrtimer);
		zup->work_state = 0;
	}
#endif
	/* Disable and clear all interrupts now */
	spin_lock_irqsave(&port->lock, flags);
	zup->imr = 0;
	UART_PUT_IMSC(port, zup->imr);
	UART_PUT_ICR(port, 0xFFFF);

	spin_unlock_irqrestore(&port->lock, flags);
#if CONFIG_SERIAL_ZX29_DMA
	zx29_dma_shutdown(zup);
#endif
	/* Free the interrupt */
	free_irq(zup->port.irq, zup);

	/* Disable UART transmitter and receiver */
	zup->autorts = false;
	val = UART_GET_CR(port);
	/* Record whether RTS was asserted so it can be restored later. */
	if (val & UART_CR_RTS) {
		zup->rts_state = true;
		/* NOTE(review): this assignment is dead -- val is overwritten
		 * unconditionally below.  Possibly UART_CR_RTS was meant to be
		 * OR-ed into the final CR value; confirm intended behavior. */
		val = UART_CR_RTS;
	} else
		zup->rts_state = false;
	val = UART_CR_UARTEN | UART_CR_TXE;
	UART_PUT_CR(port, val);

	/* disable break condition and fifos */
	val = UART_GET_LCRH(port);
	val &= ~(UART_LCRH_BRK | UART_LCRH_FEN);
	UART_PUT_LCRH(port, val);
	/* Leaving low-power bookkeeping: clear app control and power mode. */
	if(zup->uart_power_mode){
		//pm_relax(&pdev->dev);
		zup->app_ctrl = 0;
		zup->uart_power_mode = 0;
	}

	/* Release the port-0 RXD wakeup interrupt installed at startup.
	 * NOTE(review): disable_irq_wake() is called after free_irq() on the
	 * same irq; the reverse order would be safer -- confirm. */
	if((pdev->id == 0) && (zup->irq_state == 1) && (zup->uart_power_mode == 0)){
		free_irq(zup->rxd_irq, zup);
		disable_irq_wake(zup->rxd_irq);
		zup->irq_state = 0;
	}

#if 0
	if(pdata->uart_use)
	{
		if(pdata->uart_ctsrtsuse)
		{
			gpio_free(pdata->uart_cts.gpionum);
			gpio_free(pdata->uart_rts.gpionum);
		}
#ifdef CONFIG_ARCH_ZX297520V3
		if((pdev->id != DEBUG_CONSOLE) && (pdata->uart_wakeup_enable == 1) && (zup->irq_state == 1)){
			printk(KERN_INFO"zx29_uart_shutdown,irq:%d,%s\n",zup->irq,pdata->uart_cts.gpioname);
			if(zup->irq){
				free_irq(zup->irq, zup);
				pcu_int_clear(PCU_UART0_RXD_INT);
				irq_set_irq_wake(zup->irq, 0);
				zup->irq_state = 0;
				zup->rxd_int_depth = 0;
			}
		}
#endif
		gpio_free(pdata->uart_rxd.gpionum);
		gpio_free(pdata->uart_txd.gpionum);

#ifdef CONFIG_ARCH_ZX297520V3_WATCH
		if(port->line == 0)
		{
			retval = gpio_request(pdata->uart_txd.gpionum, pdata->uart_txd.gpioname);
			if(retval)
			{
				BUG();
			}
			zx29_gpio_config(pdata->uart_txd.gpionum, GPIO30_GPIO30);
			gpio_direction_input(pdata->uart_txd.gpionum);

			retval = gpio_request(pdata->uart_rxd.gpionum, pdata->uart_rxd.gpioname);
			if(retval)
			{
				BUG();
			}
			zx29_gpio_config(pdata->uart_rxd.gpionum, GPIO29_GPIO29);
			gpio_direction_input(pdata->uart_rxd.gpionum);
		}
#endif

	}
#endif
	/* Shutdown uart clock */
}
+
+/****************************************************************************/
/*
 * zx29_uart_set_termios - uart_ops .set_termios hook
 *
 * Programs the baud-rate divisors, frame format (word length, stop
 * bits, parity), FIFO enable and hardware flow control from @termios,
 * then updates the read/ignore status masks used by the RX path.
 * If autobaud detection just completed, transmits a canned reply.
 */
static void zx29_uart_set_termios(struct uart_port *port, struct ktermios *termios,
				struct ktermios *old)
{
	struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
	unsigned int lcr_h, old_cr;
	unsigned long flags;
	unsigned int baud, ibrd, fbrd,j;

	//temple change,using setting from cmm script
	//if(port->line == DEBUG_CONSOLE)
	//return;

	/* Set baud rate */
	/* Ask the core to calculate the divisor for us. */
	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
	printk("uart port %d baud is %d.\n",port->line,baud);

	//this should not hapend
	if(baud == 0)
		BUG_ON(1);
	zup->baudrate = baud;
	/* Integer divisor plus rounded fractional part (PL011-style:
	 * divisor = uartclk / (16 * baud)). */
	ibrd = port->uartclk / (baud<<4);
	fbrd = ((port->uartclk % (baud<<4) )*8 + baud)/(2*baud);
	UART_PUT_FBRD(port, fbrd);
	UART_PUT_IBRD(port, ibrd);

printk("-------zx29_uart_set_termios,line:%d, new baud:%d, uartclk:%d,ibrd:%d, fbrd:%d \n", port->line,
		baud, port->uartclk, ibrd, fbrd);

	/* Word length from CSIZE. */
	switch (termios->c_cflag & CSIZE) {
	case CS5:
		lcr_h = UART_LCRH_WLEN_5;
		break;
	case CS6:
		lcr_h = UART_LCRH_WLEN_6;
		break;
	case CS7:
		lcr_h = UART_LCRH_WLEN_7;
		break;
	default: // CS8
		lcr_h = UART_LCRH_WLEN_8;
		break;
	}
	if (termios->c_cflag & CSTOPB)
		lcr_h |= UART_LCRH_STP2;
	if (termios->c_cflag & PARENB) {
		lcr_h |= UART_LCRH_PEN;
		if (!(termios->c_cflag & PARODD))
			lcr_h |= UART_LCRH_EPS;
	}
	/* Enable FIFOs whenever the port has them. */
	if (port->fifosize > 1)
		lcr_h |= UART_LCRH_FEN;

	spin_lock_irqsave(&port->lock, flags);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	port->read_status_mask = UART_DR_OE | 255;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= UART_DR_FE | UART_DR_PE;
	if (termios->c_iflag & (BRKINT | PARMRK))
		port->read_status_mask |= UART_DR_BE;

	/*
	 * Characters to ignore
	 */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= UART_DR_FE | UART_DR_PE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= UART_DR_BE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= UART_DR_OE;
	}

	/*
	 * Ignore all characters if CREAD is not set.
	 */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= UART_DUMMY_DR_RX;

	if (UART_ENABLE_MS(port, termios->c_cflag))
		zx29_uart_enable_ms(port);

	/* first, disable everything */
	old_cr = UART_GET_CR(port);
	UART_PUT_CR(port, 0);

	/* Hardware flow control: only re-enable RTSEN if RTS was asserted. */
	if (termios->c_cflag & CRTSCTS) {
		if (old_cr & UART_CR_RTS)
			old_cr |= UART_CR_RTSEN;

		old_cr |= UART_CR_CTSEN;
		port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
		zup->autorts = true;
	} else {
		old_cr &= ~(UART_CR_CTSEN | UART_CR_RTSEN);
		zup->autorts = false;
		port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
	}

	/*
	 * ----------v----------v----------v----------v-----
	 * NOTE: MUST BE WRITTEN AFTER UARTLCR_M & UARTLCR_L
	 * ----------^----------^----------^----------^-----
	 */
	UART_PUT_LCRH(port, lcr_h);
	UART_PUT_CR(port, old_cr);

	spin_unlock_irqrestore(&port->lock, flags);
	/* Autobaud just completed: flush stats and send the canned reply. */
	if( zup->autobaud_state == UART_PORT_AUTOBAUD_ON)
	{
		msleep(50);
		zup->port.icount.rx = 0;

		for( j = 0; j<UART_AT_SENDOK_NUM; j++)
		{
			/* Busy-wait for TX FIFO space before each byte. */
			while (UART_GET_FR(port) & UART_FR_TXFF)
				barrier();
			UART_PUT_CHAR(&zup->port, UART_AT_send_ok[j]);
		}

		zup->autobaud_state = UART_PORT_AUTOBAUD_OFF;
	}
}
+
+/****************************************************************************/
+static const char *zx29_uart_type(struct uart_port *port)
+{
+ return (port->type == PORT_ZX29) ? "zx29_UART" : NULL;
+}
+
+/****************************************************************************/
+
static int zx29_uart_request_port(struct uart_port *port)
{
	/*
	 * UARTs are always present; the memory region is deliberately
	 * not claimed here, so there is nothing that can fail.
	 */
	return 0;
}
+
+/****************************************************************************/
+static void zx29_uart_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE) {
+ port->type = PORT_ZX29;
+ zx29_uart_request_port(port);
+ }
+}
+
+/****************************************************************************/
+
static void zx29_uart_release_port(struct uart_port *port)
{
	/* Nothing to release: request_port claims no resources. */
}
+
+/****************************************************************************/
+
+static int zx29_uart_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ if ((ser->type != PORT_UNKNOWN) && (ser->type != PORT_ZX29))
+ return -EINVAL;
+ return 0;
+}
+
+
+void zx29_uart_putc(struct uart_port *port, int c)
+{
+ while (UART_GET_FR(port) & UART_FR_TXFF)
+ barrier();
+ UART_PUT_CHAR(port, c);
+}
+
+
+#ifdef CONFIG_CONSOLE_POLL
+/****************************************************************************/
+static int zx29_get_poll_char(struct uart_port *port)
+{
+ if (UART_GET_FR(port) & UART_FR_RXFE)
+ return NO_POLL_CHAR;
+
+ return UART_PUT_CHAR(port);
+}
+
+/****************************************************************************/
+static void zx29_put_poll_char(struct uart_port *port, unsigned char ch)
+{
+ while (UART_GET_FR(port) & UART_FR_TXFF)
+ barrier();
+ UART_PUT_CHAR(port, ch);
+}
+#endif /* CONFIG_CONSOLE_POLL */
/*
 * zx29_uart_throttle_rx - uart_ops .throttle hook
 *
 * Waits for the in-flight RX DMA transfer to go idle, then disables
 * the UART's RX DMA request and stops the DMA channel so no further
 * data is pushed into the tty layer.
 */
static void zx29_uart_throttle_rx(struct uart_port *port)
{
	unsigned long flags;
	struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
	dma_peripheral_id rx_id = uart_get_rx_dma_peripheral_id(zup);
	/* 4096 presumably means "full transfer count remaining / channel
	 * idle" -- TODO confirm against the zx29 DMA driver. */
	while(zx29_dma_get_transfer_num(rx_id) != 4096)
		msleep(1);
	spin_lock_irqsave(&port->lock, flags);
	zup->dmacr &= ~UART_RXDMAE;
	UART_PUT_DMACR(&zup->port,zup->dmacr);
	zx29_dma_stop(rx_id);
	zup->dmarx.running = false;
	zup->dmarx.used = false;
	spin_unlock_irqrestore(&port->lock, flags);
}
/*
 * zx29_uart_unthrottle_rx - uart_ops .unthrottle hook
 *
 * Restarts RX DMA after a throttle; if the DMA job cannot be started
 * it falls back to interrupt-driven RX (RX + receive-timeout irqs).
 */
static void zx29_uart_unthrottle_rx(struct uart_port *port)
{
	struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
	unsigned long flags;
	spin_lock_irqsave(&port->lock, flags);
	if (zx29_dma_rx_trigger_dma(zup)) {
		/* DMA unavailable: re-enable interrupt-driven receive. */
		printk("rx_dma_chars RXDMA start fail\n");
		zup->imr |= (UART_RTIM|UART_RXIM);
		UART_PUT_IMSC(&zup->port,zup->imr);
	}else{
		/* DMA restarted: rearm the polling timer, reset the DMA
		 * bookkeeping and clear any stale RX interrupts. */
		uart_mod_timer(zup, &flags);
		zup->pre_pending = 0;
		zup->dmarx.used = true;
		zup->work_state = true;
		UART_PUT_ICR(&zup->port,(UART_RTIS|UART_RXIS));
	}
	spin_unlock_irqrestore(&port->lock, flags);
}
+
+/****************************************************************************/
+/*
+ * Define the basic serial functions we support.
+ */
/* serial_core callbacks for the zx29 UART (see struct uart_ops). */
static const struct uart_ops zx29_uart_ops = {
	.tx_empty	= zx29_uart_tx_empty,
	.set_mctrl	= zx29_uart_set_mctrl,
	.get_mctrl	= zx29_uart_get_mctrl,
	.start_tx	= zx29_uart_start_tx,
	.stop_tx	= zx29_uart_stop_tx,
	.stop_rx	= zx29_uart_stop_rx,
	/* throttle/unthrottle stop and restart the RX DMA pipeline */
	.throttle	= zx29_uart_throttle_rx,
	.unthrottle	= zx29_uart_unthrottle_rx,
	.enable_ms	= zx29_uart_enable_ms,
	.break_ctl	= zx29_uart_break_ctl,
	.startup	= zx29_uart_startup,
	.shutdown	= zx29_uart_shutdown,
	.set_termios	= zx29_uart_set_termios,
#if CONFIG_SERIAL_ZX29_DMA
	.flush_buffer	= zx29_dma_flush_buffer,
#endif
	.type		= zx29_uart_type,
	.request_port	= zx29_uart_request_port,
	.release_port	= zx29_uart_release_port,
	.config_port	= zx29_uart_config_port,
	.verify_port	= zx29_uart_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char	= zx29_get_poll_char,
	.poll_put_char	= zx29_put_poll_char,
#endif

};
+
+
+/****************************************************************************/
+static int zx29_init_ports(struct zx29_uart_port *zx29_port,
+ struct platform_device *pdev)
+{
+ int ret = 0;
+ struct uart_port *port=&zx29_port->port;
+ unsigned int offset=(unsigned int)(pdev->id);
+ struct device_node *np = pdev->dev.of_node;
+ unsigned int baud, ibrd, fbrd;
+ /*cz.li add for uart1 2 change baudrate to 4M on 2023/8/15 start*/
+ unsigned int max_bus_clk;
+ /*cz.li add for uart1 2 change baudrate to 4M on 2023/8/15 end*/
+ struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ //struct resource *irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
+ if(!regs){
+ dev_err(&pdev->dev, "zx29_init_ports, get resource fail,\n");
+ return -ENODEV;
+ }
+/*get apb clock*/
+ zx29_port->busclk = devm_clk_get(&pdev->dev, UART_APBCLK_NAME);
+ if (IS_ERR(zx29_port->busclk)) {
+ ret = PTR_ERR(zx29_port->busclk);
+ printk("failed to get zx29_port->busclk: %d\n", ret);
+ return ret;
+ }
+
+ /*get work clock*/
+ zx29_port->wclk = devm_clk_get(&pdev->dev, UART_WCLK_NAME);
+
+ if (IS_ERR(zx29_port->wclk)) {
+ ret = PTR_ERR(zx29_port->wclk);
+ printk("failed to get zx29_port->wclk: %d\n", ret);
+ return ret;
+ }
+ /*cz.li add for uart1 2 change baudrate to 4M on 2023/8/15 start*/
+ if(0 == pdev->id || 2 == pdev->id){
+ device_property_read_u32(&pdev->dev, "uart-max-bus-freq", &max_bus_clk);
+ clk_set_rate(zx29_port->wclk, max_bus_clk);
+ }
+ /*cz.li add for uart1 2 change baudrate to 4M on 2023/8/15 end*/
+ if(offset == 0){
+ clk_set_rate(zx29_port->wclk, 104 * 1000000);
+ }
+ port->line = offset;
+ port->type = PORT_ZX29;
+ port->fifosize = UART_TXFIFO_SIZE;
+ //port->iotype = UPIO_MEM;
+ //port->irq = irq->start;
+ port->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if(pdev->id == 0){
+ zx29_port->rxd_irq = irq_of_parse_and_map(pdev->dev.of_node, 1);
+ }
+ //port->membase = devm_ioremap_nocache(&pdev->dev, regs->start,
+ // resource_size(regs));
+ port->membase = devm_platform_ioremap_resource(pdev, 0);
+
+ if (!port->membase)
+ return -ENODEV;
+ port->mapbase = regs->start;
+ port->mapsize = resource_size(regs);
+
+ //port->flags = UPF_BOOT_AUTOCONF;
+ port->ops = &zx29_uart_ops;
+ port->uartclk = clk_get_rate(zx29_port->wclk);
+
+ port->private_data = pdev;
+ //here is temple def
+ if(port->uartclk == 0){
+ printk("---zx29_init_ports, uartclk hard set to 26M\n");
+ /*cz.li add for uart1 2 change baudrate to 4M on 2023/8/15 start*/
+ if(0 == pdev->id || 2 == pdev->id)
+ port->uartclk = 104000000;
+ else
+ /*cz.li add for uart1 2 change baudrate to 4M on 2023/8/15 end*/
+ port->uartclk = 26000000;
+ }
+ printk("---zx29_init_ports, line:%d, irq:%d, membase:%08x, uartclk:%d\n", port->line, port->irq, port->membase, port->uartclk);
+ /*
+ * just configure clock,
+ * actually pin configuration is needed, but now gpio driver is not OK
+ * use bootloader default configuration
+ */
+ if(DEBUG_CONSOLE == pdev->id){
+ /* config uart apb_clk */
+ clk_prepare_enable(zx29_port->busclk);
+
+ /* enable uart work clock */
+ clk_prepare_enable(zx29_port->wclk);
+ }
+ return 0;
+}
+
+
+#ifdef CONFIG_SERIAL_ZX29_UART_CONSOLE
+
+#if VEHICLE_USE_ONE_UART_LOG
+static void zx29_uart_console_putc(struct uart_port *port, int c)
+{
+ if(g_core_id_occupy_uart == SYMB_PS_CORE_ID)
+ return;
+ int ret = soft_spin_lock_printf(UART_SFLOCK);
+ if(ret)
+ return;
+ while (UART_GET_FR(port) & UART_FR_TXFF)
+ barrier();
+ UART_PUT_CHAR(port, c);
+ soft_spin_unlock(UART_SFLOCK);
+}
+#else
+static void zx29_uart_console_putc(struct uart_port *port, int c)
+{
+ while (UART_GET_FR(port) & UART_FR_TXFF)
+ barrier();
+ UART_PUT_CHAR(port, c);
+}
+
+#endif
+
+
+
+/****************************************************************************/
+static void zx29_uart_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct uart_port *port = &zx29_uart_ports[co->index].port;
+
+ //spin_lock(&port->lock);
+ for (; (count); count--, s++) {
+ zx29_uart_console_putc(port, *s);
+ if (*s == '\n')
+ zx29_uart_console_putc(port, '\r');
+ }
+
+ //spin_unlock(&port->lock);
+}
+
+/***************************************************************************
+ * If the port was already initialised (eg, by a boot loader),
+ * try to determine the current setup.
+ ****************************************************************************/
+static void __init zx29_console_get_options(struct uart_port *port, int *baud,
+ int *parity, int *bits)
+{
+ if (UART_GET_CR(port) & UART_CR_UARTEN) {
+ unsigned int lcr_h, ibrd, fbrd;
+
+ lcr_h = UART_GET_LCRH(port);
+ *parity = 'n';
+ if (lcr_h & UART_LCRH_PEN) {
+ if (lcr_h & UART_LCRH_EPS)
+ *parity = 'e';
+ else
+ *parity = 'o';
+ }
+ if ((lcr_h & 0x60) == UART_LCRH_WLEN_7)
+ *bits = 7;
+ else
+ *bits = 8;
+
+ ibrd = UART_GET_IBRD(port);
+ fbrd = UART_GET_FBRD(port);
+
+ *baud = port->uartclk * 8 / (16*8 * ibrd + 2*fbrd-1);
+ }
+}
+
+/****************************************************************************/
+static int __init zx29_uart_console_setup(struct console *co, char *options)
+{
+ printk("zx29_uart_console_setup.\n");
+ struct uart_port *port;
+ int baud = 115200;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+ unsigned int uart_cr = 0;
+
+ if ((co->index < 0) || (co->index >= zx29_MAXPORTS))
+ co->index = CONFIG_UART_CONSOLE_ID;
+
+ port = &zx29_uart_ports[co->index].port;
+ if (port->membase == NULL)
+ return -ENODEV;
+
+ uart_cr = UART_GET_CR(port);
+ uart_cr |= UART_CR_UARTEN | UART_CR_TXE;
+ UART_PUT_CR(port,uart_cr);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ else
+ zx29_console_get_options(port, &baud, &parity, &bits);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+/****************************************************************************/
+
+static struct uart_driver zx29_uart_driver;
+int zx29_get_console_index(void)
+{
+#if 0
+ int dev_cnt = zx29_device_table_num;
+ int idx = 0;
+ struct platform_device *pdev = NULL;
+ for(idx = 0; idx < dev_cnt; idx++)
+ {
+ pdev = zx29_device_table[idx];
+ if(strcmp(pdev->name,"zx29_uart") == 0 && pdev->id == CONFIG_UART_CONSOLE_ID)
+ return idx;
+ }
+#endif
+ return CONFIG_UART_CONSOLE_ID;
+ //return -1;
+}
/* Console on ttyS<n>; index -1 lets the core pick the index from the
 * kernel command line.  .data links back to the uart driver. */
static struct console zx29_uart_console = {
	.name		= "ttyS",
	.write		= zx29_uart_console_write,
	.device		= uart_console_device,
	.setup		= zx29_uart_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &zx29_uart_driver,
};
+static int __init zx29_uart_console_init(void)
+{
+ int console_dev_id = zx29_get_console_index();
+
+ if(console_dev_id < 0){
+ printk("console init fail, uart config fail, console_dev_id is: %d", console_dev_id);
+ return -1;
+ }
+ //zx29_init_ports(&zx29_uart_ports[DEBUG_CONSOLE], zx29_device_table[console_dev_id]);
+ register_console(&zx29_uart_console);
+ pr_info("[UART]register_console: zx29 console registered!\n");
+
+ return 0;
+}
+
+//console_initcall(zx29_uart_console_init);
+
+#define zx29_UART_CONSOLE (&zx29_uart_console)
+
+
+static void zx29_uart_early_write(struct console *con, const char *s, unsigned n)
+{
+ struct earlycon_device *dev = con->data;
+
+ uart_console_write(&dev->port, s, n, zx29_uart_console_putc);
+}
+
+static int __init zx29_uart_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ if (!device->port.membase)
+ return -ENODEV;
+
+ device->con->write = zx29_uart_early_write;
+
+ return 0;
+}
+
+OF_EARLYCON_DECLARE(zx29_uart, "zxic,zx29-uart", zx29_uart_early_console_setup);
+#else
+#define zx29_UART_CONSOLE NULL
+#endif /* CONFIG_zx29_UART_CONSOLE */
+static void zx29_uart_pin_ctrl(struct platform_device *pdev)
+{
+ struct pinctrl *pin_ctrl;
+ struct pinctrl_state *state0;
+ pin_ctrl = devm_pinctrl_get(&pdev->dev);
+ switch(pdev->id){
+ case 0:
+ printk("zx29_uart %d use default pinctrl.",pdev->id);
+ break;
+ case 1:
+ printk("zx29_uart %d use default pinctrl.",pdev->id);
+ break;
+
+ case 2:
+ if(IS_ERR(pin_ctrl)){
+ dev_warn(&pdev->dev, "fail to get uart2 pins.");
+ pin_ctrl = NULL;
+ return;
+ }
+ state0 = pinctrl_lookup_state(pin_ctrl, "default");
+ if(IS_ERR(state0)){
+ dev_err(&pdev->dev, "uart2 pinstate get fail.\n");
+ }
+ if(pinctrl_select_state(pin_ctrl, state0)){
+ dev_err(&pdev->dev, "uart2 select pinstate fail.\n");
+ }
+ break;
+ }
+}
+
+/*
+ * Define the zx29 UART driver structure.
+ */
/* Top-level uart_driver: registers zx29_MAXPORTS "ttyS" devices and
 * attaches the console defined above (NULL when console is disabled). */
static struct uart_driver zx29_uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= "zx29_uart",
	.dev_name	= "ttyS",
	.major		= SERIAL_zx29_MAJOR,
	.minor		= SERIAL_MINOR_START,
	.nr		= zx29_MAXPORTS,
	.cons		= zx29_UART_CONSOLE,
};
+
/* Per-port wakelock names; only referenced by the commented-out
 * wake_lock code in zx29_uart_probe() in the visible source. */
unsigned char uart_wakelock_name[zx29_MAXPORTS][20]={{0}};
+/****************************************************************************/
+
+
+#ifdef CONFIG_PM_SLEEP
+static int zx29_uart_suspend(struct device *dev)
+{
+ int ret = 0;
+ struct zx29_uart_port *zup = dev_get_drvdata(dev);
+ unsigned int flags;
+ if (!zup)
+ return -EINVAL;
+ if(zup->port.line == UART1)
+ return 0;
+#if 1
+ pinctrl_pm_select_sleep_state(dev);
+#endif
+
+ printk("zx29_uart%d suspend.\n",zup->port.line);
+
+ raw_spin_lock_irqsave(&zup->port.lock, flags);
+ zup->enter_suspend = 1;
+ if(zup->port.line == UART2){
+ zx29_dma_stop(DMA_CH_UART2_RX);
+ zx29_dma_stop(DMA_CH_UART2_TX);
+ raw_spin_unlock_irqrestore(&zup->port.lock, flags);
+ return 0;
+ }
+
+ zx29_dma_stop(DMA_CH_UART0_RX);
+ zx29_dma_stop(DMA_CH_UART0_TX);
+ raw_spin_unlock_irqrestore(&zup->port.lock, flags);
+#if 0
+ ret = irq_set_irq_type(unsigned int irq, unsigned int type);
+
+#endif
+ //pcu_int_clear(PCU_UART0_RXD_INT);
+ if(zup->irq_state && (zup->rxd_int_depth == 0)){
+ struct irq_data *data_rxd;
+ data_rxd= irq_get_irq_data(zup->rxd_irq);
+ if(data_rxd){
+ if(irqd_irq_disabled(data_rxd))
+ enable_irq(zup->rxd_irq);
+ }
+ zup->rxd_int_depth = 1;
+ }
+
+ return 0;
+}
+
+static int zx29_uart_resume(struct device *dev)
+{
+ struct zx29_uart_port *zup = dev_get_drvdata(dev);
+
+ if (!zup)
+ return -EINVAL;
+ int uart_id = zup->port.line;
+ if(zup->port.line == UART1)
+ return 0;
+#if 1
+ pinctrl_pm_select_default_state(dev);
+#endif
+
+ printk("zx29_uart%d resume.\n",zup->port.line);
+
+ zup->enter_suspend = 0;
+ uart_dma_cycle[uart_id].from_resume = 1;
+ return 0;
+}
+
+
+#endif
+
+static SIMPLE_DEV_PM_OPS(zx29_uart_dev_pm_ops, zx29_uart_suspend, zx29_uart_resume);
+
+
+
+static int zx29_uart_probe(struct platform_device *pdev)
+{
+ int ret=0;
+ int error;
+ char wakelock_name[20];
+ struct device_node *np = pdev->dev.of_node;
+ ret = of_alias_get_id(np, "uart");
+
+ if(ret < 0){
+ printk("-----zx29_uart_probe,of_alias_get_id fail ret:%d\n", ret);
+ return -ENODEV;
+ }
+ pdev->id = ret;
+ printk("-----zx29_uart_probe,ret:%d\n", ret);
+ // struct zx29_uart_platdata *pdata = pdev->dev.platform_data;
+ struct zx29_uart_port *port = &zx29_uart_ports[pdev->id];
+
+
+#if 0//def CONFIG_SERIAL_ZX29_UART_CONSOLE
+ if(DEBUG_CONSOLE != pdev->id){
+ ret = zx29_init_ports(port,pdev);
+ }
+#else
+ ret = zx29_init_ports(port,pdev);
+
+#endif
+ if(ret < 0){
+ printk("-----zx29_uart_probe,zx29_init_ports fail ret:%d\n", ret);
+ }
+ zx29_uart_pin_ctrl(pdev);
+#if CONFIG_SERIAL_ZX29_DMA
+ if((DEBUG_CONSOLE != pdev->id ) && (pdev->id != 4))
+ {
+ uart_dma_init(port);
+ printk(KERN_INFO "[%s][%d]UART_%d DMA is OPENED\n",__func__,__LINE__,pdev->id);
+ }
+#endif
+ ret = 0;
+ ret=uart_add_one_port(&zx29_uart_driver, &port->port);
+
+ if(ret)
+ {
+#if CONFIG_SERIAL_ZX29_DMA
+ zx29_dma_remove(port);
+#endif
+ return ret;
+ }
+#if CONFIG_SERIAL_ZX29_DMA
+ sema_init(&port->sema, 0);
+#endif
+
+ platform_set_drvdata(pdev, port);
+
+ if(pdev->id == DEBUG_CONSOLE){
+ //g_console_open_flag = pdata->uart_input_enable ? pdata->uart_input_enable : 0;
+ error = device_create_file(&pdev->dev, &dev_attr_console_input);
+#if VEHICLE_USE_ONE_UART_LOG
+ error = device_create_file(&pdev->dev, &dev_attr_console_uart_toggle);
+ error = device_create_file(&pdev->dev, &dev_attr_coreid_occupy_uart);
+ int ret;
+ ret = rpmsgCreateChannel(CORE_PS0, ICP_CHANNEL_CONSOLE_UART, ICP_BUFFERSIZE_CONSOLE_TOGGLE);
+ if(ret){
+ printk("linux5 request icp channel for uart fail %d.\n",ret);
+ }
+ rpmsgRegCallBack(CORE_PS0, ICP_CHANNEL_CONSOLE_UART, icp_callback_ps2cap);
+#endif
+ }
+
+ if(pdev->id != DEBUG_CONSOLE){
+ error = device_create_file(&pdev->dev, &dev_attr_ctsrts_input);
+ error = device_create_file(&pdev->dev, &dev_attr_wakeup_enable);
+ error = device_create_file(&pdev->dev, &dev_attr_sleep_state);
+ error = device_create_file(&pdev->dev, &dev_attr_app_ctrl);
+
+ }
+ error = device_create_file(&pdev->dev, &dev_attr_statics);
+ device_init_wakeup(&pdev->dev, true);
+/*
+ strcpy(wakelock_name, "uart_wakelock_x");
+ wakelock_name[14] = '0' + pdev->id;
+ strcpy(uart_wakelock_name[pdev->id], wakelock_name);
+ wake_lock_init(&(port->port.port_wakelock),WAKE_LOCK_SUSPEND,uart_wakelock_name[pdev->id]);
+*/
+
+ printk(KERN_INFO "TSP zx29 UART_%d probe OK\n",pdev->id);
+ return 0;
+}
+
+/****************************************************************************/
+static int /*__devexit*/ zx29_uart_remove(struct platform_device *pdev)
+{
+ struct uart_port *port = NULL;
+#if CONFIG_SERIAL_ZX29_DMA
+ struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
+#endif
+ int i;
+ if(pdev->id == DEBUG_CONSOLE){
+ device_remove_file(&pdev->dev, &dev_attr_console_input);
+ }
+
+ if(pdev->id != DEBUG_CONSOLE){
+ device_remove_file(&pdev->dev, &dev_attr_ctsrts_input);
+ device_remove_file(&pdev->dev, &dev_attr_wakeup_enable);
+ }
+
+ for (i = 0; (i < zx29_MAXPORTS); i++) {
+ port = &zx29_uart_ports[i].port;
+ if (port){
+ uart_remove_one_port(&zx29_uart_driver, port);
+
+
+#if CONFIG_SERIAL_ZX29_DMA
+ zup=container_of(port,struct zx29_uart_port,port);
+ zx29_dma_remove(zup);
+
+#endif
+ }
+ }
+ return 0;
+}
+
/* Device-tree match table: binds this driver to "zxic,zx29-uart" nodes. */
static const struct of_device_id zx29_uart_of_match[] = {
	{ .compatible = "zxic,zx29-uart"},
	{},
};
MODULE_DEVICE_TABLE(of, zx29_uart_of_match);

/* Platform driver: probe/remove plus the suspend/resume PM hooks. */
static struct platform_driver zx29_uart_platform_driver = {
	.probe	= zx29_uart_probe,
	//.remove	= __devexit_p(zx29_uart_remove),
	.remove = zx29_uart_remove,
	.driver	= {
		.name	= "zx29_uart",
		.pm = &zx29_uart_dev_pm_ops,
		.of_match_table	= of_match_ptr(zx29_uart_of_match),
		.owner	= THIS_MODULE,
	},
};
+
+static int __init zx29_uart_init(void)
+{
+ int rc;
+
+ rc = uart_register_driver(&zx29_uart_driver);
+ if (rc)
+ return rc;
+ rc = platform_driver_register(&zx29_uart_platform_driver);
+ if (rc){
+ uart_unregister_driver(&zx29_uart_driver);
+ return rc;
+ }
+
+ printk(KERN_INFO "zx29 UART driver registered\n");
+
+ return 0;
+}
+
+static void __exit zx29_uart_exit(void)
+{
+#ifdef CONFIG_SERIAL_ZX29_UART_CONSOLE
+ unregister_console(&zx29_uart_console);
+#endif
+ platform_driver_unregister(&zx29_uart_platform_driver);
+ uart_unregister_driver(&zx29_uart_driver);
+}
+//arch_initcall(zx29_uart_init);
+
+//subsys_initcall(zx29_uart_init);
+module_init(zx29_uart_init);
+module_exit(zx29_uart_exit);
+