[Feature][T106_eSDK]T106-V2.01.01.02P56U06.AP.15.05_CAP.15.05(SDK4.4)diff_15.11
Only Configure: No
Affected branch: master
Affected module: unknown
Is it affected on both ZXIC and MTK: only ZXIC
Self-test: Yes
Doc Update: No
Change-Id: Ief12bb341bd859dd73c7a8c5fa3d25e5ba7e1c6d
diff --git a/upstream/linux-5.10/drivers/net/zvnet/zvnet_dev.c b/upstream/linux-5.10/drivers/net/zvnet/zvnet_dev.c
new file mode 100755
index 0000000..bea60e1
--- /dev/null
+++ b/upstream/linux-5.10/drivers/net/zvnet/zvnet_dev.c
@@ -0,0 +1,1269 @@
+/*******************************************************************************
+ * Include header files *
+ ******************************************************************************/
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <net/sock.h>
+#include <uapi/linux/sched/types.h>
+#include "zvnet_dev.h"
+#include "ram_config.h"
+#include <net/netfilter/nf_conntrack.h>
+
+/*******************************************************************************
+ * Macro definitions *
+ ******************************************************************************/
+#define USE_ZVNET_PACKET
+
+#define WATCHDOG_TIMEO (5*HZ)
+#define XMIT_RETRANS_TIMES 3
+#define ZVNET_SKB_PAD 128
+#define ZVNET_TMP_BUFF_LEN 2048
+#define ZVNET_FREE_BUFF_NUM 256
+#define ZVNET_XMIT_BUFF_NUM 64
+#define ZVNET_XMIT_MAX_QUEUE_NUM 2048
+
+/*******************************************************************************
+ * Type definitions *
+ ******************************************************************************/
+//Packet descriptor passed between the AP and CAP cores; written into shared DDR.
+struct T_zvnet_rpmsg
+{
+	void *buff;//skb head pointer; handed back across cores on free so the owner can release it quickly
+	void *head;//physical address boundary of the buffer usable by the peer core; must not be exceeded (memory overflow otherwise)
+	unsigned short data_off;//offset from head to the start of packet data (points at the MAC header)
+	unsigned short len;//valid packet length, normally the MAC frame length
+	unsigned short end_off;//end offset
+	unsigned char dev;//cid 1->8
+	unsigned char flag;//0: normal packet, 1: re-forwarded packet, 2: fast-path re-forwarded packet
+};
+//Per-direction packet/byte counter pair.
+struct T_zvnet_pkt_stats
+{
+	unsigned int pkt;
+	unsigned int len;
+};
+//Conntrack statistics/release record passed from AP to CAP; written into shared DDR.
+struct T_zvnet_rpmsg_ctstat
+{
+	void *cap_nfct;
+	unsigned char in;
+	unsigned char out;
+	unsigned short flag;
+	struct T_zvnet_pkt_stats pkt[2];
+};
+/*******************************************************************************
+ * Local variable definitions *
+ ******************************************************************************/
+/* Per-interface device table and the AP-side shared-DDR virtual mapping base. */
+struct zvnet_device zvnet_dev[DDR_ZVNET_DEV_MAX];
+int *vir_addr_ap = NULL;
+struct sk_buff_head g_zvnet_skb_queue;	/* skbs handed to the peer core, awaiting its release notification */
+struct zvnet_channel g_zvnet_chn_info;
+
+#ifdef USE_ZVNET_PACKET
+/* Batch of peer buffer pointers pending return (protected by g_zvnet_free_lock). */
+void *g_zvnet_free_buff[ZVNET_FREE_BUFF_NUM];
+int g_zvnet_free_num;
+spinlock_t g_zvnet_free_lock;
+struct semaphore g_zvnet_free_sem;	/* wakes the free-batch flush thread */
+struct semaphore g_zvnet_xmit_sem;	/* wakes the xmit-batch flush thread */
+struct sk_buff_head g_zvnet_skb_xmit_queue;
+
+/* Batching tunables, exposed as module parameters. */
+unsigned int g_wrap_packet_size = 1000;	/* packets shorter than this trigger an eager flush */
+module_param(g_wrap_packet_size, int, 0644);
+unsigned int g_wrap_num = 10;	/* queue depth that triggers a flush */
+module_param(g_wrap_num, int, 0644);
+unsigned int g_wrap_timeout = 10;	/* idle flush period, milliseconds (used via msecs_to_jiffies) */
+module_param(g_wrap_timeout, int, 0644);
+#endif
+
+/*******************************************************************************
+ * Global variable definitions *
+ ******************************************************************************/
+extern int (*fast_from_driver)(struct sk_buff *skb, struct net_device* dev);
+extern void v7_dma_map_area(const void *, size_t, int);
+extern void *get_ct_for_ap(struct sk_buff *skb);
+extern void put_ct_for_ap(void *ct);
+extern spinlock_t fast_fw_spinlock;
+/*******************************************************************************
+ * Local function declarations *
+ ******************************************************************************/
+static int zvnet_open(struct net_device *net);
+static int zvnet_close(struct net_device *net);
+static netdev_tx_t zvnet_xmit(struct sk_buff *skb, struct net_device *net);
+static void zvnet_tx_timeout(struct net_device *net, unsigned int txqueue);
+static struct net_device_stats *zvnet_get_stats(struct net_device *net);
+static void v2xnet_init_netdev(struct net_device *net);
+static void zvnet_skb_return (struct zvnet *dev, struct sk_buff *skb);
+static void zvnet_bh (unsigned long param);
+static struct zvnet *v2xnet_dev_init(struct net_device *net, struct zvnet_device *zvnetdev);
+
+static int zvnet_channel_write(struct zvnet_channel *chninfo, void *buf, unsigned int len);
+static int zvnet_channel_read(struct zvnet_channel *chninfo, void *buf, unsigned int len);
+static int zvnet_channel_clear(struct zvnet_channel *chninfo);
+static int zvnet_read_header(struct zvnet_channel *chninfo, struct zvp_header *phzvp);
+static struct sk_buff *zvnet_direct_read_skb(struct zvnet_channel *chninfo);
+static struct sk_buff *zvnet_read_skb(struct zvnet_channel *chninfo, unsigned int tlen, struct zvnet *dev);
+static int zvnet_receive_thread(void *argv);
+static int rpmsgCreateChannel_v2xnet (T_RpMsg_CoreID dstCoreID, T_RpMsg_ChID chID, unsigned int size);
+static int zvnet_createIcpChannel(T_RpMsg_CoreID core_id, T_RpMsg_ChID channel_id, unsigned int channel_size);
+static int zvnet_channel_create(struct zvnet_device *zvnetdev);
+
+/*******************************************************************************
+ * Local function implementations *
+ ******************************************************************************/
+
+/* Translate an AP-window virtual address to its physical address.
+ * Returns 0 when the address lies outside the shared DDR window (0 is not a
+ * valid window physical address, so it serves as the sentinel).
+ * NOTE(review): the upper bound uses <=, allowing the one-past-end address --
+ * confirm this is intended. */
+unsigned long virt_to_phys_ap(unsigned long virt)
+{
+	if (virt >= (unsigned long)vir_addr_ap && virt <= ((unsigned long)vir_addr_ap + DDR_BASE_LEN_AP))
+		return DDR_BASE_ADDR_AP + (virt - (unsigned long)vir_addr_ap);
+	return 0;	/* FIX: was "return NULL" -- pointer constant from an integer-returning function */
+}
+
+/* Translate a physical address inside the shared DDR window back to this
+ * core's virtual mapping.  Returns 0 when the address is outside the window. */
+unsigned long phys_to_virt_ap(unsigned long phys)
+{
+	if (phys >= DDR_BASE_ADDR_AP && phys <= (DDR_BASE_ADDR_AP + DDR_BASE_LEN_AP))
+		return (unsigned long)vir_addr_ap + (phys - DDR_BASE_ADDR_AP);
+	return 0;	/* FIX: was "return NULL" -- pointer constant from an integer-returning function */
+}
+
+/* Debug sanity check: panic if an skb's capHead marker disagrees with whether
+ * its head lies inside the shared AP window, or if the skb is already linked
+ * on g_zvnet_skb_queue (would be a double free).
+ * NOTE(review): walks g_zvnet_skb_queue without taking its lock -- assumed to
+ * be called only from contexts where that is safe; confirm. */
+void check_skb_test(struct sk_buff *skb)
+{
+	if(skb && vir_addr_ap){
+		struct sk_buff *tmp_skb;
+		if((skb->capHead && (virt_to_phys_ap(skb->head) == NULL))
+			|| ((skb->capHead == NULL) && virt_to_phys_ap(skb->head))){
+			dump_stack();
+			msleep(1000);	/* give the log a chance to drain before panicking */
+			panic("capHead err");
+		}
+		skb_queue_walk(&g_zvnet_skb_queue, tmp_skb) {
+			if(tmp_skb == skb){
+				dump_stack();
+				msleep(1000);
+				panic("dup free");
+			}
+		}
+	}
+}
+
+/* Return the index of @net in zvnet_dev[], or -1 if it is not one of ours. */
+int zvnet_get_index_by_netdev(struct net_device *net)
+{
+	int i;
+
+	for (i = 0; i < DDR_ZVNET_DEV_MAX; i++) {
+		if(zvnet_dev[i].net == net)
+			return i;
+	}
+	return -1;
+}
+#ifdef USE_ZVNET_PACKET
+/* Flush the accumulated batch of peer buffer pointers over ICP channel 29.
+ * Snapshots and empties g_zvnet_free_buff under g_zvnet_free_lock, then
+ * performs the channel write outside the lock. */
+void write_free_apbuf_packet(void)
+{
+	int ret,size;
+	unsigned long flags;
+	void *buf[ZVNET_FREE_BUFF_NUM];
+
+	spin_lock_irqsave(&g_zvnet_free_lock, flags);
+	if(g_zvnet_free_num == 0){
+		spin_unlock_irqrestore(&g_zvnet_free_lock, flags);
+		return;
+	}
+	size = (g_zvnet_free_num << 2);	/* entry count * 4 bytes per pointer */
+	memcpy(buf, g_zvnet_free_buff, size);
+	g_zvnet_free_num = 0;
+	spin_unlock_irqrestore(&g_zvnet_free_lock, flags);
+	{
+		T_RpMsg_Msg msg = { .coreID = CORE_PS0,
+							.chID = 29,
+							.flag = RPMSG_WRITE_INT|RPMSG_WRITE_IRQLOCK};
+		if((size >> 2) > ZVNET_FREE_BUFF_NUM)
+			panic("free packet err");
+		msg.buf = &buf;
+		msg.len = size;
+		zv_info("tofree size=%d", size);
+		ret = zvnetWrite(&msg);
+		if(ret < 0)
+			panic("err, ret:%d!!!!!!", ret);
+	}
+}
+#endif
+//Write a capbuf pointer received from the CAP side back into DDR and notify CAP over ICP.
+/* With USE_ZVNET_PACKET the pointers are batched: a full batch is flushed
+ * inline; a partial batch above g_wrap_num wakes the flush thread.  Without
+ * batching, each pointer is written individually on channel 29. */
+void write_free_apbuf(void *head)
+{
+#ifdef USE_ZVNET_PACKET
+	unsigned long flags;
+
+	zv_info("g_zvnet_free_num=%d skb=0x%x", g_zvnet_free_num, head);
+	spin_lock_irqsave(&g_zvnet_free_lock, flags);
+	g_zvnet_free_buff[g_zvnet_free_num] = head;
+	g_zvnet_free_num++;
+	if(g_zvnet_free_num == ZVNET_FREE_BUFF_NUM){
+		/* batch full: snapshot it, drop the lock, then write the channel */
+		int size;
+		void *buf[ZVNET_FREE_BUFF_NUM];
+
+		size = (g_zvnet_free_num << 2);
+		memcpy(buf, g_zvnet_free_buff, size);
+		g_zvnet_free_num = 0;
+		spin_unlock_irqrestore(&g_zvnet_free_lock, flags);
+		if((size >> 2) > ZVNET_FREE_BUFF_NUM)
+			panic("free packet err");
+		{
+			int ret;
+			T_RpMsg_Msg msg = { .coreID = CORE_PS0,
+								.chID = 29,
+								.flag = RPMSG_WRITE_INT|RPMSG_WRITE_IRQLOCK};
+			msg.buf = &buf;
+			msg.len = size;
+			if(printk_ratelimit())
+				zv_warn("tofree quick size=%d", size);
+			ret = zvnetWrite(&msg);
+			if(ret < 0)
+				panic("err, ret:%d!!!!!!", ret);
+		}
+		return;
+	}
+	if(g_zvnet_free_num > g_wrap_num)
+		up(&g_zvnet_free_sem);
+	if(g_zvnet_free_num > ZVNET_FREE_BUFF_NUM)
+		panic("free_buff err");
+	spin_unlock_irqrestore(&g_zvnet_free_lock, flags);
+#else
+	int ret = 0;
+	long buf = (long)head;
+
+	T_RpMsg_Msg msg = { .coreID = CORE_PS0,
+						.chID = 29,
+						.flag = RPMSG_WRITE_INT,
+						.buf = NULL,
+						.len = 4 };
+	msg.buf = &buf;
+	zv_info("tofree 0x%x", head);
+	ret = zvnetWrite(&msg);
+	if(ret < 0)
+		panic("err, ret:%d!!!!!!", ret);
+#endif
+}
+
+/* Build an sk_buff around a packet buffer described by the peer (CAP) core.
+ * The descriptor carries physical addresses inside the shared AP DDR window;
+ * these are translated to this core's virtual mapping.  On allocation failure
+ * the peer buffer is handed back via write_free_apbuf().
+ * Returns the new skb, or NULL on failure. */
+struct sk_buff *skb_build_apbuf(struct T_zvnet_rpmsg *pbuf_temp)
+{
+	struct sk_buff *skb;	/* FIX: removed unused local "shinfo" */
+	zv_info("build 0x%x 0x%x %d %d", pbuf_temp->buff, pbuf_temp->head, pbuf_temp->data_off, pbuf_temp->len);
+	if((unsigned long )pbuf_temp->head < DDR_BASE_ADDR_AP || (unsigned long )pbuf_temp->head > (DDR_BASE_ADDR_AP + DDR_BASE_LEN_AP)){
+		zv_err("err 0x%x 0x%x %d %d", pbuf_temp->buff, pbuf_temp->head, pbuf_temp->data_off, pbuf_temp->len);
+		panic("addr is not APBUF mem!!!");
+		return NULL;
+	}
+	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+	if (!skb)
+	{
+		write_free_apbuf(pbuf_temp->buff);
+		zv_err("alloc fail");
+		return NULL;
+	}
+	memset(skb, 0, offsetof(struct sk_buff, tail));
+
+	/* FIX: dev is unsigned char, so the former "dev < 0" arm was always false. */
+	if(pbuf_temp->dev >= DDR_ZVNET_DEV_MAX)
+		panic("dev index error!!!");
+	skb->head = phys_to_virt_ap((unsigned long )pbuf_temp->head);
+	skb->data = skb->head + pbuf_temp->data_off;
+	skb->capHead = pbuf_temp->buff;
+	/* cache-line alignment should be considered here */
+	skb->truesize = SKB_TRUESIZE(skb->data - skb->head + pbuf_temp->len);/* tail room beyond len not accounted for yet; see SKB_DATA_ALIGN */
+
+	refcount_set(&skb->users, 1);
+	skb_reset_tail_pointer(skb);
+	skb->tail += pbuf_temp->len;
+	skb->len = pbuf_temp->len;
+	skb->end = skb->head + pbuf_temp->end_off;
+	skb->mac_header = (typeof(skb->mac_header))~0U;
+	skb->transport_header = (typeof(skb->transport_header))~0U;
+	skb->dev = zvnet_dev[pbuf_temp->dev].net;
+	if(skb->len == 0 || skb->len > 2000)
+		panic("len ERR!!!!!!!!!!\n");
+	/* invalidate CPU cache over the buffer the peer core wrote */
+	v7_dma_map_area(skb->head, sizeof(struct skb_shared_info) + pbuf_temp->end_off, DMA_FROM_DEVICE);
+	if(IFF_NOARP & skb->dev->flags)
+		memcpy(skb->data, skb->dev->dev_addr, 6);
+	atomic_set(&skb_shinfo(skb)->dataref, 1);
+	/* make sure we initialize shinfo sequentially */
+	skb_reset_network_header(skb);
+	skb_set_kcov_handle(skb, kcov_common_handle());
+	return skb;
+}
+
+/* ndo_change_mtu: accept any MTU the core allows, with a deprecation warning.
+ * NOTE(review): reimplements a helper of the same name that older kernels
+ * provided -- confirm no symbol clash with the rest of this tree. */
+int eth_change_mtu(struct net_device *dev, int new_mtu)
+{
+	netdev_warn(dev, "%s is deprecated!\n", __func__);
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+/* Debug helper: hex-dump skb->data in rows of 10 bytes, then the remainder. */
+static void skb_debug_test(struct sk_buff *skb)
+{
+	int i;
+	int vcount = skb->len/10;	/* number of full 10-byte rows */
+	int rcount = skb->len%10;	/* leftover bytes after the full rows */
+	char tmp[64] = {0};
+	char strbuf[64] = {0};
+
+	zv_info("\n");
+	for(i=0; i<vcount; i++) {
+		zv_info("%d---0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x\n",i,skb->data[0+10*i],skb->data[1+10*i],skb->data[2+10*i],
+			skb->data[3+10*i],skb->data[4+10*i],skb->data[5+10*i],skb->data[6+10*i],skb->data[7+10*i],skb->data[8+10*i],skb->data[9+10*i]);
+	}
+	/* FIX: guard on rcount, not vcount -- the old test skipped the remainder
+	 * entirely for packets shorter than 10 bytes and printed an empty line
+	 * when the length was an exact multiple of 10. */
+	if(rcount > 0) {
+		memset(tmp,0,sizeof(tmp));
+		memset(strbuf,0,sizeof(strbuf));
+		sprintf(strbuf,"%d---",vcount);
+
+		for(i=0; i<rcount; i++)
+		{
+			sprintf(tmp,"0x%x,",skb->data[10*vcount + i]);
+			strcat(strbuf,tmp);
+		}
+		zv_info("%s ",strbuf);
+	}
+	zv_info("\n");
+}
+
+/* ndo_open: start the tx queue.  Returns -EBUSY if the device is already up. */
+static int zvnet_open(struct net_device *net)
+{
+	struct zvnet *dev = netdev_priv(net);
+
+	if(net->flags & IFF_UP) {
+		zv_dbg("%s has been opened!", dev->net->name);
+		return -EBUSY;
+	}
+	netif_start_queue (net);
+
+	return 0;
+}
+
+/* ndo_stop: stop the tx queue and kill the rx tasklet. */
+static int zvnet_close(struct net_device *net)
+{
+	struct zvnet *dev = netdev_priv(net);
+
+	zv_info("%s", dev->net->name);
+	netif_stop_queue(net);
+	tasklet_kill (&dev->bh);
+
+	return 0;
+}
+#ifdef USE_ZVNET_PACKET
+/* Flush g_zvnet_skb_xmit_queue to the peer core over ICP channel 20 in chunks
+ * of at most ZVNET_XMIT_BUFF_NUM descriptors.  Peer-owned buffers (capHead set)
+ * are described by their original pointer and freed locally right away;
+ * locally owned skbs are moved to g_zvnet_skb_queue until the peer reports
+ * them released.  On a failed chunk write the affected entries are unwound. */
+static void zvnet_xmit_packet(void)
+{
+	int i,j,k,ret,num;
+	unsigned long flags;
+	unsigned long flags1;
+	struct sk_buff *skb, *tmp;
+	T_RpMsg_Msg msg = { .coreID = CORE_PS0,
+						.chID = 20,
+						.flag = RPMSG_WRITE_INT};
+	/* static is assumed safe because only the single xmit flush thread calls
+	 * this -- NOTE(review): confirm no other caller exists */
+	static struct T_zvnet_rpmsg buff[ZVNET_XMIT_MAX_QUEUE_NUM+1];
+
+	spin_lock_irqsave(&g_zvnet_skb_xmit_queue.lock, flags);
+	if (skb_queue_empty(&g_zvnet_skb_xmit_queue)) {
+		spin_unlock_irqrestore(&g_zvnet_skb_xmit_queue.lock, flags);
+		return;
+	}
+	i = 0;
+	skb_queue_walk_safe(&g_zvnet_skb_xmit_queue, skb, tmp) {
+		//buff[i].buff = skb;
+		buff[i].data_off = skb->data - skb->head;
+		//buff[i].head = virt_to_phys(skb->head);
+		buff[i].len = skb->len;
+		buff[i].end_off = skb->end - skb->head;
+		buff[i].dev = zvnet_get_index_by_netdev(skb->dev);
+		if(skb->capHead){
+			/* buffer belongs to the peer: return its pointer, free our skb now */
+			buff[i].buff = skb->capHead;
+#ifdef CONFIG_FASTNAT_MODULE
+			if(skb->isFastnat){
+				buff[i].head = get_ct_for_ap(skb);
+				buff[i].flag = 2;
+			}else
+#endif
+			{
+				buff[i].head = NULL;
+				buff[i].flag = 1;
+			}
+			__skb_unlink(skb, &g_zvnet_skb_xmit_queue);
+			kfree_skb(skb);
+		}else{
+			buff[i].buff = skb;
+			buff[i].head = virt_to_phys(skb->head);
+			buff[i].flag = 0;
+		}
+		i++;
+		zv_info("xmit skb=0x%x i=%d", skb, i);
+		if(i > ZVNET_XMIT_MAX_QUEUE_NUM){
+			panic("qlen:%d!", i);
+			break;
+		}
+	}
+	/* remaining (locally owned) skbs move to the in-flight queue */
+	spin_lock_irqsave(&g_zvnet_skb_queue.lock, flags1);
+	skb_queue_splice_tail_init(&g_zvnet_skb_xmit_queue, &g_zvnet_skb_queue);
+	spin_unlock_irqrestore(&g_zvnet_skb_queue.lock, flags1);
+	spin_unlock_irqrestore(&g_zvnet_skb_xmit_queue.lock, flags);
+	zv_info("g_zvnet_skb_queue.qlen=%d i=%d", g_zvnet_skb_queue.qlen, i);
+	for(j = 0; j < i; j = j + ZVNET_XMIT_BUFF_NUM){
+		if(i <= (j + ZVNET_XMIT_BUFF_NUM)){
+			msg.buf = (void *)&buff[j];
+			msg.len = sizeof(struct T_zvnet_rpmsg)*(i-j);/* final chunk drains the remainder */
+			ret = zvnetWrite(&msg);
+		}else{
+			msg.buf = (void *)&buff[j];
+			msg.len = sizeof(struct T_zvnet_rpmsg)*ZVNET_XMIT_BUFF_NUM;
+			ret = zvnetWrite(&msg);
+		}
+		zv_info("xmit write ret=%d size=%d i=%d j=%d", ret, msg.len, i, j);
+		if(ret < 0) {
+			/* write failed: drop this chunk's packets and undo bookkeeping */
+			if(printk_ratelimit())
+				zv_warn("zvnet_channel_write ret=%d fail.",ret);
+			num = msg.len / sizeof(struct T_zvnet_rpmsg);
+			for(k = j; k < j+num; k++){
+				if(buff[k].flag == 0){
+					skb = (struct sk_buff *)buff[k].buff;
+					skb_unlink(skb, &g_zvnet_skb_queue);
+					skb->isToap = 0;
+					kfree_skb(skb);
+				}else{
+					if(buff[k].head)
+						put_ct_for_ap(buff[k].head);
+					write_free_apbuf(buff[k].buff);
+				}
+			}
+		}
+	}
+}
+#endif
+/* ndo_start_xmit: hand the packet to the peer (CAP) core.
+ * Packets that cannot be passed by reference (insufficient headroom, cloned,
+ * fragmented, zero-copy, frag-listed) are first copied into a fresh linear
+ * skb.  Always returns NET_XMIT_SUCCESS; drops are counted in net->stats. */
+static netdev_tx_t zvnet_xmit(struct sk_buff *skb, struct net_device *net)
+{
+#ifdef USE_ZVNET_PACKET
+	struct sk_buff *data = NULL;
+
+	//zv_info("g_zvnet_skb_xmit_queue.qlen=%d", g_zvnet_skb_xmit_queue.qlen);
+	if(g_zvnet_skb_xmit_queue.qlen >= ZVNET_XMIT_MAX_QUEUE_NUM){
+		net->stats.tx_errors++;
+		net->stats.tx_dropped++;
+		zv_err("write err, qlen:%d!", g_zvnet_skb_xmit_queue.qlen);
+		kfree_skb(skb);
+		return NET_XMIT_SUCCESS;
+	}
+
+	if(unlikely(skb_headroom(skb) < NET_SKB_PAD || skb->next//|| skb->capHead
+		|| skb->fclone || skb->cloned || (skb_shinfo(skb)->nr_frags)
+		|| (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) || (skb_has_frag_list(skb)))){
+		int ret_len = skb->len;
+
+		data = dev_alloc_skb(ret_len + NET_IP_ALIGN);
+		if (unlikely(!data)) {
+			zv_err("dev_alloc_skb fail,len %d",ret_len);
+			net->stats.tx_errors++;
+			net->stats.tx_dropped++;
+			kfree_skb(skb);
+			return NET_XMIT_SUCCESS;
+		}
+		/* FIX: reserve headroom before putting data (idiomatic order; the old
+		 * put-then-reserve yielded the same pointers but only by accident). */
+		skb_reserve(data, NET_IP_ALIGN);
+		skb_put(data,ret_len);
+		memcpy(data->data, skb->data, ret_len);
+		zv_info("ap=0x%x next=0x%x clone=%d nr_frags=%d tx_flags=%d frag_list=0x%x", skb->capHead, skb->next, skb->cloned, (skb_shinfo(skb)->nr_frags), skb_shinfo(skb)->tx_flags, skb_shinfo(skb)->frag_list);
+		kfree_skb(skb);
+	}else{
+		data = skb;
+	}
+	data->dev = net;
+	data->isToap = 1;
+	/* flush CPU cache so the peer core sees the packet bytes */
+	v7_dma_map_area(data->head, data->end - data->head + sizeof(struct skb_shared_info), DMA_TO_DEVICE);
+	skb_queue_tail(&g_zvnet_skb_xmit_queue, data);
+	if(data->len < g_wrap_packet_size || g_zvnet_skb_xmit_queue.qlen > g_wrap_num)
+		up(&g_zvnet_xmit_sem);
+	net->stats.tx_packets++;
+	net->stats.tx_bytes += data->len;	/* FIX: was skb->len -- skb is already freed in the copy path above (use-after-free) */
+#else
+	struct zvnet *dev = netdev_priv(net);
+	struct zvnet_device *zvnetdev = (struct zvnet_device *)dev->dev_priv;
+	int ret = 0;
+	struct zvp_header hzvp;
+
+	if (!skb) {
+		zv_err("err: skb == 0!");
+	}
+#if 0
+	if (skb->len > ZVNET_TMP_BUFF_LEN) {
+		zv_err("err: skb->len(%d)>%d!", skb->len, ZVNET_TMP_BUFF_LEN);
+	}
+
+send_header:
+	ret = zvnet_channel_write(&(zvnetdev->chn_info), skb->data, skb->len);
+
+	if((ret < 0) && (zvnetdev->retran_times < XMIT_RETRANS_TIMES)) {
+		zvnetdev->retran_times ++;
+		zv_warn("The retran_times is %d.",zvnetdev->retran_times);
+		goto send_header;
+	}
+
+	if (ret >= 0) {
+		net->stats.tx_packets++;
+		net->stats.tx_bytes += skb->len;
+	} else {
+		net->stats.tx_errors++;
+		net->stats.tx_dropped++;
+		zv_err("write err, ret:%d!", ret);
+	}
+
+exit:
+	kfree_skb(skb);
+#else
+	struct T_zvnet_rpmsg buff = {0};
+	struct sk_buff *data = NULL;
+	if(unlikely(skb_headroom(skb) < NET_SKB_PAD || skb->capHead || skb->next
+		|| skb->fclone || skb->cloned || (skb_shinfo(skb)->nr_frags)
+		|| (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) || (skb_has_frag_list(skb)))){
+		int ret_len = skb->len;
+
+		data = dev_alloc_skb(ret_len + NET_IP_ALIGN);
+		if (unlikely(!data)) {
+			zv_err("dev_alloc_skb fail,len %d",ret_len);
+			net->stats.tx_errors++;
+			net->stats.tx_dropped++;
+			kfree_skb(skb);
+			return NET_XMIT_SUCCESS;
+		}
+		/* FIX: reserve before put, as in the packet path above */
+		skb_reserve(data, NET_IP_ALIGN);
+		skb_put(data,ret_len);
+		memcpy(data->data, skb->data, ret_len);
+		data->isToap = 1;
+		buff.buff = data;
+		buff.data_off = data->data - data->head;
+		buff.head = virt_to_phys(data->head);
+		buff.len = ret_len;
+		buff.end_off = data->end - data->head;
+		buff.dev = zvnet_get_index_by_netdev(net);
+		zv_info("alloc 0x%x 0x%x %d %d", buff.buff, buff.head, buff.data_off, buff.len);
+		zv_info("ap=0x%x next=0x%x clone=%d nr_frags=%d tx_flags=%d frag_list=0x%x", skb->capHead, skb->next, skb->cloned, (skb_shinfo(skb)->nr_frags), skb_shinfo(skb)->tx_flags, skb_shinfo(skb)->frag_list);
+		v7_dma_map_area(data->head, buff.end_off + sizeof(struct skb_shared_info), DMA_TO_DEVICE);
+	}else{
+		skb->isToap = 1;
+		buff.buff = skb;
+		buff.data_off = skb->data - skb->head;
+		buff.head = virt_to_phys(skb->head);
+		buff.len = skb->len;
+		buff.end_off = skb->end - skb->head;
+		buff.dev = zvnet_get_index_by_netdev(net);
+		zv_info("transfer 0x%x %d 0x%x %d", buff.buff, buff.head, buff.data_off, buff.len);
+		v7_dma_map_area(skb->head, buff.end_off + sizeof(struct skb_shared_info), DMA_TO_DEVICE);
+	}
+send_header:
+	ret = zvnet_channel_write(&g_zvnet_chn_info, &buff, sizeof(struct T_zvnet_rpmsg));
+
+	if((ret < 0) && (zvnetdev->retran_times < XMIT_RETRANS_TIMES)) {
+		zvnetdev->retran_times ++;
+		zv_warn("The retran_times is %d.",zvnetdev->retran_times);
+		goto send_header;
+	}
+
+	if (ret >= 0) {
+		net->stats.tx_packets++;
+		net->stats.tx_bytes += skb->len;
+		if(data){
+			kfree_skb(skb);
+			skb_queue_tail(&g_zvnet_skb_queue, data);
+		}else
+			skb_queue_tail(&g_zvnet_skb_queue, skb);
+		zvnetdev->retran_times = 0;
+	} else {
+		net->stats.tx_errors++;
+		net->stats.tx_dropped++;
+		zv_err("write err, ret:%d!", ret);
+		if(data){
+			data->isToap = 0;
+			kfree_skb(data);
+		}
+		else
+			skb->isToap = 0;
+		kfree_skb(skb);
+	}
+#endif
+#endif
+	return NET_XMIT_SUCCESS;
+}
+
+/* Called by the kernel when transmit times out */
+/* ndo_tx_timeout: count the error and restart the stopped queue. */
+static void zvnet_tx_timeout(struct net_device *net, unsigned int txqueue)
+{
+	zv_warn("sent timeout!");
+	net->stats.tx_errors++;
+	netif_wake_queue(net);
+}
+
+/* ndo_get_stats: statistics are kept directly in net->stats. */
+static struct net_device_stats *zvnet_get_stats(struct net_device *net)
+{
+	return &net->stats;
+}
+
+/* net_device operations table shared by all zvnet interfaces. */
+const struct net_device_ops zvnet_netdev_ops = {
+	.ndo_open = zvnet_open,
+	.ndo_stop = zvnet_close,
+	.ndo_start_xmit = zvnet_xmit,
+	.ndo_tx_timeout = zvnet_tx_timeout,
+	.ndo_get_stats = zvnet_get_stats,
+	.ndo_change_mtu = eth_change_mtu,
+	.ndo_validate_addr = eth_validate_addr,
+};
+
+/* Initialize a freshly allocated net_device: random MAC address, our ndo ops,
+ * watchdog timeout, and the NOARP flag. */
+static void v2xnet_init_netdev(struct net_device *net)
+{
+	u8 node_id [ETH_ALEN];
+
+	random_ether_addr(node_id);
+	memcpy (net->dev_addr, node_id, sizeof node_id);
+
+	net->netdev_ops = &zvnet_netdev_ops;
+	net->watchdog_timeo = WATCHDOG_TIMEO;
+	net->flags |= IFF_NOARP;
+}
+
+/* Hand a received skb to the network stack and update rx statistics.
+ * netif_rx() consumes the skb, so its length must be captured beforehand --
+ * the previous code read skb->len after the call (use-after-free). */
+static void zvnet_skb_return (struct zvnet *dev, struct sk_buff *skb)
+{
+	int status;
+	unsigned int len = skb->len;	/* FIX: snapshot before netif_rx() consumes skb */
+
+	//skb->protocol = eth_type_trans(skb, dev->net);
+
+	status = netif_rx (skb);
+	if (status == NET_RX_SUCCESS) {
+		dev->net->stats.rx_packets++;
+		dev->net->stats.rx_bytes += len;
+	} else {
+		dev->net->stats.rx_errors++;
+		zv_err("netif_rx status %d.", status);
+	}
+}
+
+/* Tasklet body: drain dev->rxq and push each packet into the network stack;
+ * zero-length skbs are dropped and counted as rx errors. */
+static void zvnet_bh (unsigned long param)
+{
+	struct zvnet *dev = (struct zvnet *)param;
+	struct sk_buff *skb;
+
+	while((skb = skb_dequeue(&dev->rxq)) != NULL) {
+		if (skb->len)
+			zvnet_skb_return(dev, skb);
+		else {
+			dev->net->stats.rx_errors++;
+			dev_kfree_skb (skb);
+			zv_err("drop!!!ddrnet_bh skb len == 0.");
+		}
+	}
+}
+
+/* Initialize the zvnet private area of @net: rx queue, rx tasklet, and a
+ * back-pointer to the owning zvnet_device.  Returns the private struct.
+ * NOTE(review): netdev_priv() cannot actually return NULL; the check below is
+ * purely defensive. */
+static struct zvnet *v2xnet_dev_init(struct net_device *net, struct zvnet_device *zvnetdev)
+{
+	struct zvnet *dev = NULL;
+
+	dev = netdev_priv(net);
+	if(!dev) {
+		zv_err("dev is null.\n");
+		return NULL;
+	}
+
+	dev->net = net;
+	dev->bh.func = zvnet_bh;
+	dev->bh.data = (unsigned long) dev;
+
+	skb_queue_head_init (&dev->rxq);
+
+	dev->dev_priv = zvnetdev;
+
+	return dev;
+}
+
+/* Write @len bytes from @buf to the ICP channel.
+ * Returns >= 0 on success, < 0 on failure. */
+static int zvnet_channel_write(struct zvnet_channel *chninfo, void *buf, unsigned int len)
+{
+	T_RpMsg_Msg msg;
+
+	if(NULL == buf) {
+		return -EINVAL;
+	}
+	memset(&msg, 0, sizeof(msg));
+	msg.coreID = chninfo->core_id;
+	msg.chID = chninfo->channel_id;
+	msg.flag |= RPMSG_WRITE_INT; //| RPMSG_WRITE_IRQLOCK;
+	msg.buf = buf;
+	msg.len = len;
+
+	return zvnetWrite(&msg);
+}
+
+/* Read up to @len bytes from the ICP channel into @buf.
+ * Returns > 0 (bytes read) on success; <= 0 when the channel is empty or the
+ * read failed. */
+static int zvnet_channel_read(struct zvnet_channel *chninfo, void *buf, unsigned int len)
+{
+	T_RpMsg_Msg msg;
+	int ret = 0;
+
+	if(NULL == buf) {
+		return -EINVAL;
+	}
+
+	memset(&msg, 0, sizeof(msg));
+	msg.coreID = chninfo->core_id;
+	msg.chID = chninfo->channel_id;
+	msg.buf = buf;
+	msg.len = len;
+
+	ret = zvnetRead(&msg);
+	if (ret <= 0) {
+		zv_err("rpm read err=%d!",ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+/* Drain and discard up to half the channel's buffered data.
+ * Returns the number of bytes dropped (0 if the read failed), or -ENOMEM. */
+static int zvnet_channel_clear(struct zvnet_channel *chninfo)
+{
+	char *tbuf = NULL;
+	unsigned int tlen = chninfo->channel_size/2;
+	int ret = 0;
+
+	tbuf = kzalloc(tlen,GFP_ATOMIC);	/* cast removed: not needed in C */
+	if(!tbuf) {	/* FIX: kzalloc returns NULL on failure, not ERR_PTR -- IS_ERR() never caught it */
+		zv_err("kzalloc fail! %d byte.", tlen);
+		return -ENOMEM;
+	}
+	ret = zvnet_channel_read(chninfo, tbuf, tlen);
+	if(ret < 0) {
+		zv_err("zvnet_channel_read fail!");
+		ret = 0;
+	}
+	kfree(tbuf);
+	zv_err("Drop channel data. %d byte.",ret);
+
+	return ret;
+}
+
+/* Read one zvp_header from the channel; returns the zvnet_channel_read() result. */
+static int zvnet_read_header(struct zvnet_channel *chninfo, struct zvp_header *phzvp)
+{
+	return zvnet_channel_read(chninfo, phzvp, sizeof(struct zvp_header));
+}
+
+/* Allocate an skb of @tlen bytes and fill it from the ICP channel.
+ * Returns the skb, or NULL on bad arguments, allocation failure, or a short
+ * read. */
+static struct sk_buff *zvnet_read_skb(struct zvnet_channel *chninfo, unsigned int tlen, struct zvnet *dev)
+{
+	struct sk_buff *skb;
+
+	if(NULL == chninfo || 0 >= tlen || NULL == dev) {
+		return NULL;
+	}
+	skb = dev_alloc_skb(tlen);
+	if (unlikely(!skb)) {
+		zv_err("netdev_alloc_skb fail,len %d",tlen);
+		return NULL;
+	}
+	skb_put(skb,tlen);
+
+	if(zvnet_channel_read(chninfo, (void *)skb->data, tlen) != tlen) {
+		zv_err("zvnet_channel_read fail.\n");
+		kfree_skb(skb);
+		return NULL;
+	}
+
+	zv_info("%s dev receive packet %d byte.",dev->net->name, tlen);
+
+	skb->dev = dev->net;
+
+	return skb;
+}
+
+/* Read one packet descriptor from the ICP channel and build an sk_buff around
+ * the shared-DDR buffer it describes.  Returns NULL when the channel is empty
+ * or the build fails.  (Only used when USE_ZVNET_PACKET is not defined.) */
+static struct sk_buff *zvnet_direct_read_skb(struct zvnet_channel *chninfo)
+{
+	struct sk_buff *skb;
+#if 0
+	char skb_data[ZVNET_TMP_BUFF_LEN];
+	int ret_len = 0;
+
+	if(NULL == chninfo || NULL == dev) {
+		zv_err("zvnet_channel_read null.\n");
+		return NULL;
+	}
+
+	ret_len = zvnet_channel_read(chninfo, (void *)skb_data, sizeof(skb_data));
+
+	if(ret_len <= 0) {
+		zv_err("zvnet_channel_read fail.\n");
+		return NULL;
+	}
+
+	skb = dev_alloc_skb(ret_len + ZVNET_SKB_PAD);
+	if (unlikely(!skb)) {
+		zv_err("netdev_alloc_skb fail,len %d",ret_len);
+		return NULL;
+	}
+
+	skb_put(skb,ret_len);
+	skb_reserve(skb, ZVNET_SKB_PAD);
+	memcpy(skb->data, &skb_data[0], ret_len);
+#else
+	struct T_zvnet_rpmsg buff = {0};
+	int ret_len = 0;
+	ret_len = zvnet_channel_read(chninfo, (void *)&buff, sizeof(struct T_zvnet_rpmsg));
+
+	if(ret_len <= 0) {
+		zv_err("rpm read err=%d", ret_len);
+		msleep(1000);	/* back off before the caller retries */
+		return NULL;
+	}
+	if(ret_len != sizeof(struct T_zvnet_rpmsg)) {
+		panic("err, ret:%d!!!!!!", ret_len);
+	}
+	skb = skb_build_apbuf(&buff);
+	if (unlikely(!skb)) {
+		zv_err("netdev_alloc_skb fail,len %d",ret_len);
+		return NULL;
+	}
+#endif
+	//skb->dev = dev->net;
+	return skb;
+}
+
+/* Kernel thread: receive packet descriptors from the peer core over ICP
+ * channel 20, rebuild sk_buffs around the shared-DDR buffers, try the fastnat
+ * hook first, and otherwise queue each packet to its device's rxq for the
+ * zvnet_bh tasklet to deliver. */
+static int zvnet_receive_thread(void *argv)
+{
+	//struct zvnet_device *zvnetdev = (struct zvnet_device *)argv;
+	//struct zvnet_channel *chninfo = NULL;
+	struct zvnet *dev = NULL;
+	int index,ret_len,i,num;
+	unsigned long flags;
+	struct sk_buff *skb = NULL;
+	T_RpMsg_Msg msg = { .coreID = CORE_PS0,
+						.chID = 20,
+						.flag = 0};
+	struct T_zvnet_rpmsg buff[ZVNET_XMIT_BUFF_NUM];
+	//struct zvp_header hzvp;
+/*
+	if(IS_ERR(zvnetdev)) {
+		zv_err("The receive thread create fail!");
+		return -EINVAL;
+	}
+	chninfo = &zvnetdev->chn_info;
+	dev = zvnetdev->dev;
+*/
+	while(1) {
+/*
+		if(unlikely(!(zvnetdev->net->flags & IFF_UP))) {
+			msleep(1000);
+			continue;
+		}
+*/
+		//memset(&hzvp, 0, sizeof(hzvp));
+#ifdef USE_ZVNET_PACKET
+		//ret_len = zvnet_channel_read(&g_zvnet_chn_info, (void *)buff, sizeof(struct T_zvnet_rpmsg)*ZVNET_XMIT_BUFF_NUM);
+		msg.buf = (void *)(buff); // destination buffer
+		msg.len = sizeof(struct T_zvnet_rpmsg)*ZVNET_XMIT_BUFF_NUM;// length to read
+		ret_len = zvnetRead(&msg); // read one node from the ring queue
+		zv_info("zvnetRead ret=%d", ret_len);
+		if(ret_len <= 0) {
+			zv_err("rpm read err=%d", ret_len);
+			msleep(1000);
+			continue;
+		}
+		if((ret_len % sizeof(struct T_zvnet_rpmsg)) != 0) {
+			panic("err, ret:%d!!!!!!", ret_len);
+		}
+		num = ret_len / sizeof(struct T_zvnet_rpmsg);
+		for(i = 0; i < num; i++){
+			skb = skb_build_apbuf(&buff[i]);
+			if (unlikely(!skb)) {
+				zv_err("skb_build_apbuf fail,len=%d i=%d",ret_len,i);
+				continue;
+			}
+			if(unlikely(!(skb->dev->flags & IFF_UP))) {
+				if(printk_ratelimit())
+					zv_err("drop!!!%s is down.", skb->dev->name);
+				dev_kfree_skb (skb);
+				continue;
+			}
+			skb->protocol = eth_type_trans(skb, skb->dev);
+			/* fastnat hook consumes the skb when it returns non-zero */
+			if (fast_from_driver && fast_from_driver(skb, skb->dev))
+			{
+				continue;
+			}
+			index = zvnet_get_index_by_netdev(skb->dev);
+			if(index < 0)
+				panic("");
+			dev = zvnet_dev[index].dev;
+			spin_lock_irqsave(&dev->rxq.lock, flags);
+			__skb_queue_tail(&dev->rxq, skb);
+			spin_unlock_irqrestore(&dev->rxq.lock, flags);
+			tasklet_schedule(&dev->bh);
+		}
+#else
+		if(0 != (skb = zvnet_direct_read_skb(&g_zvnet_chn_info))) {
+			//skb_debug_test(skb);
+			if(unlikely(!(skb->dev->flags & IFF_UP))) {
+				zv_err("drop!!!%s is down.", skb->dev->name);
+				dev_kfree_skb (skb);
+				continue;
+			}
+			skb->protocol = eth_type_trans(skb, skb->dev);
+#if 1
+			if (fast_from_driver && fast_from_driver(skb, skb->dev))
+			{
+				continue;
+			}
+#endif
+			index = zvnet_get_index_by_netdev(skb->dev);
+			if(index < 0)
+				panic("");
+			dev = zvnet_dev[index].dev;
+			spin_lock_irqsave(&dev->rxq.lock, flags);
+			__skb_queue_tail(&dev->rxq, skb);
+			spin_unlock_irqrestore(&dev->rxq.lock, flags);
+			tasklet_schedule(&dev->bh);
+		}
+		else {
+			zv_err("zvnet_read_header fail.");
+			msleep(1000);
+		}
+#endif
+	}
+
+	zv_err("The receive thread exit!");
+	return 0;
+}
+
+/* Thin wrapper over zvnetCreateChannel(). */
+static int rpmsgCreateChannel_v2xnet (T_RpMsg_CoreID dstCoreID, T_RpMsg_ChID chID, unsigned int size)
+{
+	return zvnetCreateChannel (dstCoreID, chID, size);
+}
+
+/* Create the ICP channel; an already-existing channel also counts as success.
+ * Returns the rpmsg status code. */
+static int zvnet_createIcpChannel(T_RpMsg_CoreID core_id, T_RpMsg_ChID channel_id, unsigned int channel_size)
+{
+	int retval;
+
+	retval = rpmsgCreateChannel_v2xnet (core_id, channel_id, channel_size);
+	if(retval != RPMSG_SUCCESS && retval != RPMSG_CHANNEL_ALREADY_EXIST)
+		goto out;
+
+	return retval;
+
+out:
+	zv_err("could not create channel.");
+	return retval;
+}
+/*
+static int zvnet_channel_create(struct zvnet_device *zvnetdev)
+{
+ struct task_struct *th = NULL;
+ int retval = 0;
+ struct zvnet_channel *chninfo = NULL;
+
+ if (IS_ERR(zvnetdev)) {
+ return -EINVAL;
+ }
+ chninfo = &(zvnetdev->chn_info);
+ retval = zvnet_createIcpChannel(chninfo->core_id, chninfo->channel_id, chninfo->channel_size);
+ if(retval < 0) {
+ zv_err("Create IcpChannel fail.");
+ return retval;
+ }
+
+ th = kthread_run(zvnet_receive_thread, (void *)zvnetdev, "zvnet-recv%d", chninfo->channel_id);
+ if (IS_ERR(th)) {
+ zv_err("Unable to start receive thread.");
+ return PTR_ERR(th);
+ }
+ chninfo->rcv_thread = th;
+
+ return 0;
+}
+*/
+/* Kernel thread: receive, over ICP channel 29, the skb pointers the peer core
+ * has finished with, unlink them from g_zvnet_skb_queue, and free them.
+ * Runs at SCHED_FIFO priority 37. */
+static int zvnet_release_thread(void * nouse)
+{
+	T_RpMsg_Msg msg = { .coreID = CORE_PS0,
+						.chID = 29,
+						.flag = 0};
+	void *buff[ZVNET_FREE_BUFF_NUM];
+	int i,num,retval;
+	struct sk_buff *skb;
+	struct sched_param param = { .sched_priority = 1 };
+	param.sched_priority = 37;
+	sched_setscheduler(current, SCHED_FIFO, &param);	/* FIX: "&param" had been mangled into "¶m" by an encoding error (would not compile) */
+
+	while(1) {
+		zv_info("g_zvnet_skb_queue.qlen=%d", g_zvnet_skb_queue.qlen);
+#ifdef USE_ZVNET_PACKET
+		msg.buf = (unsigned char *)(buff); // destination buffer
+		msg.len = 4*ZVNET_FREE_BUFF_NUM;// length to read (4 bytes per pointer)
+		retval = zvnetRead(&msg); // read one node from the ring queue
+		zv_info("free read ret=%d", retval);
+		if (retval <= 0) {
+			zv_err("rpm read err=%d", retval);
+			msleep(1000);
+			continue;
+		}
+		if((retval%4) != 0) {
+			panic("err, ret:%d!!!!!!", retval);
+		}
+		num = retval>>2;
+		for(i = 0; i < num; i++){
+			skb = (struct sk_buff *)buff[i];
+			zv_info("free 0x%x", skb);
+			if (skb == NULL || skb->next == NULL || skb->prev == NULL) {
+				panic("rpm read=%d i=%d NULL", retval, i);
+				continue;	/* unreachable after panic; kept for safety if panic is ever relaxed */
+			}
+			skb_unlink(skb, &g_zvnet_skb_queue);
+			if(skb->isToap != 1)
+				panic("");
+			skb->isToap = 0;
+			kfree_skb(skb);
+		}
+#else
+		void *buff;
+		msg.coreID = CORE_PS0;
+		msg.chID = 29;
+		msg.buf = (unsigned char *)(&buff); // destination buffer
+		msg.len = 4;// length to read
+		//msg.flag |= RPMSG_READ_POLL;
+
+		retval = zvnetRead(&msg); // read one node from the ring queue
+		if (retval <= 0) {
+			zv_err("no msg or threand exited");
+			msleep(1000);
+			continue;
+		}
+		if(retval != 4) {
+			panic("err, ret:%d!!!!!!", retval);
+		}
+		zv_info("free 0x%x", buff);
+		skb = (struct sk_buff *)buff;
+		skb_unlink(skb, &g_zvnet_skb_queue);
+		if(skb->isToap != 1)
+			panic("");
+		skb->isToap = 0;
+		kfree_skb(skb);
+#endif
+	}
+	zv_err("The realse thread exit!");
+	return 0;
+}
+#ifdef USE_ZVNET_PACKET
+/* Kernel thread: flush the pending xmit batch whenever kicked via
+ * g_zvnet_xmit_sem, or after g_wrap_timeout ms of inactivity.  The
+ * down_timeout() result is deliberately ignored: a timeout simply means
+ * "flush whatever has accumulated". */
+static int zvnet_xmit_warp_thread(void * nouse)
+{
+	while(1) {
+		down_timeout(&g_zvnet_xmit_sem, msecs_to_jiffies(g_wrap_timeout));
+		zvnet_xmit_packet();
+	}
+	zv_err("The xmit warp thread exit!");
+	return 0;
+}
+
+/* Kernel thread: flush the pending buffer-free batch whenever kicked via
+ * g_zvnet_free_sem, or after g_wrap_timeout ms of inactivity. */
+static int zvnet_free_warp_thread(void * nouse)
+{
+	while(1) {
+		down_timeout(&g_zvnet_free_sem, msecs_to_jiffies(g_wrap_timeout));
+		write_free_apbuf_packet();
+	}
+	zv_err("The free warp thread exit!");
+	return 0;
+}
+#endif
+
+/* Kernel thread: receive conntrack statistics/release records from the peer
+ * core over ICP channel 21.  flag != 0 folds the peer's packet/byte counters
+ * into the device stats and the conntrack accounting; flag == 0 drops the
+ * peer's conntrack reference. */
+static int zvnet_update_thread(void * nouse)
+{
+	T_RpMsg_Msg msg = { .coreID = CORE_PS0,
+						.chID = 21,
+						.flag = 0};
+	int ret_len = 0;
+	struct nf_conn *ct;
+
+	while(1) {
+		struct T_zvnet_rpmsg_ctstat buff = {0};
+		msg.buf = (void *)(&buff); // destination buffer
+		msg.len = sizeof(struct T_zvnet_rpmsg_ctstat);// length to read
+		ret_len = zvnetRead(&msg); // read one node from the ring queue
+
+		if(ret_len <= 0) {
+			zv_err("rpm read err=%d", ret_len);
+			msleep(1000);
+			continue;
+		}
+		if(ret_len != sizeof(struct T_zvnet_rpmsg_ctstat)) {
+			panic("err, ret:%d!!!!!!", ret_len);
+		}
+		ct = (struct nf_conn *)buff.cap_nfct;
+
+		WARN_ON(atomic_read(&ct->ct_general.use) == 0);
+		if(buff.flag){
+			/* in/out are 1-based device indices into zvnet_dev[] */
+			BUG_ON(buff.in <= 0 || buff.out <= 0);
+			if(buff.pkt[0].pkt && buff.pkt[0].len){
+				zv_info("nf_update %x %s %s %d %d", buff.cap_nfct, ct->indev[0]->name, ct->outdev[0]->name, buff.in, buff.out);
+				zvnet_dev[buff.in-1].net->stats.rx_packets += buff.pkt[0].pkt;
+				zvnet_dev[buff.in-1].net->stats.rx_bytes += buff.pkt[0].len;
+				zvnet_dev[buff.out-1].net->stats.tx_packets += buff.pkt[0].pkt;
+				zvnet_dev[buff.out-1].net->stats.tx_bytes += buff.pkt[0].len;
+			}
+			if(buff.pkt[1].pkt && buff.pkt[1].len){
+				/* reply direction: in/out roles are swapped */
+				zv_info("nf_update %x %s %s %d %d", buff.cap_nfct, ct->indev[1]->name, ct->outdev[1]->name, buff.out, buff.in);
+				zvnet_dev[buff.out-1].net->stats.rx_packets += buff.pkt[1].pkt;
+				zvnet_dev[buff.out-1].net->stats.rx_bytes += buff.pkt[1].len;
+				zvnet_dev[buff.in-1].net->stats.tx_packets += buff.pkt[1].pkt;
+				zvnet_dev[buff.in-1].net->stats.tx_bytes += buff.pkt[1].len;
+			}
+			spin_lock_bh(&fast_fw_spinlock);
+			ct->packet_info[IP_CT_DIR_ORIGINAL].bytes += buff.pkt[IP_CT_DIR_ORIGINAL].len;
+			ct->packet_info[IP_CT_DIR_ORIGINAL].packets += buff.pkt[IP_CT_DIR_ORIGINAL].pkt;
+			ct->packet_info[IP_CT_DIR_REPLY].bytes += buff.pkt[IP_CT_DIR_REPLY].len;
+			ct->packet_info[IP_CT_DIR_REPLY].packets += buff.pkt[IP_CT_DIR_REPLY].pkt;
+			spin_unlock_bh(&fast_fw_spinlock);
+			zv_info("nf_update %x %d %d %d %d", buff.cap_nfct, buff.pkt[0].pkt, buff.pkt[0].len, buff.pkt[1].pkt, buff.pkt[1].len);
+		}else{
+			zv_info("nf_put %x", buff.cap_nfct);
+			nf_conntrack_put(buff.cap_nfct);
+		}
+	}
+	zv_err("The update thread exit!");
+	return 0;
+}
+
+/*******************************************************************************
+ * Global function implementations *
+ ******************************************************************************/
+/*
+ * zvnet_init - module entry point.
+ *
+ * Registers DDR_ZVNET_DEV_MAX virtual ethernet devices, creates the
+ * inter-core ICP channels (21: conntrack stats, 20: receive, 29: free)
+ * with their worker kthreads, and maps the AP shared DDR window.
+ *
+ * Fix vs. original: every error path now unwinds the net_devices that were
+ * already allocated/registered (the original returned directly and leaked
+ * them), and a netdev whose register_netdev() fails is freed.
+ * NOTE(review): kthreads started before a later failure are not stopped --
+ * they loop forever without checking kthread_should_stop(); stopping them
+ * safely needs a larger rework of the thread bodies.
+ */
+static int __init zvnet_init(void)
+{
+ int i;
+ int err = -ENOMEM;
+ struct zvnet *dev = NULL;
+ struct net_device *net = NULL;
+ struct zvnet_device *zvnetdev = NULL;
+
+#ifdef USE_ZVNET_PACKET
+ skb_queue_head_init(&g_zvnet_skb_xmit_queue);
+ spin_lock_init(&g_zvnet_free_lock);
+ sema_init(&g_zvnet_free_sem, 0);
+ sema_init(&g_zvnet_xmit_sem, 0);
+#endif
+ skb_queue_head_init(&g_zvnet_skb_queue);
+ g_zvnet_chn_info.core_id = CORE_PS0;
+ g_zvnet_chn_info.channel_id = ICP_CHN_ZVNET1;
+ g_zvnet_chn_info.channel_size = ICP_CHANNEL_SIZE;
+ for (i = 0; i < DDR_ZVNET_DEV_MAX; i++) {
+ zvnetdev = &zvnet_dev[i];
+ memset(zvnetdev, 0, sizeof(struct zvnet_device));
+ net = alloc_etherdev(sizeof(struct zvnet));
+ if (!net) {
+ zv_err("could not allocate device.\n");
+ err = -ENOMEM;
+ goto out_unwind_netdevs;
+ }
+
+ net->needed_headroom += ZVNET_SKB_PAD;//NET_SKB_PAD;
+ sprintf(net->name, "%s%d", ZVNET_IFNAME_PREFIX, i);
+ dev = v2xnet_dev_init(net, zvnetdev);
+ v2xnet_init_netdev(net);
+ if(0 == i){
+ /* only zvnet0 keeps ARP enabled */
+ net->flags = (net->flags & (~IFF_NOARP));
+ }
+ err = register_netdev(net);
+ if (err) {
+ zv_err("register_netdev error:%d :%d\n",err,i);
+ free_netdev(net); /* this one was never registered */
+ goto out_unwind_netdevs;
+ }
+ zvnetdev->dev = dev;
+ zvnetdev->net = net;
+ }
+ {
+ struct task_struct *th = NULL;
+ int retval = 0;
+ retval = zvnet_createIcpChannel(CORE_PS0, 21, 64);
+ if(retval < 0) {
+ zv_err("Create IcpChannel channel_21 fail.");
+ err = retval;
+ goto out_unwind_netdevs;
+ }
+
+ th = kthread_run(zvnet_update_thread, 0, "zvnet-update%d", 21);
+ if (IS_ERR(th)) {
+ zv_err("Unable to start update thread.");
+ err = PTR_ERR(th);
+ goto out_unwind_netdevs;
+ }
+ retval = zvnet_createIcpChannel(CORE_PS0, 20, ICP_CHANNEL_SIZE);
+ if(retval < 0) {
+ zv_err("Create IcpChannel channel_20 fail.");
+ err = retval;
+ goto out_unwind_netdevs;
+ }
+
+ th = kthread_run(zvnet_receive_thread, 0, "zvnet-recv%d", 20);
+ if (IS_ERR(th)) {
+ zv_err("Unable to start receive thread.");
+ err = PTR_ERR(th);
+ goto out_unwind_netdevs;
+ }
+ g_zvnet_chn_info.rcv_thread = th;
+
+ retval = zvnet_createIcpChannel(CORE_PS0, 29, ICP_CHANNEL_SIZE);
+ if(retval < 0) {
+ zv_err("Create IcpChannel channel_29 fail.");
+ err = retval;
+ goto out_unwind_netdevs;
+ }
+
+ th = kthread_run(zvnet_release_thread, 0, "zvnet-free%d", 29);
+ if (IS_ERR(th)) {
+ zv_err("Unable to start release thread.");
+ err = PTR_ERR(th);
+ goto out_unwind_netdevs;
+ }
+#ifdef USE_ZVNET_PACKET
+ th = kthread_run(zvnet_xmit_warp_thread, 0, "zvnet-xmit-wrap");
+ if (IS_ERR(th)) {
+ zv_err("Unable to start xmit_warp thread.");
+ err = PTR_ERR(th);
+ goto out_unwind_netdevs;
+ }
+
+ th = kthread_run(zvnet_free_warp_thread, 0, "zvnet-free-wrap");
+ if (IS_ERR(th)) {
+ zv_err("Unable to start free_warp thread.");
+ err = PTR_ERR(th);
+ goto out_unwind_netdevs;
+ }
+#endif
+ vir_addr_ap = ioremap_cache(DDR_BASE_ADDR_AP, DDR_BASE_LEN_AP);
+ /* NOTE(review): 0x%x with a pointer is only correct on 32-bit;
+ * use %p/%pa if this ever runs on a 64-bit target. */
+ zv_warn("vir_addr_ap vir=0x%x phy=0x%x len=0x%x", vir_addr_ap, DDR_BASE_ADDR_AP, DDR_BASE_LEN_AP);
+ if(vir_addr_ap == NULL)
+ {
+ zv_err("AP mmap failed.\n");
+ err = -ENOMEM;
+ goto out_unwind_netdevs;
+ }
+
+ }
+ zv_dbg("success.\n");
+ return 0;
+
+out_unwind_netdevs:
+ /* Undo every device set up so far. When the allocation loop failed,
+ * i indexes the first slot that was NOT registered; after the loop it
+ * equals DDR_ZVNET_DEV_MAX, so all devices are unwound. */
+ while (--i >= 0) {
+ unregister_netdev(zvnet_dev[i].net);
+ free_netdev(zvnet_dev[i].net);
+ zvnet_dev[i].net = NULL;
+ }
+ return err;
+}
+
+/*
+ * zvnet_exit - module teardown: unregister and free every net_device.
+ *
+ * Fix vs. original: the AP shared-DDR mapping created by ioremap_cache() in
+ * zvnet_init() was never released, leaking the mapping on every module
+ * reload; it is now iounmap()ed. A NULL slot is also skipped so a partially
+ * initialised array does not oops.
+ * NOTE(review): the worker kthreads are not stopped here -- their bodies
+ * never check kthread_should_stop(), so they cannot be joined safely.
+ */
+static void __exit zvnet_exit(void)
+{
+ int i;
+ struct net_device *net;
+
+ for (i = 0; i < DDR_ZVNET_DEV_MAX; i++) {
+ net = zvnet_dev[i].net;
+ if (!net)
+ continue;
+ unregister_netdev(net);
+ free_netdev(net);
+ zvnet_dev[i].net = NULL;
+ }
+ if (vir_addr_ap) {
+ iounmap(vir_addr_ap);
+ vir_addr_ap = NULL;
+ }
+ zv_warn("success.\n");
+}
+
+late_initcall(zvnet_init);
+module_exit(zvnet_exit);
+
+MODULE_AUTHOR("ZXIC");
+MODULE_DESCRIPTION("ZXIC CAP LAN NET DEVICE");
+MODULE_LICENSE("GPL");
+
diff --git a/upstream/linux-5.10/drivers/spi/spi-zx29.c b/upstream/linux-5.10/drivers/spi/spi-zx29.c
new file mode 100755
index 0000000..d570db1
--- /dev/null
+++ b/upstream/linux-5.10/drivers/spi/spi-zx29.c
@@ -0,0 +1,3681 @@
+/*******************************************************************************
+* Copyright (C) 2016-2021, ZTE Corporation.
+*
+* File Name:spi-zx29.c
+* File Mark:
+* Description:
+* Others:
+* Version: 1.0
+* Author: ZTE
+* Date:
+* modify
+********************************************************************************/
+
+/****************************************************************************
+* Include files
+****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/pm_runtime.h>
+#include <linux/semaphore.h>
+//#include <linux/wakelock.h> //qhf
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/dma/zx-dma.h>
+#include <linux/dma-direct.h>
+#include <asm/memory.h>
+#include <linux/debugfs.h>
+#include <linux/spi/spi.h>
+//#include <linux/soc/zte/pm/drv_idle.h>
+
+#include "spi-zx29.h"
+#include "pub_debug_info.h"
+/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck start */
+#include <linux/wait.h>
+#include <linux/suspend.h>
+/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck end */
+
+/* Per-compatible match data (attached via zx29_spi_of_match): selects
+ * whether this SSP controller instance runs as SPI master or slave. */
+struct zx29_ssp_device_of_data {
+ enum zx29_ssp_device_mode mode;
+};
+static const struct of_device_id zx29_spi_of_match[];
+
+/****************************************************************************
+* Local Macros
+****************************************************************************/
+
+#define CONFIG_SPI_DMA_ENGINE
+#define SPI_PSM_CONTROL (0) //(1)//qhf
+
+/*
+ * This macro is used to define some register default values.
+ * reg is masked with mask, the OR:ed with an (again masked)
+ * val shifted sb steps to the left.
+ */
+#define SPI_WRITE_BITS(reg, val, mask, sb) \
+ ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask))))
+
+/*
+ * This macro is also used to define some default values.
+ * It will just shift val by sb steps to the left and mask
+ * the result with mask.
+ */
+#define GEN_MASK_BITS(val, mask, sb) \
+ (((val)<<(sb)) & (mask))
+
+
+#define SPI_GPIO_HIGH 1
+#define SPI_GPIO_LOW 0
+
+#define ZX29_CS_ACTIVE 1 /* normally nCS, active low */
+#define ZX29_CS_INACTIVE 0
+
+#define DRIVE_TX 0
+#define DO_NOT_DRIVE_TX 1
+
+#define DO_NOT_QUEUE_DMA 0
+#define QUEUE_DMA 1
+
+#define RX_TRANSFER BIT(0)
+#define TX_TRANSFER BIT(1)
+
+/* registers */
+#define SPI_VER_REG_OFFSET (0x00)
+#define SPI_COM_CTRL_OFFSET (0x04)
+#define SPI_FMT_CTRL_OFFSET (0x08)
+#define SPI_DR_OFFSET (0x0C)
+#define SPI_FIFO_CTRL_OFFSET (0x10)
+#define SPI_FIFO_SR_OFFSET (0x14)
+#define SPI_INTR_EN_OFFSET (0x18)
+#define SPI_INTR_SR_OFFSET (0x1C)
+#define SPI_TIMING_OFFSET (0x20)
+
+/*
+ * SPI Version Register - SPI_VER_REG
+ */
+#define SPI_VER_REG_MASK_Y (0xFFUL << 16)
+#define SPI_VER_REG_MASK_X (0xFFUL << 24)
+
+/*
+ * SPI Common Control Register - SPI_COM_CTRL
+ */
+#define SPI_COM_CTRL_MASK_LBM (0x1UL << 0)
+#define SPI_COM_CTRL_MASK_SSPE (0x1UL << 1)
+#define SPI_COM_CTRL_MASK_MS (0x1UL << 2)
+#define SPI_COM_CTRL_MASK_SOD (0x1UL << 3)
+#define SPI_COM_CTRL_MASK_SSPE_BACK (0x1UL << 4)
+
+/*
+ * SPI Format Control Register - SPI_FMT_CTRL
+ */
+#define SPI_FMT_CTRL_MASK_FRF (0x3UL << 0)
+#define SPI_FMT_CTRL_MASK_POL (0x1UL << 2)
+#define SPI_FMT_CTRL_MASK_PHA (0x1UL << 3)
+#define SPI_FMT_CTRL_MASK_DSS (0x1FUL << 4)
+
+/*
+ * SPI FIFO Control Register - SPI_FIFO_CTRL
+ */
+#define SPI_FIFO_CTRL_MASK_RX_DMA_EN (0x1UL << 2)
+#define SPI_FIFO_CTRL_MASK_TX_DMA_EN (0x1UL << 3)
+#define SPI_FIFO_CTRL_MASK_RX_FIFO_THRES (0xFUL << 4)
+#define SPI_FIFO_CTRL_MASK_TX_FIFO_THRES (0xFUL << 8)
+/*
+ * SPI FIFO Status Register - SPI_FIFO_SR
+ */
+
+#define SPI_FIFO_SR_MASK_RX_BEYOND_THRES (0x1UL << 0)
+#define SPI_FIFO_SR_MASK_TX_BEYOND_THRES (0x1UL << 1)
+#define SPI_FIFO_SR_MASK_RX_FIFO_FULL (0x1UL << 2)
+#define SPI_FIFO_SR_MASK_TX_FIFO_EMPTY (0x1UL << 3)
+#define SPI_FIFO_SR_MASK_BUSY (0x1UL << 4)
+#define SPI_FIFO_SR_SHIFT_RX_CNT 5
+
+#define SPI_FIFO_SR_MASK_RX_FIFO_CNTR (0x1fUL << SPI_FIFO_SR_SHIFT_RX_CNT)
+#define SPI_FIFO_SR_SHIFT_TX_CNT 10
+#define SPI_FIFO_SR_MASK_TX_FIFO_CNTR (0x1fUL << SPI_FIFO_SR_SHIFT_TX_CNT)
+
+/*
+ * SPI Interrupt Enable Register - SPI_INTR_EN
+ */
+#define SPI_INTR_EN_MASK_RX_OVERRUN_IE (0x1UL << 0)
+#define SPI_INTR_EN_MASK_TX_UNDERRUN_IE (0x1UL << 1)
+#define SPI_INTR_EN_MASK_RX_FULL_IE (0x1UL << 2)
+#define SPI_INTR_EN_MASK_TX_EMPTY_IE (0x1UL << 3)
+#define SPI_INTR_EN_MASK_RX_THRES_IE (0x1UL << 4)
+#define SPI_INTR_EN_MASK_TX_THRES_IE (0x1UL << 5)
+//yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme.
+#define SPI_INTR_EN_MASK_MST_EOT_IE (0x1UL << 6)
+
+/*
+ * SPI Interrupt Status Register OR Interrupt Clear Register - SPI_INTR_SR_SCLR
+ */
+
+#define SPI_INTR_SR_SCLR_MASK_RX_OVERRUN_INTR (0x1UL << 0)
+#define SPI_INTR_SR_SCLR_MASK_TX_UNDERRUN_INTR (0x1UL << 1)
+#define SPI_INTR_SR_SCLR_MASK_RX_FULL_INTR (0x1UL << 2)
+#define SPI_INTR_SR_SCLR_MASK_TX_EMPTY_INTR (0x1UL << 3)
+#define SPI_INTR_SR_SCLR_MASK_RX_THRES_INTR (0x1UL << 4)
+#define SPI_INTR_SR_SCLR_MASK_TX_THRES_INTR (0x1UL << 5)
+
+/*
+ * SPI TIMING Register
+ */
+#define SPI_TIMING_MASK_T_CS_DESEL 0xFUL
+
+/* SPI WCLK Freqency */
+#define SPI_SPICLK_FREQ_26M (26*1000*1000)
+#define SPI_SPICLK_FREQ_104M (104*1000*1000)
+#define SPI_SPICLK_FREQ_156M (156*1000*1000)
+
+#define CLEAR_ALL_INTERRUPTS 0x3FUL
+#define ENABLE_ALL_INTERRUPTS 0x3FUL
+#define ENABLE_INTERRUPTS 0x03UL
+#define DISABLE_ALL_INTERRUPTS 0x0UL
+/*
+ * Message State
+ * we use the spi_message.state (void *) pointer to
+ * hold a single state value, that's why all this
+ * (void *) casting is done here.
+ */
+
+/* Coarse state of a transfer, stored in the spi_message.state pointer. */
+enum zx29_spi_state {
+ STATE_START,
+ STATE_RUNNING,
+ STATE_DONE,
+ STATE_ERROR
+};
+
+/*
+ * SPI State - Whether Enabled or Disabled
+ */
+#define SPI_DISABLED (0)
+#define SPI_ENABLED (1)
+
+/*
+ * SPI DMA State - Whether DMA Enabled or Disabled
+ */
+#define SPI_DMA_DISABLED (0)
+#define SPI_DMA_ENABLED (1)
+
+/*
+ * SPI SOD State - Whether SOD Enabled or Disabled
+ */
+#define SPI_SOD_DISABLED (1)
+#define SPI_SOD_ENABLED (0)
+#define SPI_SLAVE_MODE (1)
+#define SPI_MASTER_MODE (0)
+
+
+/*
+ * SPI TRANSFER DELAY CFG
+ * DELAY TIME ≈ (1 / Buad_Rate) * 8 + (SPI_TIMING_T_CS_DESEL + 1) / Buad_Rate;
+ */
+
+
+/* RX/TX FIFO trigger levels in FIFO entries (value N means N+1 entries);
+ * programmed into SPI_FIFO_CTRL_MASK_*_FIFO_THRES. */
+enum spi_fifo_threshold_level {
+ SPI_FIFO_THRES_1,
+ SPI_FIFO_THRES_2,
+ SPI_FIFO_THRES_3,
+ SPI_FIFO_THRES_4,
+ SPI_FIFO_THRES_5,
+ SPI_FIFO_THRES_6,
+ SPI_FIFO_THRES_7,
+ SPI_FIFO_THRES_8,
+ SPI_FIFO_THRES_9,
+ SPI_FIFO_THRES_10,
+ SPI_FIFO_THRES_11,
+ SPI_FIFO_THRES_12,
+ SPI_FIFO_THRES_13,
+ SPI_FIFO_THRES_14,
+ SPI_FIFO_THRES_15,
+ SPI_FIFO_THRES_16
+
+};
+
+/*
+ * SPI Clock Parameter ranges
+ */
+#define DIV_MIN 0x00
+#define DIV_MAX 0x0F
+
+#define SPI_POLLING_TIMEOUT 1000
+
+/*
+ * The type of reading going on on this chip
+ */
+/* Element width used when draining the RX FIFO (NULL = discard data). */
+enum spi_reading {
+ READING_NULL,
+ READING_U8,
+ READING_U16,
+ READING_U32
+};
+
+/**
+ * The type of writing going on on this chip
+ */
+/* Element width used when filling the TX FIFO (NULL = drive zeros). */
+enum spi_writing {
+ WRITING_NULL,
+ WRITING_U8,
+ WRITING_U16,
+ WRITING_U32
+};
+
+/**
+ * struct vendor_data - vendor-specific config parameters for this SSP
+ * derivative
+ * @fifodepth: depth of the FIFOs (both RX and TX)
+ * @max_bpw: maximum number of bits per word
+ * @loopback: whether the block supports internal loopback mode
+ */
+struct vendor_data {
+ int fifodepth; /* depth of the RX/TX FIFOs, in entries */
+ int max_bpw; /* maximum supported bits per word */
+ bool loopback; /* controller supports internal loopback */
+};
+
+/* Pinctrl/GPIO bookkeeping for one SSP instance: the pinctrl states are
+ * used to switch the pins between the SSP hardware function and plain GPIO
+ * (manual chip-select control, sleep states). */
+struct zx29_ssp_pins
+{
+ struct device *dev;
+
+ struct pinctrl *pctrl;
+ struct pinctrl_state *pfunc; /* pins muxed to the SSP function */
+ struct pinctrl_state *pgpio; /* pins muxed to GPIO */
+ struct pinctrl_state *pcs_gpio_active; /* CS as GPIO, driven active */
+ struct pinctrl_state *pcs_func; /* CS muxed to the SSP function */
+ struct pinctrl_state *pcs_gpio_sleep; /* CS as GPIO, sleep state */
+ int gpio_cs;
+ int gpio_clk;
+ int gpio_tx;
+ int gpio_rx;
+};
+/* One entry per SSP controller instance (up to 4 on this SoC). */
+struct zx29_ssp_pins ssp_pins[4];
+
+/**
+ * struct zx29_spi - private driver state for one SSP controller
+ * @pdev: platform device backing this controller
+ * @vendor: vendor data for the IP block
+ * @phybase: physical address of the SSP register block
+ * @virtbase: ioremapped virtual address of the register block
+ * @pclk: SSP controller working clock
+ * @spi_clk: clock driving the SPI CLK line
+ * @master: SPI framework hookup
+ * @master_info: controller-specific data from machine setup
+ * @pump_transfers: tasklet used in interrupt transfer mode
+ * @cur_msg: pointer to the spi_message currently being processed
+ * @cur_transfer: pointer to the current spi_transfer
+ * @cur_chip: current client chip (assigned from controller_state)
+ * @next_msg_cs_active: the next message in the queue uses the same chip
+ * select as the previous one, so CS was left active
+ * @tx: current position in the TX buffer to be read
+ * @tx_end: end position in the TX buffer
+ * @rx: current position in the RX buffer to be written
+ * @rx_end: end position in the RX buffer
+ * @read: the type of read currently going on
+ * @write: the type of write currently going on
+ * @exp_fifo_level: expected FIFO level
+ * @dma_rx_channel: optional channel for RX DMA
+ * @dma_tx_channel: optional channel for TX DMA
+ * @sgt_rx: scattertable for the RX transfer
+ * @sgt_tx: scattertable for the TX transfer
+ * @dummypage: a dummy page used for driving data on the bus with DMA
+ */
+struct zx29_spi {
+ char name[16];
+ struct platform_device *pdev;
+ struct vendor_data *vendor;
+ resource_size_t phybase;
+ void __iomem *virtbase;
+ unsigned int irq;
+ struct clk *pclk;/* spi controller work clock */
+ struct clk *spi_clk;/* spi clk line clock */
+ u32 clkfreq;
+ struct spi_master *master;
+ struct zx29_spi_controller *master_info;
+ /* Message per-transfer pump */
+ struct tasklet_struct pump_transfers;
+ struct spi_message *cur_msg;
+ struct spi_transfer *cur_transfer;
+ struct chip_data *cur_chip;
+ bool next_msg_cs_active;
+ void *tx; /* current / end positions in the TX and RX buffers */
+ void *tx_end;
+ void *rx;
+ void *rx_end;
+ enum spi_reading read; /* element width for FIFO reads */
+ enum spi_writing write; /* element width for FIFO writes */
+ u32 exp_fifo_level;
+ enum spi_rx_level_trig rx_lev_trig;
+ enum spi_tx_level_trig tx_lev_trig;
+ /* DMA settings */
+#ifdef CONFIG_SPI_DMA_ENGINE
+ struct dma_chan *dma_rx_channel;
+ struct dma_chan *dma_tx_channel;
+ struct sg_table sgt_rx;
+ struct sg_table sgt_tx;
+ char *dummypage;
+ unsigned int dma_running;
+// struct mutex spi_lock;
+#endif
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry * spi_root;
+ struct debugfs_regset32 spi_regset;
+ u32 spi_poll_cnt;
+ u32 spi_dma_cnt;
+#endif
+#if SPI_PSM_CONTROL
+ struct wake_lock psm_lock;
+#endif
+ struct semaphore sema_dma; /* posted by dma_callback() in master mode */
+/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck start */
+ wait_queue_head_t wait; /* slave mode: waited on until trans_done */
+ int trans_done; /* completion flag set by dma_callback() */
+/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck end */
+
+ u8 iface_mode; /* one of the SPI_*_FORMAT values below */
+#define SPI_MOTO_FORMAT 0x00
+#define SPI_TI_FORMAT 0x01
+#define SPI_ISI_FORMAT 0x02
+ enum zx29_ssp_device_mode mode; /* master or slave (from OF match data) */
+ int (*zx29_flush_rxfifo) (struct zx29_spi *zx29spi,void *buf);
+
+};
+
+/**
+ * struct chip_data - runtime SSP state maintained per client chip
+ * @ver_reg: cached value of the version register
+ * @com_ctrl: value programmed into the common control register
+ * @fmt_ctrl: value programmed into the format control register
+ * @fifo_ctrl: value programmed into the FIFO control register
+ * @timing: value programmed into the timing register
+ * @n_bytes: how many bytes (power of 2) needed for the chip's data width
+ * @clk_div: SPI clock divider
+ * @enable_dma: whether to use DMA for this chip's transfers
+ * @enable_trans_gap: insert a bus-idle gap after each word written
+ * @read: the read element width used when doing xfer for this chip
+ * @write: the write element width used when doing xfer for this chip
+ * @cs_control: chip select callback provided by chip
+ * @xfer_type: polling/interrupt/DMA
+ *
+ * This would be set according to the current message being served.
+ */
+struct chip_data {
+ u32 ver_reg;
+ u32 com_ctrl;
+ u32 fmt_ctrl;
+ u32 fifo_ctrl;
+ u32 timing;
+// u32 intr_en;
+ u8 n_bytes; /* bytes per FIFO element for this chip's word size */
+ u8 clk_div;/* spi clk divider */
+ bool enable_dma;
+ bool enable_trans_gap; /* pace TX: wait for bus idle after each word */
+ enum spi_reading read;
+ enum spi_writing write;
+ void (*cs_control) (int dev_id,u32 command);
+ int xfer_type; /* polling / interrupt / DMA */
+};
+//struct semaphore g_SpiTransferSemaphore;
+
+struct zx29_spi *g_zx29_spi[4];
+
+#if SPI_PSM_CONTROL
+static volatile unsigned int spi_active_count = 0;
+
+/* Mark SPI busy for power management: on the first concurrent user, veto
+ * cpuidle via zx_cpuidle_set_busy(), then take the wakelock. Compiled only
+ * when SPI_PSM_CONTROL is enabled (currently 0). */
+static void zx29_spi_set_active(struct wake_lock *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ if(spi_active_count == 0)
+ {
+ zx_cpuidle_set_busy(IDLE_FLAG_SPI);
+ }
+ spi_active_count++;
+
+ local_irq_restore(flags);
+
+ wake_lock(lock);
+}
+
+/* Counterpart of zx29_spi_set_active(): release the wakelock.
+ * NOTE(review): the decrement/cpuidle-release is disabled via #if 0 (marked
+ * "qhf"), so spi_active_count never decreases and the cpuidle veto taken by
+ * zx29_spi_set_active() is never lifted -- presumably intentional for this
+ * platform; confirm before re-enabling. */
+static void zx29_spi_set_idle(struct wake_lock *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+#if 0 //qhf
+ spi_active_count--;
+ if(spi_active_count == 0)
+ {
+ zx_cpuidle_set_free(IDLE_FLAG_SPI);
+ }
+#endif
+ local_irq_restore(flags);
+
+ wake_unlock(lock);
+}
+#endif
+
+static int zx29_do_interrupt_dma_transfer(struct zx29_spi *zx29spi);
+/**
+ * default_cs_control - Dummy chip select function
+ * @command: select/delect the chip
+ *
+ * If no chip select function is provided by client this is used as dummy
+ * chip select
+ */
+
+/*
+ * default_cs_control - fallback chip-select handler
+ * @dev_id: SSP instance index into ssp_pins[]
+ * @command: ZX29_CS_ACTIVE or ZX29_CS_INACTIVE
+ *
+ * Used when the client chip provides no cs_control callback. The CS line
+ * is active-low, hence the logical inversion before driving the GPIO.
+ */
+static void default_cs_control(int dev_id,u32 command)
+{
+ int level = command ? SPI_GPIO_LOW : SPI_GPIO_HIGH;
+
+ gpio_set_value(ssp_pins[dev_id].gpio_cs, level);
+}
+
+/**
+ * flush - flush the FIFO to reach a clean state
+ * SSP driver private data structure
+ */
+/**
+ * flush - drain the RX FIFO until the controller reports not-busy
+ *
+ * Returns the remaining loop budget; 0 means the busy flag never cleared
+ * within ~2 jiffies worth of iterations. The "140a000.ssp" instance uses
+ * the documented RX-count field mask; other instances use a wider 7-bit
+ * count at the same shift.
+ */
+static int flush(struct zx29_spi *zx29spi)
+{
+ unsigned long limit = loops_per_jiffy << 1;
+ uint32_t rx_fifo_cnt_msk = SPI_FIFO_SR_MASK_RX_FIFO_CNTR;
+
+ dev_dbg(&zx29spi->pdev->dev, "flush\n");
+ /* Flushing FIFO by software cannot clear RX DMA Request. */
+ do {
+ if(0 == strcmp(zx29spi->pdev->name,"140a000.ssp")) {
+ while (readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & rx_fifo_cnt_msk)
+ readl((SPI_DR_OFFSET+zx29spi->virtbase));
+ }else {
+ while ((readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase))>>SPI_FIFO_SR_SHIFT_RX_CNT)&0x7F)
+ readl((SPI_DR_OFFSET+zx29spi->virtbase));
+ }
+ } while ((readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY) && limit--);
+
+ zx29spi->exp_fifo_level = 0;
+
+ return limit;
+}
+
+/**
+ * restore_state - Load configuration of current chip
+ * SSP driver private data structure
+ */
+/**
+ * restore_state - program the controller with the current chip's config
+ *
+ * Enables only the RX-overrun/TX-underrun interrupts (ENABLE_INTERRUPTS =
+ * 0x03) and clears stale status, then restores the chip's format, FIFO,
+ * common-control and timing registers. In slave mode the master/slave and
+ * enable bits are forced into com_ctrl before it is written.
+ */
+static void restore_state(struct zx29_spi *zx29spi)
+{
+ struct chip_data *chip = zx29spi->cur_chip;
+
+ /* enable overrun/underrun interrupts only, then clear stale status */
+ writel(ENABLE_INTERRUPTS, (SPI_INTR_EN_OFFSET+zx29spi->virtbase));
+ writel(CLEAR_ALL_INTERRUPTS, (SPI_INTR_SR_OFFSET+zx29spi->virtbase));
+
+ writel(chip->fmt_ctrl, (SPI_FMT_CTRL_OFFSET+zx29spi->virtbase));
+ writel(chip->fifo_ctrl, (SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
+// writel(chip->intr_en, SPI_INTR_EN(zx297520v2spi->virtbase));
+ if(zx29spi->mode == ZX29_SSP_SLAVE_TYPE)
+ chip->com_ctrl |= GEN_MASK_BITS(SPI_SLAVE, SPI_COM_CTRL_MASK_MS, 2)|GEN_MASK_BITS(SPI_ENABLED, SPI_COM_CTRL_MASK_SSPE, 1);
+ writel(chip->com_ctrl, (SPI_COM_CTRL_OFFSET + zx29spi->virtbase));
+ writel(chip->timing, (SPI_TIMING_OFFSET + zx29spi->virtbase));
+}
+
+/*
+ * Default spi Register Values
+ */
+#define DEFAULT_SPI_COM_CTRL ( \
+ GEN_MASK_BITS(LOOPBACK_DISABLED, SPI_COM_CTRL_MASK_LBM, 0) | \
+ GEN_MASK_BITS(SPI_DISABLED, SPI_COM_CTRL_MASK_SSPE, 1) | \
+ GEN_MASK_BITS(SPI_MASTER, SPI_COM_CTRL_MASK_MS, 2) \
+)
+
+/*
+ * Default spi Register Values
+ */
+#define DEFAULT_SPI_SLAVE_COM_CTRL ( \
+ GEN_MASK_BITS(LOOPBACK_DISABLED, SPI_COM_CTRL_MASK_LBM, 0) | \
+ GEN_MASK_BITS(SPI_DISABLED, SPI_COM_CTRL_MASK_SSPE, 1) | \
+ GEN_MASK_BITS(SPI_SLAVE, SPI_COM_CTRL_MASK_MS, 2) \
+)
+
+
+#define DEFAULT_SPI_FMT_CTRL ( \
+ GEN_MASK_BITS(SPI_INTERFACE_MOTOROLA_SPI, SPI_FMT_CTRL_MASK_FRF, 0) | \
+ GEN_MASK_BITS(SPI_CLK_POL_IDLE_LOW, SPI_FMT_CTRL_MASK_POL, 2) | \
+ GEN_MASK_BITS(SPI_CLK_FIRST_EDGE, SPI_FMT_CTRL_MASK_PHA, 3) | \
+ GEN_MASK_BITS(SPI_DATA_BITS_8, SPI_FMT_CTRL_MASK_DSS, 4) \
+)
+
+#define DEFAULT_SPI_FIFO_CTRL ( \
+ GEN_MASK_BITS(SPI_DMA_DISABLED, SPI_FIFO_CTRL_MASK_RX_DMA_EN, 2) | \
+ GEN_MASK_BITS(SPI_DMA_DISABLED, SPI_FIFO_CTRL_MASK_TX_DMA_EN, 3) | \
+ GEN_MASK_BITS(SPI_FIFO_THRES_8, SPI_FIFO_CTRL_MASK_RX_FIFO_THRES, 4) | \
+ GEN_MASK_BITS(SPI_FIFO_THRES_8, SPI_FIFO_CTRL_MASK_TX_FIFO_THRES, 8) \
+)
+
+
+/**
+ * load_ssp_default_config - Load default configuration for SSP
+ * SSP driver private data structure
+ */
+/**
+ * load_spi_default_config - reset the controller to its default register
+ * values (Motorola format, 8-bit words, DMA off, FIFO thresholds at 8),
+ * selecting the master or slave variant of the common control register
+ * according to the instance's OF-configured mode.
+ */
+static void load_spi_default_config(struct zx29_spi *zx29spi)
+{
+ writel(CLEAR_ALL_INTERRUPTS, (SPI_INTR_SR_OFFSET+zx29spi->virtbase));
+ writel(ENABLE_INTERRUPTS, (SPI_INTR_EN_OFFSET+zx29spi->virtbase));
+
+ writel(DEFAULT_SPI_FMT_CTRL, (SPI_FMT_CTRL_OFFSET+zx29spi->virtbase));
+ writel(DEFAULT_SPI_FIFO_CTRL, (SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
+
+ if(zx29spi->mode == ZX29_SSP_MASTER_TYPE) {
+ writel(DEFAULT_SPI_COM_CTRL, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
+ }
+ else {
+ writel(DEFAULT_SPI_SLAVE_COM_CTRL, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
+ }
+}
+
+
+/**
+ * reader - drain up to one FIFO's worth of RX data into zx29spi->rx
+ *
+ * Reads at most the number of elements the RX-count field currently
+ * reports, stopping early when rx reaches rx_end. Element width follows
+ * zx29spi->read / cur_chip->n_bytes. Returns the number of bytes consumed.
+ */
+static unsigned reader(struct zx29_spi *zx29spi)
+{
+ uint32_t fifo_sr = 0,rd_max = 0;
+ unsigned len = 0;
+ uint32_t rx_fifo_cnt_msk = SPI_FIFO_SR_MASK_RX_FIFO_CNTR;
+ /*
+ * The FIFO depth is different between primecell variants, so only
+ * as many elements as the status register reports are read per call
+ * to avoid over-draining on the smaller-FIFO variants.
+ */
+
+ fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase));
+ if(!strcmp(zx29spi->pdev->name,"140a000.ssp")) {
+ rd_max = (fifo_sr & rx_fifo_cnt_msk) >> SPI_FIFO_SR_SHIFT_RX_CNT;
+ }else {
+ /* other instances expose a wider 7-bit RX count */
+ rd_max = (fifo_sr>>SPI_FIFO_SR_SHIFT_RX_CNT)&0x7f;
+ }
+ //read rx fifo to empty first
+ while ((zx29spi->rx < zx29spi->rx_end) && rd_max--) {
+ switch (zx29spi->read) {
+ case READING_NULL:
+ readw((SPI_DR_OFFSET+zx29spi->virtbase));
+ break;
+ case READING_U8:
+ *(u8 *) (zx29spi->rx) =
+ readw((SPI_DR_OFFSET+zx29spi->virtbase)) & 0xFFU;
+ break;
+ case READING_U16:
+ *(u16 *) (zx29spi->rx) =
+ (u16) readw((SPI_DR_OFFSET+zx29spi->virtbase));
+ break;
+ case READING_U32:
+ *(u32 *) (zx29spi->rx) =
+ readl((SPI_DR_OFFSET+zx29spi->virtbase));
+ break;
+ }
+ len += zx29spi->cur_chip->n_bytes;
+ zx29spi->rx += (zx29spi->cur_chip->n_bytes);
+ zx29spi->exp_fifo_level--;
+ }
+ return len;
+
+}
+
+/**
+ * writer - push up to one FIFO's worth of TX data from zx29spi->tx
+ *
+ * Writes at most the free-entry count reported by the TX-count field
+ * (reserving one slot while the bus is busy), stopping early when tx
+ * reaches tx_end. Returns the number of bytes queued.
+ */
+static unsigned writer(struct zx29_spi *zx29spi)
+{
+ uint32_t fifo_sr;
+ uint32_t wr_max;
+ uint32_t tx_fifo_cnt_msk = SPI_FIFO_SR_MASK_TX_FIFO_CNTR;
+ uint32_t tx_fifo_cnt_pos = SPI_FIFO_SR_SHIFT_TX_CNT;
+ unsigned len = 0;
+ /*
+ * The FIFO depth is different between primecell variants, so the TX
+ * FIFO is only filled up to the count the status register reports,
+ * regardless of what the TX FIFO status flag indicates.
+ */
+
+ fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase));
+ if(!strcmp(zx29spi->pdev->name,"140a000.ssp")) {
+ wr_max = (fifo_sr & tx_fifo_cnt_msk) >> tx_fifo_cnt_pos;
+ }else {
+ /* other instances keep a 5-bit TX count at bit 12 */
+ wr_max = (fifo_sr>>12)&0x1f;
+ }
+
+ /* keep one slot in reserve while the shifter is still busy */
+ if ((fifo_sr & SPI_FIFO_SR_MASK_BUSY) && wr_max) {
+ wr_max--;
+ }
+
+ while ((zx29spi->tx < zx29spi->tx_end) && wr_max--) {
+ switch (zx29spi->write) {
+ case WRITING_NULL:
+ writew(0x0, (SPI_DR_OFFSET+zx29spi->virtbase));
+ break;
+ case WRITING_U8:
+ writew(*(u8 *) (zx29spi->tx), (SPI_DR_OFFSET+zx29spi->virtbase));
+ break;
+ case WRITING_U16:
+ writew((*(u16 *) (zx29spi->tx)), (SPI_DR_OFFSET+zx29spi->virtbase));
+ break;
+ case WRITING_U32:
+ writel(*(u32 *) (zx29spi->tx), (SPI_DR_OFFSET+zx29spi->virtbase));
+ break;
+ }
+ len += zx29spi->cur_chip->n_bytes;
+ zx29spi->tx += (zx29spi->cur_chip->n_bytes);
+ zx29spi->exp_fifo_level++;
+ }
+ return len;
+}
+
+
+/**
+ * This will write to TX and read from RX according to the parameters.
+ */
+
+/**
+ * readwriter - one polling step: drain the RX FIFO, then fill the TX FIFO
+ *
+ * Reads/writes are bounded by the RX/TX counts sampled from the FIFO
+ * status register at entry (one TX slot is reserved while the bus is
+ * busy). When enable_trans_gap is set, the function busy-waits for the
+ * bus to go idle after EACH word written (10 ms cap); otherwise it waits
+ * once after the whole burst (100 ms cap). Statement order is significant:
+ * RX must be drained before TX is refilled to avoid RX overrun.
+ * When we exit here the TX FIFO should be full and the RX FIFO empty.
+ */
+static void readwriter(struct zx29_spi *zx29spi)
+{
+ uint32_t fifo_sr;
+ uint32_t rd_max, wr_max;
+ uint32_t rx_fifo_cnt_msk;
+ uint32_t tx_fifo_cnt_msk;
+ uint32_t tx_fifo_cnt_pos;
+ ktime_t k_time_start = 0;
+ ktime_t diff_ns = 0;
+
+ rx_fifo_cnt_msk = SPI_FIFO_SR_MASK_RX_FIFO_CNTR;
+ tx_fifo_cnt_msk = SPI_FIFO_SR_MASK_TX_FIFO_CNTR;
+ tx_fifo_cnt_pos = SPI_FIFO_SR_SHIFT_TX_CNT;
+
+ /*
+ * The FIFO depth is different between primecell variants, so the
+ * transfer amounts are bounded by the counts the status register
+ * reports rather than the FIFO status flags.
+ */
+
+ fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase));
+ if(!strcmp(zx29spi->pdev->name,"140a000.ssp")) {
+ rd_max = (fifo_sr & rx_fifo_cnt_msk) >> SPI_FIFO_SR_SHIFT_RX_CNT;
+ wr_max = (fifo_sr & tx_fifo_cnt_msk) >> tx_fifo_cnt_pos;
+ }else {
+ /* other instances: 7-bit RX count, 5-bit TX count at bit 12 */
+ rd_max = (fifo_sr>>SPI_FIFO_SR_SHIFT_RX_CNT)&0x7f;
+ wr_max = (fifo_sr>>12)&0x1f;
+ }
+
+ /* keep one TX slot in reserve while the shifter is still busy */
+ if ((fifo_sr & SPI_FIFO_SR_MASK_BUSY) && wr_max) {
+ wr_max--;
+ }
+ //read rx fifo to empty first
+ while ((zx29spi->rx < zx29spi->rx_end) && rd_max--) {
+ switch (zx29spi->read) {
+ case READING_NULL:
+ readw((SPI_DR_OFFSET+zx29spi->virtbase));
+ break;
+ case READING_U8:
+ *(u8 *) (zx29spi->rx) =
+ readw((SPI_DR_OFFSET+zx29spi->virtbase)) & 0xFFU;
+ break;
+ case READING_U16:
+ *(u16 *) (zx29spi->rx) =
+ (u16) readw((SPI_DR_OFFSET+zx29spi->virtbase));
+ break;
+ case READING_U32:
+ *(u32 *) (zx29spi->rx) =
+ readl((SPI_DR_OFFSET+zx29spi->virtbase));
+ break;
+ }
+ zx29spi->rx += (zx29spi->cur_chip->n_bytes);
+ zx29spi->exp_fifo_level--;
+ }
+
+ //write
+ while ((zx29spi->tx < zx29spi->tx_end) && wr_max--) {
+ switch (zx29spi->write) {
+ case WRITING_NULL:
+ writew(0x0, (SPI_DR_OFFSET+zx29spi->virtbase));
+ break;
+ case WRITING_U8:
+ writew(*(u8 *) (zx29spi->tx), (SPI_DR_OFFSET+zx29spi->virtbase));
+ break;
+ case WRITING_U16:
+ writew((*(u16 *) (zx29spi->tx)), (SPI_DR_OFFSET+zx29spi->virtbase));
+ break;
+ case WRITING_U32:
+ writel(*(u32 *) (zx29spi->tx), (SPI_DR_OFFSET+zx29spi->virtbase));
+ break;
+ }
+ zx29spi->tx += (zx29spi->cur_chip->n_bytes);
+ zx29spi->exp_fifo_level++;
+
+ /* per-word pacing: wait for bus idle after every element */
+ if(zx29spi->cur_chip->enable_trans_gap) {
+
+ k_time_start = ktime_get();
+ do {
+ diff_ns = ktime_sub(ktime_get(),k_time_start);
+ fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
+ cpu_relax();
+ }
+ while (fifo_sr && diff_ns < 10000000); //10ms
+ if(diff_ns >= 10000000) {
+ fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
+ if(fifo_sr)
+ dev_info(&zx29spi->pdev->dev, "bus busy time_start=%lld diff_ns=%lld \n",k_time_start,diff_ns);
+ }
+ }
+ }
+
+ /* burst pacing: wait once for bus idle after the whole burst */
+ if(!zx29spi->cur_chip->enable_trans_gap) {
+
+ k_time_start = ktime_get();
+ do {
+ diff_ns = ktime_sub(ktime_get(),k_time_start);
+ fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
+ cpu_relax();
+ }while (fifo_sr && diff_ns < 100000000); //100ms
+ if(diff_ns >= 100000000) {
+ fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
+ if(fifo_sr)
+ dev_info(&zx29spi->pdev->dev, "bus busy.. time_start=%lld diff_ns=%lld \n",k_time_start,diff_ns);
+ }
+ }
+ /*
+ * When we exit here the TX FIFO should be full and the RX FIFO
+ * should be empty
+ */
+}
+
+/*
+ * This DMA functionality is only compiled in if we have
+ * access to the generic DMA devices/DMA engine.
+ */
+#ifdef CONFIG_SPI_DMA_ENGINE
+
+/**
+ * zx29_fill_txfifo - drive dummy zero words onto the bus for an RX-only
+ * DMA transfer
+ *
+ * Keeps writing 0x0 into the data register until cur_transfer->len bytes
+ * have been clocked out. Each iteration re-samples the FIFO status and
+ * caps the burst at (free TX slots - pending RX entries - 1 while busy)
+ * so the RX FIFO, which DMA is draining concurrently, cannot overrun.
+ */
+static void zx29_fill_txfifo(struct zx29_spi *zx29spi)
+{
+ uint32_t fifo_sr;
+ int32_t rd_max, wr_max;
+ uint32_t rx_fifo_cnt_msk;
+ uint32_t tx_fifo_cnt_msk;
+ uint32_t tx_fifo_cnt_pos;
+ unsigned cur_transfer_len;
+ rx_fifo_cnt_msk = SPI_FIFO_SR_MASK_RX_FIFO_CNTR;
+ tx_fifo_cnt_msk = SPI_FIFO_SR_MASK_TX_FIFO_CNTR;
+ tx_fifo_cnt_pos = SPI_FIFO_SR_SHIFT_TX_CNT;
+
+ if(!zx29spi) {
+ printk("zx29spi err! \r\n");
+ return;
+ }
+ cur_transfer_len = zx29spi->cur_transfer->len;
+ while (zx29spi->tx < zx29spi->tx_end && cur_transfer_len) {
+ fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase));
+#if 0
+ rd_max = (fifo_sr & rx_fifo_cnt_msk) >> SPI_FIFO_SR_SHIFT_RX_CNT;
+ wr_max = (fifo_sr & tx_fifo_cnt_msk) >> tx_fifo_cnt_pos;
+#else
+ if(!strcmp(zx29spi->pdev->name,"140a000.ssp")) {
+ rd_max = (fifo_sr & rx_fifo_cnt_msk) >> SPI_FIFO_SR_SHIFT_RX_CNT;
+ wr_max = (fifo_sr & tx_fifo_cnt_msk) >> tx_fifo_cnt_pos;
+ }else {
+ /* other instances: 7-bit RX count, 5-bit TX count at bit 12 */
+ rd_max = (fifo_sr>>SPI_FIFO_SR_SHIFT_RX_CNT)&0x7f;
+ wr_max = (fifo_sr>>12)&0x1f;
+ }
+#endif
+ /* reserve a slot while busy; negatives are clamped below */
+ if (fifo_sr & SPI_FIFO_SR_MASK_BUSY) {
+ wr_max--;
+ }
+ wr_max -= rd_max;
+ wr_max = (wr_max > 0) ? wr_max : 0;
+
+ //write
+ while ((zx29spi->tx < zx29spi->tx_end) && wr_max--) {
+ writew(0x0, (SPI_DR_OFFSET+zx29spi->virtbase));
+ zx29spi->tx += (zx29spi->cur_chip->n_bytes);
+ cur_transfer_len -= zx29spi->cur_chip->n_bytes;
+ }
+
+ cpu_relax();
+ }
+}
+
+/*
+ * dma_callback - DMA-engine completion handler: hand control back to the
+ * thread waiting on the transfer.
+ *
+ * Fix vs. original: in slave mode the completion flag is now published
+ * BEFORE the wakeup. The original called wake_up() first and only then set
+ * trans_done = true, which can lose the wakeup: the woken waiter re-checks
+ * trans_done, still sees false, and goes back to sleep with nobody left to
+ * wake it. wake_up() provides the required memory barrier after the store.
+ */
+static void dma_callback(void *data)
+{
+ struct zx29_spi *zx29spi = (struct zx29_spi *)data;
+ /* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck start */
+ if(zx29spi->master->slave == true){
+ zx29spi->trans_done = true;
+ wake_up(&zx29spi->wait);
+ }else{
+ /* master mode blocks on a semaphore instead of a wait queue */
+ up(&zx29spi->sema_dma);
+ }
+ /* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck end */
+}
+
+/*
+static void dma_callback_tx(void *data)
+{
+ struct zx29_spi *zx29spi = (struct zx29_spi *)data;
+ // printk(KERN_INFO "spi:dma transfer complete tx\n");
+ printk("%s",__func__);
+ printk("COM=0x%x,FMT=0x%x,FIFO_CTL=0x%x,FIFO_SR=0x%x\n",readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)),readl((SPI_FMT_CTRL_OFFSET+zx29spi->virtbase)),readl((SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase)),readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)));
+
+ //up(&g_SpiTransferSemaphore);
+}
+*/
+
+/**
+ * configure_dma - configures the channels for the next transfer
+ * SSP driver's private data structure
+ */
+
+static int configure_dma(struct zx29_spi *zx29spi)
+{
+// unsigned int pages;
+// int ret;
+// int rx_sglen, tx_sglen;
+ dma_channel_def rx_conf;
+ dma_channel_def tx_conf;
+ struct dma_chan *rxchan = zx29spi->dma_rx_channel;
+ struct dma_chan *txchan = zx29spi->dma_tx_channel;
+ struct dma_async_tx_descriptor *rxdesc;
+ struct dma_async_tx_descriptor *txdesc;
+ struct spi_transfer *transfer = zx29spi->cur_transfer;
+
+ rx_conf.src_addr = (SPI_DR_OFFSET+zx29spi->phybase);
+ rx_conf.dest_addr = (unsigned int)zx29spi->rx;
+ rx_conf.dma_control.tran_mode = TRAN_PERI_TO_MEM;
+ rx_conf.dma_control.irq_mode = DMA_ALL_IRQ_ENABLE;
+ rx_conf.link_addr = 0;
+
+ tx_conf.src_addr = (unsigned int)zx29spi->tx;
+ tx_conf.dest_addr = (SPI_DR_OFFSET+zx29spi->phybase);
+ tx_conf.dma_control.tran_mode = TRAN_MEM_TO_PERI;
+ tx_conf.dma_control.irq_mode = DMA_ALL_IRQ_ENABLE;
+ tx_conf.link_addr = 0;
+
+
+ /* Check that the channels are available */
+ if (!rxchan || !txchan)
+ return -ENODEV;
+
+ /*
+ * If supplied, the DMA burstsize should equal the FIFO trigger level.
+ * Notice that the DMA engine uses one-to-one mapping. Since we can
+ * not trigger on 2 elements this needs explicit mapping rather than
+ * calculation.
+ */
+
+ switch (zx29spi->rx_lev_trig) {
+ case SPI_RX_1_OR_MORE_ELEM:
+ rx_conf.dma_control.src_burst_len = DMA_BURST_LEN_1;
+ rx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_1;
+ break;
+ case SPI_RX_4_OR_MORE_ELEM:
+ rx_conf.dma_control.src_burst_len = DMA_BURST_LEN_4;
+ rx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_4;
+ break;
+ case SPI_RX_8_OR_MORE_ELEM:
+ rx_conf.dma_control.src_burst_len = DMA_BURST_LEN_8;
+ rx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_8;
+ break;
+ case SPI_RX_16_OR_MORE_ELEM:
+ rx_conf.dma_control.src_burst_len = DMA_BURST_LEN_16;
+ rx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_16;
+ break;
+ case SPI_RX_32_OR_MORE_ELEM:
+ rx_conf.dma_control.src_burst_len = DMA_BURST_LEN_ALL;
+ rx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_ALL;
+ break;
+ default:
+ rx_conf.dma_control.src_burst_len = zx29spi->vendor->fifodepth >> 1;
+ rx_conf.dma_control.dest_burst_len = zx29spi->vendor->fifodepth >> 1;
+ break;
+ }
+
+ switch (zx29spi->tx_lev_trig) {
+ case SPI_TX_1_OR_MORE_EMPTY_LOC:
+ tx_conf.dma_control.src_burst_len = DMA_BURST_LEN_1;
+ tx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_1;
+ break;
+ case SPI_TX_4_OR_MORE_EMPTY_LOC:
+ tx_conf.dma_control.src_burst_len = DMA_BURST_LEN_4;
+ tx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_4;
+ break;
+ case SPI_TX_8_OR_MORE_EMPTY_LOC:
+ tx_conf.dma_control.src_burst_len = DMA_BURST_LEN_8;
+ tx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_8;
+ break;
+ case SPI_TX_16_OR_MORE_EMPTY_LOC:
+ tx_conf.dma_control.src_burst_len = DMA_BURST_LEN_16;
+ tx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_16;
+ break;
+ case SPI_TX_32_OR_MORE_EMPTY_LOC:
+ tx_conf.dma_control.src_burst_len = DMA_BURST_LEN_ALL;
+ tx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_ALL;
+ break;
+ default:
+ tx_conf.dma_control.src_burst_len = zx29spi->vendor->fifodepth >> 1;
+ tx_conf.dma_control.dest_burst_len = zx29spi->vendor->fifodepth >> 1;
+ break;
+ }
+
+ switch (zx29spi->read) {
+ case READING_NULL:
+ /* Use the same as for writing */
+ rx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
+ rx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
+ rx_conf.count = zx29spi->cur_transfer->len;
+ break;
+ case READING_U8:
+ rx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
+ rx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
+ rx_conf.count = zx29spi->cur_transfer->len;
+ break;
+ case READING_U16:
+ rx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_16BIT;
+ rx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_16BIT;
+ rx_conf.count = zx29spi->cur_transfer->len;
+ break;
+ case READING_U32:
+ rx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_32BIT;
+ rx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_32BIT;
+ rx_conf.count = zx29spi->cur_transfer->len;
+ break;
+ }
+
+ switch (zx29spi->write) {
+ case WRITING_NULL:
+ /* Use the same as for reading */
+ tx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
+ tx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
+ tx_conf.count = zx29spi->cur_transfer->len;
+ break;
+ case WRITING_U8:
+ tx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
+ tx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
+ tx_conf.count = zx29spi->cur_transfer->len;
+ break;
+ case WRITING_U16:
+ tx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_16BIT;
+ tx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_16BIT;
+ tx_conf.count = zx29spi->cur_transfer->len;
+ break;
+ case WRITING_U32:
+ tx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_32BIT;
+ tx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_32BIT;
+ tx_conf.count = zx29spi->cur_transfer->len;
+ break;
+ }
+
+ dmaengine_slave_config(rxchan,(struct dma_slave_config*)&rx_conf);
+ dmaengine_slave_config(txchan,(struct dma_slave_config*)&tx_conf);
+
+ /* Submit and fire RX and TX with TX last so we're ready to read! */
+ if (zx29spi->rx) {
+ //printk("%s,tx=%p,rx=%p,len=%d\n",__func__,zx29spi->tx,zx29spi->rx,zx29spi->cur_transfer->len);
+ //printk("tx_conf:sb_len=%d,db_len=%d, sb_size=%d,db_size=%d\n",tx_conf.dma_control.src_burst_len, tx_conf.dma_control.dest_burst_len, tx_conf.dma_control.src_burst_size, tx_conf.dma_control.dest_burst_size);
+ //printk("rx_conf:sb_len=%d,db_len=%d, sb_size=%d,db_size=%d\n",rx_conf.dma_control.src_burst_len, rx_conf.dma_control.dest_burst_len, rx_conf.dma_control.src_burst_size, rx_conf.dma_control.dest_burst_size);
+
+ rxdesc= rxchan->device->device_prep_interleaved_dma(rxchan,NULL,0);
+ txdesc= txchan->device->device_prep_interleaved_dma(txchan,NULL,0);
+ /* Put the callback on the RX transfer only, that should finish last */
+ rxdesc->callback = dma_callback;
+ rxdesc->callback_param = zx29spi;
+ // txdesc->callback = dma_callback_tx;
+ // txdesc->callback_param = zx29spi;
+
+ dmaengine_submit(rxdesc);
+ dma_async_issue_pending(rxchan);
+ if (transfer->tx_dma) {
+ /* SPI RX buffer may overflow in DMA busy situation. */
+ dmaengine_submit(txdesc);
+ dma_async_issue_pending(txchan);
+ zx29spi->dma_running = TX_TRANSFER | RX_TRANSFER;
+ enable_irq(zx29spi->irq); /* detect overflow through interrupt */
+ } else {
+ if(zx29spi->mode == ZX29_SSP_MASTER_TYPE)
+ zx29_fill_txfifo(zx29spi);
+ zx29spi->dma_running = RX_TRANSFER;
+ }
+ }
+ else if (zx29spi->tx){
+ txdesc = txchan->device->device_prep_interleaved_dma(txchan,NULL,0);
+ txdesc->callback = dma_callback;
+ txdesc->callback_param = zx29spi;
+ dmaengine_submit(txdesc);
+ dma_async_issue_pending(txchan);
+ zx29spi->dma_running = TX_TRANSFER;
+ }
+
+ return 0;
+}
+
/*
 * Legacy in-driver channel filter, compiled out: the build uses the extern
 * zx29_dma_filter_fn() provided by the platform DMA driver (declared just
 * below).  NOTE(review): dead code kept for reference only — consider
 * deleting this whole #if 0 block.
 */
#if 0
static bool zx29_dma_filter_fn(struct dma_chan *chan, void *param)
{
	dma_peripheral_id peri_id = (dma_peripheral_id) param;
#if 0
	/* Older variant also matched the DMA controller's device name. */
	if ((chan->chan_id == (unsigned int)peri_id) && \
		(strcmp(dev_name(chan->device->dev), "a1200000.dma") == 0))
		return true;

	chan->private = param;

	return false;
#endif
	/* Accept the channel whose id equals the requested peripheral id. */
	if (chan->chan_id == (unsigned int)peri_id)
		return true;

	chan->private = param;

	return false;

}
#endif
+extern bool zx29_dma_filter_fn(struct dma_chan *chan, void *param);
+static int zx29_dma_probe(struct zx29_spi *zx29spi)
+{
+ dma_cap_mask_t mask;
+
+ /* Try to acquire a generic DMA engine slave channel */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ /*
+ * We need both RX and TX channels to do DMA, else do none
+ * of them.
+ */
+ zx29spi->dma_rx_channel = dma_request_channel(mask,
+ zx29_dma_filter_fn,
+ zx29spi->master_info->dma_rx_param);
+ if (!zx29spi->dma_rx_channel) {
+ dev_dbg(&zx29spi->pdev->dev, "no RX DMA channel!\n");
+ dev_err(&zx29spi->pdev->dev, "no RX DMA channel!,dma_rx_param=:%d\n",zx29spi->master_info->dma_rx_param);
+ goto err_no_rxchan;
+ }
+
+ zx29spi->dma_tx_channel = dma_request_channel(mask,
+ zx29_dma_filter_fn,
+ zx29spi->master_info->dma_tx_param);
+ if (!zx29spi->dma_tx_channel) {
+ dev_dbg(&zx29spi->pdev->dev, "no TX DMA channel!\n");
+ dev_err(&zx29spi->pdev->dev, "no TX DMA channel!\n");
+ goto err_no_txchan;
+ }
+
+ zx29spi->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!zx29spi->dummypage) {
+ dev_dbg(&zx29spi->pdev->dev, "no DMA dummypage!\n");
+ dev_err(&zx29spi->pdev->dev, "no DMA dummypage!\n");
+ goto err_no_dummypage;
+ }
+
+ dev_info(&zx29spi->pdev->dev, "setup for DMA on RX %s, TX %s\n",
+ dma_chan_name(zx29spi->dma_rx_channel),
+ dma_chan_name(zx29spi->dma_tx_channel));
+
+ return 0;
+
+err_no_dummypage:
+ dma_release_channel(zx29spi->dma_tx_channel);
+err_no_txchan:
+ dma_release_channel(zx29spi->dma_rx_channel);
+ zx29spi->dma_rx_channel = NULL;
+err_no_rxchan:
+ dev_err(&zx29spi->pdev->dev,
+ "Failed to work in dma mode, work without dma!\n");
+ dev_dbg(&zx29spi->pdev->dev,
+ "Failed to work in dma mode, work without dma!\n");
+ return -ENODEV;
+}
+
+static void terminate_dma(struct zx29_spi *zx29spi)
+{
+ struct dma_chan *rxchan = zx29spi->dma_rx_channel;
+ struct dma_chan *txchan = zx29spi->dma_tx_channel;
+
+ dmaengine_terminate_all(rxchan);
+ dmaengine_terminate_all(txchan);
+// unmap_free_dma_scatter(zx29spi);
+ zx29spi->dma_running = 0;
+}
+
+static void zx29_dma_remove(struct zx29_spi *zx29spi)
+{
+ if (zx29spi->dma_running)
+ terminate_dma(zx29spi);
+ if (zx29spi->dma_tx_channel)
+ dma_release_channel(zx29spi->dma_tx_channel);
+ if (zx29spi->dma_rx_channel)
+ dma_release_channel(zx29spi->dma_rx_channel);
+ kfree(zx29spi->dummypage);
+}
+
+#endif
+
+static irqreturn_t zx29_spi_irq(int irqno, void *dev_id)
+{
+ struct zx29_spi *zx29spi = dev_id;
+
+ disable_irq_nosync(zx29spi->irq);
+ up(&zx29spi->sema_dma);
+
+ //pr_info("spi_irq %X-%X\n", zx29spi->dma_running, readl((SPI_INTR_SR_OFFSET+zx29spi->virtbase)));
+ return IRQ_HANDLED;
+}
+
/*
 * print_info_data - debug helper: hex-dump a buffer, eight bytes per line.
 * @data: buffer to dump (may be NULL, in which case nothing is printed).
 * @len:  number of bytes to dump.
 *
 * The previous version silently dropped the trailing (len % 8) bytes; they
 * are now printed on a final short line.
 */
static void print_info_data(void *data, int len)
{
	int i = 0;
	unsigned char *p = data;

	if (!p)
		return;

	for (i = 0; i <= (len - 8); i += 8) {
		printk("%02x %02x %02x %02x %02x %02x %02x %02x \r\n",
		       p[i], p[i+1], p[i+2], p[i+3], p[i+4], p[i+5], p[i+6], p[i+7]);
	}
	/* Dump whatever the 8-wide loop left over (0..7 bytes). */
	for (; i < len; i++)
		printk("%02x ", p[i]);
	printk("\n");
}
+
+static int zx29_flush_rxfifo(struct zx29_spi *zx29spi,void *buf)
+{
+ int ret = 0;
+ struct spi_transfer transfer;
+ unsigned char data[64] = {0};
+ uint32_t fifo_sr = 0;
+ uint32_t rd_max = 0;
+ uint32_t rx_fifo_cnt_msk= SPI_FIFO_SR_MASK_RX_FIFO_CNTR;
+
+ transfer.tx_buf = 0;
+ transfer.rx_buf = data;
+ transfer.len = 0;
+
+ fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase));
+ if(!strcmp(zx29spi->pdev->name,"140a000.ssp")) {
+ rd_max = (fifo_sr & rx_fifo_cnt_msk) >> SPI_FIFO_SR_SHIFT_RX_CNT;
+ }else {
+ rd_max = (fifo_sr>>SPI_FIFO_SR_SHIFT_RX_CNT)&0x7f;
+ }
+ while(rd_max--) {
+ *(u8 *) transfer.rx_buf =
+ readw((SPI_DR_OFFSET+zx29spi->virtbase)) & 0xFFU;
+ transfer.rx_buf++;
+ transfer.len++;
+ }
+ memcpy(buf,data,transfer.len);
+ //dev_info(&zx29spi->pdev->dev,"spi_fifo_sr = %d transfer.len=%d \n",fifo_sr,transfer.len);
+ //print_info_data(data,transfer.len);
+ return transfer.len;
+
+}
+
+int get_spi_rx_fifo(struct spi_device *spi,unsigned char *buf)
+{
+
+ struct zx29_spi *zx29spi = spi_master_get_devdata(spi->master);
+ int ret = 0;
+
+ if(!spi || !buf || !zx29spi)
+ return ret;
+ if(!zx29spi->zx29_flush_rxfifo)
+ return ret;
+ return zx29spi->zx29_flush_rxfifo(zx29spi,buf);
+}
+
+
+void set_spi_timing(struct spi_device *spi,unsigned int param)
+{
+ struct zx29_spi *zx29spi = spi_master_get_devdata(spi->master);
+
+ printk("val set before: 0x%x \n",readl((SPI_TIMING_OFFSET+zx29spi->virtbase)));
+ writel(param, (SPI_TIMING_OFFSET+zx29spi->virtbase));
+ printk("val set after: 0x%x \n",readl((SPI_TIMING_OFFSET+zx29spi->virtbase)));
+}
+
+
+void slave_mode_set(struct spi_device *spi,unsigned int mode)
+{
+ unsigned int regval = 0;
+ ktime_t k_time_start = 0;
+ ktime_t diff_ns = 0;
+ struct zx29_spi *zx29spi = spi_master_get_devdata(spi->master);
+
+ regval = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & (~SPI_COM_CTRL_MASK_SSPE);
+ writel(regval, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
+
+ regval = readl((SPI_FMT_CTRL_OFFSET+zx29spi->virtbase)) & (~(SPI_FMT_CTRL_MASK_POL|SPI_FMT_CTRL_MASK_PHA));
+ printk("val set before: 0x%x \n",regval);
+ switch(mode){
+ case 0:
+ break;
+ case 1:
+ regval |= SPI_FMT_CTRL_MASK_PHA;
+ break;
+ case 2:
+ regval |= SPI_FMT_CTRL_MASK_POL;
+ break;
+ case 3:
+ regval |= (SPI_FMT_CTRL_MASK_POL|SPI_FMT_CTRL_MASK_PHA);
+ break;
+ default:
+ break;
+ }
+ writel(regval, (SPI_FMT_CTRL_OFFSET+zx29spi->virtbase));
+
+ writel(readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) | SPI_COM_CTRL_MASK_SSPE, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
+ //while(((readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase))>> 4)&0x1) == 0);
+ k_time_start = ktime_get();
+ do {
+ diff_ns = ktime_sub(ktime_get(),k_time_start);
+ regval = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase))>> 4 & 0x1;
+ cpu_relax();
+ }
+ while (!regval && diff_ns < 100000000); //100ms
+ if(diff_ns >= 100000000)
+ dev_info(&zx29spi->pdev->dev, "wait sspe timeout, slave_mode_set failed! diff_ns= 0x%x \n",diff_ns);
+ else
+ printk("val set after: 0x%x \n",readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)));
+ return;
+}
+
+/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck start */
+#define SSP0_PARA_BASE_ADDR 0x1400030
+#define SSP1_PARA_BASE_ADDR 0x1400048
+#define SSP_MASK_SW_WRST (0x1L << 9)
+#define SSP_MASK_SW_PRST (0x1L << 8)
/*
 * zx29_slave_ctrl_reset - pulse the SSP block's software resets through the
 * controller's parameter registers.
 * @zx29spi: controller instance (selects SSP0 vs SSP1 by platform name).
 *
 * Asserts both reset bits (active-low), waits up to 100ms for the reset to
 * be reflected in COM_CTRL bit 1, then de-asserts them and delays 500us for
 * the block to come back.  Always returns 0, even if ioremap fails or the
 * wait times out (failures are only logged).
 */
static int zx29_slave_ctrl_reset(struct zx29_spi *zx29spi)
{
	void __iomem *addr = NULL;
	ktime_t k_time_start = 0;
	ktime_t diff_ns = 0;
	volatile unsigned int val = 0;

	/* Select this instance's parameter-register block. */
	if(!strcmp(zx29spi->pdev->name,"1410000.ssp")) {
		addr = ioremap(SSP1_PARA_BASE_ADDR, 0x1000);
	}else{
		addr = ioremap(SSP0_PARA_BASE_ADDR, 0x1000);
	}

	if(addr){
		/* Assert both soft resets by clearing the (active-low) bits. */
		val = *(volatile unsigned int *)addr;
		//dev_info(&zx29spi->pdev->dev, "val = 0x%x 0x%x\n",val,(~(SSP_MASK_SW_WRST|SSP_MASK_SW_PRST)));
		*(volatile unsigned int *)addr = val & (~(SSP_MASK_SW_WRST|SSP_MASK_SW_PRST));

		/* Poll (<=100ms) until COM_CTRL bit 1 drops, indicating the
		 * reset has taken effect. */
		k_time_start = ktime_get();
		do {
			diff_ns = ktime_sub(ktime_get(),k_time_start);
			val = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase))>> 1 & 0x1;
			cpu_relax();
		}while(val && diff_ns < 100000000); //100ms

		if(diff_ns >= 100000000)
			dev_info(&zx29spi->pdev->dev, "zx29_slave_assert_ctrl failed!!! \n");
		else
			dev_info(&zx29spi->pdev->dev, "zx29_slave_assert_ctrl success! \n");


		/* De-assert the resets and give the block time to recover. */
		val = *(volatile unsigned int *)addr;
		*(volatile unsigned int *)addr = val|(SSP_MASK_SW_WRST|SSP_MASK_SW_PRST);
		udelay(500);

		iounmap(addr);
	}

	return 0;
}
+
+static int zx29_slave_ctrl_reinit(struct zx29_spi *zx29spi)
+{
+ volatile unsigned int regval;
+ ktime_t k_time_start = 0;
+ ktime_t diff_ns = 0;
+
+ zx29_slave_ctrl_reset(zx29spi);
+
+ /* Disable SPI */
+ regval = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & (~SPI_COM_CTRL_MASK_SSPE);
+ writel(regval, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
+
+ load_spi_default_config(zx29spi);
+ writel(0, (SPI_TIMING_OFFSET + zx29spi->virtbase));
+
+ if(!strcmp(zx29spi->pdev->name,"1410000.ssp")) {
+ regval = readl((SPI_FMT_CTRL_OFFSET+zx29spi->virtbase))&(~(0x1<<12));
+ writel(regval, (SPI_FMT_CTRL_OFFSET+zx29spi->virtbase));
+ dev_info(&zx29spi->pdev->dev," %s set non-camera mode regval:0x%x \n",zx29spi->pdev->name,regval);
+ }
+
+ writel(readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) | SPI_COM_CTRL_MASK_SSPE, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
+ k_time_start = ktime_get();
+ do{
+ diff_ns = ktime_sub(ktime_get(),k_time_start);
+ regval = ((readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase))>> 4)&0x1);
+ cpu_relax();
+ }while((regval == 0) && diff_ns < 100000000);
+
+ if(diff_ns >= 100000000)
+ dev_info(&zx29spi->pdev->dev, "wait sspen timeout!!! \n");
+ else
+ dev_info(&zx29spi->pdev->dev,"ssp enabled \n",regval);
+
+ return 0;
+}
+/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck end */
+
/*
 * zx29_slave_do_interrupt_dma_transfer - run one slave-mode transfer via DMA.
 * @zx29spi: controller instance; cur_transfer/cur_chip must already be set.
 *
 * Sets up the tx/rx cursors from the pre-mapped DMA addresses (using the RX
 * buffer as a dummy TX source when only RX is requested), configures DMA,
 * signals the remote side that the DMA setup is done, then waits — in a
 * freezable, signal-interruptible sleep — for dma_callback() to set
 * trans_done.  On interruption the controller is terminated and re-initialized.
 * After completion it polls the BUSY flag (<=100ms) and checks for an RX
 * FIFO overrun when both directions were running.
 *
 * Return: 0 on success, negative errno on DMA setup failure, interruption,
 * or RX overrun.
 */
static int zx29_slave_do_interrupt_dma_transfer(struct zx29_spi *zx29spi)
{
	struct spi_transfer *transfer = zx29spi->cur_transfer;
	int ret = 0;
	ktime_t k_time_start = 0;
	ktime_t diff_ns = 0;
	unsigned int fifo_sr = 0;
	/* Point the transfer cursors at the DMA-mapped buffers. */
	if((void *)transfer->tx_dma != NULL){
		zx29spi->tx = (void *)transfer->tx_dma;
		zx29spi->tx_end = zx29spi->tx + zx29spi->cur_transfer->len;
	}
	if((void *)transfer->rx_dma != NULL){
		zx29spi->rx = (void *)transfer->rx_dma;
		zx29spi->rx_end = zx29spi->rx + zx29spi->cur_transfer->len;

		/*if tx is null, use rx buffer as a dummy tx buffer.*/
		if((void *)transfer->tx_dma == NULL){
			zx29spi->tx = (void *)transfer->rx_dma;
			zx29spi->tx_end = zx29spi->tx + zx29spi->cur_transfer->len;
		}
	}

	zx29spi->write = zx29spi->tx ? zx29spi->cur_chip->write : WRITING_NULL;
	zx29spi->read = zx29spi->rx ? zx29spi->cur_chip->read : READING_NULL;

	/* If we're using DMA, set up DMA here */
	if (zx29spi->cur_chip->enable_dma) {
		/* Configure DMA transfer */
		/* Clear the completion flag before arming DMA so the wait
		 * below cannot see a stale 'done' from a previous transfer. */
		zx29spi->trans_done = false; //yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck
		ret = configure_dma(zx29spi);
		if (ret) {
			dev_err(&zx29spi->pdev->dev, "configuration of DMA failed, fall back to interrupt mode\n");
			goto err_config_dma;
		}
	}

	if (zx29spi->cur_chip->enable_dma)
	{
		/* Tell the master side our DMA is armed and it may clock data. */
		extern void spi_dev_send_dma_cfg_down(struct spi_device *spi);
		struct spi_device *spi = zx29spi->cur_msg->spi;
		spi_dev_send_dma_cfg_down(spi);
		/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck start */
		//down(&zx29spi->sema_dma);
		/* Freezable wait: returns non-zero if interrupted by a signal,
		 * in which case we abort and re-init the controller. */
		ret = wait_event_freezable(zx29spi->wait, zx29spi->trans_done);
		if(ret){
			terminate_dma(zx29spi);
			disable_irq_nosync(zx29spi->irq);
			zx29spi->dma_running = 0;
			zx29_slave_ctrl_reinit(zx29spi);
			goto err_config_dma;

		}
		/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck end */

		/* Wait (<=100ms) for the bus BUSY flag to clear. */
		k_time_start = ktime_get();
		do {
			diff_ns = ktime_sub(ktime_get(),k_time_start);
			fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
			cpu_relax();
		}
		while (fifo_sr && diff_ns < 100000000); //100ms

		if(diff_ns >= 100000000) {
			fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
			if(fifo_sr)
				dev_info(&zx29spi->pdev->dev, "bus busy... time_start=%lld diff_ns=%lld \n",k_time_start,diff_ns);
		}
		/* Bidirectional transfer: the overflow irq was enabled in
		 * configure_dma(); check for RX overrun, else re-mask it. */
		if (zx29spi->dma_running == (TX_TRANSFER | RX_TRANSFER)) {
			u32 intr_status;
			intr_status = readl((SPI_INTR_SR_OFFSET+zx29spi->virtbase));
			if (intr_status & SPI_INTR_SR_SCLR_MASK_RX_OVERRUN_INTR) {
				terminate_dma(zx29spi);
				dev_err(&zx29spi->cur_msg->spi->dev, "spi rx fifo overflow status = %X!!\n", intr_status);
				ret = -EIO;
			} else
				disable_irq_nosync(zx29spi->irq);
		}
		zx29spi->dma_running = 0;
	}

err_config_dma:
	if(ret)
	{
		dev_err(&zx29spi->pdev->dev, "down_interruptible, ret=%d\n",ret);
	}
	return ret;
}
+
+
+static int zx29_do_interrupt_dma_transfer(struct zx29_spi *zx29spi)
+{
+ u32 irqflags = ENABLE_ALL_INTERRUPTS;
+ struct spi_transfer *transfer = zx29spi->cur_transfer;
+ int ret = 0;
+ static int sc_debug_info_record_cnt[4] ={0};
+ ktime_t k_time_start = 0;
+ ktime_t diff_ns = 0;
+ unsigned int fifo_sr = 0;
+
+ if((void *)transfer->tx_dma != NULL){
+ zx29spi->tx = (void *)transfer->tx_dma;
+ zx29spi->tx_end = zx29spi->tx + zx29spi->cur_transfer->len;
+ }
+ if((void *)transfer->rx_dma != NULL){
+ zx29spi->rx = (void *)transfer->rx_dma;
+ zx29spi->rx_end = zx29spi->rx + zx29spi->cur_transfer->len;
+
+ /*if tx is null, use rx buffer as a dummy tx buffer.*/
+ if((void *)transfer->tx_dma == NULL){
+ zx29spi->tx = (void *)transfer->rx_dma;
+ zx29spi->tx_end = zx29spi->tx + zx29spi->cur_transfer->len;
+ }
+ }
+
+ zx29spi->write = zx29spi->tx ? zx29spi->cur_chip->write : WRITING_NULL;
+ zx29spi->read = zx29spi->rx ? zx29spi->cur_chip->read : READING_NULL;
+ //printk("zx29spi->cur_chip->enable_dma= 0x%x transfer->tx_dma=0x%x transfer->rx_dma=0x%x\n",zx29spi->cur_chip->enable_dma,transfer->tx_dma,transfer->rx_dma);
+ /* If we're using DMA, set up DMA here */
+ if (zx29spi->cur_chip->enable_dma) {
+ /* Configure DMA transfer */
+ ret = configure_dma(zx29spi);
+ if (ret) {
+ dev_err(&zx29spi->pdev->dev, "configuration of DMA failed, fall back to interrupt mode\n");
+ goto err_config_dma;
+ }
+ /* Disable interrupts in DMA mode, IRQ from DMA controller */
+ irqflags = DISABLE_ALL_INTERRUPTS;
+ }
+
+ /* config interrupts */
+ /* writel(irqflags, (SPI_INTR_EN_OFFSET+zx29spi->virtbase)); //spi interrupt mode is not supported. */
+
+ /* Enable SSP, turn on interrupts */
+// writel(readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) | SPI_COM_CTRL_MASK_SSPE, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
+
+ if (zx29spi->cur_chip->enable_dma)
+ {
+ ret = down_timeout(&zx29spi->sema_dma, msecs_to_jiffies(1500));
+ //printk("COM=0x%x,FMT=0x%x,FIFO_CTL=0x%x,FIFO_SR=0x%x\n",readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)),readl((SPI_FMT_CTRL_OFFSET+zx29spi->virtbase)),readl((SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase)),readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)));
+ if (ret < 0) {
+ panic("spi transfer timeout,times(%d)\n",sc_debug_info_record_cnt[zx29spi->pdev->id]);
+ if(sc_debug_info_record_cnt[zx29spi->pdev->id] < 5) {
+ sc_debug_info_record(MODULE_ID_CAP_SPI, "%s transfer timeout:0x%x 0x%x 0x%x \n",zx29spi->pdev->name,readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)),
+ readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)),readl((SPI_INTR_SR_OFFSET+zx29spi->virtbase)));
+ }
+ sc_debug_info_record_cnt[zx29spi->pdev->id]++;
+ }
+
+ k_time_start = ktime_get();
+ do {
+ diff_ns = ktime_sub(ktime_get(),k_time_start);
+ fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
+ cpu_relax();
+ }
+ while (fifo_sr && diff_ns < 100000000); //100ms
+
+ if(diff_ns >= 100000000) {
+ fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
+ if(fifo_sr)
+ dev_info(&zx29spi->pdev->dev, "bus busy.... time_start=%lld diff_ns=%lld \n",k_time_start,diff_ns);
+ }
+
+ if (zx29spi->dma_running == (TX_TRANSFER | RX_TRANSFER)) {
+ u32 intr_status;
+ intr_status = readl((SPI_INTR_SR_OFFSET+zx29spi->virtbase));
+ if (intr_status & SPI_INTR_SR_SCLR_MASK_RX_OVERRUN_INTR) {
+ terminate_dma(zx29spi);
+ dev_err(&zx29spi->cur_msg->spi->dev, "spi rx fifo overflow status = %X!!\n", intr_status);
+ ret = -EIO;
+ } else
+ disable_irq_nosync(zx29spi->irq);
+ }
+ zx29spi->dma_running = 0;
+ }
+
+err_config_dma:
+ if(ret)
+ {
+ dev_err(&zx29spi->pdev->dev, "down_interruptible, ret=%d\n",ret);
+ }
+// writel(readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & ~ SPI_COM_CTRL_MASK_SSPE, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
+ return ret;
+}
+
+
+static int zx29_do_polling_transfer(struct zx29_spi *zx29spi)
+{
+ struct spi_transfer *transfer = zx29spi->cur_transfer;
+ int ret = 0;
+ unsigned int fifo_sr = 0;
+ ktime_t k_time_start = 0;
+ ktime_t diff_ns = 0;
+
+ dev_dbg(&zx29spi->pdev->dev, "polling transfer ongoing ...\n");
+
+ if (!zx29spi->tx && !zx29spi->rx) {
+ return ret;
+ }
+
+ k_time_start = ktime_get();
+ /*read and write*/
+ while ((zx29spi->tx < zx29spi->tx_end) || (zx29spi->rx < zx29spi->rx_end)) {
+ readwriter(zx29spi);
+ diff_ns = ktime_sub(ktime_get(),k_time_start);
+ if(diff_ns >= 1000000000) /*1s*/{
+ dev_info(&zx29spi->pdev->dev, "do_polling time out,diff_ns=%lld len=0x%x tx=0x%x tx_end=0x%x rx=0x%x rx_end=0x%x \n",
+ diff_ns,zx29spi->cur_transfer->len,zx29spi->tx,zx29spi->tx_end,zx29spi->rx,zx29spi->rx_end);
+ ret = -EIO;
+ break;
+ }
+ }
+
+ k_time_start = ktime_get();
+ do {
+ diff_ns = ktime_sub(ktime_get(),k_time_start);
+ fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
+ cpu_relax();
+
+ }while (fifo_sr && diff_ns < 100000000); //100ms
+
+ if(diff_ns >= 100000000) {
+ fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
+ if(fifo_sr) {
+ dev_info(&zx29spi->pdev->dev, "bus busy.. time_start=%lld diff_ns=%lld \n",k_time_start,diff_ns);
+ ret = -EIO;
+ }
+ }
+ return ret;
+}
+
/*
 * zx29_spi_map_mssg - DMA-map every transfer buffer in @msg.
 * @zx29spi: controller instance.
 * @msg:     message whose transfers should be mapped.
 *
 * Skipped entirely (returns 0) when the message is pre-mapped, the device
 * opted out of DMA, or the controller has DMA disabled.  Transfers that
 * already carry a tx_dma/rx_dma handle are left alone.  On an RX mapping
 * failure the already-mapped TX half of the same transfer is rolled back.
 * NOTE(review): failures accumulate via `ret |= -ENOMEM` — harmless while
 * only one errno is used, but fragile if another code is ever OR-ed in.
 *
 * Return: 0 on success, -ENOMEM (possibly OR-accumulated) on mapping failure.
 */
static int zx29_spi_map_mssg(struct zx29_spi *zx29spi,
			struct spi_message *msg)
{
	struct device *dev;
	struct spi_transfer *transfer;
	int ret = 0;
	/* Per-controller caps so sc_debug_info_record() fires at most 5x each. */
	static int sc_debug_info_record_tx_cnt[4] ={0};
	static int sc_debug_info_record_rx_cnt[4] ={0};

	if(!zx29spi || !msg)
		return -EFAULT;

	/* Nothing to do when DMA mapping is not ours to perform. */
	if (msg->is_dma_mapped || !msg->spi->dma_used || !zx29spi->master_info->enable_dma) {
		return 0;
	}
	dev = &zx29spi->pdev->dev;
	/* Map until end or first fail */
	list_for_each_entry(transfer, &msg->transfers, transfer_list) {

		if (/*transfer->len <= zx29spi->vendor->fifodepth ||*/ transfer->tx_dma || transfer->rx_dma )
			continue;

		if (transfer->tx_buf != NULL) {
			transfer->tx_dma = dma_map_single(dev,(void *)transfer->tx_buf, transfer->len, DMA_TO_DEVICE);
			if (dma_mapping_error(dev, transfer->tx_dma)) {
				dev_err(dev, "dma_map_single spi Tx failed,times(%d)\n",sc_debug_info_record_tx_cnt[zx29spi->pdev->id]);
				if(sc_debug_info_record_tx_cnt[zx29spi->pdev->id] < 5)
					sc_debug_info_record(MODULE_ID_CAP_SPI, "%s tx_dma_map failed \n",zx29spi->pdev->name);
				transfer->tx_dma = 0;
				ret |= -ENOMEM;
				sc_debug_info_record_tx_cnt[zx29spi->pdev->id]++;
			}
		}

		if (transfer->rx_buf != NULL) {
			transfer->rx_dma = dma_map_single(dev, transfer->rx_buf, transfer->len, DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, transfer->rx_dma)) {
				dev_err(dev, "dma_map_single spi Rx failed,times(%d)\n",sc_debug_info_record_rx_cnt[zx29spi->pdev->id]);
				if(sc_debug_info_record_rx_cnt[zx29spi->pdev->id] < 5)
					sc_debug_info_record(MODULE_ID_CAP_SPI, "%s rx_dma_map failed \n",zx29spi->pdev->name);
				transfer->rx_dma = 0;
				ret |= -ENOMEM;
				sc_debug_info_record_rx_cnt[zx29spi->pdev->id]++;
			}

			/* RX mapping failed: undo this transfer's TX mapping
			 * so the pair stays consistent. */
			if (!transfer->rx_dma && transfer->tx_dma && transfer->tx_buf) {
				dma_unmap_single(dev, transfer->tx_dma, transfer->len, DMA_TO_DEVICE);
				transfer->tx_dma = 0;
			}
		}
	}


	return ret;
}
+
+static void zx29_spi_unmap_mssg(struct zx29_spi *zx29spi,
+ struct spi_message *msg)
+{
+ struct device *dev = &zx29spi->pdev->dev;
+ struct spi_transfer *transfer;
+
+ if (msg->is_dma_mapped || !msg->spi->dma_used || !zx29spi->master_info->enable_dma)
+ return;
+
+ list_for_each_entry(transfer, &msg->transfers, transfer_list) {
+
+ if ( (!transfer->tx_buf && transfer->tx_dma) || (! transfer->rx_buf && transfer->rx_dma) )
+ continue;
+
+ if (transfer->rx_buf != NULL && transfer->rx_dma)
+ dma_unmap_single(dev, transfer->rx_dma, transfer->len, DMA_FROM_DEVICE);
+
+ if (transfer->tx_buf != NULL && transfer->tx_dma)
+ dma_unmap_single(dev, transfer->tx_dma, transfer->len, DMA_TO_DEVICE);
+ }
+}
+
/*
 * zx29_slave_transfer_one_message - spi_master transfer_one_message hook for
 * slave mode.
 * @master: the SPI master whose devdata is our zx29_spi.
 * @msg:    message to execute.
 *
 * DMA-maps the message, then for each transfer either busy-polls the FIFO
 * (writer()/reader()) or runs the slave DMA path, accumulating
 * msg->actual_length.  Always finalizes the message, even on error.
 *
 * Return: 0 on success, negative errno from mapping or a transfer.
 */
static int zx29_slave_transfer_one_message(struct spi_master *master,
				struct spi_message *msg)
{
	struct zx29_spi *zx29spi = spi_master_get_devdata(master);
	struct spi_device *spi = msg->spi;
	struct spi_transfer *transfer;
	unsigned cs_change = 1;		/* NOTE(review): unused in slave mode */
	const int nsecs = 100;		/* NOTE(review): unused in slave mode */
	int ret = 0;

	zx29spi->cur_msg = msg;

	/* Setup the SPI using the per chip configuration */
	zx29spi->cur_chip = spi_get_ctldata(msg->spi);
	ret = zx29_spi_map_mssg(zx29spi, msg);
	/* continue with polling mode */
	if(ret){
		dev_info(&zx29spi->pdev->dev, "ret = %d\n",ret);
		goto out;
	}
	//restore_state(zx29spi);
	list_for_each_entry(transfer, &msg->transfers, transfer_list) {

		/* Set the tx/rx cursors for this transfer (NULL when absent). */
		zx29spi->cur_transfer = transfer;
		if((void *)transfer->tx_buf != NULL){
			zx29spi->tx = (void *)transfer->tx_buf;
			zx29spi->tx_end = zx29spi->tx + zx29spi->cur_transfer->len;
		}
		else
			zx29spi->tx = zx29spi->tx_end = NULL;

		if((void *)transfer->rx_buf != NULL){
			zx29spi->rx = (void *)transfer->rx_buf;
			zx29spi->rx_end = zx29spi->rx + zx29spi->cur_transfer->len;
#if 0
			/*if tx is null, use rx buffer as a dummy tx buffer.*/
			if((void *)transfer->tx_buf == NULL){
				zx29spi->tx = (void *)transfer->rx_buf;
				zx29spi->tx_end = zx29spi->tx + zx29spi->cur_transfer->len;
			}
#endif
		}
		else
			zx29spi->rx = zx29spi->rx_end = NULL;

		zx29spi->write = zx29spi->tx ? zx29spi->cur_chip->write : WRITING_NULL;
		zx29spi->read = zx29spi->rx ? zx29spi->cur_chip->read : READING_NULL;

		/* Drop stale RX FIFO contents before a DMA read. */
		if (/*transfer->rx_buf || */transfer->rx_dma)
			flush(zx29spi);

		writel(CLEAR_ALL_INTERRUPTS, (SPI_INTR_SR_OFFSET+zx29spi->virtbase));

		/* Polling path: either requested by the chip config or forced
		 * because neither buffer was DMA-mapped. */
		if (zx29spi->cur_chip->xfer_type == POLLING_TRANSFER || (!transfer->tx_dma && !transfer->rx_dma)) {
			/* writer()/reader() return the byte count actually moved. */
			if (zx29spi->tx < zx29spi->tx_end)
				zx29spi->cur_transfer->len = writer(zx29spi);
			if(zx29spi->rx < zx29spi->rx_end)
				zx29spi->cur_transfer->len = reader(zx29spi);
#if defined(CONFIG_DEBUG_FS)
			zx29spi->spi_poll_cnt ++;
#endif
		} else {
			struct chip_data *chip = zx29spi->cur_chip;
			/* Enable the DMA request lines needed for this direction. */
			if (transfer->rx_buf || transfer->rx_dma) {
				writel((chip->fifo_ctrl | (SPI_FIFO_CTRL_MASK_RX_DMA_EN | SPI_FIFO_CTRL_MASK_TX_DMA_EN)),
					(SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
			} else {
				writel((chip->fifo_ctrl | SPI_FIFO_CTRL_MASK_TX_DMA_EN), (SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
			}
			ret = zx29_slave_do_interrupt_dma_transfer(zx29spi);
#if defined(CONFIG_DEBUG_FS)
			zx29spi->spi_dma_cnt ++;
#endif

			/* clear TX/RX DMA Enable */
			writel(chip->fifo_ctrl, (SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
		}

		if (ret) {
			pr_info("ssp:transfer error,transfer=%p\n", transfer);
			break;
		}

		/* Update total byte transferred */
		msg->actual_length += zx29spi->cur_transfer->len;

		if (transfer->delay_usecs)
			udelay(transfer->delay_usecs);
	}
out:
	zx29_spi_unmap_mssg(zx29spi, msg);
	msg->status = ret;
	spi_finalize_current_message(master);

#if SPI_PSM_CONTROL
	zx29_spi_set_idle(&zx29spi->psm_lock);
#endif

	return ret;
}
+
+static int zx29_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct zx29_spi *zx29spi = spi_master_get_devdata(master);
+ struct spi_device *spi = msg->spi;
+ struct spi_transfer *transfer;
+ unsigned cs_change = 1;
+ const int nsecs = 100;
+ int ret = 0;
+ ktime_t k_time_start = 0;
+ ktime_t diff_ns = 0;
+ unsigned int reg_val = 0;
+
+ pm_stay_awake(&zx29spi->pdev->dev);
+ //printk(KERN_INFO "ssp:in function %s \n", __FUNCTION__);
+#if SPI_PSM_CONTROL
+ zx29_spi_set_active(&zx29spi->psm_lock);
+#endif
+ //mutex_lock(&zx29spi->spi_lock);
+ //printk(KERN_INFO "ssp:lock \n");
+ /* Initial message state */
+ zx29spi->cur_msg = msg;
+ /* Setup the SPI using the per chip configuration */
+ zx29spi->cur_chip = spi_get_ctldata(msg->spi);
+
+ if ((clk_get_rate(zx29spi->spi_clk) / 2) != spi->max_speed_hz) {
+ clk_set_rate(zx29spi->spi_clk, spi->max_speed_hz * 2);
+ }
+
+ restore_state(zx29spi);
+
+ ret = zx29_spi_map_mssg(zx29spi, msg);
+ /* continue with polling mode */
+ if(ret){
+ dev_info(&zx29spi->pdev->dev, "ret = %d\n",ret);
+ goto out;
+ }
+
+
+ //while (readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & SPI_COM_CTRL_MASK_SSPE_BACK);
+ k_time_start = ktime_get();
+ do {
+ diff_ns = ktime_sub(ktime_get(),k_time_start);
+ reg_val = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & SPI_COM_CTRL_MASK_SSPE_BACK;
+ cpu_relax();
+ }
+ while (reg_val && diff_ns < 100000000); //100ms
+
+ if(diff_ns >= 100000000) {
+ reg_val = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & SPI_COM_CTRL_MASK_SSPE_BACK;
+ if(reg_val) {
+ dev_info(&zx29spi->pdev->dev, "wait sspe back time_out diff_ns=%lld \n",diff_ns);
+ goto out;
+ }
+ }
+
+ writel(readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) | SPI_COM_CTRL_MASK_SSPE, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
+
+ list_for_each_entry(transfer, &msg->transfers, transfer_list) {
+
+ zx29spi->cur_transfer = transfer;
+ //if (transfer->bits_per_word || transfer->speed_hz)
+ // dev_warn(&msg->spi->dev, "ignore bits & speed setting in transfer.");
+
+ if((void *)transfer->tx_buf != NULL){
+ zx29spi->tx = (void *)transfer->tx_buf;
+ zx29spi->tx_end = zx29spi->tx + zx29spi->cur_transfer->len;
+ }
+ else
+ zx29spi->tx = zx29spi->tx_end = NULL;
+
+ if((void *)transfer->rx_buf != NULL){
+ zx29spi->rx = (void *)transfer->rx_buf;
+ zx29spi->rx_end = zx29spi->rx + zx29spi->cur_transfer->len;
+
+ /*if tx is null, use rx buffer as a dummy tx buffer.*/
+ if((void *)transfer->tx_buf == NULL){
+ zx29spi->tx = (void *)transfer->rx_buf;
+ zx29spi->tx_end = zx29spi->tx + zx29spi->cur_transfer->len;
+ }
+ }
+ else
+ zx29spi->rx = zx29spi->rx_end = NULL;
+
+ zx29spi->write = zx29spi->tx ? zx29spi->cur_chip->write : WRITING_NULL;
+ zx29spi->read = zx29spi->rx ? zx29spi->cur_chip->read : READING_NULL;
+
+ if (transfer->rx_buf || transfer->rx_dma)
+ flush(zx29spi);
+
+ writel(CLEAR_ALL_INTERRUPTS, (SPI_INTR_SR_OFFSET+zx29spi->virtbase));
+ if (cs_change) {
+ zx29spi->cur_chip->cs_control(zx29spi->pdev->id,ZX29_CS_ACTIVE);
+ }
+
+ cs_change = transfer->cs_change;
+
+ if (zx29spi->cur_chip->xfer_type == POLLING_TRANSFER || (!transfer->tx_dma && !transfer->rx_dma)) {
+
+ ret = zx29_do_polling_transfer(zx29spi);
+ #if defined(CONFIG_DEBUG_FS)
+ zx29spi->spi_poll_cnt ++;
+ #endif
+ } else {
+ struct chip_data *chip = zx29spi->cur_chip;
+
+ if (transfer->rx_buf || transfer->rx_dma) {
+ writel((chip->fifo_ctrl | (SPI_FIFO_CTRL_MASK_RX_DMA_EN | SPI_FIFO_CTRL_MASK_TX_DMA_EN)),
+ (SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
+ } else {
+ writel((chip->fifo_ctrl | SPI_FIFO_CTRL_MASK_TX_DMA_EN), (SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
+ }
+
+ ret = zx29_do_interrupt_dma_transfer(zx29spi);
+ #if defined(CONFIG_DEBUG_FS)
+ zx29spi->spi_dma_cnt ++;
+ #endif
+
+ /* clear TX/RX DMA Enable */
+ writel(chip->fifo_ctrl, (SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
+ }
+
+ if (ret) {
+ pr_info("ssp:transfer error,transfer=%p\n", transfer);
+ break;
+ }
+
+ /* Update total byte transferred */
+ msg->actual_length += zx29spi->cur_transfer->len;
+
+ if (transfer->delay_usecs)
+ udelay(transfer->delay_usecs);
+
+ if (cs_change) {
+ zx29spi->cur_chip->cs_control(zx29spi->pdev->id,ZX29_CS_INACTIVE);
+ ndelay(nsecs);
+ }
+ }
+ if (ret || !cs_change) {
+ zx29spi->cur_chip->cs_control(zx29spi->pdev->id,ZX29_CS_INACTIVE);
+ }
+ //while (~ readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & SPI_COM_CTRL_MASK_SSPE_BACK);
+ k_time_start = ktime_get();
+ do {
+ diff_ns = ktime_sub(ktime_get(),k_time_start);
+ reg_val = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & SPI_COM_CTRL_MASK_SSPE_BACK;
+ cpu_relax();
+ }
+ while (!reg_val && diff_ns < 100000000); //100ms
+
+ if(diff_ns >= 100000000) {
+ reg_val = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & SPI_COM_CTRL_MASK_SSPE_BACK;
+ if(!reg_val) {
+ dev_info(&zx29spi->pdev->dev, "wait sspe back time_out diff_ns=%lld \n",diff_ns);
+ goto out;
+ }
+ }
+ writel(readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & ~ SPI_COM_CTRL_MASK_SSPE, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
+
+out:
+ zx29_spi_unmap_mssg(zx29spi, msg);
+ //mutex_unlock(&zx29spi->spi_lock);
+ //printk(KERN_INFO "ssp:unlock \n");
+
+ msg->status = ret;
+ spi_finalize_current_message(master);
+
+#if SPI_PSM_CONTROL
+ zx29_spi_set_idle(&zx29spi->psm_lock);
+#endif
+ pm_relax(&zx29spi->pdev->dev);
+ return ret;
+}
+
/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme start */
/* Size of the software RX ring (spi->rx_buf) that the DMA callback drains into. */
#define SPI_SLVAE_RX_BUFF_SIZE 4096
/* Number of packet slots in the cyclic DMA buffer (one DMA period each). */
#define SPI_SLVAE_RX_MAX_PACK_NUM 15
/* Fixed size of one slave RX packet slot, in bytes (last 4 bytes hold a sentinel). */
#define SPI_SLVAE_RX_PACK_LEN 146
/* Total length of the cyclic DMA buffer (all slots back to back). */
#define SPI_SLVAE_RX_LIST_BUFF_LEN (SPI_SLVAE_RX_MAX_PACK_NUM*SPI_SLVAE_RX_PACK_LEN)
/* One DMA channel descriptor per packet slot; filled in zx29_slave_config_dma(). */
static dma_channel_def slave_rx_conf[SPI_SLVAE_RX_MAX_PACK_NUM] = {0};
//yu.dong@20240617 [T106BUG-641] SPI packet loss issue, increase kernel buffer and read all cached data away, no data loss.
/* Sentinel written to the last 4 bytes of each slot; overwritten by DMA = slot has data. */
#define SPI_MAGIC 0x55555555
+static bool rxbuf_is_free_space(struct spi_device *spi)
+{
+ if (spi->recv_pos < spi->rd_pos) {
+ if ((spi->rd_pos - spi->recv_pos) > SPI_SLVAE_RX_PACK_LEN)
+ return 1;
+ else
+ return 0;
+ }
+ else {
+ if ((SPI_SLVAE_RX_BUFF_SIZE - spi->recv_pos + spi->rd_pos ) > SPI_SLVAE_RX_PACK_LEN)
+ return 1;
+ else
+ return 0;
+ }
+}
+
+/* yu.dong@20240617 [T106BUG-641] SPI packet loss issue, increase kernel buffer and read all cached data away, no data loss start */
/*
 * DMA cyclic-transfer completion callback, invoked per completed period.
 *
 * Each SPI_SLVAE_RX_PACK_LEN slot of the cyclic DMA buffer has its last
 * 4 bytes pre-set to SPI_MAGIC (see zx29_slave_config_dma()).  Once the
 * DMA engine fills a slot that sentinel is overwritten, marking the slot
 * as holding fresh data.  This callback copies every such slot into the
 * software RX ring (spi->rx_buf), restores the sentinel, and wakes any
 * sleeping reader.
 */
static void dma_cyclic_callback(void *data)
{
	struct spi_device *spi = (struct spi_device *)data;
	struct zx29_spi *zx29spi = NULL;
	int index = 0;
	unsigned int end = 0;

	zx29spi = spi_master_get_devdata(spi->master);
	zx29spi->spi_poll_cnt++;	/* NOTE(review): poll counter reused here as a callback counter */
	/* Sentinel of the slot the consumer index currently points at. */
	end = *(volatile unsigned int *)(spi->cyc_buf +spi->cyc_index * SPI_SLVAE_RX_PACK_LEN + SPI_SLVAE_RX_PACK_LEN - 4);
	while((end != SPI_MAGIC) && index < SPI_SLVAE_RX_MAX_PACK_NUM) {
		if(!rxbuf_is_free_space(spi)) {
			/* Ring full: leave the slot for a later callback to drain. */
			printk("rx_buff not enough space!!!!!");
			zx29spi->spi_dma_cnt++;
			break;
		}else {
			/* Copy one packet into the ring, splitting at the wrap point. */
			if((spi->recv_pos + SPI_SLVAE_RX_PACK_LEN) <= SPI_SLVAE_RX_BUFF_SIZE) {
				memcpy(spi->rx_buf + spi->recv_pos,spi->cyc_buf + spi->cyc_index * SPI_SLVAE_RX_PACK_LEN,SPI_SLVAE_RX_PACK_LEN);
			}else {
				memcpy(spi->rx_buf + spi->recv_pos,spi->cyc_buf + spi->cyc_index * SPI_SLVAE_RX_PACK_LEN,SPI_SLVAE_RX_BUFF_SIZE - spi->recv_pos);
				memcpy(spi->rx_buf,spi->cyc_buf + spi->cyc_index * SPI_SLVAE_RX_PACK_LEN + (SPI_SLVAE_RX_BUFF_SIZE - spi->recv_pos),SPI_SLVAE_RX_PACK_LEN-(SPI_SLVAE_RX_BUFF_SIZE-spi->recv_pos));
			}
			/* Re-arm the sentinel so this slot reads as empty again. */
			*(volatile unsigned int *)(spi->cyc_buf +spi->cyc_index * SPI_SLVAE_RX_PACK_LEN + SPI_SLVAE_RX_PACK_LEN - 4) = SPI_MAGIC;
			spi->recv_pos = (spi->recv_pos + SPI_SLVAE_RX_PACK_LEN)%SPI_SLVAE_RX_BUFF_SIZE;
			spi->cyc_index = (spi->cyc_index + 1)%SPI_SLVAE_RX_MAX_PACK_NUM;

			zx29spi->spi_dma_cnt++;
			index++;
			/* Peek at the next slot's sentinel for the loop condition. */
			end = *(volatile unsigned int *)(spi->cyc_buf +spi->cyc_index * SPI_SLVAE_RX_PACK_LEN + SPI_SLVAE_RX_PACK_LEN - 4);
		}

		/* Wake a blocked reader once at least one packet has landed. */
		if(spi->is_rd_waiting == true && spi->recv_done == 0) {
			wake_up(&spi->rd_wait);
			spi->recv_done = 1;
		}
	}
	/* Every slot was valid and none was restored: DMA lapped the consumer. */
	if((end != SPI_MAGIC) && index == SPI_SLVAE_RX_MAX_PACK_NUM)
		printk("cyc_buf be covered!!!!!");
	return;
}
+/* yu.dong@20240617 [T106BUG-641] SPI packet loss issue, increase kernel buffer and read all cached data away, no data loss end */
+
/*
 * zx29_slave_config_dma - program and start cyclic RX DMA for slave mode.
 *
 * Enables RX DMA in the FIFO control register, builds one dma_channel_def
 * per packet slot (burst length mapped from the RX FIFO trigger level,
 * burst size from the configured read width), pre-sets each slot's
 * SPI_MAGIC sentinel, then submits a single cyclic descriptor whose
 * period is one packet slot.  dma_cyclic_callback() runs per period.
 *
 * Returns 0 on success, -ENODEV without an RX channel, -EBUSY when the
 * cyclic descriptor cannot be prepared.
 */
static int zx29_slave_config_dma(struct zx29_spi *zx29spi,struct spi_device *spi)
{
	struct chip_data *chip = NULL;
	struct dma_chan *rxchan = NULL;
	struct dma_async_tx_descriptor *rxdesc;
	unsigned short transfer_len = SPI_SLVAE_RX_PACK_LEN;
	int i;

	chip = zx29spi->cur_chip = spi->controller_state;

	if (spi->rx_dma)
		flush(zx29spi);
	writel(CLEAR_ALL_INTERRUPTS, (SPI_INTR_SR_OFFSET+zx29spi->virtbase));
	writel((chip->fifo_ctrl | SPI_FIFO_CTRL_MASK_RX_DMA_EN), (SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));

	zx29spi->write = zx29spi->tx ? zx29spi->cur_chip->write : WRITING_NULL;
	zx29spi->read = zx29spi->rx ? zx29spi->cur_chip->read : READING_NULL;
	rxchan = zx29spi->dma_rx_channel;
	/* Check that the channels are available */
	if (!rxchan)
		return -ENODEV;

	/*
	 * If supplied, the DMA burstsize should equal the FIFO trigger level.
	 * Notice that the DMA engine uses one-to-one mapping. Since we can
	 * not trigger on 2 elements this needs explicit mapping rather than
	 * calculation.
	 */
	for(i = 0;i < SPI_SLVAE_RX_MAX_PACK_NUM;i++) {
		/* Burst length follows the RX FIFO trigger level one-to-one. */
		switch (zx29spi->rx_lev_trig) {
		case SPI_RX_1_OR_MORE_ELEM:
			slave_rx_conf[i].dma_control.src_burst_len = DMA_BURST_LEN_1;
			slave_rx_conf[i].dma_control.dest_burst_len = DMA_BURST_LEN_1;
			break;
		case SPI_RX_4_OR_MORE_ELEM:
			slave_rx_conf[i].dma_control.src_burst_len = DMA_BURST_LEN_4;
			slave_rx_conf[i].dma_control.dest_burst_len = DMA_BURST_LEN_4;
			break;
		case SPI_RX_8_OR_MORE_ELEM:
			slave_rx_conf[i].dma_control.src_burst_len = DMA_BURST_LEN_8;
			slave_rx_conf[i].dma_control.dest_burst_len = DMA_BURST_LEN_8;
			break;
		case SPI_RX_16_OR_MORE_ELEM:
			slave_rx_conf[i].dma_control.src_burst_len = DMA_BURST_LEN_16;
			slave_rx_conf[i].dma_control.dest_burst_len = DMA_BURST_LEN_16;
			break;
		case SPI_RX_32_OR_MORE_ELEM:
			slave_rx_conf[i].dma_control.src_burst_len = DMA_BURST_LEN_ALL;
			slave_rx_conf[i].dma_control.dest_burst_len = DMA_BURST_LEN_ALL;
			break;
		default:
			/* No explicit trigger level: use half the FIFO depth. */
			slave_rx_conf[i].dma_control.src_burst_len = zx29spi->vendor->fifodepth >> 1;
			slave_rx_conf[i].dma_control.dest_burst_len = zx29spi->vendor->fifodepth >> 1;
			break;
		}

		/* Burst element size tracks the configured read width. */
		switch (zx29spi->read) {
		case READING_NULL:
			/* Use the same as for writing */
			slave_rx_conf[i].dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
			slave_rx_conf[i].dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
			slave_rx_conf[i].count = transfer_len;
			break;
		case READING_U8:
			slave_rx_conf[i].dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
			slave_rx_conf[i].dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
			slave_rx_conf[i].count = transfer_len;
			break;
		case READING_U16:
			slave_rx_conf[i].dma_control.src_burst_size = DMA_BURST_SIZE_16BIT;
			slave_rx_conf[i].dma_control.dest_burst_size = DMA_BURST_SIZE_16BIT;
			slave_rx_conf[i].count = transfer_len;
			break;
		case READING_U32:
			slave_rx_conf[i].dma_control.src_burst_size = DMA_BURST_SIZE_32BIT;
			slave_rx_conf[i].dma_control.dest_burst_size = DMA_BURST_SIZE_32BIT;
			slave_rx_conf[i].count = transfer_len;
			break;
		}

		/* Peripheral-to-memory: SPI data register -> slot i of the DMA buffer. */
		slave_rx_conf[i].src_addr = (SPI_DR_OFFSET+zx29spi->phybase);
		slave_rx_conf[i].dma_control.tran_mode = TRAN_PERI_TO_MEM;
		slave_rx_conf[i].dma_control.irq_mode = DMA_ALL_IRQ_ENABLE;
		slave_rx_conf[i].dest_addr = (unsigned int)spi->rx_dma + transfer_len*i;
		slave_rx_conf[i].link_addr = 1;
		//yu.dong@20240617 [T106BUG-641] SPI packet loss issue, increase kernel buffer and read all cached data away, no data loss.
		/* Pre-set the end-of-slot sentinel consumed by dma_cyclic_callback(). */
		*(volatile unsigned int *)(spi->cyc_buf + transfer_len*i + transfer_len -4) = SPI_MAGIC;
	}

	dmaengine_slave_config(rxchan,(struct dma_slave_config*)&slave_rx_conf[0]);

	/* Submit and fire RX and TX with TX last so we're ready to read! */
	if (spi->rx_dma) {
		rxdesc = rxchan->device->device_prep_dma_cyclic(rxchan,NULL,SPI_SLVAE_RX_MAX_PACK_NUM * SPI_SLVAE_RX_PACK_LEN, SPI_SLVAE_RX_PACK_LEN,0,0);
		if (!rxdesc) {
			printk(KERN_INFO "!!ERROR DESC !!![%s][%d]\n",__func__,__LINE__);
			dmaengine_terminate_all(rxchan);
			return -EBUSY;
		}
		/* Put the callback on the RX transfer only, that should finish last */
		rxdesc->callback = dma_cyclic_callback;
		rxdesc->callback_param = spi;
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(rxchan);
		zx29spi->dma_running = RX_TRANSFER;
	}
	return 0;
}
+
+static int zx29_slave_rd_start(struct spi_device *spi)
+{
+ struct zx29_spi *zx29spi = NULL;
+ struct device *dev;
+ int status = 0;
+ static int wd_wait_queue_init = 0;
+
+ printk("zx29_slave_rd_start...\r\n");
+
+ zx29spi = spi_master_get_devdata(spi->master);
+ dev = &zx29spi->pdev->dev;
+ if (!zx29spi)
+ return -EINVAL;
+
+ spi->cyc_index = 0;
+ spi->rd_pos = spi->recv_pos = 0;
+
+ spi->cyc_buf = dma_alloc_coherent(dev, SPI_SLVAE_RX_BUFF_SIZE, &spi->rx_dma, GFP_KERNEL);
+ if (dma_mapping_error(dev, spi->rx_dma)) {
+ dev_err(dev, "dma_map_single spi rx failed\n");
+ return -ENOMEM;
+ }
+
+ if(wd_wait_queue_init == 0) {
+ init_waitqueue_head(&spi->rd_wait);
+ spi->recv_done = false;
+ spi->is_rd_waiting = false;
+ wd_wait_queue_init = 1;
+ }
+ status = zx29_slave_config_dma(zx29spi,spi);
+
+ return status;
+}
+
+static int zx29_slave_rd_stop(struct spi_device *spi)
+{
+ struct zx29_spi *zx29spi = NULL;
+ struct device *dev;
+ int status = 0;
+ struct chip_data *chip = NULL;
+ struct dma_chan *rxchan = NULL;
+
+ zx29spi = spi_master_get_devdata(spi->master);
+ dev = &zx29spi->pdev->dev;
+ if (!zx29spi)
+ return -EINVAL;
+
+ chip = zx29spi->cur_chip= spi->controller_state;
+ writel(chip->fifo_ctrl, (SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
+ rxchan = zx29spi->dma_rx_channel;
+ /* Submit and fire RX and TX with TX last so we're ready to read! */
+ if(spi->rx_dma) {
+ dmaengine_terminate_all(rxchan);
+ zx29spi->dma_running = 0;
+ }
+
+ if(spi->cyc_buf != NULL && spi->rx_dma) {
+ dma_free_coherent(dev, SPI_SLVAE_RX_BUFF_SIZE, spi->cyc_buf, spi->rx_dma);
+ spi->cyc_buf = NULL;
+ }
+
+ spi->cyc_index = 0;
+ spi->rd_pos = spi->recv_pos = 0;
+ spi->recv_done = false;
+ spi->is_rd_waiting = false;
+ printk("zx29_slave_rd_stop...\r\n");
+
+ return status;
+}
+/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme end */
+
/* No per-batch preparation required; the controller is set up per message. */
static int zx29_prepare_transfer_hardware(struct spi_master *master)
{
	return 0;
}
+
/*
 * No per-batch teardown required: the SSPE enable bit is managed inside
 * the message-transfer path itself.
 */
static int zx29_unprepare_transfer_hardware(struct spi_master *master)
{
	return 0;
}
+
+static int verify_controller_parameters(struct zx29_spi *zx29spi,
+ struct spi_config_chip const *chip_info)
+{
+ if ((chip_info->iface < SPI_INTERFACE_MOTOROLA_SPI)
+ || (chip_info->iface > SPI_INTERFACE_ISI_SPI)) {
+ dev_err(&zx29spi->pdev->dev,
+ "interface is configured incorrectly\n");
+ return -EINVAL;
+ }
+
+ if ((chip_info->hierarchy != SPI_MASTER)
+ && (chip_info->hierarchy != SPI_SLAVE)) {
+ dev_err(&zx29spi->pdev->dev,
+ "hierarchy is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if ((chip_info->com_mode != INTERRUPT_TRANSFER)
+ && (chip_info->com_mode != DMA_TRANSFER)
+ && (chip_info->com_mode != POLLING_TRANSFER)) {
+ dev_err(&zx29spi->pdev->dev,
+ "Communication mode is configured incorrectly\n");
+ return -EINVAL;
+ }
+ switch (chip_info->rx_lev_trig) {
+ case SPI_RX_1_OR_MORE_ELEM:
+ case SPI_RX_4_OR_MORE_ELEM:
+ case SPI_RX_8_OR_MORE_ELEM:
+ /* These are always OK, all variants can handle this */
+ break;
+ case SPI_RX_16_OR_MORE_ELEM:
+ if (zx29spi->vendor->fifodepth < 16) {
+ dev_err(&zx29spi->pdev->dev,
+ "RX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ break;
+ case SPI_RX_32_OR_MORE_ELEM:
+ if (zx29spi->vendor->fifodepth < 32) {
+ dev_err(&zx29spi->pdev->dev,
+ "RX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_err(&zx29spi->pdev->dev,
+ "RX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ break;
+ }
+ switch (chip_info->tx_lev_trig) {
+ case SPI_TX_1_OR_MORE_EMPTY_LOC:
+ case SPI_TX_4_OR_MORE_EMPTY_LOC:
+ case SPI_TX_8_OR_MORE_EMPTY_LOC:
+ /* These are always OK, all variants can handle this */
+ break;
+ case SPI_TX_16_OR_MORE_EMPTY_LOC:
+ if (zx29spi->vendor->fifodepth < 16) {
+ dev_err(&zx29spi->pdev->dev,
+ "TX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ break;
+ case SPI_TX_32_OR_MORE_EMPTY_LOC:
+ if (zx29spi->vendor->fifodepth < 32) {
+ dev_err(&zx29spi->pdev->dev,
+ "TX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_err(&zx29spi->pdev->dev,
+ "TX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ break;
+ }
+
+ return 0;
+}
+
/* Controller capability table: 16-entry FIFO, up to 32 bits per word, loopback supported. */
static struct vendor_data vendor_arm = {
	.fifodepth = 16,
	.max_bpw = 32,
	.loopback = true,
};
+
+/*
+ * A piece of default chip info unless the platform
+ * supplies it.
+ */
+static const struct spi_config_chip spi_default_chip_info = {
+ .com_mode = DMA_TRANSFER,//INTERRUPT_TRANSFER,//POLLING_TRANSFER,
+ .iface = SPI_INTERFACE_MOTOROLA_SPI,
+ .hierarchy = SPI_MASTER,
+ .slave_tx_disable = DO_NOT_DRIVE_TX,
+ .rx_lev_trig = SPI_RX_4_OR_MORE_ELEM,
+ .tx_lev_trig = SPI_TX_4_OR_MORE_EMPTY_LOC,
+// .ctrl_len = SSP_BITS_8,
+// .wait_state = SSP_MWIRE_WAIT_ZERO,
+// .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
+// .cs_control = default_cs_control,
+};
+
+/*
+
+*/
+static void spi_set_gpio_function(int dev_id)
+{
+ if (pinctrl_select_state(ssp_pins[dev_id].pctrl, ssp_pins[dev_id].pfunc) < 0) {
+ printk("spi%d setting spi pin ctrl failed\n",dev_id);
+ }
+ return;
+}
+static void spi_set_gpio_gpio(int dev_id)
+{
+ if (pinctrl_select_state(ssp_pins[dev_id].pctrl, ssp_pins[dev_id].pgpio) < 0) {
+ printk("spi%d setting spi pin ctrl failed\n",dev_id);
+ }
+ return;
+}
+
+
/* Drive GPIO @gpio_num to @val (thin wrapper kept for bit-bang readability). */
static void spi_set_gpio_val(int gpio_num, int val)
{
	gpio_set_value(gpio_num, val);
}
+
/* Sample GPIO @gpio_num; the pad direction is configured by the caller. */
static int spi_get_gpio_val(int gpio_num)
{
	return gpio_get_value(gpio_num);
}
+
/* Busy-wait for the given number of microseconds (bit-bang clock pacing). */
static void spi_time_delay(int usecs)
{
	udelay(usecs);
}
+
/* Leave SPI function mode: park the pads in their GPIO pinctrl state. */
void spi_fun_mode_stop(int dev_id)
{
	spi_set_gpio_gpio(dev_id);
}
+
+void spi_gpio_mode_start(int dev_id)
+{
+ //mutex_lock(&g_zx29_spi->spi_lock); //spi control function mutex.
+ /* set clk tx rx cs to gpio */
+ //spi_set_gpio_gpio(dev_id);
+ gpio_direction_output(ssp_pins[dev_id].gpio_cs,SPI_GPIO_HIGH);
+ gpio_direction_output(ssp_pins[dev_id].gpio_clk,SPI_GPIO_LOW);
+ gpio_direction_output(ssp_pins[dev_id].gpio_tx,0);//value ?
+ gpio_direction_input(ssp_pins[dev_id].gpio_rx);
+
+ return ;
+}
+EXPORT_SYMBOL(spi_gpio_mode_start);
/* Leave GPIO bit-bang mode: hand the pads back to the SPI controller function. */
void spi_gpio_mode_stop(int dev_id)
{
	spi_set_gpio_function(dev_id);
}
EXPORT_SYMBOL(spi_gpio_mode_stop);
+
+void spi_gpio_write_single8(int dev_id,unsigned char data)
+{
+ int i;
+
+ //printk("spi_gpio_write_single8 %x\n", data);
+
+ spi_set_gpio_val(ssp_pins[dev_id].gpio_clk, SPI_GPIO_HIGH);
+ spi_set_gpio_val(ssp_pins[dev_id].gpio_cs, SPI_GPIO_LOW);/* CS invail*/
+
+ for( i=7; i>=0; i-- )
+ {
+ spi_set_gpio_val(ssp_pins[dev_id].gpio_clk, SPI_GPIO_LOW);
+ if ((data >> i) & 0x1)
+ {
+ spi_set_gpio_val(ssp_pins[dev_id].gpio_tx, SPI_GPIO_HIGH);
+ }
+ else
+ {
+ spi_set_gpio_val(ssp_pins[dev_id].gpio_tx, SPI_GPIO_LOW);
+ }
+ spi_time_delay(1);
+ spi_set_gpio_val(ssp_pins[dev_id].gpio_clk, SPI_GPIO_HIGH);
+ spi_time_delay(1);
+ }
+ spi_set_gpio_val(ssp_pins[dev_id].gpio_cs, SPI_GPIO_HIGH);
+ spi_set_gpio_val(ssp_pins[dev_id].gpio_tx, SPI_GPIO_LOW);
+
+}
+EXPORT_SYMBOL(spi_gpio_write_single8);
+/*******************************************************************************
+ * Function:
+ * Description:
+ * Parameters:
+ * Input:
+ *
+ * Output:
+ *
+ * Returns:
+ *
+ *
+ * Others:
+ ********************************************************************************/
+unsigned char spi_gpio_read_single8(int dev_id)
+{
+ int i;
+ unsigned char readData = 0;
+
+ spi_set_gpio_val(ssp_pins[dev_id].gpio_clk, SPI_GPIO_HIGH);
+ spi_set_gpio_val(ssp_pins[dev_id].gpio_cs, SPI_GPIO_LOW);/* CS */
+
+ for( i=7; i>=0; i-- )
+ {
+ spi_set_gpio_val(ssp_pins[dev_id].gpio_clk, SPI_GPIO_LOW);
+ spi_time_delay(1);
+ spi_set_gpio_val(ssp_pins[dev_id].gpio_clk, SPI_GPIO_HIGH);
+ if( spi_get_gpio_val(ssp_pins[dev_id].gpio_rx) )/* lcd tx rx */
+ {
+ readData |= (1 << i);
+ }
+ spi_time_delay(1);
+ }
+ spi_set_gpio_val(ssp_pins[dev_id].gpio_cs, SPI_GPIO_HIGH);
+
+ //printk("spi_gpio_read_single8 %x\n", readData);
+ return readData;
+}
+EXPORT_SYMBOL(spi_gpio_read_single8);
+
+/**
+ * @brief spi gpio mode, cs control
+ *
+ * This function used for lcd 3-wires spi mode.
+ * before cs pull down, spi pads will change to gpio mode.
+ * after cs pull high, spi pads gpio mode recovery to spi mode.
+ *
+ * @param level 0: cs line pull down, no-zero: cs line pull up.
+ *
+ * @retval none
+ */
+void spi_gpio_3wire_cs(int dev_id,unsigned char level)
+{
+ if(level){
+ spi_set_gpio_val(ssp_pins[dev_id].gpio_cs,SPI_GPIO_HIGH);
+ gpio_direction_input(ssp_pins[dev_id].gpio_tx);
+
+ /* zx29_gpio_function_sel(GPIO_AP_SPI0_CS, GPIO_AP_SPI0_CS_FUN); */
+ //zx29_gpio_function_sel(GPIO_AP_SPI0_CLK, GPIO_AP_SPI0_CLK_FUN);
+ //zx29_gpio_function_sel(GPIO_AP_SPI0_TXD, GPIO_AP_SPI0_TXD_FUN);
+
+ //mutex_unlock(&g_zx29_spi->spi_lock); //spi control function mutex.
+ }
+ else{
+ //mutex_lock(&g_zx29_spi->spi_lock);
+
+ /* zx29_gpio_function_sel(GPIO_AP_SPI0_CS, GPIO_AP_CS_GPIO_FUN); */
+ //zx29_gpio_function_sel(GPIO_AP_SPI0_CLK, GPIO_AP_CLK_GPIO_FUN);
+ //zx29_gpio_function_sel(GPIO_AP_SPI0_TXD, GPIO_AP_TXD_GPIO_FUN);
+
+ gpio_direction_output(ssp_pins[dev_id].gpio_cs,SPI_GPIO_LOW);
+ gpio_direction_output(ssp_pins[dev_id].gpio_clk,SPI_GPIO_LOW);
+ gpio_direction_output(ssp_pins[dev_id].gpio_tx,SPI_GPIO_LOW);
+
+ spi_set_gpio_val(ssp_pins[dev_id].gpio_clk,SPI_GPIO_LOW);
+ spi_set_gpio_val(ssp_pins[dev_id].gpio_cs,SPI_GPIO_LOW);
+ }
+}
+EXPORT_SYMBOL(spi_gpio_3wire_cs);
+
+/**
+ * @brief spi gpio mode, one byte write.
+ *
+ * This function used for lcd 3-wires spi mode.
+ * txd line used tx function and rx function at different time.
+ *
+ * @param reg one byte write data.
+ *
+ * @retval none
+ */
+void spi_gpio_3wire_write8(int dev_id,unsigned char reg)
+{
+ int i;
+ //unsigned char readData = 0;
+
+ //write
+ spi_time_delay(50);
+ for (i = 0; i < 8; i++)
+ {
+ gpio_set_value(ssp_pins[dev_id].gpio_clk, SPI_GPIO_LOW);
+ spi_time_delay(50);
+
+ if ((reg & 0x80)==0x80)
+ {
+ gpio_set_value(ssp_pins[dev_id].gpio_tx, SPI_GPIO_HIGH);
+ }
+ else
+ {
+ gpio_set_value(ssp_pins[dev_id].gpio_tx, SPI_GPIO_LOW);
+ }
+ spi_time_delay(50);
+
+ gpio_set_value(ssp_pins[dev_id].gpio_clk, SPI_GPIO_HIGH);
+ spi_time_delay(50);
+
+ reg <<= 1;
+ }
+ //spi_time_delay(50);
+}
+EXPORT_SYMBOL(spi_gpio_3wire_write8);
+
+/**
+ * @brief spi gpio mode, one byte read.
+ *
+ * This function used for lcd 3-wires spi mode.
+ * txd line used tx function and rx function at different time.
+ *
+ * @param none.
+ *
+ * @retval one byte readed data.
+ */
+unsigned char spi_gpio_3wire_read8(int dev_id)
+{
+ int i;
+ unsigned char readData = 0;
+ //read
+ gpio_direction_input(ssp_pins[dev_id].gpio_tx);
+ spi_time_delay(50);
+
+ readData = 0;
+ for (i = 0; i < 8; i++)
+ {
+ readData <<= 1;
+ gpio_set_value(ssp_pins[dev_id].gpio_clk, SPI_GPIO_LOW);
+ spi_time_delay(50);
+
+ if (SPI_GPIO_HIGH == gpio_get_value(ssp_pins[dev_id].gpio_tx))
+ {
+ readData |= 0x01;
+ }
+
+ gpio_set_value(ssp_pins[dev_id].gpio_clk, SPI_GPIO_HIGH);
+ spi_time_delay(50);
+ }
+ //spi_time_delay(50);
+
+ //printk("spi_gpio_read_single8 %x\n", readData);
+ return readData;
+}
+EXPORT_SYMBOL(spi_gpio_3wire_read8);
+
+
+static void zx29_setup_to_regs(struct chip_data *chip,struct zx29_spi *zx29spi)
+{
+ unsigned int regval = 0;
+ ktime_t k_time_start = 0;
+ ktime_t k_time_end = 0;
+ ktime_t diff_ns = 0;
+ /* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme start */
+ regval = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & (~SPI_COM_CTRL_MASK_SSPE);
+ writel(regval, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
+
+ writel(chip->fmt_ctrl, (SPI_FMT_CTRL_OFFSET+zx29spi->virtbase));
+ //writel(chip->fifo_ctrl, (SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
+ /* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme end */
+ writel(chip->com_ctrl, (SPI_COM_CTRL_OFFSET + zx29spi->virtbase));
+ //writel(chip->timing, (SPI_TIMING_OFFSET + zx29spi->virtbase));
+
+ writel(readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) | SPI_COM_CTRL_MASK_SSPE, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
+
+ //while(((readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase))>> 4)&0x1) == 0);
+ k_time_start = ktime_get();
+ do {
+ regval = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & SPI_COM_CTRL_MASK_SSPE_BACK;
+ diff_ns = ktime_sub(ktime_get(),k_time_start);
+ cpu_relax();
+ }
+ while (!regval && diff_ns < 10000000);
+
+ if(diff_ns >= 10000000) {
+ dev_info(&zx29spi->pdev->dev, " zx29_setup_to_regs failed! diff_ns=%lld \n",diff_ns);
+ }
+
+}
+/**
+ * zx29_setup - setup function registered to SPI master framework
+ * @spi: spi device which is requesting setup
+ *
+ * This function is registered to the SPI framework for this SPI master
+ * controller. If it is the first time when setup is called by this device,
+ * this function will initialize the runtime state for this chip and save
+ * the same in the device structure. Else it will update the runtime info
+ * with the updated chip info. Nothing is really being written to the
+ * controller hardware here, that is not done until the actual transfer
+ * commence.
+ */
static int zx29_setup(struct spi_device *spi)
{
	struct spi_config_chip const *chip_info;
	struct chip_data *chip;
	unsigned speed_hz;
	int status = 0;
	struct zx29_spi *zx29spi = NULL;
	unsigned int bits =0;
	u8 iface = 0;
	u32 tmp;

	if (!spi)
		return -EINVAL;
	bits = spi->bits_per_word;
	zx29spi = spi_master_get_devdata(spi->master);
	if (!zx29spi)
		return -EINVAL;
	iface = zx29spi->iface_mode;

	/* Get controller_state if one is supplied */
	chip = spi_get_ctldata(spi);

	/* First setup call for this device: allocate fresh runtime state. */
	if (chip == NULL) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip) {
			dev_err(&spi->dev, "cannot allocate controller state\n");
			return -ENOMEM;
		}
		dev_dbg(&spi->dev, "allocated memory for controller's runtime state\n");
	}

	/* Get controller data if one is supplied */
	chip_info = spi->controller_data;

	if (chip_info == NULL) {
		chip_info = &spi_default_chip_info;
		/* spi_board_info.controller_data not is supplied */
		dev_dbg(&spi->dev, "using default controller_data settings\n");
	} else
		dev_dbg(&spi->dev, "using user supplied controller_data settings\n");

	/*
	 * We can override with custom divisors, else we use the board
	 * frequency setting
	 */

	/* set spi clock source at 104MHz/1 */
	//writel(chip ->clk_div-1, M0_SSP_CLKDIV_REG_VA);
	speed_hz = spi->max_speed_hz;
//	clk_set_rate(zx29spi->spi_clk, speed_hz * 2);	/* f(ssp_clk) = 2*f(ssp_sclk_out) */
	/* Clamp the requested speed to what the clock tree can actually produce. */
	spi->max_speed_hz = clk_round_rate(zx29spi->spi_clk, speed_hz * 2) / 2;

	if (spi->max_speed_hz != speed_hz)
		dev_dbg(&spi->dev, "round speed %dHz differs from requested %dHz.", spi->max_speed_hz, speed_hz);

	status = verify_controller_parameters(zx29spi, chip_info);
	if (status) {
		dev_err(&spi->dev, "controller data is incorrect");
		goto err_config_params;
	}

	zx29spi->rx_lev_trig = chip_info->rx_lev_trig;
	zx29spi->tx_lev_trig = chip_info->tx_lev_trig;

	/* Now set controller state based on controller data */
	//chip->xfer_type = chip_info->com_mode;
	/* Transfer mode is decided by the device's dma_used flag, not chip_info. */
	chip->xfer_type = spi->dma_used ? DMA_TRANSFER : POLLING_TRANSFER;
	dev_dbg(&spi->dev, "chip->xfer_type = 0x%x \n",chip->xfer_type);

	if (!chip_info->cs_control) {
		chip->cs_control = default_cs_control;

		if (spi->master->num_chipselect != 1)
			dev_err(&spi->dev, "chip select function is NULL!\n");
	} else
		chip->cs_control = chip_info->cs_control;

	/* Check bits per word with vendor specific range */
	if ((bits <= 3) || (bits > zx29spi->vendor->max_bpw)) {
		status = -ENOTSUPP;
		dev_err(&spi->dev, "illegal data size for this controller!\n");
		dev_err(&spi->dev, "This controller can only handle 4 <= n <= %d bit words\n",
			zx29spi->vendor->max_bpw);
		goto err_config_params;
	} else if (bits <= 8) {
		dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n");
		chip->n_bytes = 1;
		chip->read = READING_U8;
		chip->write = WRITING_U8;
	} else if (bits <= 16) {
		dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n");
		chip->n_bytes = 2;
		chip->read = READING_U16;
		chip->write = WRITING_U16;
	} else {
		dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
		chip->n_bytes = 4;
		chip->read = READING_U32;
		chip->write = WRITING_U32;
	}

	/* Now Initialize all register settings required for this chip */
	chip->com_ctrl = 0;
	chip->fmt_ctrl = 0;
	chip->fifo_ctrl = 0;
	chip->timing = 0;

	if ((chip->xfer_type == DMA_TRANSFER)
	    && ((zx29spi->master_info)->enable_dma)) {
		chip->enable_dma = true;
		dev_dbg(&spi->dev, "DMA mode set in controller state\n");
	} else {
		chip->enable_dma = false;
		dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n");
	}
	/* DMA enables stay off in the cached value; the transfer path sets them. */
	SPI_WRITE_BITS(chip->fifo_ctrl, SPI_DMA_DISABLED, SPI_FIFO_CTRL_MASK_RX_DMA_EN, 2);
	SPI_WRITE_BITS(chip->fifo_ctrl, SPI_DMA_DISABLED, SPI_FIFO_CTRL_MASK_TX_DMA_EN, 3);

	/* FIFO threshold: 8 when requested, otherwise default to 4. */
	if (zx29spi->rx_lev_trig == SPI_RX_8_OR_MORE_ELEM)
		SPI_WRITE_BITS(chip->fifo_ctrl, SPI_FIFO_THRES_8, SPI_FIFO_CTRL_MASK_RX_FIFO_THRES, 4);
	else
		SPI_WRITE_BITS(chip->fifo_ctrl, SPI_FIFO_THRES_4, SPI_FIFO_CTRL_MASK_RX_FIFO_THRES, 4);
	if (zx29spi->tx_lev_trig == SPI_TX_8_OR_MORE_EMPTY_LOC)
		SPI_WRITE_BITS(chip->fifo_ctrl, SPI_FIFO_THRES_8, SPI_FIFO_CTRL_MASK_TX_FIFO_THRES, 8);
	else
		SPI_WRITE_BITS(chip->fifo_ctrl, SPI_FIFO_THRES_4, SPI_FIFO_CTRL_MASK_TX_FIFO_THRES, 8);

	/* Data size field is (bits_per_word - 1). */
	SPI_WRITE_BITS(chip->fmt_ctrl, bits - 1, SPI_FMT_CTRL_MASK_DSS, 4);
	SPI_WRITE_BITS(chip->fmt_ctrl, chip_info->iface, SPI_FMT_CTRL_MASK_FRF, 0);

	/* The controller-level iface_mode overrides chip_info for TI/ISI framing. */
	if((iface== SPI_TI_FORMAT)||(iface== SPI_ISI_FORMAT)){
		printk("qhf %s set iface = %d\n",__func__,iface);
		SPI_WRITE_BITS(chip->fmt_ctrl, iface, SPI_FMT_CTRL_MASK_FRF, 0);
	}
	/* Stuff that is common for all versions */
	if (spi->mode & SPI_CPOL)
		tmp = SPI_CLK_POL_IDLE_HIGH;
	else
		tmp = SPI_CLK_POL_IDLE_LOW;
	SPI_WRITE_BITS(chip->fmt_ctrl, tmp, SPI_FMT_CTRL_MASK_POL, 2);

	if (spi->mode & SPI_CPHA)
		tmp = SPI_CLK_SECOND_EDGE;
	else
		tmp = SPI_CLK_FIRST_EDGE;

	SPI_WRITE_BITS(chip->fmt_ctrl, tmp, SPI_FMT_CTRL_MASK_PHA, 3);

	/* Loopback is available on all versions except PL023 */
	if (zx29spi->vendor->loopback) {
		if (spi->mode & SPI_LOOP)
			tmp = LOOPBACK_ENABLED;
		else
			tmp = LOOPBACK_DISABLED;
		SPI_WRITE_BITS(chip->com_ctrl, tmp, SPI_COM_CTRL_MASK_LBM, 0);
	}
//	SPI_WRITE_BITS(chip->com_ctrl, SPI_ENABLED, SPI_COM_CTRL_MASK_SSPE, 1);
	SPI_WRITE_BITS(chip->com_ctrl, chip_info->hierarchy, SPI_COM_CTRL_MASK_MS, 2);
//	SPI_WRITE_BITS(chip->com_ctrl, chip_info->slave_tx_disable, SPI_COM_CTRL_MASK_SOD, 3);

	/* Optional inter-transfer gap (CS deselect time). */
	if(spi->trans_gaped) {
		chip->enable_trans_gap = true;
	}
	SPI_WRITE_BITS(chip->timing, spi->trans_gap_num, SPI_TIMING_MASK_T_CS_DESEL, 0);
	/* Save controller_state */
	spi_set_ctldata(spi, chip);
	/* Slave mode: force the MS field and flush the config to hardware now. */
	if(zx29spi->mode == ZX29_SSP_SLAVE_TYPE) {

		SPI_WRITE_BITS(chip->com_ctrl, SPI_SLAVE_MODE, SPI_COM_CTRL_MASK_MS, 2);
		zx29_setup_to_regs(chip,zx29spi);
	}

	//yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme.

	return status;
 err_config_params:
	/* Error path: drop the (possibly pre-existing) runtime state entirely. */
	spi_set_ctldata(spi, NULL);
	kfree(chip);
	return status;
}
+
+/**
+ * zx29_cleanup - cleanup function registered to SPI master framework
+ * @spi: spi device which is requesting cleanup
+ *
+ * This function is registered to the SPI framework for this SPI master
+ * controller. It will free the runtime state of chip.
+ */
+static void zx29_cleanup(struct spi_device *spi)
+{
+ struct chip_data *chip = spi_get_ctldata(spi);
+
+ spi_set_ctldata(spi, NULL);
+ kfree(chip);
+}
+
+static int zx29_spi_clock_init(struct zx29_spi *zx29spi)
+{
+ int status = 0;
+ struct platform_device *pdev = zx29spi->pdev;
+ /* work clock */
+ zx29spi->spi_clk = devm_clk_get(&pdev->dev, "work_clk");
+ if (IS_ERR(zx29spi->spi_clk)) {
+ status = PTR_ERR(zx29spi->spi_clk);
+ dev_err(&pdev->dev, "could not retrieve SPI work clock\n");
+ return status;
+ }
+
+ /* enable spiclk at function zx29_setup */
+
+ if (device_property_read_u32(&pdev->dev, "clock-frequency", &zx29spi->clkfreq))
+ zx29spi->clkfreq = SPI_SPICLK_FREQ_26M;
+ status = clk_set_rate(zx29spi->spi_clk, zx29spi->clkfreq);
+ if(status) {
+ dev_err(&pdev->dev,"clc_set_rate err status=%d \n",status);
+ return status;
+ }
+ /* enable ssp clock source */
+ clk_prepare_enable(zx29spi->spi_clk);
+
+ /* apb clock */
+ zx29spi->pclk = devm_clk_get(&pdev->dev, "apb_clk");
+ if (IS_ERR(zx29spi->pclk)) {
+ status = PTR_ERR(zx29spi->pclk);
+ dev_err(&pdev->dev, "could not retrieve SPI work clock\n");
+ return status;
+ }
+ clk_prepare_enable(zx29spi->pclk);
+
+ return status;
+}
+
+static void spicc_clkgate_ctrl(struct zx29_spi *zx29spi,unsigned char is_enable)
+{
+ if (is_enable) {
+ clk_enable(zx29spi->spi_clk);
+ clk_enable(zx29spi->pclk);
+ } else {
+ clk_disable(zx29spi->spi_clk);
+ clk_disable(zx29spi->pclk);
+ }
+}
+
+/*
+ * zx29_spi_slave_clock_init - acquire and start the controller clocks
+ * (slave mode).
+ *
+ * Same as zx29_spi_clock_init() but the work clock is pinned to 156 MHz
+ * (the DT "clock-frequency" property is deliberately ignored for slaves)
+ * and the clock gates are opened immediately.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int zx29_spi_slave_clock_init(struct zx29_spi *zx29spi)
+{
+	int status;
+	struct platform_device *pdev = zx29spi->pdev;
+
+	/* work clock */
+	zx29spi->spi_clk = devm_clk_get(&pdev->dev, "work_clk");
+	if (IS_ERR(zx29spi->spi_clk)) {
+		status = PTR_ERR(zx29spi->spi_clk);
+		dev_err(&pdev->dev, "could not retrieve SPI work clock\n");
+		return status;
+	}
+
+	/* slave mode always runs the work clock at 156 MHz */
+	zx29spi->clkfreq = SPI_SPICLK_FREQ_156M;
+
+	status = clk_set_rate(zx29spi->spi_clk, zx29spi->clkfreq);
+	if (status) {
+		dev_err(&pdev->dev, "clk_set_rate err status=%d\n", status);
+		return status;
+	}
+	/* enable ssp clock source */
+	status = clk_prepare_enable(zx29spi->spi_clk);
+	if (status) {
+		dev_err(&pdev->dev, "could not enable SPI work clock\n");
+		return status;
+	}
+
+	/* apb clock (bus interface clock) */
+	zx29spi->pclk = devm_clk_get(&pdev->dev, "apb_clk");
+	if (IS_ERR(zx29spi->pclk)) {
+		status = PTR_ERR(zx29spi->pclk);
+		/* fixed copy-paste: this is the apb clock, not the work clock */
+		dev_err(&pdev->dev, "could not retrieve SPI apb clock\n");
+		return status;
+	}
+	status = clk_prepare_enable(zx29spi->pclk);
+	if (status) {
+		dev_err(&pdev->dev, "could not enable SPI apb clock\n");
+		return status;
+	}
+	spicc_clkgate_ctrl(zx29spi, true);
+
+	return status;
+}
+
+
+/*
+ * zx29_spi_init_pinctrl - look up pinctrl states and CS/CLK/TX/RX gpios.
+ *
+ * Caches the pinctrl handle and the named states (cs_gpio_active,
+ * cs_gpio_sleep, cs_func) plus the four gpios in the per-controller
+ * ssp_pins[] slot indexed by pdev->id.  Masters get the chip-select
+ * switched to GPIO mode and driven high (inactive); slaves keep CS
+ * routed to the SSP function.  Missing optional states/gpios are only
+ * logged.
+ *
+ * Returns 0 (lookup failures are non-fatal) or -1 when pdev is NULL.
+ */
+static int zx29_spi_init_pinctrl(struct platform_device *pdev)
+{
+	struct pinctrl *pctrl;
+	enum of_gpio_flags flags;
+	struct zx29_spi *zx29spi = NULL;
+
+	if (!pdev) {
+		printk("pdev not exist \n");
+		return -1;
+	}
+
+	/*
+	 * platform_get_drvdata() returns a plain pointer (or NULL), never an
+	 * ERR_PTR, so the old IS_ERR() test could not fire; check NULL.
+	 */
+	zx29spi = (struct zx29_spi *)platform_get_drvdata(pdev);
+	if (!zx29spi) {
+		dev_warn(&pdev->dev, "Failed to get zx29->ssp%d drvdata", pdev->id);
+		return 0;
+	}
+
+	pctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR(pctrl)) {
+		dev_warn(&pdev->dev, "Failed to get zx29->ssp%d pins", pdev->id);
+		return 0;
+	}
+	ssp_pins[pdev->id].pctrl = pctrl;
+
+	ssp_pins[pdev->id].pcs_gpio_active = pinctrl_lookup_state(pctrl, "cs_gpio_active");
+	if (IS_ERR(ssp_pins[pdev->id].pcs_gpio_active)) {
+		dev_err(&pdev->dev, "missing cs_gpio_active \n");
+	}
+
+	ssp_pins[pdev->id].pcs_gpio_sleep = pinctrl_lookup_state(pctrl, "cs_gpio_sleep");
+	if (IS_ERR(ssp_pins[pdev->id].pcs_gpio_sleep)) {
+		dev_err(&pdev->dev, "missing cs_gpio_sleep \n");
+	}
+	ssp_pins[pdev->id].pcs_func = pinctrl_lookup_state(ssp_pins[pdev->id].pctrl, "cs_func");
+	if (IS_ERR(ssp_pins[pdev->id].pcs_func)) {
+		dev_err(&pdev->dev, "missing cs_func \n");
+	}
+
+	/* master drives CS as a GPIO; slave leaves CS on the SSP function */
+	if (zx29spi->master->slave == false) {
+		if (pinctrl_select_state(ssp_pins[pdev->id].pctrl, ssp_pins[pdev->id].pcs_gpio_active) < 0) {
+			printk("spi%d setting cs_gpio pin ctrl failed\n", pdev->id);
+		}
+	} else {
+		if (pinctrl_select_state(ssp_pins[pdev->id].pctrl, ssp_pins[pdev->id].pcs_func) < 0) {
+			printk("spi%d setting cs_func pin ctrl failed\n", pdev->id);
+		}
+	}
+
+	/* gpios are listed in DT order: cs, clk, tx, rx */
+	ssp_pins[pdev->id].gpio_cs = of_get_gpio_flags(pdev->dev.of_node, 0, &flags);
+	if (!gpio_is_valid(ssp_pins[pdev->id].gpio_cs)) {
+		pr_info("ssp%d gpio_cs no found\n", pdev->id);
+	}
+
+	ssp_pins[pdev->id].gpio_clk = of_get_gpio_flags(pdev->dev.of_node, 1, &flags);
+	if (!gpio_is_valid(ssp_pins[pdev->id].gpio_clk)) {
+		pr_info("ssp%d gpio_clk no found\n", pdev->id);
+	}
+	ssp_pins[pdev->id].gpio_tx = of_get_gpio_flags(pdev->dev.of_node, 2, &flags);
+	if (!gpio_is_valid(ssp_pins[pdev->id].gpio_tx)) {
+		pr_info("ssp%d gpio_tx no found\n", pdev->id);
+	}
+
+	ssp_pins[pdev->id].gpio_rx = of_get_gpio_flags(pdev->dev.of_node, 3, &flags);
+	if (!gpio_is_valid(ssp_pins[pdev->id].gpio_rx)) {
+		pr_info("ssp%d gpio_rx no found\n", pdev->id);
+	}
+
+	/* park the master CS high (inactive) until a transfer asserts it */
+	if (zx29spi->master->slave == false)
+		gpio_direction_output(ssp_pins[pdev->id].gpio_cs, SPI_GPIO_HIGH);
+
+	return 0;
+}
+/*
+ * zx29_spi_get_platformInfo - populate controller config from device
+ * properties.
+ *
+ * Reads bus_id, num_chipselect, autosuspend_delay and the dma_rx/dma_tx
+ * request-line numbers from DT/ACPI properties, falling back to defaults
+ * (pdev->id, 1 CS, no autosuspend) when a property is absent.  The dma
+ * numbers are stashed as opaque cookies in dma_rx_param/dma_tx_param.
+ * NOTE(review): enable_dma is NOT read here (the block is #if 0'd out);
+ * the caller pre-sets it, so the dev_dbg below prints the caller's value.
+ */
+static void zx29_spi_get_platformInfo(struct platform_device *pdev,struct zx29_spi_controller *platform_info)
+{
+ struct device *dev=&pdev->dev;
+ u32 dma_tx,dma_rx,enable_dma;
+
+
+ if (device_property_read_u16(dev, "bus_id", &platform_info->bus_id)) {
+ platform_info->bus_id = pdev->id;
+ }
+ if (device_property_read_u8(dev, "num_chipselect", &platform_info->num_chipselect)) {
+ platform_info->num_chipselect = 1;
+ }
+#if 0
+ if (device_property_read_u32(dev, "enable_dma",&enable_dma)) {
+ dev_err(&pdev->dev,"enable_dma get failed");
+ platform_info->enable_dma = 0;
+ }
+ else {
+ platform_info->enable_dma = enable_dma;
+ }
+#endif
+ if (device_property_read_u32(dev, "autosuspend_delay", &platform_info->autosuspend_delay))
+ platform_info->autosuspend_delay = 0;
+
+ /* dma_tx failure is silently tolerated; dma_rx failure is only logged */
+ if(device_property_read_u32(dev, "dma_rx", &dma_rx)){
+ dev_err(&pdev->dev,"dma_rx get failed");
+ }
+ platform_info->dma_rx_param = (void*)dma_rx;
+ device_property_read_u32(dev, "dma_tx", &dma_tx);
+ platform_info->dma_tx_param = (void*)dma_tx;
+
+ dev_dbg(&pdev->dev,"get dma_rx=0x%x dma_tx=0x%x enable_dma=0x%x",dma_rx,dma_tx,platform_info->enable_dma);
+
+}
+
+#if defined(CONFIG_DEBUG_FS)
+/* Build one debugfs_reg32 entry: name is the stringified register,
+ * offset is the matching SPI_<reg>_OFFSET constant. */
+#define dump_register(reg) \
+{ \
+ .name = __stringify(reg), \
+ .offset = SPI_ ##reg##_OFFSET, \
+}
+
+
+/* Register set exported through debugfs_create_regset32() below. */
+static const struct debugfs_reg32 spi_regs[] = {
+ dump_register(VER_REG),
+ dump_register(COM_CTRL),
+ dump_register(FMT_CTRL),
+ dump_register(DR),
+ dump_register(FIFO_CTRL),
+ dump_register(FIFO_SR),
+ dump_register(INTR_EN),
+ dump_register(INTR_SR),
+ dump_register(TIMING),
+};
+
+//#define Strcat(x, fmt, ...) sprintf(x, "%s" #fmt, x, __VA_ARGS__)
+
+/*
+ * debugfs_spi_init - expose controller registers and counters in debugfs.
+ *
+ * Creates /sys/kernel/debug/spi<N>_zx29/ containing a raw register dump
+ * ("spi_regs") and the polling/DMA transfer counters.  debugfs is
+ * best-effort: failures are logged and otherwise ignored.
+ */
+static void debugfs_spi_init(struct zx29_spi *zx29spi)
+{
+	struct dentry *root;
+	char tmp[32];
+
+	if (!zx29spi)
+		return;
+
+	/*
+	 * debugfs_create_dir() returns an ERR_PTR on failure, never NULL,
+	 * so check with IS_ERR() (the old !root test was dead code).
+	 * snprintf replaces sprintf to bound the name write.
+	 */
+	snprintf(tmp, sizeof(tmp), "spi%d_zx29", zx29spi->pdev->id);
+	root = debugfs_create_dir(tmp, NULL);
+	if (IS_ERR(root)) {
+		dev_err(&zx29spi->pdev->dev, "debugfs_create_dir %s err\n", tmp);
+		return;
+	}
+
+	/* register dump */
+	zx29spi->spi_regset.regs = (struct debugfs_reg32 *)spi_regs;
+	zx29spi->spi_regset.nregs = sizeof(spi_regs)/sizeof(struct debugfs_reg32);
+	zx29spi->spi_regset.base = zx29spi->virtbase;
+	debugfs_create_regset32("spi_regs", S_IRUGO, root, &zx29spi->spi_regset);
+
+	/* transfer-path counters */
+	debugfs_create_u32("poll_cnt", S_IRUGO, root, &zx29spi->spi_poll_cnt);
+	debugfs_create_u32("dma_cnt", S_IRUGO, root, &zx29spi->spi_dma_cnt);
+
+	zx29spi->spi_root = (void *)root;
+}
+
+#endif
+
+
+/*
+ * zx29_spi_init_irq - fetch and install the controller interrupt.
+ *
+ * Requests the first platform IRQ with zx29_spi_irq as handler and
+ * leaves it disabled; the transfer path enables it on demand.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int zx29_spi_init_irq(struct platform_device *pdev, struct zx29_spi *zx29spi)
+{
+	int irq, ret;
+
+	if (!zx29spi || !pdev)
+		return -ENOENT;
+
+	/*
+	 * platform_get_irq() returns an int: the IRQ number, or a negative
+	 * errno on failure.  The old "irq == NULL" test compared an int to
+	 * NULL and would never catch an error.
+	 */
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "Cannot get IORESOURCE_IRQ\n");
+		return irq;
+	}
+	zx29spi->irq = irq;
+	dev_dbg(&pdev->dev, "used interrupt num is %d\n", zx29spi->irq);
+	ret = devm_request_irq(&pdev->dev, zx29spi->irq, zx29_spi_irq,
+			IRQF_TRIGGER_HIGH | IRQF_NO_THREAD | IRQF_ONESHOT, dev_name(&pdev->dev), zx29spi);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "probe - cannot get IRQ (%d)\n", ret);
+		return ret;
+	}
+	/* keep the line masked until a transfer actually needs it */
+	disable_irq_nosync(zx29spi->irq);
+	return ret;
+}
+
+/*
+ * zx29_spi_probe_of_master - probe path for a master-mode SSP controller.
+ *
+ * Allocates the spi_master and driver state, reads platform properties,
+ * maps registers, initializes clocks/irq/optional DMA, then registers
+ * the master with the SPI core and hands the device to runtime PM.
+ * Returns 0 on success or a negative errno on most failures.
+ */
+static int zx29_spi_probe_of_master(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct zx29_spi_controller *platform_info=NULL ;
+ struct spi_master *master;
+ struct zx29_spi *zx29spi = NULL; /*Data for this driver */
+ struct resource *regs = NULL;
+ struct resource *gpio = NULL;
+ struct resource *irq = NULL;
+ struct device_node *np = pdev->dev.of_node;
+ int status = 0, i,ret;
+ u32 regval = 0;
+
+ /* NOTE(review): returning 0 here reports success on allocation
+ * failure; should arguably be -ENOMEM — confirm against callers. */
+ platform_info = devm_kzalloc(&pdev->dev, sizeof(struct zx29_spi_controller), GFP_KERNEL);
+ if(platform_info == NULL)
+ return 0;
+ /* NOTE(review): the trailing commas chain these defaults and the
+ * spi_alloc_master() call into one comma-operator expression. */
+ platform_info->bus_id = 0,
+ platform_info->num_chipselect = 1,
+ platform_info->enable_dma = 1,
+ platform_info->autosuspend_delay=0,
+
+ /* Allocate master with space for data */
+ master = spi_alloc_master(dev, sizeof(struct zx29_spi));
+ if (master == NULL) {
+ dev_err(&pdev->dev, "probe - cannot alloc SPI master\n");
+ status = -ENOMEM;
+ goto err_no_master;
+ }
+
+ zx29spi = spi_master_get_devdata(master);
+ memset(zx29spi,0,sizeof(struct zx29_spi));
+ /* controller index comes from the DT "spi" alias */
+ pdev->id = of_alias_get_id(np, "spi");
+ if(pdev->id < 0){
+ printk("zx29_ssp of_alias_get_id fail ret:%d\n", pdev->id);
+ status = -ENOMEM;
+ goto err_no_master;
+ }
+ snprintf(zx29spi->name, sizeof(zx29spi->name), "zx29-spi%d", pdev->id);
+ zx29_spi_get_platformInfo(pdev,platform_info);
+ //mutex_init(&zx29spi->spi_lock);
+ g_zx29_spi[pdev->id] = zx29spi;
+ zx29spi->master = master;
+ zx29spi->master_info = platform_info;
+ zx29spi->pdev = pdev;
+ zx29spi->vendor = &vendor_arm;
+ zx29spi->mode = ZX29_SSP_MASTER_TYPE;
+ zx29spi->zx29_flush_rxfifo = zx29_flush_rxfifo;
+ sema_init(&zx29spi->sema_dma, 0);
+ /* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck start */
+ init_waitqueue_head(&zx29spi->wait);
+ zx29spi->trans_done = false;
+ /* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck end */
+ dev_set_drvdata(&pdev->dev, zx29spi);
+ device_init_wakeup(&pdev->dev, true);
+ /*
+ * Bus Number Which has been Assigned to this SSP controller
+ * on this board
+ */
+ master->bus_num = platform_info->bus_id;
+ master->num_chipselect = platform_info->num_chipselect;
+ master->cleanup = zx29_cleanup;
+ master->setup = zx29_setup;
+ master->prepare_transfer_hardware = zx29_prepare_transfer_hardware;
+ master->transfer_one_message = zx29_transfer_one_message;
+ master->unprepare_transfer_hardware = zx29_unprepare_transfer_hardware;
+ //master->rt = platform_info->rt;
+
+ /*
+ * Supports mode 0-3, loopback, and active low CS..
+ */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_NO_CS|SPI_LOOP;
+
+ dev_dbg(&pdev->dev, "BUSNO: %d\n", master->bus_num);
+
+ zx29_spi_init_pinctrl(pdev);
+
+ /* registers */
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (regs == NULL){
+ dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
+ status = -ENOENT;
+ goto err_no_registers;
+ }
+ zx29spi->phybase = regs->start;
+ /* NOTE(review): devm_platform_ioremap_resource() returns an ERR_PTR
+ * on failure, never NULL, so this check cannot fire — verify. */
+ zx29spi->virtbase = devm_platform_ioremap_resource(pdev, 0);
+ if (zx29spi->virtbase == NULL) {
+ status = -ENOMEM;
+ goto err_no_ioremap;
+ }
+ dev_dbg( &pdev->dev," mapped registers from 0x%08x to 0x%p\n",
+ regs->start, zx29spi->virtbase);
+
+#if defined(CONFIG_DEBUG_FS)
+ debugfs_spi_init(zx29spi);
+#endif
+
+ /*clock init*/
+ status = zx29_spi_clock_init(zx29spi);
+ if(status)
+ goto err_no_clk;
+
+ /* Initialize transfer pump */
+ //tasklet_init(&zx29spi->pump_transfers, pump_transfers,(unsigned long)zx29spi);
+
+ /* Disable SPI */
+ regval = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & (~SPI_COM_CTRL_MASK_SSPE);
+ writel(regval, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
+
+ load_spi_default_config(zx29spi);
+ writel(0, (SPI_TIMING_OFFSET + zx29spi->virtbase));
+ /* the instance at 0x1410000 is shared with the camera block; force
+ * non-camera mode there — TODO confirm this address mapping */
+ if(!strcmp(pdev->name,"1410000.ssp")) {
+ regval = readl((SPI_FMT_CTRL_OFFSET+zx29spi->virtbase))&(~(0x1<<12));
+ writel(regval, (SPI_FMT_CTRL_OFFSET+zx29spi->virtbase));
+ dev_info(&pdev->dev,"%s set non-camera mode regval:0x%x \n",pdev->name,regval);
+ }
+ status = zx29_spi_init_irq(pdev,zx29spi);
+ if(status != 0) {
+ dev_err(&pdev->dev, "zx29_spi_init_irq err!!! \n");
+ goto err_no_irq;
+ }
+ /* Get DMA channels; DMA is optional — fall back to PIO on failure */
+ if (platform_info->enable_dma) {
+ status = zx29_dma_probe(zx29spi);
+ if (status != 0) {
+ platform_info->enable_dma = 0;
+ sc_debug_info_record(MODULE_ID_CAP_SPI, "%s dma probe failed \n",pdev->name);
+ }
+ }
+
+#if SPI_PSM_CONTROL
+ wake_lock_init(&zx29spi->psm_lock, WAKE_LOCK_SUSPEND, zx29spi->name);
+#endif
+ master->dev.of_node = pdev->dev.of_node;
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can not register to master err %d\n", ret);
+ goto err_spi_register;
+ }
+ dev_info(&pdev->dev," probe succeeded\n");
+
+ /* let runtime pm put suspend */
+ if (platform_info->autosuspend_delay > 0) {
+ dev_info(&pdev->dev, "will use autosuspend for runtime pm, delay %dms\n", platform_info->autosuspend_delay);
+ pm_runtime_set_autosuspend_delay(dev, platform_info->autosuspend_delay);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_put_autosuspend(dev);
+ } else {
+ pm_runtime_put(dev);
+ }
+
+ return 0;
+
+ err_spi_register:
+#if SPI_PSM_CONTROL
+ wake_lock_destroy(&zx29spi->psm_lock);
+#endif
+ if (platform_info->enable_dma)
+ zx29_dma_remove(zx29spi);
+
+ err_no_irq:
+ clk_disable(zx29spi->spi_clk);
+// err_no_clk_en:
+ //clk_unprepare(pl022->clk);
+ //err_clk_prep:
+ clk_put(zx29spi->spi_clk);
+ err_no_clk:
+// iounmap(zx29spi->virtbase);
+ err_gpios:
+ /* add */
+ err_no_ioremap:
+ err_no_registers:
+ spi_master_put(master);
+ err_no_master:
+ err_no_pdata:
+ return status;
+}
+
+/*
+ * zx29_spi_probe_of_slave - probe path for a slave-mode SSP controller.
+ *
+ * Mirrors zx29_spi_probe_of_master() but marks the controller as a
+ * slave, installs the slave transfer/rd-start/rd-stop hooks, forces the
+ * 156 MHz slave clock setup and enables the SSP block immediately
+ * (a slave must be ready before the remote master clocks data).
+ */
+static int zx29_spi_probe_of_slave(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct zx29_spi_controller *platform_info=NULL ;
+ struct spi_master *master;
+ struct zx29_spi *zx29spi = NULL; /*Data for this driver */
+ struct resource *regs = NULL;
+ struct resource *gpio = NULL;
+ struct resource *irq = NULL;
+ struct device_node *np = pdev->dev.of_node;
+ int status = 0, i,ret;
+ u32 regval = 0;
+
+ /* NOTE(review): returns 0 (success) on allocation failure — verify */
+ platform_info = devm_kzalloc(&pdev->dev, sizeof(struct zx29_spi_controller), GFP_KERNEL);
+ if(platform_info == NULL)
+ return 0;
+ platform_info->bus_id = 0,
+ platform_info->num_chipselect = 1,
+ platform_info->enable_dma = 1,
+ platform_info->autosuspend_delay=0,
+
+ /* Allocate master with space for data */
+ master = spi_alloc_master(dev, sizeof(struct zx29_spi));
+ if (master == NULL) {
+ dev_err(&pdev->dev, "probe - cannot alloc SPI master\n");
+ status = -ENOMEM;
+ goto err_no_master;
+ }
+ master->slave = true;
+ zx29spi = spi_master_get_devdata(master);
+ memset(zx29spi,0,sizeof(struct zx29_spi));
+ pdev->id = of_alias_get_id(np, "spi");
+ /* NOTE(review): unlike the master path, status is NOT set here, so
+ * this error path returns 0 (success) — looks like a missing
+ * "status = -ENOMEM"; confirm intended behavior. */
+ if(pdev->id < 0){
+ printk("zx29_ssp of_alias_get_id fail ret:%d\n", pdev->id);
+ goto err_no_master;
+ }
+ snprintf(zx29spi->name, sizeof(zx29spi->name), "zx29-spi%d", pdev->id);
+
+ zx29_spi_get_platformInfo(pdev,platform_info);
+ //mutex_init(&zx29spi->spi_lock);
+ g_zx29_spi[pdev->id] = zx29spi;
+ zx29spi->master = master;
+ zx29spi->master_info = platform_info;
+ zx29spi->pdev = pdev;
+ zx29spi->vendor = &vendor_arm;
+ zx29spi->mode = ZX29_SSP_SLAVE_TYPE;
+ zx29spi->zx29_flush_rxfifo = zx29_flush_rxfifo;
+ sema_init(&zx29spi->sema_dma, 0);
+ /* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck start */
+ init_waitqueue_head(&zx29spi->wait);
+ zx29spi->trans_done = false;
+ /* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck end */
+ dev_set_drvdata(&pdev->dev, zx29spi);
+ /*
+ * Bus Number Which has been Assigned to this SSP controller
+ * on this board
+ */
+ master->bus_num = platform_info->bus_id;
+ master->num_chipselect = platform_info->num_chipselect;
+ master->cleanup = zx29_cleanup;
+ master->setup = zx29_setup;
+ master->prepare_transfer_hardware = zx29_prepare_transfer_hardware;
+ master->transfer_one_message = zx29_slave_transfer_one_message;
+ master->unprepare_transfer_hardware = zx29_unprepare_transfer_hardware;
+ /* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme start */
+ master->spi_slave_rd_start = zx29_slave_rd_start;
+ master->spi_slave_rd_stop = zx29_slave_rd_stop;
+ /* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme end */
+ //master->rt = platform_info->rt;
+
+ /*
+ * Supports mode 0-3, loopback, and active low CS..
+ */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_NO_CS|SPI_LOOP;
+
+ dev_dbg(&pdev->dev, "BUSNO: %d\n", master->bus_num);
+
+ zx29_spi_init_pinctrl(pdev);
+
+ /* registers */
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (regs == NULL){
+ dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
+ status = -ENOENT;
+ goto err_no_registers;
+ }
+ zx29spi->phybase = regs->start;
+ /* NOTE(review): devm_platform_ioremap_resource() returns an ERR_PTR,
+ * never NULL, so this check cannot fire — verify. */
+ zx29spi->virtbase = devm_platform_ioremap_resource(pdev, 0);
+ if (zx29spi->virtbase == NULL) {
+ status = -ENOMEM;
+ goto err_no_ioremap;
+ }
+ dev_dbg( &pdev->dev," mapped registers from 0x%08x to 0x%p\n",
+ regs->start, zx29spi->virtbase);
+
+#if defined(CONFIG_DEBUG_FS)
+ debugfs_spi_init(zx29spi);
+#endif
+
+ /*clock init*/
+ status = zx29_spi_slave_clock_init(zx29spi);
+ if(status)
+ goto err_no_clk;
+ /* Initialize transfer pump */
+ //tasklet_init(&zx29spi->pump_transfers, pump_transfers,(unsigned long)zx29spi);
+
+ /* Disable SPI */
+ regval = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & (~SPI_COM_CTRL_MASK_SSPE);
+ writel(regval, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
+
+ load_spi_default_config(zx29spi);
+ writel(0, (SPI_TIMING_OFFSET + zx29spi->virtbase));
+
+ if(!strcmp(pdev->name,"1410000.ssp")) {
+ regval = readl((SPI_FMT_CTRL_OFFSET+zx29spi->virtbase))&(~(0x1<<12));
+ writel(regval, (SPI_FMT_CTRL_OFFSET+zx29spi->virtbase));
+ dev_info(&pdev->dev," %s set non-camera mode regval:0x%x \n",pdev->name,regval);
+ }
+
+ /* enable the SSP block and spin until the enable bit reads back.
+ * NOTE(review): unbounded busy-wait — a hung block stalls probe. */
+ writel(readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) | SPI_COM_CTRL_MASK_SSPE, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
+ while(((readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase))>> 4)&0x1) == 0);
+
+ dev_info(&pdev->dev,"ssp enabled \n",regval);
+ /* irq*/
+ status = zx29_spi_init_irq(pdev,zx29spi);
+ if(status != 0) {
+ dev_err(&pdev->dev, "zx29_spi_init_irq err!!! \n");
+ goto err_no_irq;
+ }
+
+ /* Get DMA channels; DMA is optional — fall back to PIO on failure */
+ if (platform_info->enable_dma) {
+ status = zx29_dma_probe(zx29spi);
+ if (status != 0) {
+ platform_info->enable_dma = 0;
+ sc_debug_info_record(MODULE_ID_CAP_SPI, "%s dma probe failed",pdev->name);
+ }
+ }
+
+#if SPI_PSM_CONTROL
+ wake_lock_init(&zx29spi->psm_lock, WAKE_LOCK_SUSPEND, zx29spi->name);
+#endif
+ master->dev.of_node = pdev->dev.of_node;
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can not register to master err %d\n", ret);
+ goto err_spi_register;
+ }
+
+ dev_info(&pdev->dev," probe succeeded\n");
+
+ /* let runtime pm put suspend */
+ if (platform_info->autosuspend_delay > 0) {
+ dev_info(&pdev->dev, "will use autosuspend for runtime pm, delay %dms\n", platform_info->autosuspend_delay);
+ pm_runtime_set_autosuspend_delay(dev, platform_info->autosuspend_delay);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_put_autosuspend(dev);
+ } else {
+ pm_runtime_put(dev);
+ }
+
+ return 0;
+
+ err_spi_register:
+#if SPI_PSM_CONTROL
+ wake_lock_destroy(&zx29spi->psm_lock);
+#endif
+ if (platform_info->enable_dma)
+ zx29_dma_remove(zx29spi);
+
+ err_no_irq:
+ clk_disable(zx29spi->spi_clk);
+// err_no_clk_en:
+ //clk_unprepare(pl022->clk);
+ //err_clk_prep:
+ clk_put(zx29spi->spi_clk);
+ err_no_clk:
+// iounmap(zx29spi->virtbase);
+ err_gpios:
+ /* add */
+ err_no_ioremap:
+ err_no_registers:
+ spi_master_put(master);
+ err_no_master:
+ err_no_pdata:
+ return status;
+}
+
+
+
+/*
+ * zx29_spi_probe - dispatch probing to the master- or slave-mode path
+ * according to the matched OF compatible's mode data.
+ */
+static int zx29_spi_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *match;
+	const struct zx29_ssp_device_of_data *of_data;
+	enum zx29_ssp_device_mode mode;
+
+	match = of_match_device(zx29_spi_of_match, &pdev->dev);
+	if (!match)
+		return -EINVAL;
+
+	of_data = (struct zx29_ssp_device_of_data *)match->data;
+	mode = (enum zx29_ssp_device_mode)of_data->mode;
+
+	dev_info(&pdev->dev,"%s mode \n", (mode==0)?"MASTER":(mode==1)?"SLAVE":"UNKNOWN");
+
+	switch (mode) {
+	case ZX29_SSP_MASTER_TYPE:
+		return zx29_spi_probe_of_master(pdev);
+	case ZX29_SSP_SLAVE_TYPE:
+		return zx29_spi_probe_of_slave(pdev);
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * zx29_spi_remove - tear down one SSP controller instance.
+ *
+ * Unwinds probe in reverse: unregister from the SPI core, reset the
+ * block to defaults, release DMA/irq/clocks/debugfs, drop the master
+ * reference and clear drvdata.  Teardown order matters here; do not
+ * reorder these calls.
+ */
+static int __exit zx29_spi_remove(struct platform_device *pdev)
+{
+ struct zx29_spi *zx29spi = dev_get_drvdata(&pdev->dev);
+ struct resource * gpio = NULL;
+ //struct resource * irq = NULL;
+ int i;
+
+ if (!zx29spi)
+ return 0;
+
+ /*
+ * undo pm_runtime_put() in probe. I assume that we're not
+ * accessing the primecell here.
+ */
+ pm_runtime_get_noresume(&pdev->dev);
+
+ spi_unregister_master(zx29spi->master);
+
+ /* quiesce the hardware before releasing its resources */
+ load_spi_default_config(zx29spi);
+ if (zx29spi->master_info->enable_dma)
+ zx29_dma_remove(zx29spi);
+/*
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if( irq != NULL )
+ {
+ free_irq(irq->start, zx29spi);
+ }
+*/
+ devm_free_irq(&pdev->dev, zx29spi->irq, zx29spi);
+
+ clk_disable(zx29spi->spi_clk);
+ clk_put(zx29spi->spi_clk);
+
+ clk_disable(zx29spi->pclk);
+ clk_put(zx29spi->pclk);
+
+ #if defined(CONFIG_DEBUG_FS)
+ if(zx29spi->spi_root){
+ printk(KERN_INFO "spi:debugfs_remove_recursive \n");
+ debugfs_remove_recursive(zx29spi->spi_root);
+ }
+ #endif
+
+
+ // iounmap(zx29spi->virtbase);
+ //amba_release_regions(adev);
+ //tasklet_disable(&zx29spi->pump_transfers);
+
+ spi_master_put(zx29spi->master);
+ //amba_set_drvdata(adev, NULL);
+ dev_set_drvdata(&pdev->dev, NULL);
+
+#if SPI_PSM_CONTROL
+ wake_lock_destroy(&zx29spi->psm_lock);
+#endif
+
+ return 0;
+}
+
+/* Per-compatible mode data: selects the master or slave probe path. */
+static const struct zx29_ssp_device_of_data zx29_ssp_master_of_data = {
+ .mode = ZX29_SSP_MASTER_TYPE,
+};
+
+static const struct zx29_ssp_device_of_data zx29_ssp_slave_of_data = {
+ .mode = ZX29_SSP_SLAVE_TYPE,
+};
+
+/* OF match table: "zte,zx29_ssp" probes as master,
+ * "zte,zx29_ssp_slave" probes as slave. */
+static const struct of_device_id zx29_spi_of_match[] = {
+ {
+ .compatible = "zte,zx29_ssp",
+ .data = &zx29_ssp_master_of_data,
+ },
+ {
+ .compatible = "zte,zx29_ssp_slave",
+ .data = &zx29_ssp_slave_of_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, zx29_spi_of_match);
+
+#ifdef CONFIG_PM
+/*
+ * zx29_spi_suspend - move the SSP pins into their sleep state.
+ *
+ * Selects the device's generic sleep pinctrl state, then the cached
+ * cs_gpio_sleep state for both master and slave controllers.
+ *
+ * Returns 0 on success, -1 on failure (legacy convention kept for the
+ * existing platform_driver hookup).
+ */
+static int zx29_spi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct zx29_spi *zx29spi;
+
+	if (pdev == NULL)
+		return -1;
+
+	/* The old "pdev && &pdev->dev" guard was always true once pdev was
+	 * non-NULL (&pdev->dev cannot be NULL), so it has been dropped. */
+	pinctrl_pm_select_sleep_state(&pdev->dev);
+
+	zx29spi = (struct zx29_spi *)platform_get_drvdata(pdev);
+	if (zx29spi && (zx29spi->master->slave == true)) {
+		if (pinctrl_select_state(ssp_pins[pdev->id].pctrl, ssp_pins[pdev->id].pcs_gpio_sleep) < 0) {
+			printk("spi%d setting cs_gpio pin ctrl failed\n", pdev->id);
+			return -1;
+		}
+	}
+	if (zx29spi && zx29spi->master->slave == false) {
+		if (pinctrl_select_state(ssp_pins[pdev->id].pctrl, ssp_pins[pdev->id].pcs_gpio_sleep) < 0) {
+			printk("spi%d setting cs_gpio_sleep pin ctrl failed\n", pdev->id);
+			return -1;
+		}
+		printk("spi%d setting cs_gpio_sleep pin ctrl\n", pdev->id);
+	}
+	return 0;
+}
+
+/*
+ * zx29_spi_resume - restore the SSP pins after suspend.
+ *
+ * Reselects the default pinctrl state, then routes CS back to the SSP
+ * function for slaves, or back to GPIO mode (driven high / inactive)
+ * for masters.
+ *
+ * Returns 0 on success, -1 on failure (legacy convention).
+ */
+static int zx29_spi_resume(struct platform_device *pdev)
+{
+	struct zx29_spi *zx29spi;
+
+	if (pdev == NULL)
+		return -1;
+
+	/* The old "pdev && &pdev->dev" guard was always true once pdev was
+	 * non-NULL (&pdev->dev cannot be NULL), so it has been dropped. */
+	pinctrl_pm_select_default_state(&pdev->dev);
+
+	zx29spi = (struct zx29_spi *)platform_get_drvdata(pdev);
+	if (zx29spi && (zx29spi->master->slave == true)) {
+		if (pinctrl_select_state(ssp_pins[pdev->id].pctrl, ssp_pins[pdev->id].pcs_func) < 0) {
+			printk("spi%d setting cs_func pin ctrl failed\n", pdev->id);
+			return -1;
+		}
+	}
+	if (zx29spi && zx29spi->master->slave == false) {
+		if (pinctrl_select_state(ssp_pins[pdev->id].pctrl, ssp_pins[pdev->id].pcs_gpio_active) < 0) {
+			printk("spi%d setting cs_gpio_active pin ctrl failed\n", pdev->id);
+			return -1;
+		}
+		printk("spi%d setting cs_gpio_active pin ctrl\n", pdev->id);
+		gpio_direction_output(ssp_pins[pdev->id].gpio_cs, SPI_GPIO_HIGH);
+	}
+	return 0;
+}
+#endif
+
+/* Platform driver glue: probe dispatches on the OF match data; legacy
+ * suspend/resume callbacks handle only pinctrl state (see above). */
+static struct platform_driver zx29_spi_driver = {
+ .driver = {
+ .name = "zx29_ssp",
+ .of_match_table = of_match_ptr(zx29_spi_of_match),
+ .owner = THIS_MODULE,
+ },
+ .probe = zx29_spi_probe,
+ #ifdef CONFIG_PM
+ .suspend = zx29_spi_suspend,
+ .resume = zx29_spi_resume,
+ #endif
+ .remove = __exit_p(zx29_spi_remove),
+};
+
+/* Module entry point: register the platform driver. */
+static int __init zx29_spi_init(void)
+{
+ return platform_driver_register(&zx29_spi_driver);
+}
+
+/* Module exit point: unregister the platform driver. */
+static void __exit zx29_spi_exit(void)
+{
+ platform_driver_unregister(&zx29_spi_driver);
+}
+
+module_init(zx29_spi_init);
+module_exit(zx29_spi_exit);
+
+MODULE_DESCRIPTION("zx29 spi controller driver");
+MODULE_AUTHOR("zte");
+MODULE_LICENSE("GPL");
+
diff --git a/upstream/linux-5.10/drivers/spi/spidev.c b/upstream/linux-5.10/drivers/spi/spidev.c
new file mode 100755
index 0000000..1522a21
--- /dev/null
+++ b/upstream/linux-5.10/drivers/spi/spidev.c
@@ -0,0 +1,2406 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Simple synchronous userspace interface to SPI devices
+ *
+ * Copyright (C) 2006 SWAPP
+ * Andrea Paterniani <a.paterniani@swapp-eng.it>
+ * Copyright (C) 2007 David Brownell (simplification, cleanup)
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/compat.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/acpi.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spidev.h>
+
+#include <linux/uaccess.h>
+#include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme start*/
+#include <linux/wait.h>
+#include <linux/suspend.h>
+
+#define SPI_SLAVE_FOR_YK
+/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme end */
+/*
+ * This supports access to SPI devices using normal userspace I/O calls.
+ * Note that while traditional UNIX/POSIX I/O semantics are half duplex,
+ * and often mask message boundaries, full SPI support requires full duplex
+ * transfers. There are several kinds of internal message boundaries to
+ * handle chipselect management and other protocol options.
+ *
+ * SPI has a character major number assigned. We allocate minor numbers
+ * dynamically using a bitmask. You must use hotplug tools, such as udev
+ * (or mdev with busybox) to create and destroy the /dev/spidevB.C device
+ * nodes, since there is no fixed association of minor numbers with any
+ * particular SPI bus or device.
+ */
+#define SPIDEV_MAJOR 153 /* assigned */
+#define N_SPI_MINORS 32 /* ... up to 256 */
+
+static DECLARE_BITMAP(minors, N_SPI_MINORS);
+
+
+/* Bit masks for spi_device.mode management. Note that incorrect
+ * settings for some settings can cause *lots* of trouble for other
+ * devices on a shared bus:
+ *
+ * - CS_HIGH ... this device will be active when it shouldn't be
+ * - 3WIRE ... when active, it won't behave as it should
+ * - NO_CS ... there will be no explicit message boundaries; this
+ * is completely incompatible with the shared bus model
+ * - READY ... transfers may proceed when they shouldn't.
+ *
+ * REVISIT should changing those flags be privileged?
+ */
+#define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
+ | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
+ | SPI_NO_CS | SPI_READY | SPI_TX_DUAL \
+ | SPI_TX_QUAD | SPI_TX_OCTAL | SPI_RX_DUAL \
+ | SPI_RX_QUAD | SPI_RX_OCTAL)
+
+/* Per-open-device state for one /dev/spidevB.C node. */
+struct spidev_data {
+ dev_t devt; /* char device number for this node */
+ spinlock_t spi_lock; /* guards ->spi against concurrent unbind */
+ struct spi_device *spi; /* underlying SPI device; NULL after remove */
+ struct list_head device_entry; /* link on the global device_list */
+
+ /* TX/RX buffers are NULL unless this device is open (users > 0) */
+ struct mutex buf_lock; /* serializes users of tx/rx_buffer */
+ unsigned users; /* open count */
+ u8 *tx_buffer;
+ u8 *rx_buffer;
+ u32 speed_hz; /* per-fd transfer speed override */
+ /* when set, read() returns data captured during the last
+ * full-duplex write instead of issuing a new transfer */
+ u8 rd_from_rx_buffer;
+
+//#define SPIDEV_DEBUG
+#ifdef SPIDEV_DEBUG
+ /* debug-only plumbing: GPIO handshake lines, counters and
+ * semaphores used by the internal loopback/stress tests */
+ struct pinctrl *pctrl;
+ struct pinctrl_state *pgpioex;
+ struct pinctrl_state *pint_ex;
+ int gpio_ex;
+ int gpio_int;
+ int irq;
+ int tx_flag;
+ int rx_cnt_in_rx_thread;
+ int rx_cnt_in_tx_thread;
+ struct semaphore wait_req;
+ struct semaphore rec_req;
+ struct semaphore rec_head_msg_req;
+ struct semaphore rec_data_msg_req;
+ spinlock_t tx_flag_lock;
+ int msg_id;
+ bool is_data_check;
+ int rx_data_check_ok_cnt;
+ int rx_data_check_err_cnt;
+#endif
+//#define TEST_SWAP_KERNEL_AND_USER
+#ifdef TEST_SWAP_KERNEL_AND_USER
+ /* debug-only plumbing for the kernel/user swap experiment */
+ struct pinctrl *pctrl;
+ struct pinctrl_state *pgpioex;
+ struct pinctrl_state *pint_ex;
+ struct semaphore sig_req;
+ struct semaphore sem_dma_cfg_done;
+ int gpio_ex;
+ int gpio_int;
+ int irq;
+ int pid;
+ int dma_cfg_done;
+#endif
+};
+
+static LIST_HEAD(device_list);
+static DEFINE_MUTEX(device_list_lock);
+
+static unsigned bufsiz = 4096;
+module_param(bufsiz, uint, S_IRUGO);
+MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * spidev_sync - run one spi_message synchronously.
+ *
+ * Snapshots ->spi under spi_lock (it is cleared when the SPI device is
+ * unbound) and submits the message.  Returns the transferred byte count
+ * on success, -ESHUTDOWN if the device is gone, or the spi_sync() error.
+ */
+static ssize_t
+spidev_sync(struct spidev_data *spidev, struct spi_message *message)
+{
+	struct spi_device *spi;
+	int status;
+
+	spin_lock_irq(&spidev->spi_lock);
+	spi = spidev->spi;
+	spin_unlock_irq(&spidev->spi_lock);
+
+	if (!spi)
+		return -ESHUTDOWN;
+
+	status = spi_sync(spi, message);
+	return status ? status : message->actual_length;
+}
+
+/* Half-duplex write of len bytes from spidev->tx_buffer. */
+static inline ssize_t
+spidev_sync_write(struct spidev_data *spidev, size_t len)
+{
+	struct spi_message msg;
+	struct spi_transfer xfer = {
+		.tx_buf = spidev->tx_buffer,
+		.len = len,
+		.speed_hz = spidev->speed_hz,
+	};
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	return spidev_sync(spidev, &msg);
+}
+
+/* Half-duplex read of len bytes into spidev->rx_buffer. */
+static inline ssize_t
+spidev_sync_read(struct spidev_data *spidev, size_t len)
+{
+	struct spi_message msg;
+	struct spi_transfer xfer = {
+		.rx_buf = spidev->rx_buffer,
+		.len = len,
+		.speed_hz = spidev->speed_hz,
+	};
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	return spidev_sync(spidev, &msg);
+}
+
+
+/* Full-duplex transfer: clock out tx_buffer while capturing rx_buffer. */
+static inline ssize_t
+spidev_sync_write_and_read(struct spidev_data *spidev, size_t len)
+{
+	struct spi_message msg;
+	struct spi_transfer xfer = {
+		.tx_buf = spidev->tx_buffer,
+		.rx_buf = spidev->rx_buffer,
+		.len = len,
+		.speed_hz = spidev->speed_hz,
+	};
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	return spidev_sync(spidev, &msg);
+}
+
+
+
+/*-------------------------------------------------------------------------*/
+/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme start*/
+/* Read-only message with current device setup */
+static ssize_t
+spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
+{
+	struct spidev_data *spidev;
+	ssize_t status;
+	unsigned long missing;
+
+	/* chipselect only toggles at start or end of operation */
+	if (count > bufsiz)
+		return -EMSGSIZE;
+
+	spidev = filp->private_data;
+
+#ifdef SPI_SLAVE_FOR_YK
+	size_t total = 0;
+
+	/*
+	 * Kernel ring-buffer mode: rx_buffer is a circular buffer of size
+	 * bufsiz.  rd_pos is the reader cursor, recv_pos the writer cursor
+	 * (both wrap modulo bufsiz).  Equal cursors mean "empty": block
+	 * until the slave receive path signals recv_done.
+	 */
+	if (spidev->spi->rd_pos == spidev->spi->recv_pos) {
+		status = 0;
+		spidev->spi->is_rd_waiting = true;
+		if (0 != wait_event_freezable(spidev->spi->rd_wait, spidev->spi->recv_done)) {
+			/* interrupted/frozen: stop slave reception and bail out */
+			if (spidev->spi->controller->spi_slave_rd_stop)
+				spidev->spi->controller->spi_slave_rd_stop(spidev->spi);
+			spidev->spi->is_rd_waiting = false;
+			return status;
+		} else {
+			spidev->spi->recv_done = false;
+			spidev->spi->is_rd_waiting = false;
+		}
+	}
+	mutex_lock(&spidev->buf_lock);
+	if (spidev->spi->rd_pos < spidev->spi->recv_pos) {
+		/* contiguous data: one copy */
+		total = spidev->spi->recv_pos - spidev->spi->rd_pos;
+		status = (total > count) ? count : total;
+
+		missing = copy_to_user(buf, spidev->rx_buffer + spidev->spi->rd_pos, status);
+		if (missing == status) {
+			status = -EFAULT;
+		} else {
+			status = status - missing;
+			spidev->spi->rd_pos += status;
+		}
+	} else if (spidev->spi->rd_pos > spidev->spi->recv_pos) {
+		/* data wraps around the end of the ring */
+		total = bufsiz - (spidev->spi->rd_pos - spidev->spi->recv_pos);
+		status = (total > count) ? count : total;
+
+		if ((spidev->spi->rd_pos + status) <= bufsiz) {
+			/* still a single contiguous chunk up to the end */
+			missing = copy_to_user(buf, spidev->rx_buffer + spidev->spi->rd_pos, status);
+			if (missing == status) {
+				status = -EFAULT;
+			} else {
+				status = status - missing;
+				spidev->spi->rd_pos += status;
+				spidev->spi->rd_pos = spidev->spi->rd_pos % bufsiz;
+			}
+		} else {
+			/* two chunks: buffer tail, then the wrapped head */
+			unsigned long first, rest;
+
+			/*
+			 * Bug fix: the old code derived 'rest' from 'status'
+			 * AFTER the first copy_to_user() may have clobbered it
+			 * to -EFAULT (or shortened it), then unconditionally
+			 * ran the second copy with that bogus length and
+			 * advanced rd_pos even on failure.  Compute both
+			 * segment lengths up front and fail atomically: on any
+			 * fault nothing is consumed, so userspace can retry.
+			 */
+			first = bufsiz - spidev->spi->rd_pos;
+			rest = status - first;
+
+			if (copy_to_user(buf, spidev->rx_buffer + spidev->spi->rd_pos, first) ||
+			    copy_to_user(buf + first, spidev->rx_buffer, rest)) {
+				status = -EFAULT;
+			} else {
+				spidev->spi->rd_pos = rest;
+			}
+		}
+	}
+#else
+	mutex_lock(&spidev->buf_lock);
+	if (spidev->rd_from_rx_buffer)
+		status = count;	/* serve data captured by the last write */
+	else
+		status = spidev_sync_read(spidev, count);
+
+	if (status > 0) {
+		missing = copy_to_user(buf, spidev->rx_buffer, status);
+		if (missing == status)
+			status = -EFAULT;
+		else
+			status = status - missing;
+	}
+#endif
+	mutex_unlock(&spidev->buf_lock);
+
+	return status;
+}
+/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme end*/
+
+/* Write-only message with current device setup */
+/*
+ * spidev_write - write() entry: push userspace bytes out over SPI.
+ *
+ * Bounces the user data through tx_buffer under buf_lock.  When
+ * rd_from_rx_buffer is set the transfer is full-duplex so a later
+ * read() can return the simultaneously captured RX data.
+ */
+static ssize_t
+spidev_write(struct file *filp, const char __user *buf,
+		size_t count, loff_t *f_pos)
+{
+	struct spidev_data *spidev;
+	ssize_t status = -EFAULT;
+
+	/* chipselect only toggles at start or end of operation */
+	if (count > bufsiz)
+		return -EMSGSIZE;
+
+	spidev = filp->private_data;
+
+	mutex_lock(&spidev->buf_lock);
+	if (copy_from_user(spidev->tx_buffer, buf, count) == 0) {
+		status = spidev->rd_from_rx_buffer
+			? spidev_sync_write_and_read(spidev, count)
+			: spidev_sync_write(spidev, count);
+	}
+	mutex_unlock(&spidev->buf_lock);
+
+	return status;
+}
+
/*
 * spidev_message() - execute an array of user-described SPI transfers.
 *
 * Builds one spi_message from @n_xfers spi_ioc_transfer descriptors,
 * bouncing tx/rx data through spidev->tx_buffer / spidev->rx_buffer
 * (each direction capped at bufsiz bytes total), runs it synchronously
 * via spidev_sync(), then copies any rx data back to userspace.
 *
 * Returns the total transfer length on success; -ENOMEM if the kernel
 * transfer array cannot be allocated; -EMSGSIZE when lengths overflow
 * the bounce buffers or INT_MAX; -EFAULT on user-copy failure; or the
 * error from spidev_sync().
 *
 * Caller must hold spidev->buf_lock (the bounce buffers are shared).
 */
static int spidev_message(struct spidev_data *spidev,
		struct spi_ioc_transfer *u_xfers, unsigned n_xfers)
{
	struct spi_message msg;
	struct spi_transfer *k_xfers;
	struct spi_transfer *k_tmp;
	struct spi_ioc_transfer *u_tmp;
	unsigned n, total, tx_total, rx_total;
	u8 *tx_buf, *rx_buf;
	int status = -EFAULT;

	spi_message_init(&msg);
	k_xfers = kcalloc(n_xfers, sizeof(*k_tmp), GFP_KERNEL);
	if (k_xfers == NULL)
		return -ENOMEM;

	/* Construct spi_message, copying any tx data to bounce buffer.
	 * We walk the array of user-provided transfers, using each one
	 * to initialize a kernel version of the same transfer.
	 */
	tx_buf = spidev->tx_buffer;
	rx_buf = spidev->rx_buffer;
	total = 0;
	tx_total = 0;
	rx_total = 0;
	for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
			n;
			n--, k_tmp++, u_tmp++) {
		/* Ensure that also following allocations from rx_buf/tx_buf will meet
		 * DMA alignment requirements.
		 */
		unsigned int len_aligned = ALIGN(u_tmp->len, ARCH_KMALLOC_MINALIGN);

		k_tmp->len = u_tmp->len;

		total += k_tmp->len;
		/* Since the function returns the total length of transfers
		 * on success, restrict the total to positive int values to
		 * avoid the return value looking like an error.  Also check
		 * each transfer length to avoid arithmetic overflow.
		 */
		if (total > INT_MAX || k_tmp->len > INT_MAX) {
			status = -EMSGSIZE;
			goto done;
		}

		if (u_tmp->rx_buf) {
			/* this transfer needs space in RX bounce buffer */
			rx_total += len_aligned;
			if (rx_total > bufsiz) {
				status = -EMSGSIZE;
				goto done;
			}
			k_tmp->rx_buf = rx_buf;
			rx_buf += len_aligned;
		}
		if (u_tmp->tx_buf) {
			/* this transfer needs space in TX bounce buffer */
			tx_total += len_aligned;
			if (tx_total > bufsiz) {
				status = -EMSGSIZE;
				goto done;
			}
			k_tmp->tx_buf = tx_buf;
			if (copy_from_user(tx_buf, (const u8 __user *)
						(uintptr_t) u_tmp->tx_buf,
					u_tmp->len))
				goto done;
			tx_buf += len_aligned;
		}

		k_tmp->cs_change = !!u_tmp->cs_change;
		k_tmp->tx_nbits = u_tmp->tx_nbits;
		k_tmp->rx_nbits = u_tmp->rx_nbits;
		k_tmp->bits_per_word = u_tmp->bits_per_word;
		k_tmp->delay.value = u_tmp->delay_usecs;
		k_tmp->delay.unit = SPI_DELAY_UNIT_USECS;
		k_tmp->speed_hz = u_tmp->speed_hz;
		k_tmp->word_delay.value = u_tmp->word_delay_usecs;
		k_tmp->word_delay.unit = SPI_DELAY_UNIT_USECS;
		/* fall back to the device's configured speed when unset */
		if (!k_tmp->speed_hz)
			k_tmp->speed_hz = spidev->speed_hz;
#ifdef VERBOSE
		dev_dbg(&spidev->spi->dev,
			"  xfer len %u %s%s%s%dbits %u usec %u usec %uHz\n",
			k_tmp->len,
			k_tmp->rx_buf ? "rx " : "",
			k_tmp->tx_buf ? "tx " : "",
			k_tmp->cs_change ? "cs " : "",
			k_tmp->bits_per_word ? : spidev->spi->bits_per_word,
			k_tmp->delay.value,
			k_tmp->word_delay.value,
			k_tmp->speed_hz ? : spidev->spi->max_speed_hz);
#endif
		spi_message_add_tail(k_tmp, &msg);
	}

	status = spidev_sync(spidev, &msg);
	if (status < 0)
		goto done;

	/* copy any rx data out of bounce buffer */
	for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
			n;
			n--, k_tmp++, u_tmp++) {
		if (u_tmp->rx_buf) {
			if (copy_to_user((u8 __user *)
					(uintptr_t) u_tmp->rx_buf, k_tmp->rx_buf,
					u_tmp->len)) {
				status = -EFAULT;
				goto done;
			}
		}
	}
	status = total;

done:
	kfree(k_xfers);
	return status;
}
+
+static struct spi_ioc_transfer *
+spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
+ unsigned *n_ioc)
+{
+ u32 tmp;
+
+ /* Check type, command number and direction */
+ if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC
+ || _IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
+ || _IOC_DIR(cmd) != _IOC_WRITE)
+ return ERR_PTR(-ENOTTY);
+
+ tmp = _IOC_SIZE(cmd);
+ if ((tmp % sizeof(struct spi_ioc_transfer)) != 0)
+ return ERR_PTR(-EINVAL);
+ *n_ioc = tmp / sizeof(struct spi_ioc_transfer);
+ if (*n_ioc == 0)
+ return NULL;
+
+ /* copy into scratch area */
+ return memdup_user(u_ioc, tmp);
+}
+
+static long
+spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int retval = 0;
+ struct spidev_data *spidev;
+ struct spi_device *spi;
+ u32 tmp;
+ unsigned n_ioc;
+ struct spi_ioc_transfer *ioc;
+
+ /* Check type and command number */
+ if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC)
+ return -ENOTTY;
+
+ /* guard against device removal before, or while,
+ * we issue this ioctl.
+ */
+ spidev = filp->private_data;
+ spin_lock_irq(&spidev->spi_lock);
+ spi = spi_dev_get(spidev->spi);
+ spin_unlock_irq(&spidev->spi_lock);
+
+ if (spi == NULL)
+ return -ESHUTDOWN;
+
+ /* use the buffer lock here for triple duty:
+ * - prevent I/O (from us) so calling spi_setup() is safe;
+ * - prevent concurrent SPI_IOC_WR_* from morphing
+ * data fields while SPI_IOC_RD_* reads them;
+ * - SPI_IOC_MESSAGE needs the buffer locked "normally".
+ */
+ mutex_lock(&spidev->buf_lock);
+
+ switch (cmd) {
+ /* read requests */
+ case SPI_IOC_RD_MODE:
+ retval = put_user(spi->mode & SPI_MODE_MASK,
+ (__u8 __user *)arg);
+ break;
+ case SPI_IOC_RD_MODE32:
+ retval = put_user(spi->mode & SPI_MODE_MASK,
+ (__u32 __user *)arg);
+ break;
+ case SPI_IOC_RD_LSB_FIRST:
+ retval = put_user((spi->mode & SPI_LSB_FIRST) ? 1 : 0,
+ (__u8 __user *)arg);
+ break;
+ case SPI_IOC_RD_BITS_PER_WORD:
+ retval = put_user(spi->bits_per_word, (__u8 __user *)arg);
+ break;
+ case SPI_IOC_RD_MAX_SPEED_HZ:
+ retval = put_user(spidev->speed_hz, (__u32 __user *)arg);
+ break;
+ case SPI_IOC_RD_RD_DATA_FROM:
+ retval = put_user(spidev->rd_from_rx_buffer, (__u32 __user *)arg);
+ break;
+
+#ifdef TEST_SWAP_KERNEL_AND_USER
+ case SPI_IOC_RD_INT_ST:
+ tmp = gpio_get_value(spidev->gpio_int);
+ retval = put_user(tmp, (__u32 __user *)arg);
+ break;
+#endif
+ /* write requests */
+ case SPI_IOC_WR_MODE:
+ case SPI_IOC_WR_MODE32:
+ if (cmd == SPI_IOC_WR_MODE)
+ retval = get_user(tmp, (u8 __user *)arg);
+ else
+ retval = get_user(tmp, (u32 __user *)arg);
+ if (retval == 0) {
+ struct spi_controller *ctlr = spi->controller;
+ u32 save = spi->mode;
+
+ if (tmp & ~SPI_MODE_MASK) {
+ retval = -EINVAL;
+ break;
+ }
+
+ if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods &&
+ ctlr->cs_gpiods[spi->chip_select])
+ tmp |= SPI_CS_HIGH;
+
+ tmp |= spi->mode & ~SPI_MODE_MASK;
+ spi->mode = (u16)tmp;
+ retval = spi_setup(spi);
+ if (retval < 0)
+ spi->mode = save;
+ else
+ dev_dbg(&spi->dev, "spi mode %x\n", tmp);
+ }
+ break;
+ case SPI_IOC_WR_LSB_FIRST:
+ retval = get_user(tmp, (__u8 __user *)arg);
+ if (retval == 0) {
+ u32 save = spi->mode;
+
+ if (tmp)
+ spi->mode |= SPI_LSB_FIRST;
+ else
+ spi->mode &= ~SPI_LSB_FIRST;
+ retval = spi_setup(spi);
+ if (retval < 0)
+ spi->mode = save;
+ else
+ dev_dbg(&spi->dev, "%csb first\n",
+ tmp ? 'l' : 'm');
+ }
+ break;
+ case SPI_IOC_WR_BITS_PER_WORD:
+ retval = get_user(tmp, (__u8 __user *)arg);
+ if (retval == 0) {
+ u8 save = spi->bits_per_word;
+
+ spi->bits_per_word = tmp;
+ retval = spi_setup(spi);
+ if (retval < 0)
+ spi->bits_per_word = save;
+ else
+ dev_dbg(&spi->dev, "%d bits per word\n", tmp);
+ }
+ break;
+ case SPI_IOC_WR_MAX_SPEED_HZ:
+ retval = get_user(tmp, (__u32 __user *)arg);
+ if (retval == 0) {
+ u32 save = spi->max_speed_hz;
+
+ spi->max_speed_hz = tmp;
+ retval = spi_setup(spi);
+ if (retval == 0) {
+ spidev->speed_hz = tmp;
+ dev_dbg(&spi->dev, "%d Hz (max)\n",
+ spidev->speed_hz);
+ } else {
+ spi->max_speed_hz = save;
+ }
+ }
+ break;
+ case SPI_IOC_WR_RD_DATA_FROM:
+ retval = get_user(tmp, (__u8 __user *)arg);
+ if (retval == 0) {
+ spidev->rd_from_rx_buffer = tmp;
+ dev_dbg(&spi->dev, "RD DATA FROM %s \n",
+ spidev->rd_from_rx_buffer ? "RX_BUFFER":"DEVICE");
+ }
+ break;
+/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme start */
+#ifdef SPI_SLAVE_FOR_YK
+ case SPI_IOC_RD_BLOCK_RELEASE:
+ if(spidev->spi->is_rd_waiting == true) {
+ wake_up(&spidev->spi->rd_wait);
+ spidev->spi->recv_done = 1;
+ }
+ break;
+#endif
+/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme end */
+#ifdef TEST_SWAP_KERNEL_AND_USER
+ case SPI_IOC_WR_SIG_PID:
+ retval = get_user(tmp, (__u32 __user *)arg);
+ if (retval == 0) {
+ spidev->pid = tmp;
+ dev_dbg(&spi->dev, "SET SIG PID %d \n",
+ spidev->pid);
+ }else{
+ printk("%s %d %d \r\n",__FUNCTION__,__LINE__,retval);
+ }
+
+ break;
+#endif
+ default:
+ /* segmented and/or full-duplex I/O request */
+ /* Check message and copy into scratch area */
+ ioc = spidev_get_ioc_message(cmd,
+ (struct spi_ioc_transfer __user *)arg, &n_ioc);
+ if (IS_ERR(ioc)) {
+ retval = PTR_ERR(ioc);
+ break;
+ }
+ if (!ioc)
+ break; /* n_ioc is also 0 */
+
+ /* translate to spi_message, execute */
+ retval = spidev_message(spidev, ioc, n_ioc);
+ kfree(ioc);
+ break;
+ }
+
+ mutex_unlock(&spidev->buf_lock);
+ spi_dev_put(spi);
+ return retval;
+}
+
+#ifdef CONFIG_COMPAT
/*
 * Compat (32-bit userspace on 64-bit kernel) path for SPI_IOC_MESSAGE:
 * same flow as the default case of spidev_ioctl(), with compat_ptr()
 * fix-up applied to each transfer's tx/rx buffer pointers before the
 * message is executed.  Returns the transfer total or a negative errno.
 */
static long
spidev_compat_ioc_message(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	struct spi_ioc_transfer __user *u_ioc;
	int retval = 0;
	struct spidev_data *spidev;
	struct spi_device *spi;
	unsigned n_ioc, n;
	struct spi_ioc_transfer *ioc;

	u_ioc = (struct spi_ioc_transfer __user *) compat_ptr(arg);

	/* guard against device removal before, or while,
	 * we issue this ioctl.
	 */
	spidev = filp->private_data;
	spin_lock_irq(&spidev->spi_lock);
	spi = spi_dev_get(spidev->spi);
	spin_unlock_irq(&spidev->spi_lock);

	if (spi == NULL)
		return -ESHUTDOWN;

	/* SPI_IOC_MESSAGE needs the buffer locked "normally" */
	mutex_lock(&spidev->buf_lock);

	/* Check message and copy into scratch area */
	ioc = spidev_get_ioc_message(cmd, u_ioc, &n_ioc);
	if (IS_ERR(ioc)) {
		retval = PTR_ERR(ioc);
		goto done;
	}
	if (!ioc)
		goto done;	/* n_ioc is also 0 */

	/* Convert buffer pointers */
	for (n = 0; n < n_ioc; n++) {
		ioc[n].rx_buf = (uintptr_t) compat_ptr(ioc[n].rx_buf);
		ioc[n].tx_buf = (uintptr_t) compat_ptr(ioc[n].tx_buf);
	}

	/* translate to spi_message, execute */
	retval = spidev_message(spidev, ioc, n_ioc);
	kfree(ioc);

done:
	mutex_unlock(&spidev->buf_lock);
	spi_dev_put(spi);
	return retval;
}
+
+static long
+spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ if (_IOC_TYPE(cmd) == SPI_IOC_MAGIC
+ && _IOC_NR(cmd) == _IOC_NR(SPI_IOC_MESSAGE(0))
+ && _IOC_DIR(cmd) == _IOC_WRITE)
+ return spidev_compat_ioc_message(filp, cmd, arg);
+
+ return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#else
+#define spidev_compat_ioctl NULL
+#endif /* CONFIG_COMPAT */
+
+static int spidev_open(struct inode *inode, struct file *filp)
+{
+ struct spidev_data *spidev;
+ int status = -ENXIO;
+ struct spi_device *spi;
+
+ mutex_lock(&device_list_lock);
+
+ list_for_each_entry(spidev, &device_list, device_entry) {
+ if (spidev->devt == inode->i_rdev) {
+ status = 0;
+ break;
+ }
+ }
+
+ if (status) {
+ pr_debug("spidev: nothing for minor %d\n", iminor(inode));
+ goto err_find_dev;
+ }
+
+ if (!spidev->tx_buffer) {
+ spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+ if (!spidev->tx_buffer) {
+ dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+ status = -ENOMEM;
+ goto err_find_dev;
+ }
+ }
+
+ if (!spidev->rx_buffer) {
+ spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+ if (!spidev->rx_buffer) {
+ dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+ status = -ENOMEM;
+ goto err_alloc_rx_buf;
+ }
+ }
+
+ /* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme start */
+ #ifdef SPI_SLAVE_FOR_YK
+ if(spidev->rx_buffer) {
+ spidev->spi->rx_buf = spidev->rx_buffer;
+ if(spidev->spi->controller->spi_slave_rd_start)
+ spidev->spi->controller->spi_slave_rd_start(spidev->spi);
+ }
+ #endif
+ /* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme end */
+ spidev->users++;
+ filp->private_data = spidev;
+ stream_open(inode, filp);
+
+ mutex_unlock(&device_list_lock);
+
+
+ spin_lock_irq(&spidev->spi_lock);
+ spi = spi_dev_get(spidev->spi);
+ spin_unlock_irq(&spidev->spi_lock);
+ if(spi && spi->master->slave)
+ pm_stay_awake(&spi->dev);
+
+ return 0;
+
+err_alloc_rx_buf:
+ kfree(spidev->tx_buffer);
+ spidev->tx_buffer = NULL;
+err_find_dev:
+ mutex_unlock(&device_list_lock);
+ return status;
+}
+
+static int spidev_release(struct inode *inode, struct file *filp)
+{
+ struct spidev_data *spidev;
+ int dofree;
+ struct spi_device *spi;
+
+ mutex_lock(&device_list_lock);
+ spidev = filp->private_data;
+ filp->private_data = NULL;
+
+ spin_lock_irq(&spidev->spi_lock);
+ /* ... after we unbound from the underlying device? */
+ dofree = (spidev->spi == NULL);
+ spin_unlock_irq(&spidev->spi_lock);
+
+ /* last close? */
+ spidev->users--;
+ if (!spidev->users) {
+
+ spin_lock_irq(&spidev->spi_lock);
+ spi = spi_dev_get(spidev->spi);
+ spin_unlock_irq(&spidev->spi_lock);
+
+ /* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme start */
+ #ifdef SPI_SLAVE_FOR_YK
+ if(spidev->rx_buffer) {
+ if(spi->controller->spi_slave_rd_stop)
+ spi->controller->spi_slave_rd_stop(spi);
+ }
+ #endif
+ /* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme end */
+ if(spi && spi->master->slave)
+ pm_relax(&spi->dev);
+ kfree(spidev->tx_buffer);
+ spidev->tx_buffer = NULL;
+
+ kfree(spidev->rx_buffer);
+ spidev->rx_buffer = NULL;
+
+ if (dofree)
+ kfree(spidev);
+ else
+ spidev->speed_hz = spidev->spi->max_speed_hz;
+ }
+#ifdef CONFIG_SPI_SLAVE
+ if (!dofree)
+ spi_slave_abort(spidev->spi);
+#endif
+ mutex_unlock(&device_list_lock);
+
+ return 0;
+}
+
/* Character-device entry points exposed through /dev/spidevB.C. */
static const struct file_operations spidev_fops = {
	.owner =	THIS_MODULE,
	/* REVISIT switch to aio primitives, so that userspace
	 * gets more complete API coverage.  It'll simplify things
	 * too, except for the locking.
	 */
	.write =	spidev_write,
	.read =		spidev_read,
	.unlocked_ioctl = spidev_ioctl,
	.compat_ioctl = spidev_compat_ioctl,
	.open =		spidev_open,
	.release =	spidev_release,
	.llseek =	no_llseek,	/* stream device: seeking is meaningless */
};
+
+/*-------------------------------------------------------------------------*/
+
+/* The main reason to have this class is to make mdev/udev create the
+ * /dev/spidevB.C character device nodes exposing our userspace API.
+ * It also simplifies memory management.
+ */
+
/* Device class used by mdev/udev to create the /dev/spidevB.C nodes. */
static struct class *spidev_class;
+
+#ifdef CONFIG_OF
/* Device-tree match table; "zte,spidev" is the vendor addition in this tree. */
static const struct of_device_id spidev_dt_ids[] = {
	{ .compatible = "rohm,dh2228fv" },
	{ .compatible = "lineartechnology,ltc2488" },
	{ .compatible = "ge,achc" },
	{ .compatible = "semtech,sx1301" },
	{ .compatible = "lwn,bk4" },
	{ .compatible = "dh,dhcom-board" },
	{ .compatible = "menlo,m53cpld" },
	{ .compatible = "zte,spidev" },
	{},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
+#endif
+
+#ifdef CONFIG_ACPI
+
/* Dummy SPI devices not to be used in production systems */
#define SPIDEV_ACPI_DUMMY	1

static const struct acpi_device_id spidev_acpi_ids[] = {
	/*
	 * The ACPI SPT000* devices are only meant for development and
	 * testing.  Systems used in production should have a proper ACPI
	 * description of the connected peripheral and they should also use
	 * a proper driver instead of poking directly to the SPI bus.
	 */
	{ "SPT0001", SPIDEV_ACPI_DUMMY },
	{ "SPT0002", SPIDEV_ACPI_DUMMY },
	{ "SPT0003", SPIDEV_ACPI_DUMMY },
	{},
};
MODULE_DEVICE_TABLE(acpi, spidev_acpi_ids);

/* Warn when the driver is bound via one of the dummy ACPI test IDs. */
static void spidev_probe_acpi(struct spi_device *spi)
{
	const struct acpi_device_id *id;

	if (!has_acpi_companion(&spi->dev))
		return;

	id = acpi_match_device(spidev_acpi_ids, &spi->dev);
	if (WARN_ON(!id))
		return;

	if (id->driver_data == SPIDEV_ACPI_DUMMY)
		dev_warn(&spi->dev, "do not use this driver in production systems!\n");
}
+#else
+static inline void spidev_probe_acpi(struct spi_device *spi) {}
+#endif
+
+#ifdef SPIDEV_DEBUG
/*
 * Declare a 0644 sysfs attribute named <_name>, wired to the
 * <_name>_show / <_name>_store handlers defined elsewhere in this file.
 */
#define SPIDEV_ATTR(_name) \
static struct kobj_attribute _name##_attr = { \
	.attr = { \
		.name = __stringify(_name), \
		.mode = 0644, \
	}, \
	.show = _name##_show, \
	.store = _name##_store, \
}
+
+
/*
 * Hex-dump a buffer to the kernel log, eight bytes per line.  A
 * trailing fragment shorter than eight bytes is not printed (matches
 * the original behaviour).  NULL buffers are ignored.
 */
static void print_buf_data(void * buf,int count)
{
	const unsigned char *bytes;
	int off;

	if (!buf)
		return;

	bytes = buf;
	for (off = 0; off + 8 <= count; off += 8)
		printk("%02x %02x %02x %02x %02x %02x %02x %02x \r\n",
			bytes[off], bytes[off+1], bytes[off+2], bytes[off+3],
			bytes[off+4], bytes[off+5], bytes[off+6], bytes[off+7]);
}
+
+
/* Fixed-size handshake header exchanged before each debug data transfer. */
struct spi_dev_hand_msg{
	unsigned short head;	/* must be MSG_HEAD (0xa5a5) */
	unsigned int len;	/* payload length of the following data phase */
	unsigned short tail;	/* must be MSG_TAIL (0x7e7e) */
};

#define MSG_HEAD 0xa5a5
#define MSG_TAIL 0x7e7e
+
/* Controller hooks provided by the platform SPI driver (defined elsewhere). */
extern void slave_mode_set(struct spi_device *spi,unsigned int param);
extern void set_spi_timing(struct spi_device *spi,unsigned int param);
extern int get_spi_rx_fifo(struct spi_device *spi,unsigned char *buf);
/*
 * Drain the controller RX FIFO into @buf.
 * Returns 0 when either argument is NULL (treated as "nothing read"),
 * otherwise the result of get_spi_rx_fifo().
 */
static int spidev_get_rxfifo(struct spi_device *spi,unsigned char *buf)
{
	int ret = 0;

	if(!spi || !buf)
		return ret;
	return get_spi_rx_fifo(spi,buf);

}
+
+
/*
 * Stamp byte 1 of a debug packet with the additive checksum of the
 * payload bytes buf[2] .. buf[len-3] (head/checksum/tail bytes are
 * excluded).  Returns 0 on success, -1 on NULL buffer or len < 4.
 */
static int data_to_packet(void * buf,int len)
{
	unsigned char *bytes = (unsigned char *)buf;
	unsigned char sum = 0;
	int idx;

	if (!bytes || len < 4) {
		printk("%s param err! \n",__FUNCTION__);
		return -1;
	}

	for (idx = 2; idx < len - 2; idx++)
		sum += bytes[idx];
	bytes[1] = sum;

	return 0;
}
+
/*
 * Validate a debug packet: byte 0 must be 0xa5, the final byte 0x7e,
 * and byte 1 must equal the additive checksum of bytes 2 .. len-3
 * (the inverse of data_to_packet()).
 * Returns 0 when the packet is intact, -1 otherwise.
 */
static int packet_check(void *buf,int len)
{
	unsigned char *bytes = (unsigned char *)buf;
	unsigned char expect = 0;
	int idx;

	if (!bytes || len < 4) {
		printk("%s param err! \n",__FUNCTION__);
		return -1;
	}

	if (bytes[0] != 0xa5 || bytes[len-1] != 0x7e)
		return -1;

	for (idx = 2; idx < len - 2; idx++)
		expect += bytes[idx];

	return (expect == bytes[1]) ? 0 : -1;
}
+
+
+static int spi_dev_pin_init_test(struct spi_device *spi)
+{
+ struct spidev_data *spidev = spi_get_drvdata(spi);
+ enum of_gpio_flags flags;
+ static int spi_dev_pin_init_flag = 0;
+ int status = 0;
+
+ if(spi_dev_pin_init_flag < 2){
+ spidev->pctrl = devm_pinctrl_get(&spi->dev);
+ if(!spidev->pctrl) {
+ dev_info(&spi->dev,"get dev pctrl failed!\n",status);
+ return status;
+ }
+
+ spidev->pint_ex = pinctrl_lookup_state(spidev->pctrl, "int_ex");
+ if (IS_ERR(spidev->pint_ex)) {
+ dev_err(&spi->dev, "TEST: missing pint_ex \n");
+ return status;
+ }
+ if (pinctrl_select_state(spidev->pctrl, spidev->pint_ex) < 0) {
+ dev_err(&spi->dev, "TEST: slect pint_ex \n");
+ return status;
+ }
+
+ spidev->pgpioex = pinctrl_lookup_state(spidev->pctrl, "ex_gpio");
+ if (IS_ERR(spidev->pgpioex)) {
+ dev_err(&spi->dev, "TEST: missing ex_gpio \n");
+ return status;
+ }
+
+ spidev->gpio_ex = of_get_gpio_flags(spi->dev.of_node, 0, &flags);
+ if (!gpio_is_valid(spidev->gpio_ex)) {
+ dev_err(&spi->dev,"gpio_ex no found,spidev->gpio_ex=%d \n",spidev->gpio_ex);
+ return status;
+ }
+ dev_info(&spi->dev,"gpio_ex found,spidev->gpio_ex=%d \n",spidev->gpio_ex);
+
+ status = gpio_request(spidev->gpio_ex, "gpio_ex");
+ if (status) {
+ pr_info("spidev->gpio_ex request error.\n");
+ }else {
+ gpio_direction_output(spidev->gpio_ex, 1);
+ dev_info(&spi->dev, "spidev->gpio_ex success \n");
+ }
+
+ spidev->gpio_int = of_get_gpio_flags(spi->dev.of_node, 1, &flags);
+ if (!gpio_is_valid(spidev->gpio_int)) {
+ dev_err(&spi->dev,"gpio_int no found,spidev->gpio_int=%d \n",spidev->gpio_int);
+ return status;
+ }
+ dev_info(&spi->dev,"gpio_int found,spidev->gpio_int=%d \n",spidev->gpio_int);
+
+ spi_dev_pin_init_flag += 1;
+ }
+ return status;
+
+}
+
/*
 * GPIO handshake IRQ, master side ("spi1.0").  gpio_ex is our output
 * line, gpio_int the peer's line.  Decoding, as implemented below
 * (NOTE(review): confirm against the board handshake protocol spec):
 *  - ours high, peer low : slave request -> post rec_req; but while we
 *    are mid-transmit (tx_flag set) it is treated as our own request
 *    echo -> post wait_req
 *  - both low            : slave ack -> post wait_req
 *  - any other state     : logged and ignored
 */
static irqreturn_t spidev_master_hand_shake_irq(int irqno, void *dev_id)
{
	static int count;	/* only used by the commented-out trace below */
	int gpio_in_status = 0,gpio_out_status = 0;

	struct spidev_data *spidev = dev_id;

	gpio_out_status = gpio_get_value(spidev->gpio_ex);
	gpio_in_status = gpio_get_value(spidev->gpio_int);

	//pr_info("hand_shake_irq get = %d %d %d\n", ++count,gpio_out_status,gpio_in_status);

	if(gpio_out_status && !gpio_in_status) {
		if(spidev->tx_flag == 0) {
			up(&spidev->rec_req); /*receive slave reqeuet*/
		}else {
			pr_info("mmm \r\n");
			up(&spidev->wait_req); /*first receive master req*/
		}
	}else if(!gpio_out_status && !gpio_in_status) {
		up(&spidev->wait_req); /*receive slave ack*/
	}else {
		pr_info("recive invalid request\n");
	}
	return IRQ_HANDLED;
}
+
/*
 * GPIO handshake IRQ, slave side.  Mirror image of the master handler:
 *  - ours high, peer low : master request -> post rec_req (or, while we
 *    are transmitting, post wait_req)
 *  - both low            : master ack -> post wait_req
 *  - any other state     : logged and ignored
 * NOTE(review): edge semantics inferred from the code only — confirm
 * against the board handshake protocol spec.
 */
static irqreturn_t spidev_slave_hand_shake_irq(int irqno, void *dev_id)
{
	static int count;	/* only used by the commented-out trace below */
	int gpio_in_status = 0,gpio_out_status = 0;

	struct spidev_data *spidev = dev_id;

	gpio_out_status = gpio_get_value(spidev->gpio_ex);
	gpio_in_status = gpio_get_value(spidev->gpio_int);

	//pr_info("hand_shake_irq get = %d %d %d\n", ++count,gpio_out_status,gpio_in_status);

	if(gpio_out_status && !gpio_in_status)
	{
		if(spidev->tx_flag == 0) {
			up(&spidev->rec_req); /*first receive master req*/
		}else {
			pr_info("sss \n");
			up(&spidev->wait_req);
		}
		/*. then set gpio_out low as ack. */
	}else if(!gpio_out_status && !gpio_in_status) {
		up(&spidev->wait_req); /*receive master ack*/
	}else {
		pr_info("recive invalid request\n");
	}
	return IRQ_HANDLED;
}
+
+
+
/*
 * Map and request the handshake IRQ (device-tree interrupt index 0).
 * The device named "spi1.0" gets the master handler; any other device
 * gets the slave handler.  A static flag caps initialisation at two
 * devices.  Returns 0 on success or a negative errno.
 */
static int spi_dev_irq_init_test(struct spi_device *spi)
{
	struct spidev_data *spidev = spi_get_drvdata(spi);
	static int spi_dev_irq_init_flag = 0;
	int irq = 0,ret = 0;

	if(spi_dev_irq_init_flag < 2) {
		if(!spi || !spidev) {
			ret = -ENOENT;
			return ret;
		}
		irq = irq_of_parse_and_map(spi->dev.of_node, 0);
		if (irq <= 0) {
			dev_err(&spi->dev, "ERROR: invalid interrupt number, irq = %d\n",irq);
			return -EBUSY;
		}
		spidev->irq = irq;
		dev_info(&spi->dev, "used interrupt num is %d\n", spidev->irq);
		/* managed IRQ: devm_* frees it automatically on unbind */
		if(strcmp(dev_name(&spi->dev),"spi1.0")==0) {
			ret = devm_request_irq(&spi->dev, spidev->irq, spidev_master_hand_shake_irq,
					0, dev_name(&spi->dev), spidev);
		}else {
			ret = devm_request_irq(&spi->dev, spidev->irq, spidev_slave_hand_shake_irq,
					0, dev_name(&spi->dev), spidev);
		}
		if (ret < 0) {
			dev_err(&spi->dev, "probe - cannot get IRQ (%d)\n", ret);
			return ret;
		}
		spi_dev_irq_init_flag += 1;
	}
	return ret;

}
+
/*
 * Full-duplex exchange of one handshake header: transmit a header
 * advertising @len while simultaneously receiving the peer's header
 * into @recv_msg.  Returns the spidev_sync() result.
 * NOTE(review): the return type is size_t but spidev_sync() can return
 * a negative error, which would be lost in the unsigned type — confirm.
 */
static size_t spi_dev_send_handle_pack_test(struct spidev_data *spidev,int len,struct spi_dev_hand_msg *recv_msg)
{
	struct spi_dev_hand_msg send_msg={0};

	send_msg.head = MSG_HEAD;
	send_msg.len = len;
	send_msg.tail = MSG_TAIL;

	struct spi_transfer t = {
			.tx_buf = &send_msg,
			.rx_buf = recv_msg,
			.len = sizeof(struct spi_dev_hand_msg),
			.speed_hz = spidev->speed_hz,
		};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	return spidev_sync(spidev, &m);


}
+
+
+
/*
 * Receive the peer's handshake header into @recv_msg while clocking
 * out an all-zero header.  @len is currently unused: the transfer
 * length is always sizeof(struct spi_dev_hand_msg).
 * Returns the spidev_sync() result (same size_t caveat as the send
 * variant above).
 */
static size_t spi_dev_recv_handle_pack_test(struct spidev_data *spidev,int len,struct spi_dev_hand_msg *recv_msg)
{
	struct spi_dev_hand_msg send_msg={0};

	struct spi_transfer t = {
			.tx_buf = &send_msg,
			.rx_buf = recv_msg,
			.len = sizeof(struct spi_dev_hand_msg),
			.speed_hz = spidev->speed_hz,
		};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	return spidev_sync(spidev, &m);


}
+
+
+static void wait_spi_bus_idle_status_test(struct spidev_data *spidev)
+{
+ int count = 0;
+
+ do {
+ spin_lock_irq(&spidev->tx_flag_lock);
+ if( gpio_get_value(spidev->gpio_ex) && gpio_get_value(spidev->gpio_int))
+ break;
+ else {
+ spin_unlock(&spidev->tx_flag_lock);
+ usleep_range(50,100);
+ count++;
+ if(count%20 == 0) {
+ printk("bus busy %d us cnts.outst(%d),intst(%d).\n",count*50,
+ gpio_get_value(spidev->gpio_ex),gpio_get_value(spidev->gpio_int));
+ }
+
+ }
+ }while(1);
+ spidev->tx_flag = 1;
+ spin_unlock_irq(&spidev->tx_flag_lock);
+
+}
+
+static size_t spi_dev_send_one_pack_test(struct spi_device *spi,size_t len) {
+
+ struct spidev_data *spidev = spi_get_drvdata(spi);
+ struct spi_dev_hand_msg recv_msg={0};
+ size_t status;
+ int ret;
+ int rx_data_flag = 0;
+ if(len>4096)
+ printk("len(%d) err: \r\n",len);
+ wait_spi_bus_idle_status_test(spidev);
+ gpio_set_value(spidev->gpio_ex,0);
+ ret = down_timeout(&spidev->wait_req, msecs_to_jiffies(50)); /*first ack m= 0,s=0*/
+ if (ret < 0) {
+ printk("first ack timeout\n");
+ }
+ spi_dev_send_handle_pack_test(spidev,len,&recv_msg); /*send head msg*/
+ if(recv_msg.head == MSG_HEAD && recv_msg.tail == MSG_TAIL) {
+ len = (recv_msg.len >= len) ? recv_msg.len : len;
+ spidev->rx_cnt_in_tx_thread++;
+ rx_data_flag = 1;
+ if(len>4096)
+ printk("len(%d) err: \r\n",len);
+ }
+ ret = down_timeout(&spidev->wait_req, msecs_to_jiffies(100));
+ if (ret < 0) {
+ printk("second ack timeout\n");
+ }
+ //down(&spidev->wait_req); /*second ack m= 0,s=0*/
+ status = spidev_sync_write_and_read(spidev,len);
+ if(rx_data_flag && spidev->is_data_check) {
+ ret = packet_check(spidev->rx_buffer,recv_msg.len);
+ if(ret) {
+ spidev->rx_data_check_err_cnt++;
+ //dev_info(&spi->dev,"%s packet check err \r\n",__FUNCTION__);
+ }else {
+ spidev->rx_data_check_ok_cnt++;
+ //dev_info(&spi->dev,"%s packet check success \r\n",__FUNCTION__);
+ }
+ }
+ spidev->tx_flag = 0;
+ gpio_set_value(spidev->gpio_ex,1);
+ return status;
+}
+
+
+static size_t spi_dev_slave_send_one_pack_test(struct spi_device *spi,size_t len) {
+
+ struct spidev_data *spidev = spi_get_drvdata(spi);
+ struct spi_dev_hand_msg recv_msg={0};
+ size_t status;
+ int ret;
+ int rx_data_flag = 0;
+ if(len>4096)
+ printk("len(%d) err: \r\n",len);
+ wait_spi_bus_idle_status_test(spidev);
+ up(&spidev->rec_head_msg_req);/*response master tx/rx dma set */
+ //printk("%s %d \r\n",__FUNCTION__,__LINE__);
+ spi_dev_send_handle_pack_test(spidev,len,&recv_msg); /*send head msg*/
+ if(recv_msg.head == MSG_HEAD && recv_msg.tail == MSG_TAIL) {
+ if(len != recv_msg.len) {
+ //printk("%s len=%d rec_len=%d\n",__FUNCTION__,len,recv_msg.len);
+ len = (recv_msg.len >= len) ? recv_msg.len : len;
+
+ }
+ spidev->rx_cnt_in_tx_thread++;
+ rx_data_flag = 1;
+ if(len>4096)
+ printk("len(%d) err: \r\n",len);
+ }
+
+ //down(&spidev->wait_req);
+ ret = down_timeout(&spidev->wait_req, msecs_to_jiffies(100));
+ if (ret < 0) {
+ printk("wait req timeout\n");
+ }
+ up(&spidev->rec_data_msg_req);/*response master tx/rx dma set */
+ //printk("%s %d \r\n",__FUNCTION__,__LINE__);
+ status = spidev_sync_write_and_read(spidev,len);
+ if(rx_data_flag && spidev->is_data_check) {
+ ret = packet_check(spidev->rx_buffer,recv_msg.len);
+ if(ret) {
+ spidev->rx_data_check_err_cnt++;
+ //dev_info(&spi->dev,"%s packet check err \r\n",__FUNCTION__);
+ }else {
+ spidev->rx_data_check_ok_cnt++;
+ //dev_info(&spi->dev,"%s packet check success \r\n",__FUNCTION__);
+ }
+ }
+ spidev->tx_flag = 0;
+ gpio_set_value(spidev->gpio_ex,1);
+ return status;
+}
+
/*
 * Kernel thread: each time the slave read path posts rec_head_msg_req,
 * pull gpio_ex low to acknowledge the master's header phase.  Loops
 * forever; the trailing return is unreachable.
 */
static int spi_dev_slave_read_hand_msg_process_test(void *arg)
{
	struct spi_device *spi = (struct spi_device *)arg;
	struct spidev_data *spidev = spi_get_drvdata(spi);

	while(1) {
		down(&spidev->rec_head_msg_req);
		//printk("%s %d \r\n",__FUNCTION__,__LINE__);
		gpio_set_value(spidev->gpio_ex,0);
	}
	return 0;
}
+
+
+static int spi_dev_slave_read_data_process_test(void *arg)
+{
+ struct spi_device *spi = (struct spi_device *)arg;
+ struct spidev_data *spidev = spi_get_drvdata(spi);
+ struct spi_dev_hand_msg *recv_msg=(struct spi_dev_hand_msg *)spidev->rx_buffer;
+
+ while(1) {
+ down(&spidev->rec_data_msg_req);
+ //printk("%s %d \r\n",__FUNCTION__,__LINE__);
+ gpio_set_value(spidev->gpio_ex,1);
+ usleep_range(50,100);
+ gpio_set_value(spidev->gpio_ex,0);
+ }
+ return 0;
+}
+
+static int spi_dev_master_read_thread_test(void *arg)
+{
+ struct spi_device *spi = (struct spi_device *)arg;
+ struct spidev_data *spidev = spi_get_drvdata(spi);
+ pid_t kid;
+ struct pid *pid;
+ struct task_struct * tsk;
+ struct spi_dev_hand_msg recv_msg;
+ int ret;
+ if(!spidev){
+ dev_info(&spi->dev,"spi_dev return \r\n");
+ return 0;
+ }
+ while(1) {
+
+ down(&spidev->rec_req); /*first receive slave req*/
+ spi_dev_recv_handle_pack_test(spidev, sizeof(struct spi_dev_hand_msg), &recv_msg);
+ gpio_set_value(spidev->gpio_ex,0);
+ ret = down_timeout(&spidev->wait_req, msecs_to_jiffies(100));
+ if (ret < 0) {
+ printk("%s wait req timeout\n",__FUNCTION__);
+ }
+ if(recv_msg.head == MSG_HEAD && recv_msg.tail == MSG_TAIL) {
+ int len = recv_msg.len;
+ spidev_sync_write_and_read(spidev, len); /*set dma and recv data msg*/
+ spidev->rx_cnt_in_rx_thread++;
+ if(spidev->is_data_check) {
+ ret = packet_check(spidev->rx_buffer,len);
+ if(ret) {
+ spidev->rx_data_check_err_cnt++;
+ //dev_info(&spi->dev,"%s packet check err \r\n",__FUNCTION__);
+ }else {
+ spidev->rx_data_check_ok_cnt++;
+ //dev_info(&spi->dev,"%s packet check success \r\n",__FUNCTION__);
+ }
+ }
+ gpio_set_value(spidev->gpio_ex,1);
+ //print_buf_data(spidev->rx_buffer, len);
+ }else {
+ printk("%s data invalid\n",__FUNCTION__);
+ gpio_set_value(spidev->gpio_ex,1);
+ }
+ }
+ return 0;
+}
+
+
+
+
/*
 * Kernel thread, slave side: wait for the master's request (rec_req),
 * hand the header-phase ack to the helper thread (rec_head_msg_req),
 * receive and validate the handshake header, then hand off the
 * data-phase ack (rec_data_msg_req) and run the data transfer,
 * optionally checksumming the payload.  Loops forever; the trailing
 * return is unreachable.
 */
static int spi_dev_slave_read_thread_test(void *arg)
{
	struct spi_device *spi = (struct spi_device *)arg;
	struct spidev_data *spidev = spi_get_drvdata(spi);
	int ret;
	struct spi_dev_hand_msg recv_msg;

	if(!spidev){
		dev_info(&spi->dev,"spi_dev return \r\n");
		return 0;
	}
	while(1) {

		down(&spidev->rec_req); /*first receive master req*/
		//printk("%s %d \r\n",__FUNCTION__,__LINE__);
		up(&spidev->rec_head_msg_req);/*response master tx/rx dma set */
		//printk("%s %d \r\n",__FUNCTION__,__LINE__);
		//spidev_sync_write_and_read(spidev,sizeof(struct spi_dev_hand_msg)); /*set dma and recv head msg*/
		spi_dev_recv_handle_pack_test(spidev, sizeof(struct spi_dev_hand_msg), &recv_msg);
		//recv_msg=(struct spi_dev_hand_msg *)spidev->rx_buffer;
		if(recv_msg.head == MSG_HEAD && recv_msg.tail == MSG_TAIL) {
			int len = recv_msg.len;
			up(&spidev->rec_data_msg_req); /*response master tx/rx dma set */
			//printk("%s %d %d \r\n",__FUNCTION__,__LINE__,len);
			spidev_sync_write_and_read(spidev, len); /*set dma and recv data msg*/
			if(spidev->is_data_check) {
				ret = packet_check(spidev->rx_buffer,len);
				if(ret) {
					spidev->rx_data_check_err_cnt++;
					//dev_info(&spi->dev,"%s packet check err \r\n",__FUNCTION__);
				}else {
					spidev->rx_data_check_ok_cnt++;
					//dev_info(&spi->dev,"%s packet check success \r\n",__FUNCTION__);
				}
			}
			gpio_set_value(spidev->gpio_ex,1);
			spidev->rx_cnt_in_rx_thread++;
			//print_buf_data(spidev->rx_buffer, len);
		}else {
			/* still release the data-ack thread so it does not stall */
			up(&spidev->rec_data_msg_req);
			printk("%s data invalid\n",__FUNCTION__);
			gpio_set_value(spidev->gpio_ex,1);
		}
	}
	return 0;
}
+
+
+static int spidev_debug_test_init(struct spi_device *spi)
+{
+ int ret = 0;
+ struct spidev_data *spidev = spi_get_drvdata(spi);
+
+ ret =spi_dev_pin_init_test(spi);
+ if(ret) {
+ dev_info(&spi->dev, "spi_dev_pin_init_test,ret=%d \n",ret);
+ return ret;
+ }
+ spin_lock_init(&spidev->tx_flag_lock);
+ sema_init(&spidev->wait_req, 0);
+ sema_init(&spidev->rec_req, 0);
+ sema_init(&spidev->rec_head_msg_req, 0);
+ sema_init(&spidev->rec_data_msg_req, 0);
+ spidev->tx_flag = 0;
+ spidev->rx_cnt_in_rx_thread = 0;
+ spidev->rx_cnt_in_tx_thread = 0;
+ spidev->is_data_check = false;
+ if (!spidev->tx_buffer) {
+ spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+ if (!spidev->tx_buffer) {
+ dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+ return ret;
+ }
+ }
+
+ if (!spidev->rx_buffer) {
+ spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+ if (!spidev->rx_buffer) {
+ dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+ kfree(spidev->tx_buffer);
+ spidev->tx_buffer = NULL;
+ return ret;
+ }
+ }
+
+ if(strcmp(dev_name(&spi->dev),"spi1.0")==0) {
+ kernel_thread(spi_dev_master_read_thread_test,spi, 0); /* fork the main thread */
+ }else {
+ kernel_thread(spi_dev_slave_read_thread_test,spi, 0); /* fork the main thread */
+ kernel_thread(spi_dev_slave_read_hand_msg_process_test,spi, 0);
+ kernel_thread(spi_dev_slave_read_data_process_test,spi, 0);
+ }
+
+ ret =spi_dev_irq_init_test(spi);
+ if(ret) {
+ dev_info(&spi->dev, "spi_dev_irq_init_test,ret=%d \n",ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+
+/*
+ * sysfs "show" hook for the spidevinfo debug attribute.
+ * Nothing is reported yet; always returns 0 (an empty read).
+ */
+static ssize_t spidevinfo_show(struct kobject *kobj, struct kobj_attribute *attr,
+				char *buf)
+{
+	/* Cleanup: dropped the never-used locals of the original
+	 * (cmd_str, param1..3, rwaddr, rwsize, ret, i, dev), which only
+	 * produced -Wunused warnings. */
+	return 0;
+}
+extern void get_random_bytes(void * buf, size_t len);
+/*
+ * sysfs "store" handler for the spidevinfo debug attribute: a command
+ * dispatcher for SPI bring-up / stress tests.
+ *
+ * Input format: "<cmd> <param1> <param2> <param3>" (params parsed as hex).
+ * Commands: spi_write, spi_read, write_then_read, write_and_read,
+ * fifo_flush, timing-set, loop-en, loop-dis, speed_set, mode_set,
+ * slave_mode_set, send_msg_rand_len, send_msg_fixed_len, data_check_ctrl,
+ * send_msg_with_check, gpio_out_val, test_ktime_get.
+ *
+ * Always consumes the whole write (returns n), even on internal errors.
+ */
+static ssize_t spidevinfo_store(struct kobject *kobj, struct kobj_attribute *attr,
+			const char *buf, size_t n)
+{
+	ssize_t ret = 0;
+	struct device *dev = container_of(kobj, struct device, kobj);
+	/* kobj is embedded in the spi_device's struct device (its first
+	 * member), so this cast is equivalent to to_spi_device(dev). */
+	struct spi_device *spi = (struct spi_device *)dev;
+	struct spidev_data *spidev = spi_get_drvdata(spi);
+	unsigned char cmd_str[0x20] = {0};
+	u32 param1 = 0, param2 = 0, param3 = 0;
+	int i;
+	int times, debug;
+	size_t count = 0;
+
+	dev_info(&spi->dev, "spidev->speed_hz:%d \n", spi->max_speed_hz);
+
+	/* BUGFIX: %31s expects a char *; the original passed &cmd_str, which
+	 * is a char (*)[32].  Also dropped several never-used locals. */
+	sscanf(buf, "%31s %x %x %x", cmd_str, &param1, &param2, &param3);
+	dev_info(dev, "cmd_str:%s,param1:%x,param2:%x,param3:%x\n",cmd_str,param1,param2,param3);
+
+	dev_info(&spi->dev, "mode %d, %s%s%s%s%u bits/w, %u Hz max --\n",
+			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
+			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
+			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
+			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
+			(spi->mode & SPI_LOOP) ? "loopback, " : "",
+			spi->bits_per_word, spi->max_speed_hz);
+
+	count = param1;
+	/* --- spi_write <len>: fill the tx buffer with a ramp and send --- */
+	if (strcmp(cmd_str,"spi_write") == 0) {
+		count = param1;
+		if (!spidev->tx_buffer) {
+			spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+			if (!spidev->tx_buffer) {
+				dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+				return n;
+			}
+		}
+		/* BUGFIX: print the pointer with %p, not %x. */
+		dev_info(dev, "spidev->tx_buffer=%p\n",spidev->tx_buffer);
+		for (i = 0; i < count; i++) {
+			spidev->tx_buffer[i] = i;
+		}
+		print_buf_data(spidev->tx_buffer,count);
+		ret = spidev_sync_write(spidev, count);
+		if (ret == count) {
+			dev_info(dev, "send len success(len:%d) \n",ret);
+		}
+		kfree(spidev->tx_buffer);
+		spidev->tx_buffer = NULL;
+		dev_info(dev, "spi write end: \n");
+	}
+
+	/* --- spi_read <len>: read <len> bytes and dump them --- */
+	if (strcmp(cmd_str,"spi_read") == 0) {
+		count = param1;
+		if (!spidev->rx_buffer) {
+			spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+			if (!spidev->rx_buffer) {
+				dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+				return n;
+			}
+		}
+		memset(spidev->rx_buffer,0x0,bufsiz);
+		ret = spidev_sync_read(spidev, count);
+
+		if (ret == count) {
+			dev_info(dev, "read len success(len:%d) \n",ret);
+			print_buf_data(spidev->rx_buffer,count);
+		}
+		kfree(spidev->rx_buffer);
+		spidev->rx_buffer = NULL;
+		dev_info(dev, "spi read end: \n");
+	}
+
+	/* --- write_then_read <len>: half-duplex combined transfer --- */
+	if (strcmp(cmd_str,"write_then_read") == 0) {
+		count = param1;
+
+		if (!spidev->tx_buffer) {
+			spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+			if (!spidev->tx_buffer) {
+				dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+				return n;
+			}
+		}
+
+		if (!spidev->rx_buffer) {
+			spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+			if (!spidev->rx_buffer) {
+				dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+				kfree(spidev->tx_buffer);
+				spidev->tx_buffer = NULL;
+				return n;
+			}
+		}
+		for (i = 0; i < count; i++) {
+			spidev->tx_buffer[i] = i;
+		}
+		ret = spi_write_then_read(spi, spidev->tx_buffer, count, spidev->rx_buffer, count);
+
+		if (ret == 0) {
+			dev_info(dev, "spi write data(%d bytes) \n",count);
+			print_buf_data(spidev->tx_buffer,count);
+			dev_info(dev, "spi read data(%d bytes) \n",count);
+			print_buf_data(spidev->rx_buffer,count);
+		}
+
+		kfree(spidev->tx_buffer);
+		spidev->tx_buffer = NULL;
+		kfree(spidev->rx_buffer);
+		spidev->rx_buffer = NULL;
+		dev_info(dev, "write_then_read.\n");
+	}
+
+	/* --- write_and_read <len>: full-duplex transfer; buffers are kept
+	 * allocated on purpose so the command can be re-run quickly --- */
+	if (strcmp(cmd_str,"write_and_read") == 0) {
+		count = param1;
+		if (!spidev->tx_buffer) {
+			spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+			if (!spidev->tx_buffer) {
+				dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+				return n;
+			}
+		}
+
+		if (!spidev->rx_buffer) {
+			spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+			if (!spidev->rx_buffer) {
+				dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+				kfree(spidev->tx_buffer);
+				spidev->tx_buffer = NULL;
+				return n;
+			}
+		}
+		for (i = 0; i < count; i++) {
+			spidev->tx_buffer[i] = i;
+		}
+		memset(spidev->rx_buffer,0x0,bufsiz);
+		ret = spidev_sync_write_and_read(spidev, count);
+
+		if (ret == count) {
+			dev_info(dev, "spi write data(%d bytes) \n",ret);
+			print_buf_data(spidev->tx_buffer,count);
+			dev_info(dev, "spi read data(%d bytes) \n",ret);
+			print_buf_data(spidev->rx_buffer,count);
+			dev_info(dev, "write_and_read.\n");
+		}
+		dev_info(dev, "write_and_read.\n");
+	}
+
+	/* --- fifo_flush: drain and dump the slave rx FIFO --- */
+	if (strcmp(cmd_str,"fifo_flush") == 0) {
+		unsigned char buff[64] = {0};
+
+		ret = spidev_get_rxfifo(spi,buff);
+		dev_info(dev, "get rx_fifo_len(%d bytes) \n",ret);
+		print_buf_data(buff,ret);
+	}
+
+	/* --- timing-set <val>: tune controller timing --- */
+	if (strcmp(cmd_str,"timing-set") == 0) {
+		dev_info(dev, "timing param(%d) \n",param1);
+		set_spi_timing(spi,param1);
+	}
+
+	/* --- loop-en / loop-dis: toggle controller loopback mode --- */
+	if (strcmp(cmd_str,"loop-en") == 0) {
+		spi->mode |= SPI_LOOP;
+		spi_setup(spi);
+	}
+	if (strcmp(cmd_str,"loop-dis") == 0) {
+		spi->mode &= ~SPI_LOOP;
+		spi_setup(spi);
+	}
+	/* --- speed_set <hz> --- */
+	if (strcmp(cmd_str,"speed_set") == 0) {
+		spi->max_speed_hz = param1;
+		spi_setup(spi);
+	}
+
+	/* --- mode_set <0..3>: program CPOL/CPHA --- */
+	if (strcmp(cmd_str,"mode_set") == 0) {
+		if (param1 != 0 && param1 != 1 && param1 != 2 && param1 != 3) {
+			dev_info(dev, "param err(%d) \n",param1);
+		} else {
+			/* BUGFIX: the original fell through and programmed the
+			 * mode even when the parameter was rejected. */
+			dev_info(dev, "set spi mode(%d) \n",param1);
+			spi->mode &= (~0x3);
+			spi->mode |= param1;
+			ret = spi_setup(spi);
+			dev_info(dev, "set spi mode(0x%x),ret=%d \n",spi->mode,ret);
+		}
+	}
+
+	/* --- slave_mode_set <0..3> --- */
+	if (strcmp(cmd_str,"slave_mode_set") == 0) {
+		if (param1 != 0 && param1 != 1 && param1 != 2 && param1 != 3) {
+			dev_info(dev, "param err(%d) \n",param1);
+		} else {
+			/* BUGFIX: same reject-then-apply fall-through as mode_set. */
+			dev_info(dev, "set spi mode(%d) \n",param1);
+			slave_mode_set(spi,param1);
+		}
+	}
+
+	/* --- send_msg_rand_len <times>: stress with random 1..4096 byte
+	 * packets, pacing each iteration with a short sleep --- */
+	if (strcmp(cmd_str,"send_msg_rand_len") == 0) {
+		count = 0;
+		times = param1;
+		while (times--) {
+			/* random length in [1, 0x1000] */
+			get_random_bytes(&count,4);
+			count = (count%0x1000) + 1;
+			if (!spidev->tx_buffer) {
+				spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+				if (!spidev->tx_buffer) {
+					dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+					return n;
+				}
+			}
+
+			if (!spidev->rx_buffer) {
+				spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+				if (!spidev->rx_buffer) {
+					dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+					kfree(spidev->tx_buffer);
+					spidev->tx_buffer = NULL;
+					return n;
+				}
+			}
+			for (i = 0; i < count; i++) {
+				spidev->tx_buffer[i] = i;
+			}
+			if (strcmp(dev_name(&spi->dev),"spi1.0") == 0) {
+				ret = spi_dev_send_one_pack_test(spi, count);
+			} else {
+				ret = spi_dev_slave_send_one_pack_test(spi, count);
+			}
+			if (ret == count) {
+				dev_info(dev, "write_and_read success. retain times:%d rx_cnt_in_tx_thread:%d spidev->rx_cnt_in_rx_thread:%d \n",
+					times,spidev->rx_cnt_in_tx_thread,spidev->rx_cnt_in_rx_thread);
+
+			}
+			msleep((count%5)+1);
+		}
+	}
+
+	/* --- send_msg_fixed_len <times> <len> <debug>: random payloads of a
+	 * fixed length --- */
+	if (strcmp(cmd_str,"send_msg_fixed_len") == 0) {
+		times = param1;
+		debug = param3;
+		count = param2;
+		if (count > 4096) {
+			printk("msg_fixed_len(%d bytes) out of range(4KB)\r\n",count);
+			return n;
+		}
+		while (times--) {
+			if (!spidev->tx_buffer) {
+				spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+				if (!spidev->tx_buffer) {
+					dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+					return n;
+				}
+			}
+
+			if (!spidev->rx_buffer) {
+				spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+				if (!spidev->rx_buffer) {
+					dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+					kfree(spidev->tx_buffer);
+					spidev->tx_buffer = NULL;
+					return n;
+				}
+			}
+
+			get_random_bytes(spidev->tx_buffer,count);
+			if (strcmp(dev_name(&spi->dev),"spi1.0") == 0) {
+				ret = spi_dev_send_one_pack_test(spi, count);
+			} else {
+				ret = spi_dev_slave_send_one_pack_test(spi, count);
+			}
+			if (ret == count) {
+				if (debug) {
+					dev_info(dev, "spi write data(%d bytes) \n",ret);
+					print_buf_data(spidev->tx_buffer,count);
+				}
+				dev_info(dev, "write_and_read success. retain times:%d rx_cnt_in_tx_thread:%d spidev->rx_cnt_in_rx_thread:%d \n",
+					times,spidev->rx_cnt_in_tx_thread,spidev->rx_cnt_in_rx_thread);
+			}
+			msleep((count%5)+1);
+		}
+	}
+
+	/* --- data_check_ctrl <on>: toggle rx payload verification and reset
+	 * the pass/fail counters --- */
+	if (strcmp(cmd_str,"data_check_ctrl") == 0) {
+		if (param1) {
+			spidev->is_data_check = true;
+			spidev->rx_data_check_ok_cnt = 0;
+			spidev->rx_data_check_err_cnt = 0;
+		} else {
+			spidev->is_data_check = false;
+		}
+		dev_info(dev, "rx_check_ok_cnt:%d rx_check_err_cnt:%d\n",spidev->rx_data_check_ok_cnt,spidev->rx_data_check_err_cnt);
+	}
+	/* --- send_msg_with_check <times> <len> <debug>: framed payloads the
+	 * receiver can verify (0xa5 header byte, 0x7e trailer byte) --- */
+	if (strcmp(cmd_str,"send_msg_with_check") == 0) {
+		times = param1;
+		debug = param3;
+		count = param2;
+		if (count > 4096 || count < 4) {
+			printk("msg_fixed_len(%d bytes) out of range(4KB)\r\n",count);
+			return n;
+		}
+		while (times--) {
+			if (!spidev->tx_buffer) {
+				spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+				if (!spidev->tx_buffer) {
+					dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+					return n;
+				}
+			}
+
+			if (!spidev->rx_buffer) {
+				spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+				if (!spidev->rx_buffer) {
+					dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+					kfree(spidev->tx_buffer);
+					spidev->tx_buffer = NULL;
+					return n;
+				}
+			}
+
+			get_random_bytes(spidev->tx_buffer,count);
+			spidev->tx_buffer[0] = 0xa5;
+			spidev->tx_buffer[count-1] = 0x7e;
+			ret = data_to_packet(spidev->tx_buffer,count);
+			if (strcmp(dev_name(&spi->dev),"spi1.0") == 0) {
+				ret = spi_dev_send_one_pack_test(spi, count);
+			} else {
+				ret = spi_dev_slave_send_one_pack_test(spi, count);
+			}
+			if (ret == count) {
+				if (debug) {
+					dev_info(dev, "spi write data(%d bytes) \n",ret);
+					print_buf_data(spidev->tx_buffer,count);
+				}
+				dev_info(dev, "complete.retain:%d rx_cnt_in_tx_thread:%d spidev->rx_cnt_in_rx_thread:%d rx_check_ok_cnt:%d rx_check_err_cnt:%d\n",
+					times,spidev->rx_cnt_in_tx_thread,spidev->rx_cnt_in_rx_thread,
+					spidev->rx_data_check_ok_cnt,spidev->rx_data_check_err_cnt);
+			}
+			usleep_range(5+(count%10),20);
+		}
+	}
+
+	/* --- gpio_out_val <0|1>: drive the handshake GPIO directly --- */
+	if (strcmp(cmd_str,"gpio_out_val") == 0) {
+		if (param1)
+			gpio_set_value(spidev->gpio_ex,1);
+		else
+			gpio_set_value(spidev->gpio_ex,0);
+	}
+	/* --- test_ktime_get <us>: busy-wait <us> microseconds while holding
+	 * the GPIO low, so the interval can be measured on a scope --- */
+	if (strcmp(cmd_str,"test_ktime_get") == 0) {
+		ktime_t k_time_start = 0;
+		ktime_t diff = 0;
+
+		k_time_start = ktime_get();
+		gpio_set_value(spidev->gpio_ex,0);
+		do {
+			diff = ktime_sub(ktime_get(),k_time_start);
+		} while (diff <= (param1*1000));
+		gpio_set_value(spidev->gpio_ex,1);
+		printk("test ktime_get: start=%lld end=%lld diff=%lld \r\n",k_time_start,ktime_get(),diff);
+	}
+	return n;
+}
+
+
+SPIDEV_ATTR(spidevinfo);
+
+
+/* sysfs plumbing: expose the single "spidevinfo" debug attribute on the
+ * spi device's kobject (installed from spidev_probe via attr_groups). */
+static struct attribute * test_attr[] = {
+	&spidevinfo_attr.attr,
+
+	NULL,
+};
+
+static const struct attribute_group attr_group = {
+	.attrs = test_attr,
+};
+
+/* NULL-terminated group list for sysfs_create_groups(). */
+static const struct attribute_group *attr_groups[] = {
+	&attr_group,
+
+	NULL,
+};
+#endif
+
+
+#ifdef TEST_SWAP_KERNEL_AND_USER
+
+/* v3e
+spi0(master)-------------------------------------spi1(slave)
+GPIO129 <----------------------------------------INT4(GPIO51)
+INT7(GPIO54) <-----------------------------------GPIO130
+*/
+
+/* v3 mdl
+4#(master) --------------------------------------5#(slave)
+GPIO130 <----------------------------------------INT6(GPIO53)
+INT7(GPIO54) <-----------------------------------GPIO131
+*/
+
+//#define TEST_SPI_SLAVE
+#ifdef TEST_SPI_SLAVE
+#define GPIO_NUM_EX 131
+#define GPIO_NUM_INT 53
+#else
+#define GPIO_NUM_EX 130
+#define GPIO_NUM_INT 54
+#endif
+
+/*
+ * Bind the "int_ex" pinctrl state and record the handshake GPIO numbers
+ * used by the debug tests.  Returns 0 on success, negative errno on error.
+ */
+static int spi_dev_pin_init_test(struct spi_device *spi)
+{
+	struct spidev_data *spidev = spi_get_drvdata(spi);
+	int status;
+
+	spidev->pctrl = devm_pinctrl_get(&spi->dev);
+	/* BUGFIX: devm_pinctrl_get() returns an ERR_PTR, never NULL, so the
+	 * original NULL check was dead code; and every failure path returned
+	 * status == 0, so callers could never detect the failure. */
+	if (IS_ERR(spidev->pctrl)) {
+		status = PTR_ERR(spidev->pctrl);
+		dev_info(&spi->dev, "get dev pctrl failed! status=%d\n", status);
+		return status;
+	}
+
+	spidev->pint_ex = pinctrl_lookup_state(spidev->pctrl, "int_ex");
+	if (IS_ERR(spidev->pint_ex)) {
+		dev_err(&spi->dev, "TEST: missing pint_ex \n");
+		return PTR_ERR(spidev->pint_ex);
+	}
+
+	status = pinctrl_select_state(spidev->pctrl, spidev->pint_ex);
+	if (status < 0) {
+		dev_err(&spi->dev, "TEST: slect pint_ex \n");
+		return status;
+	}
+	/* Handshake GPIO wiring differs between the master ("spi1.0") and
+	 * the slave side of the test rig (see the wiring diagrams above). */
+	if (strcmp(dev_name(&spi->dev), "spi1.0") == 0) {
+		spidev->gpio_ex = GPIO_NUM_EX;
+		spidev->gpio_int = GPIO_NUM_INT;
+	} else {
+		spidev->gpio_ex = 130;
+		spidev->gpio_int = 51;
+	}
+	return 0;
+}
+
+/*
+ * Deliver sig_no to the user-space process that registered its pid in
+ * spidev->pid, packing the current handshake GPIO levels into
+ * si_code (gpio_int) and si_errno (gpio_ex).
+ */
+static void send_signal(int sig_no, void *dev_id)
+{
+	int ret;
+	struct spidev_data *spidev = (struct spidev_data *)dev_id;
+	struct kernel_siginfo info;
+	struct task_struct *my_task = NULL;
+
+	/* BUGFIX: clear sizeof(info) bytes.  The original cleared
+	 * sizeof(struct siginfo) — the 128-byte uapi layout — which overruns
+	 * the smaller struct kernel_siginfo on the stack. */
+	memset(&info, 0, sizeof(info));
+	if (spidev->pid == 0) {
+		printk("send_signal pid is not valid \n");
+		return;
+	}
+
+	info.si_signo = sig_no;
+	info.si_code = gpio_get_value(spidev->gpio_int);
+	info.si_errno = gpio_get_value(spidev->gpio_ex);
+	/* pid_task() must be called under RCU; the task ref is only used
+	 * immediately for send_sig_info(). */
+	rcu_read_lock();
+	my_task = pid_task(find_vpid(spidev->pid), PIDTYPE_PID);
+	rcu_read_unlock();
+
+	if (!my_task) {
+		printk("%s get pid_task failed! \n", __FUNCTION__);
+		return;
+	}
+	ret = send_sig_info(sig_no, &info, my_task);
+	if (ret < 0)
+		printk("send signal failed! \n");
+}
+
+/*
+ * Kernel thread: forwards every handshake-IRQ event (posted on sig_req by
+ * spidev_hand_shake_irq) to user space as SIGUSR1.  Loops forever; the
+ * trailing return is never reached.
+ */
+static int spi_dev_sig_process_test(void *arg)
+{
+	struct spi_device *spi = (struct spi_device *)arg;
+	struct spidev_data *spidev = spi_get_drvdata(spi);
+
+	while(1) {
+		down(&spidev->sig_req);
+		send_signal(SIGUSR1,spidev);
+
+	}
+	return 0;
+}
+/*
+ * Notify user space (normally with SIGUSR2) that a DMA configuration
+ * cycle finished.  si_errno carries — and consumes — the dma_cfg_done
+ * flag (test-and-reset semantics).
+ */
+static void send_dma_cfg_done_signal(int sig_no, void *dev_id)
+{
+	int ret;
+	struct spidev_data *spidev = (struct spidev_data *)dev_id;
+	struct kernel_siginfo info;
+	struct task_struct *my_task = NULL;
+	int dma_cfg_done = 0;
+
+	/* BUGFIX: clear sizeof(info) bytes; the original cleared
+	 * sizeof(struct siginfo) (128-byte uapi layout), overrunning the
+	 * smaller struct kernel_siginfo on the stack. */
+	memset(&info, 0, sizeof(info));
+
+	/* Latch and clear the done flag so each event fires only once. */
+	if (spidev->dma_cfg_done == 1) {
+		dma_cfg_done = spidev->dma_cfg_done;
+		spidev->dma_cfg_done = 0;
+	}
+	if (spidev->pid == 0) {
+		printk("%s is not valid\n", __FUNCTION__);
+		return;
+	}
+	info.si_signo = sig_no;
+	info.si_errno = dma_cfg_done;
+	rcu_read_lock();
+	my_task = pid_task(find_vpid(spidev->pid), PIDTYPE_PID);
+	rcu_read_unlock();
+
+	if (!my_task) {
+		printk("%s get pid_task failed! \n", __FUNCTION__);
+		return;
+	}
+	ret = send_sig_info(sig_no, &info, my_task);
+	if (ret < 0)
+		printk("send signal failed! \n");
+}
+
+/*
+ * Kernel thread: waits on sem_dma_cfg_done (posted by
+ * spi_dev_send_dma_cfg_down) and forwards each completion to user space
+ * as SIGUSR2.  Loops forever; the trailing return is never reached.
+ */
+static int spi_dev_dma_cfg_done_process_test(void *arg)
+{
+	struct spi_device *spi = (struct spi_device *)arg;
+	struct spidev_data *spidev = spi_get_drvdata(spi);
+
+	while(1) {
+		down(&spidev->sem_dma_cfg_done);
+		send_dma_cfg_done_signal(SIGUSR2,spidev);
+	}
+	return 0;
+}
+
+/*
+ * Handshake-GPIO interrupt handler: wakes the SIGUSR1 forwarding thread
+ * (spi_dev_sig_process_test) via sig_req.  The GPIO levels are read only
+ * for the debug trace below; up() is safe in hard-IRQ context.
+ */
+static irqreturn_t spidev_hand_shake_irq(int irqno, void *dev_id)
+{
+	struct spidev_data *spidev = (struct spidev_data *)dev_id;
+
+	int gpio_out_status = gpio_get_value(spidev->gpio_ex);
+	int gpio_int_status = gpio_get_value(spidev->gpio_int);
+
+	up(&spidev->sig_req);
+	dev_dbg(&spidev->spi->dev,"out=%d int=%d \r\n",gpio_out_status,gpio_int_status);
+
+	return IRQ_HANDLED;
+}
+
+
+/*
+ * Set up the handshake IRQ and the two signal-forwarding threads used by
+ * the kernel<->user swap test.  Returns 0 on success, negative errno on
+ * failure.
+ */
+static int spi_dev_irq_init_test(struct spi_device *spi)
+{
+	struct spidev_data *spidev;
+	int irq = 0, ret = 0;
+
+	/* BUGFIX: validate spi before dereferencing it in spi_get_drvdata();
+	 * the original fetched the drvdata first and NULL-checked after. */
+	if (!spi)
+		return -ENOENT;
+	spidev = spi_get_drvdata(spi);
+	if (!spidev)
+		return -ENOENT;
+
+	sema_init(&spidev->sig_req, 0);
+	sema_init(&spidev->sem_dma_cfg_done, 0);
+	kernel_thread(spi_dev_sig_process_test, spi, 0); /* fork the main thread */
+	kernel_thread(spi_dev_dma_cfg_done_process_test, spi, 0); /* fork the main thread */
+	irq = irq_of_parse_and_map(spi->dev.of_node, 0);
+	if (irq <= 0) {
+		dev_err(&spi->dev, "ERROR: invalid interrupt number, irq = %d\n", irq);
+		return -EBUSY;
+	}
+	spidev->irq = irq;
+	dev_info(&spi->dev, "used interrupt num is %d\n", spidev->irq);
+
+	ret = devm_request_irq(&spi->dev, spidev->irq, spidev_hand_shake_irq,
+			0, dev_name(&spi->dev), spidev);
+	if (ret < 0)
+		dev_err(&spi->dev, "probe - cannot get IRQ (%d)\n", ret);
+
+	return ret;
+}
+
+#endif
+
+#ifdef TEST_SWAP_KERNEL_AND_USER
+/* Flag a finished DMA configuration and kick the SIGUSR2 notifier thread
+ * (spi_dev_dma_cfg_done_process_test) via sem_dma_cfg_done. */
+void spi_dev_send_dma_cfg_down(struct spi_device *spi)
+{
+	struct spidev_data *spidev = spi_get_drvdata(spi);
+	spidev->dma_cfg_done = 1;
+	up(&spidev->sem_dma_cfg_done);
+}
+#else
+/* No-op stub when the kernel<->user swap test is compiled out. */
+void spi_dev_send_dma_cfg_down(struct spi_device *spi)
+{
+	return;
+}
+#endif
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Bind a spidev instance: allocate per-device state, read optional DT
+ * tuning properties, create the /dev/spidevB.C node and, when enabled at
+ * build time, install the debug sysfs attributes and the handshake-IRQ
+ * test harness.  Returns 0 on success or a negative errno.
+ */
+static int spidev_probe(struct spi_device *spi)
+{
+	struct spidev_data *spidev;
+	int status;
+	unsigned long minor;
+	u32 val;
+	/*
+	 * spidev should never be referenced in DT without a specific
+	 * compatible string, it is a Linux implementation thing
+	 * rather than a description of the hardware.
+	 */
+	WARN(spi->dev.of_node &&
+	     of_device_is_compatible(spi->dev.of_node, "spidev"),
+	     "%pOF: buggy DT: spidev listed directly in DT\n", spi->dev.of_node);
+
+	spidev_probe_acpi(spi);
+
+	/* Allocate driver data */
+	spidev = kzalloc(sizeof(*spidev), GFP_KERNEL);
+	if (!spidev)
+		return -ENOMEM;
+
+	/* Initialize the driver data */
+	spidev->spi = spi;
+	spin_lock_init(&spidev->spi_lock);
+	mutex_init(&spidev->buf_lock);
+
+	INIT_LIST_HEAD(&spidev->device_entry);
+
+	/* Optional DT properties; absence just means "use defaults". */
+	if (device_property_read_u32(&spi->dev, "enable_dma",&val)) {
+		spi->dma_used = 0;
+		dev_err(&spi->dev,"enable_dma get failed");
+	}
+	else {
+		spi->dma_used = val;
+		dev_info(&spi->dev,"enable_dma = 0x%x",val);
+	}
+
+	if (device_property_read_u32(&spi->dev, "enable_trans_gap",&val)) {
+		spi->trans_gaped = 0;
+		dev_err(&spi->dev,"enable_trans_gap get failed");
+	}
+	else {
+		spi->trans_gaped = val;
+		dev_info(&spi->dev,"enable_trans_gap = 0x%x",val);
+	}
+
+	if (device_property_read_u32(&spi->dev, "trans_gap_num",&val)) {
+		spi->trans_gap_num = 0;
+		dev_err(&spi->dev,"trans_gap_num get failed");
+	}
+	else {
+		spi->trans_gap_num = val;
+		dev_info(&spi->dev,"trans_gap_num = 0x%x",val);
+	}
+
+	// yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme.
+
+	/* If we can allocate a minor number, hook up this device.
+	 * Reusing minors is fine so long as udev or mdev is working.
+	 */
+	mutex_lock(&device_list_lock);
+	minor = find_first_zero_bit(minors, N_SPI_MINORS);
+	if (minor < N_SPI_MINORS) {
+		struct device *dev;
+
+		spidev->devt = MKDEV(SPIDEV_MAJOR, minor);
+		dev = device_create(spidev_class, &spi->dev, spidev->devt,
+				    spidev, "spidev%d.%d",
+				    spi->master->bus_num, spi->chip_select);
+		status = PTR_ERR_OR_ZERO(dev);
+	} else {
+		dev_dbg(&spi->dev, "no minor number available!\n");
+		status = -ENODEV;
+	}
+	if (status == 0) {
+		set_bit(minor, minors);
+		list_add(&spidev->device_entry, &device_list);
+	}
+	mutex_unlock(&device_list_lock);
+
+	spidev->speed_hz = spi->max_speed_hz;
+	spidev->rd_from_rx_buffer = 0;
+	/* On failure spidev is freed and drvdata stays unset. */
+	if (status == 0)
+		spi_set_drvdata(spi, spidev);
+	else
+		kfree(spidev);
+	spi_setup(spi);
+	if(0 == status && spi->master->slave)
+		device_init_wakeup(&spi->dev, true);
+#ifdef SPIDEV_DEBUG
+	/* NOTE(review): this block declares `ret` after statements and runs
+	 * even when status != 0 (drvdata unset -> spidev_debug_test_init
+	 * would dereference NULL); also, enabling both SPIDEV_DEBUG and
+	 * TEST_SWAP_KERNEL_AND_USER would redefine `ret` — confirm intended. */
+	int ret = sysfs_create_groups(&spi->dev.kobj, attr_groups);
+
+	if (ret) {
+		dev_err(&spi->dev, "create test_kobj attr group fain error=%d\n", ret);
+		return ret;
+	}
+
+	ret = spidev_debug_test_init(spi);
+	if (ret) {
+		dev_err(&spi->dev, "spidev_debug_test_init error=%d\n", ret);
+		return ret;
+	}
+#endif
+
+#ifdef TEST_SWAP_KERNEL_AND_USER
+	/* NOTE(review): same caveats as the SPIDEV_DEBUG block above. */
+	int ret;
+	spidev->dma_cfg_done = 0;
+	spidev->pid = 0;
+	ret =spi_dev_pin_init_test(spi);
+	if(ret) {
+		dev_info(&spi->dev, "spi_dev_pin_init_test,ret=%d \n",ret);
+		return ret;
+	}
+
+	ret =spi_dev_irq_init_test(spi);
+	if(ret) {
+		dev_info(&spi->dev, "spi_dev_irq_init_test,ret=%d \n",ret);
+		return ret;
+	}
+#endif
+	return status;
+}
+
+/*
+ * Unbind: detach the spi_device from any open fds (spidev->spi = NULL so
+ * in-flight ops abort cleanly), tear down the /dev node and free the
+ * per-device state — unless a user still holds the fd open, in which
+ * case release() frees it later.
+ */
+static int spidev_remove(struct spi_device *spi)
+{
+	struct spidev_data *spidev = spi_get_drvdata(spi);
+
+	/* prevent new opens */
+	mutex_lock(&device_list_lock);
+	/* make sure ops on existing fds can abort cleanly */
+	spin_lock_irq(&spidev->spi_lock);
+	spidev->spi = NULL;
+	spin_unlock_irq(&spidev->spi_lock);
+
+	list_del(&spidev->device_entry);
+	device_destroy(spidev_class, spidev->devt);
+	clear_bit(MINOR(spidev->devt), minors);
+	/* deferred to release() if an fd is still open */
+	if (spidev->users == 0)
+		kfree(spidev);
+	mutex_unlock(&device_list_lock);
+
+	return 0;
+}
+
+/* spidev driver object: matched via DT or ACPI tables; registered from
+ * spidev_init(). */
+static struct spi_driver spidev_spi_driver = {
+	.driver = {
+		.name =		"spidev",
+		.of_match_table = of_match_ptr(spidev_dt_ids),
+		.acpi_match_table = ACPI_PTR(spidev_acpi_ids),
+	},
+	.probe =	spidev_probe,
+	.remove =	spidev_remove,
+
+	/* NOTE:  suspend/resume methods are not necessary here.
+	 * We don't do anything except pass the requests to/from
+	 * the underlying controller.  The refrigerator handles
+	 * most issues; the controller driver handles the rest.
+	 */
+};
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Module init: reserve the char-dev major, create the "spidev" class so
+ * udev/mdev makes /dev nodes, then register the spi driver.  Each step
+ * unwinds the previous ones on failure.
+ */
+static int __init spidev_init(void)
+{
+	int status;
+
+	/* Claim our 256 reserved device numbers.  Then register a class
+	 * that will key udev/mdev to add/remove /dev nodes.  Last, register
+	 * the driver which manages those device numbers.
+	 */
+	BUILD_BUG_ON(N_SPI_MINORS > 256);
+	status = register_chrdev(SPIDEV_MAJOR, "spi", &spidev_fops);
+	if (status < 0)
+		return status;
+
+	spidev_class = class_create(THIS_MODULE, "spidev");
+	if (IS_ERR(spidev_class)) {
+		unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
+		return PTR_ERR(spidev_class);
+	}
+
+	status = spi_register_driver(&spidev_spi_driver);
+	if (status < 0) {
+		class_destroy(spidev_class);
+		unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
+	}
+	return status;
+}
+module_init(spidev_init);
+module_init(spidev_init);
+
+/* Module exit: tear down in the reverse order of spidev_init(). */
+static void __exit spidev_exit(void)
+{
+	spi_unregister_driver(&spidev_spi_driver);
+	class_destroy(spidev_class);
+	unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
+}
+module_exit(spidev_exit);
+
+MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
+MODULE_DESCRIPTION("User mode SPI device interface");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:spidev");
diff --git a/upstream/linux-5.10/drivers/tty/serial/zx29_uart.c b/upstream/linux-5.10/drivers/tty/serial/zx29_uart.c
new file mode 100755
index 0000000..60629b6
--- /dev/null
+++ b/upstream/linux-5.10/drivers/tty/serial/zx29_uart.c
@@ -0,0 +1,4389 @@
+/****************************************************************************/
+/*
+ * zx29_uart.c sanchips
+ *
+ * (C) Copyright 2003-2007, gaowei
+ * (C) Copyright 2003-2007, sanchips
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/printk.h>
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/ioport.h>
+#include <linux/sched/clock.h>
+#include <linux/soc/zte/spinlock.h>
+
+#if 0
+#include <mach/gpio_def.h>
+#include <mach/irqs.h>
+#include <mach/board.h>
+#include <mach/gpio.h>
+#include <mach/debug.h>
+#include <mach/iomap.h>
+#include <mach/dma.h>
+#include <mach/dma_cfg.h>
+#endif
+//#include <linux/wakelock.h>
+#include <linux/kthread.h>
+#include <linux/semaphore.h>
+
+#include <linux/dma/zx-dma.h>
+//#include "../../dma/zte/zx298501_dma.h"
+
+#include "zx29_uart.h"
+#include <linux/soc/zte/rpmsg.h>
+#include <linux/soc/sc/drv_idle.h>
+#include "pub_debug_info.h"
+//#include <linux/soc/zte/pm/drv_idle.h>
+//#include <mach/pcu.h>
+//#define DEBUG_UART
+
+#ifdef DEBUG_UART
+#pragma GCC optimize("O0")
+#endif
+
+#define UART_WCLK_NAME "uartclk"
+#define UART_APBCLK_NAME "apb_pclk"
+
+#define CONFIG_SERIAL_ZX29_DMA 1
+
+
+extern bool xp2xp_Ap2CpIsApWakeup(void);
+extern int xp2xp_enable_4line(void);
+
+extern signed int zx29_dma_stop(unsigned int channel_id);
+extern signed int zx29_dma_get_transfer_num(unsigned int channel_id);
+
+
+
+char uart_names[5][12] = {
+ "zx29_uart.0",
+ "zx29_uart.1",
+ "zx29_uart.2",
+ "zx29_uart.3",
+ "zx29_uart.4"
+};
+
+#if CONFIG_SERIAL_ZX29_DMA
+#define ZX29_DMA_BUFFER_SIZE PAGE_SIZE
+#define UART_DMA_RX_MAX_COUNT 2
+//#define RX_DMA_TIMEOUT (HZ / 10)//60
+#define RX_DMA_TIMEOUT (HZ / 100)
+#define RX_DMA_WORK 1
+struct zx29_sgbuf {
+ struct scatterlist sg;
+ dma_addr_t dma_addr;
+ char *buf;
+};
+
+struct zx29_dmarx_data {
+ struct dma_chan *chan;
+ struct completion complete;
+ dma_channel_def rx_def[UART_DMA_RX_MAX_COUNT];
+ u32 rx_index;
+ bool use_buf_b;
+ struct zx29_sgbuf sgbuf_a;
+ struct zx29_sgbuf sgbuf_b;
+ dma_cookie_t cookie;
+ bool running;
+ atomic_t count;
+ bool used;
+};
+
+struct zx29_dmatx_data {
+ struct dma_chan *chan;
+ struct completion complete;
+ dma_channel_def tx_def;
+ struct scatterlist sg;
+ char *buf;
+ bool queued;
+ atomic_t count;
+};
+#define UART_DMA_CYCLE_RX_CONFIG_COUNT 5
+struct zx29_dma_cycle_data{
+ int id;
+ int flg_enter_th;
+ int flg_enter_to;
+ char flg_overrun;
+ char flg_pe;
+ char flg_be;
+ char flg_fe;
+ char from_resume;
+ unsigned long cnt_callback_total;
+ unsigned long cnt_th_total;
+ int cnt_callback;
+ int cnt_th;
+ struct zx29_sgbuf sgbuf[UART_DMA_CYCLE_RX_CONFIG_COUNT];
+ dma_channel_def rxdef[UART_DMA_CYCLE_RX_CONFIG_COUNT];
+};
+struct zx29_dma_cycle_data uart_dma_cycle[5];
+#endif
+
+
+
+#define UART_NUM 5
+int g_uart_overrun[5];
+ktime_t g_hr_interval;
+
+
+int g_cons_id_cmdline;
+EXPORT_SYMBOL(g_cons_id_cmdline);
+
+#ifdef DEBUG_CONSOLE
+#undef DEBUG_CONSOLE
+#endif
+#define DEBUG_CONSOLE g_cons_id_cmdline
+/****************************************************************************/
+
+/* Use device name ttyS, major 4, minor 64-68. This is the usual serial port
+ * name, but it is legally reserved for the 8250 driver. */
+#define SERIAL_zx29_MAJOR TTY_MAJOR
+#define SERIAL_MINOR_START 64
+
+#define UART_PORT_AUTOBAUD_ON 1
+#define UART_PORT_AUTOBAUD_OFF 0
+#define UART_PORT_AUTOBAUD_BYTE 2
+#define UART_AT_SENDOK_NUM 6
+#define UART_AUTOBAUD_LEVEL 5
+#define UART_AUTOBAUD_CHECKBYTE 4
+#define UART_AUTOBAUD_RATE 115200
+#define UART1_AUTOBAUD_RATE 921600
+
+
+unsigned char uart_port_autobaud_buffer[UART_PORT_AUTOBAUD_BYTE] = {0};
+unsigned char uart_port_autobaud_gtflag = 0 ;
+unsigned char uart_port_autobaud_suflag = 0 ;
+unsigned char g_console_open_flag = 1;
+
+
+unsigned char UART_AT_send_ok[UART_AT_SENDOK_NUM] =
+ {
+ 0x0d,0x0a,0x4F,0x4B,0x0d,0x0a
+ };
+
+unsigned char UART_baud_check[UART_AUTOBAUD_LEVEL][UART_AUTOBAUD_CHECKBYTE]=
+ {
+ {0x61,0x74,0x41,0x54},{0x06,0x9e,0x06,0x98},{0x1c,0x80,0x1c,0x00},
+ {0xe0,0x00,0xe0,0x00},{0x00,0x00,0x00,0x00},
+ };
+unsigned int UART_baud[UART_AUTOBAUD_LEVEL] =
+ {
+ 115200,57600,38400,19200,9600
+ };
+unsigned int UART_termios_cflag[UART_AUTOBAUD_LEVEL] =
+ {
+ B115200,B57600,B38400,B19200,B9600
+ };
+
+#ifdef CONFIG_SERIAL_CORE_CONSOLE
+#define uart_console(port) ((port)->cons && (port)->cons->index == (port)->line)
+#else
+#define uart_console(port) (0)
+#endif
+
+/****************************************************************************/
+/*
+ * Local per-uart structure.
+ */
+struct zx29_uart_port
+{
+ struct uart_port port;
+ unsigned int sigs; /* Local copy of line sigs */
+ unsigned int old_status;
+ unsigned char imr; /* Local interrupt mask reg mirror */
+#if CONFIG_SERIAL_ZX29_DMA
+ unsigned char dmacr; /* DMA reg*/
+#endif
+ bool rts_state;
+ bool autorts; /* hardware flow control */
+ struct clk *wclk; /* uart work clock */
+ struct clk *busclk; /* uart apb clock */
+ bool autobaud;
+ bool autobaud_state;
+ unsigned int baudrate;
+ bool uartwake;
+
+ int irq;
+ int irq_state;
+ int rxd_irq;
+ struct tasklet_struct write_wakeup;
+ bool rxd_wakeup;
+ int rxd_int_depth;
+ bool enter_suspend;
+#if CONFIG_SERIAL_ZX29_DMA
+ /* DMA stuff */
+ bool using_tx_dma;
+ bool using_rx_dma;
+ struct zx29_dmarx_data dmarx;
+ struct zx29_dmatx_data dmatx;
+ struct timer_list rx_dma_timer;
+ struct hrtimer rx_dma_hrtimer;
+ struct task_struct *dma_compl_th;
+ struct semaphore sema;
+ struct semaphore sema_cyclic;
+ bool port_close;
+ bool work_state;
+ size_t pre_pending;
+ struct zx29_sgbuf *sg2tty;
+ size_t sg2tty_len;
+ struct zx29_sgbuf *curr_sg;
+ int enable_ctsrts;
+ int enable_wakeup;
+
+ struct notifier_block wakeup_notifier;
+
+#endif
+ //means application decide close and release DMA &wakelock
+ int app_ctrl;
+ int sleep_state;
+ //if app_ctrl is set or using kernel control sleep,set this flag
+ int uart_power_mode;
+};
+
+
+
+static struct zx29_uart_port zx29_uart_ports[UART_NUM];
+
+#define zx29_MAXPORTS ARRAY_SIZE(zx29_uart_ports)
+typedef struct __UART_STATIC{
+ int cnt;
+ char head[16];
+ unsigned long long s_time;
+ int func_step;
+ unsigned int fr;
+ unsigned int ris;
+}uart_static;
+#define STATIC_UART_ID 0
+uart_static g_uart_static[256] = {0};
+int g_uart_static_cnt = 0;
+/*
+ * Debug trace for STATIC_UART_ID only: record the first bytes of the
+ * event, its length, a local_clock() timestamp, the caller-supplied step
+ * id and the UART FR/RIS registers into the 256-entry g_uart_static ring.
+ *
+ * NOTE(review): head[] is a raw 16-byte capture, not a C string — strncpy
+ * may leave it unterminated; confirm no consumer prints it with %s.
+ * NOTE(review): g_uart_static_cnt is updated without locking — looks like
+ * it assumes callers are serialized; verify against IRQ-context callers.
+ */
+void test_uart_static(int uart_id, char *buf, int cnt, int steps)
+{
+	if(uart_id != STATIC_UART_ID)
+		return;
+	if(buf){
+		if(cnt >= 16){
+			strncpy(g_uart_static[g_uart_static_cnt].head, buf, 16);
+		}else{
+			memcpy(g_uart_static[g_uart_static_cnt].head, buf, cnt);
+		}
+	}
+	g_uart_static[g_uart_static_cnt].cnt = cnt;
+	g_uart_static[g_uart_static_cnt].s_time = local_clock();
+	g_uart_static[g_uart_static_cnt].func_step = steps;
+	g_uart_static[g_uart_static_cnt].fr = UART_GET_FR(&zx29_uart_ports[uart_id].port);
+	g_uart_static[g_uart_static_cnt].ris = UART_GET_RIS(&zx29_uart_ports[uart_id].port);
+
+	/* ring wrap */
+	if(++g_uart_static_cnt >= 256)
+		g_uart_static_cnt = 0;
+}
+
+
+
+
+#define zx29_MAXPORTS ARRAY_SIZE(zx29_uart_ports)
+void zx29_uart_stop_rx(struct uart_port *port);
+
+#if CONFIG_SERIAL_ZX29_DMA
+static inline bool zx29_dma_tx_start(struct zx29_uart_port *zup);
+static inline void zx29_dma_tx_stop(struct zx29_uart_port *zup);
+static bool zx29_dma_tx_irq(struct zx29_uart_port *zup);
+static int zx29_uart_dma_tx_chars(struct zx29_uart_port *zup);
+void uart_dma_rx_callback(void *data);
+void uart_dma_rx_callback_use_dma_cyclic(void * data);
+static void zx29_uart_dma_rx_chars(struct zx29_uart_port *zup,
+ //u32 pending, bool use_buf_b,
+ u32 pending, struct zx29_sgbuf *sgbuf,
+ bool readfifo, unsigned long *flags);
+static inline void zx29_dma_rx_stop(struct zx29_uart_port *zup);
+static inline bool zx29_dma_rx_available(struct zx29_uart_port *zup);
+static inline bool zx29_dma_rx_running(struct zx29_uart_port *zup);
+static int zx29_dma_rx_trigger_dma(struct zx29_uart_port *zup);
+static int zx29_dma_rx_trigger_dma_use_dma_cyclic(struct zx29_uart_port *zup);
+
+static void zx29_uart_rx_dma_chars(struct zx29_uart_port *zup, unsigned long *flags);
+dma_peripheral_id uart_get_rx_dma_peripheral_id(struct zx29_uart_port *zup);
+
+#if RX_DMA_WORK
+static void zx29_uart_rx_timeout_chars(struct zx29_uart_port *zup, unsigned long *flags);
+static inline bool zx29_dma_rx_work_scheduled(struct zx29_uart_port *zup);
+
+static void zx29_uart_rt_dma(struct zx29_uart_port *zup, unsigned long *flags);
+static void uart_dma_cycle_deinit(struct zx29_uart_port *zup);
+#endif
+#endif
+
+
+
+/*******************************************************************************
+* Function: uart_wakeup_callback.
+* Description: uart_wakeup_callback.
+* Parameters:
+* Input:val:means wakeup or sleep notify to other device
+*
+* Output:v:means devices been called return result
+*
+* Returns:
+*
+* Others:
+********************************************************************************/
+int uart_wakeup_callback(struct notifier_block * nb, unsigned long val, void * v)
+{
+ int *call_result = (int *)v;
+ unsigned long flags = 0;
+ struct zx29_uart_port *zup = container_of(nb, struct zx29_uart_port, wakeup_notifier);
+
+ if(!zup || zup->port_close){
+ *call_result |= 0;
+ return 0;
+ }
+ struct platform_device *pdev = zup->port.private_data;
+ raw_spin_lock_irqsave(&zup->port.lock, flags);
+ if(val == 1){//wakeup
+ zup->sleep_state = 0;
+ pm_stay_awake(&pdev->dev);
+ zx29_uart_rx_dma_chars(zup, &flags);
+
+ }else{//sleep
+ zup->sleep_state = 1;
+ zx29_uart_stop_rx(&zup->port);
+ pm_relax(&pdev->dev);
+
+ }
+ *call_result |= 0;
+ raw_spin_unlock_irqrestore(&zup->port.lock, flags);
+ return 0;
+}
+
+int zx29_get_sleep_state(int uart_index)
+{
+ if(uart_index < 0 || uart_index > 2){
+ printk("invalid uart index\n");
+ return -1;
+ }
+
+ return zx29_uart_ports[uart_index].sleep_state;
+}
+EXPORT_SYMBOL_GPL(zx29_get_sleep_state);
+
+void zx29_set_sleep_state(int state, int uart_index)
+{
+ if(uart_index < 0 || uart_index > 2){
+ printk("invalid uart index\n");
+ return ;
+ }
+ printk(" uart %d, state change to:%d\n", uart_index, state);
+ zx29_uart_ports[uart_index].sleep_state = (state ? 1: 0);
+}
+EXPORT_SYMBOL_GPL(zx29_set_sleep_state);
+
+static ssize_t sleep_state_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev);
+ //struct zx29_uart_platdata *pdata = pdev->dev.platform_data;
+
+ return sprintf(buf, "\n wakeup_enable = %d\n",zx29_uart_ports[pdev->id].sleep_state);
+}
+
+static ssize_t sleep_state_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ uint32_t flag = 0;
+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev);
+ struct zx29_uart_platdata *pdata = pdev->dev.platform_data;
+ flag = simple_strtoul(buf, NULL, 16);
+ //pdata->uart_wakeup_enable = flag;
+ zx29_uart_ports[pdev->id].sleep_state = (flag ? 1: 0);
+ return count;
+}
+
+DEVICE_ATTR(sleep_state, S_IRUGO | S_IWUSR, sleep_state_show,
+ sleep_state_store);
+//bool uart_dma_filter_fn (struct dma_chan *chan, void *param)
+//{
+// dma_peripheral_id peri_id = (dma_peripheral_id) param;
+// if (chan->chan_id == (unsigned int)peri_id){
+// printk("uart_dma_filter_fn, peri_id:%d, ok\n", peri_id);
+// return true;
+// }
+// chan->private = param;
+//
+// return false;
+//}
+static void zx29_uart_console_putc(struct uart_port *port, int c);
+void zx29_uart_putc(struct uart_port *port, int c);
+
+#if CONFIG_SERIAL_ZX29_DMA
+void uart_mod_timer(struct zx29_uart_port *zup, unsigned long *flags)
+{
+ unsigned long t_delay = 0;
+ t_delay = msecs_to_jiffies(RX_DMA_TIMEOUT);
+ spin_unlock_irqrestore(&zup->port.lock, *flags);
+ //printk("uart_mod_timer, delay %d jiffies\n", t_delay);
+ mod_timer(&(zup->rx_dma_timer), jiffies + t_delay);
+
+ spin_lock_irqsave(&zup->port.lock, *flags);
+}
+#endif
+/**
+* Show the console_input attribute.
+*/
+static ssize_t console_input_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "\n console_input = %d\n",g_console_open_flag);
+}
+
+/**
+ * Store the console_input attribure.
+ * 0: disable console input function,only out put log
+ * 1: able console input, can input commands
+ */
+static ssize_t console_input_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ uint32_t flag = 0;
+ flag = simple_strtoul(buf, NULL, 16);
+ g_console_open_flag = flag;
+
+ return count;
+}
+
+DEVICE_ATTR(console_input, S_IRUGO | S_IWUSR, console_input_show,
+ console_input_store);
+
+static ssize_t ctsrts_input_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev);
+// struct zx29_uart_platdata *pdata = pdev->dev.platform_data;
+ if(pdev->id < 0 || pdev->id >= UART_NUM){
+ printk("ctsrts_input_store, invalid uart id, return error\n");
+ return 0;
+ }
+// return sprintf(buf, "\n ctsrts_input = %d\n",pdata->uart_ctsrtsuse);
+return sprintf(buf, "\n uart %d ctsrts_input = %d\n", pdev->id, zx29_uart_ports[pdev->id].enable_ctsrts);
+
+}
+
+static ssize_t ctsrts_input_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ uint32_t flag = 0;
+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev);
+
+ if(pdev->id != 0){
+ printk("ctsrts_input_store, invalid uart id, only uart support hardware control\n");
+ }
+ flag = simple_strtoul(buf, NULL, 16);
+ zx29_uart_ports[pdev->id].enable_ctsrts = flag;
+
+ return count;
+}
+
+DEVICE_ATTR(ctsrts_input, S_IRUGO | S_IWUSR, ctsrts_input_show,
+ ctsrts_input_store);
+
+static ssize_t wakeup_enable_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev);
+ //struct zx29_uart_platdata *pdata = pdev->dev.platform_data;
+
+ return sprintf(buf, "\n wakeup_enable = %d\n",1);
+}
+
+static ssize_t wakeup_enable_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ uint32_t flag = 0;
+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev);
+ //struct zx29_uart_platdata *pdata = pdev->dev.platform_data;
+ if(pdev->id != 4){
+ printk("\nctsrts_input_store, invalid uart id, only lp_uart(uart 4) support wakeup\n");
+ }
+ flag = simple_strtoul(buf, NULL, 16);
+ zx29_uart_ports[pdev->id].enable_wakeup = flag;
+
+ return count;
+}
+
+DEVICE_ATTR(wakeup_enable, S_IRUGO | S_IWUSR, wakeup_enable_show,
+ wakeup_enable_store);
+
+static ssize_t app_ctrl_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev);
+ //struct zx29_uart_platdata *pdata = pdev->dev.platform_data;
+
+ return sprintf(buf, "%d\n",zx29_uart_ports[pdev->id].app_ctrl);
+}
+
+static ssize_t app_ctrl_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ uint32_t flag = 0;
+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev);
+ //struct zx29_uart_platdata *pdata = pdev->dev.platform_data;
+ flag = simple_strtoul(buf, NULL, 16);
+ // pdata->uart_wakeup_enable = flag;
+ zx29_uart_ports[pdev->id].app_ctrl = (flag == 0) ? 0 : 1;
+
+ return count;
+}
+DEVICE_ATTR(app_ctrl, S_IRUGO | S_IWUSR, app_ctrl_show,
+ app_ctrl_store);
+
+int rxd_wake_cnt = 0;
+static ssize_t statics_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev);
+ //struct zx29_uart_platdata *pdata = pdev->dev.platform_data;
+
+ return sprintf(buf, "\n RX:%u,TX:%u,OE:%u,brk:%u,FE:%u,PE:%u ,rxd_wake_cnt:%d\n",
+ zx29_uart_ports[pdev->id].port.icount.rx,
+ zx29_uart_ports[pdev->id].port.icount.tx,
+ zx29_uart_ports[pdev->id].port.icount.overrun,
+ zx29_uart_ports[pdev->id].port.icount.brk,
+ zx29_uart_ports[pdev->id].port.icount.frame,
+ zx29_uart_ports[pdev->id].port.icount.parity,
+ rxd_wake_cnt
+ );
+}
+DEVICE_ATTR(statics, S_IRUGO, statics_show, NULL);
+#define VEHICLE_USE_ONE_UART_LOG 1
+#if VEHICLE_USE_ONE_UART_LOG
+#define ICP_CORE_ID_PS CORE_PS0
+#define ICP_CORE_ID_CAP 1
+#define ICP_CHANNEL_CONSOLE_UART 7
+#define ICP_MSG_LEN_CONSOLE_UART 2
+#define ICP_BUFFERSIZE_CONSOLE_TOGGLE 16
+#define SYMB_PS_CORE_ID ICP_CORE_ID_PS
+#define SYMB_CAP_CORE_ID ICP_CORE_ID_CAP
+#define SYMB_WHAT_CORE_ID 3
+#define ENABLE_CURRENT_CONSOLE_UART 1
+#define DISABLE_CURRENT_CONSOLE_UART 0
+#define ENABLE_TOGGLE 1
+#define DISABLE_TOGGLE 0
+unsigned char g_core_id_occupy_uart = 0;
+unsigned char g_cap_uart_toggle = 0;
+static irqreturn_t zx29_uart_interrupt(int irq, void *dev_id);
+static void restart_current_cons_uart(void)
+{
+ struct zx29_uart_port *zup = &zx29_uart_ports[DEBUG_CONSOLE];
+ struct uart_port *port = &zup->port;
+ enable_irq(port->irq);
+ g_core_id_occupy_uart = SYMB_CAP_CORE_ID;
+ spin_lock(&zup->port.lock);
+ tasklet_schedule(&zup->write_wakeup);
+ spin_unlock(&zup->port.lock);
+}
+static void forbid_current_cons_uart(void)
+{
+ struct zx29_uart_port *zup = &zx29_uart_ports[DEBUG_CONSOLE];
+ struct uart_port *port = &zup->port;
+ disable_irq(port->irq);
+ g_core_id_occupy_uart = SYMB_PS_CORE_ID;
+}
+static void process_ps2cap_rpmsg(char *arr)
+{
+ if((arr[0] == SYMB_CAP_CORE_ID) && (arr[1] == ENABLE_CURRENT_CONSOLE_UART)){
+ restart_current_cons_uart();
+ }else if((arr[0] == SYMB_CAP_CORE_ID) && (arr[1] == DISABLE_CURRENT_CONSOLE_UART)){
+ printk("current console uart not enable.\n");
+ g_core_id_occupy_uart = SYMB_CAP_CORE_ID;
+ }else if((arr[0] == SYMB_WHAT_CORE_ID) && (arr[1] == SYMB_PS_CORE_ID)){
+ g_core_id_occupy_uart = SYMB_PS_CORE_ID;
+ forbid_current_cons_uart();
+ }else if((arr[0] == SYMB_WHAT_CORE_ID) && (arr[1] == SYMB_CAP_CORE_ID)){
+ g_core_id_occupy_uart = SYMB_CAP_CORE_ID;
+ }
+ else{
+ printk("%s error!!\n",__func__);
+ }
+}
/* ICP channel callback: forward a non-empty PS->CAP message to the parser. */
static void icp_callback_ps2cap(void *buf, unsigned int len)
{
	if (len == 0) {
		printk("%s empty.\n", __func__);
		return;
	}
	process_ps2cap_rpmsg((char *)buf);
}
+static void echo_to_change_other_uart(uint32_t val)
+{
+ int ret;
+ if(val > ENABLE_TOGGLE)
+ {
+ printk("echo para error!!!\n");
+ return;
+ }
+ char arr[2] = {0};
+ arr[0] = SYMB_PS_CORE_ID;
+ arr[1] = val;
+ T_RpMsg_Msg icp_msg;
+ icp_msg.coreID = CORE_PS0;
+ icp_msg.chID = ICP_CHANNEL_CONSOLE_UART;
+ icp_msg.flag = RPMSG_WRITE_INT; /* 1- means send an icp interrupt> */
+ icp_msg.buf = arr;
+ icp_msg.len = ICP_MSG_LEN_CONSOLE_UART;
+ ret = rpmsgWrite(&icp_msg);
+ if(ret == 0){
+ if(val == ENABLE_TOGGLE)
+ g_core_id_occupy_uart = SYMB_PS_CORE_ID;
+ else if(val == DISABLE_TOGGLE)
+ g_core_id_occupy_uart = SYMB_CAP_CORE_ID;
+ }else
+ printk("echo_to_change_ohter_uart fail.\n");
+}
+static ssize_t console_uart_toggle_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "\n console_uart_toggle_show %d. \n", g_cap_uart_toggle);
+}
+static ssize_t console_uart_toggle_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ uint32_t flag = 0;
+ flag = simple_strtoul(buf, NULL, 16);
+ if(flag == ENABLE_TOGGLE){
+ g_cap_uart_toggle = 1;
+ forbid_current_cons_uart();
+ echo_to_change_other_uart(flag);
+ }else if(flag == DISABLE_TOGGLE){
+ g_cap_uart_toggle = 0;
+ g_core_id_occupy_uart = SYMB_CAP_CORE_ID;
+ }
+ return count;
+}
+DEVICE_ATTR(console_uart_toggle, S_IRUGO | S_IWUSR, console_uart_toggle_show,
+ console_uart_toggle_store);
+static void notify_occupy_uart_coreid_to_other(void)
+{
+ char arr[2] = {0};
+ arr[0] = SYMB_WHAT_CORE_ID;
+ arr[1] = g_core_id_occupy_uart;
+ T_RpMsg_Msg icp_msg;
+ icp_msg.coreID = CORE_AP;
+ icp_msg.chID = ICP_CHANNEL_CONSOLE_UART;
+ icp_msg.flag = RPMSG_WRITE_INT; /* 1- means send an icp interrupt> */
+ icp_msg.buf = arr;
+ icp_msg.len = ICP_MSG_LEN_CONSOLE_UART;
+ rpmsgWrite(&icp_msg);
+}
+static ssize_t coreid_occupy_uart_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "\n core %d occupy cons uart now! \n",g_core_id_occupy_uart);
+}
+static ssize_t coreid_occupy_uart_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ uint32_t flag = 0;
+ flag = simple_strtoul(buf, NULL, 16);
+ g_core_id_occupy_uart = flag;
+ if(flag == SYMB_CAP_CORE_ID){
+ g_cap_uart_toggle = 0;
+ }else if(SYMB_PS_CORE_ID){
+ g_cap_uart_toggle = 1;
+ }
+ return count;
+}
+DEVICE_ATTR(coreid_occupy_uart, S_IRUGO | S_IWUSR, coreid_occupy_uart_show,
+ coreid_occupy_uart_store);
+#endif
+
+//extern int (*pm_callback_fn)(void);
+#ifdef CONFIG_CPU_IDLE
+typedef int (*pm_callback_fn)(void);
+extern int zx_pm_register_callback(pm_callback_fn enter_cb, pm_callback_fn exit_cb);
+
+extern void disable_irq_nosync(unsigned int irq);
+extern void enable_irq(unsigned int irq);
+
+void uart_rxd_int_disable(struct uart_port *port)
+{
+ struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
+ zup->rxd_int_depth++;
+}
+EXPORT_SYMBOL(uart_rxd_int_disable);
+
/*
 * uart_0_pm_enter - CPU-idle entry hook for UART0.
 *
 * Does nothing unless the port's IRQ is active and some interrupt sources
 * are unmasked; otherwise bumps rxd_int_depth once so the RXD wake interrupt
 * is treated as already disabled while idle. Always returns 0.
 * NOTE(review): the actual IRQ mask/clear calls are commented out — the
 * hook currently only maintains the depth counter.
 */
int uart_0_pm_enter(void)
{
	struct zx29_uart_port *zup = &zx29_uart_ports[0];

	//zDrvInt_UnmaskIrq(UART0_RXD_INT);
	/* Skip when the IRQ is inactive or every source is masked. */
	if(zup->irq_state == 0 || zup->imr== 0)
		return 0;

	//pcu_int_clear(PCU_UART0_RXD_INT);
	if(!zup->rxd_int_depth){
		//enable_irq(UART0_RXD_INT);
		zup->rxd_int_depth++;	/* mark wake IRQ as logically disabled */
	}
	return 0;
}
+
/* CPU-idle exit hook for UART0: nothing to restore currently. */
int uart_0_pm_exit(void)
{
	return 0;
}
+#endif
+/****************************************************************************/
+
/*
 * zx29_sgbuf_init - allocate one coherent DMA RX buffer and wrap it in a
 * single-entry scatterlist.
 * @chan: DMA channel whose device performs the allocation
 * @sg:   sgbuf to initialise (buf, sg and dma_addr are filled in)
 * @dir:  unused (coherent memory needs no per-direction mapping)
 *
 * Returns 0 on success, -ENOMEM if the allocation fails.
 */
static int zx29_sgbuf_init(struct dma_chan *chan, struct zx29_sgbuf *sg,
			 enum dma_data_direction dir)
{
	dma_addr_t dma_addr;

	sg->buf = dma_alloc_coherent(chan->device->dev,
		ZX29_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
	if (!sg->buf){
		printk("zx29_sgbuf_init fail, no mem\n");
		return -ENOMEM;
	}
	sg_init_table(&sg->sg, 1);
	/* NOTE(review): phys_to_page() is applied to a dma_addr_t; this is
	 * only valid when DMA addresses equal physical addresses on this SoC
	 * (no IOMMU/offset) — confirm. */
	sg_set_page(&sg->sg, phys_to_page(dma_addr),
		ZX29_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
	sg_dma_address(&sg->sg) = dma_addr;
	sg_dma_len(&sg->sg) = ZX29_DMA_BUFFER_SIZE;
	sg->dma_addr = dma_addr;
	return 0;
}
+
+static void zx29_sgbuf_free(struct dma_chan *chan, struct zx29_sgbuf *sg,
+ enum dma_data_direction dir)
+{
+ if (sg->buf) {
+ dma_free_coherent(chan->device->dev,
+ ZX29_DMA_BUFFER_SIZE, sg->buf,
+ sg_dma_address(&sg->sg));
+ sg->dma_addr = NULL;
+ }
+}
+
+
+/****************************************************************************/
+static unsigned int zx29_uart_tx_empty(struct uart_port *port)
+{
+ return (UART_GET_FR(port)&(UART_FR_TXBUSY|UART_FR_TXFF)) ? 0 : TIOCSER_TEMT;
+}
+
+/****************************************************************************/
+static void zx29_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ unsigned int control = 0;
+ struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
+ zup->sigs = mctrl;
+ control = UART_GET_CR(&zup->port);
+ if(mctrl & TIOCM_DTR)
+ control |= UART_CR_DTR;
+ else
+ control &= ~ UART_CR_DTR;
+
+ if(mctrl & TIOCM_RTS)
+ control |= UART_CR_RTS;
+ else
+ control &= ~UART_CR_RTS;
+
+ if(mctrl & TIOCM_LOOP)
+ control |= UART_CR_LBE;
+ else
+ control &= ~UART_CR_LBE;
+
+ /* We need to disable auto-RTS if we want to turn RTS off */
+ if (zup->autorts) {
+ if (mctrl & TIOCM_RTS)
+ control |= UART_CR_RTSEN;
+ else
+ control &= ~UART_CR_RTSEN;
+ }
+ UART_PUT_CR(port, control);
+}
+
+/****************************************************************************/
+static unsigned int zx29_uart_get_mctrl(struct uart_port *port)
+{
+ struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
+ unsigned int mctrl = 0;
+ unsigned int uart_flag = 0;
+
+ uart_flag = UART_GET_FR(port);
+
+ mctrl = (uart_flag&UART_FR_CTS) ?TIOCM_CTS : 0;
+ mctrl |= (zup->sigs & TIOCM_RTS);
+ mctrl |= (uart_flag&UART_FR_DCD) ? TIOCM_CD : 0;
+ mctrl |= (uart_flag&UART_FR_DSR) ? TIOCM_DSR : 0;
+ mctrl |= (uart_flag&UART_FR_RI) ? TIOCM_RI : 0;
+
+ return mctrl;
+}
+
+/****************************************************************************/
/*
 * zx29_uart_start_tx - uart_ops.start_tx: kick transmission of queued data.
 *
 * Console port (VEHICLE_USE_ONE_UART_LOG): when the PS core owns the UART
 * the pending bytes are discarded; otherwise they are pushed synchronously
 * via the polled console putc. Other ports prime the FIFO by PIO, then
 * either start TX DMA or fall back to the TX interrupt.
 * Called with the port lock held.
 */
static void zx29_uart_start_tx(struct uart_port *port)
{
	struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
	unsigned int control = 0;
	unsigned int reg_bak[10] = {0};
	struct circ_buf *xmit = &zup->port.state->xmit;
	int count = 0;
#if VEHICLE_USE_ONE_UART_LOG
	if((port->line == DEBUG_CONSOLE))
	{
		if(g_core_id_occupy_uart == SYMB_PS_CORE_ID){
			/* PS core owns the console: drain (discard) pending bytes. */
			#if 1
			count = uart_circ_chars_pending(xmit);
			while(count-- > 0)
			{
				xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
				if (uart_circ_empty(xmit))
					break;
			}
			#endif
			return;
		}
		/* CAP owns the console: emit synchronously via polled putc. */
		count = uart_circ_chars_pending(xmit);
		while(count-- > 0)
		{
			zx29_uart_console_putc(&zup->port, xmit->buf[xmit->tail]);
			xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
			zup->port.icount.tx++;
			if (uart_circ_empty(xmit)){
				break;
			}
		}

		if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		{
			/* Ring has room again: wake up writers via tasklet. */
			spin_lock(&zup->port.lock);
			tasklet_schedule(&zup->write_wakeup);
			spin_unlock(&zup->port.lock);
			return;
		}
		return;
	}
else
#endif
{
	/* Prime the FIFO only when TX is idle: no TXIS pending and FIFO empty. */
	if(!(UART_GET_RIS(port)&UART_TXIS) && (UART_GET_FR(port) & UART_FR_TXFE))
	{
		if(!(UART_GET_RIS(port)&UART_TXIS))
		{
			count = uart_circ_chars_pending(xmit);
			if(count >= zup->port.fifosize)
				count = 15;//sent data more than TX ifls, TXIS will coming soon
			if(count != 0){
				do {
					zx29_uart_putc(&zup->port, xmit->buf[xmit->tail]);
					xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
					zup->port.icount.tx++;
					if (uart_circ_empty(xmit) || (UART_GET_RIS(port)&UART_TXIS))
						break;
				} while (--count > 0);
			}
		}

	}
	}
#if CONFIG_SERIAL_ZX29_DMA
	if(!uart_console(port))
	{
		if (!zx29_dma_tx_start(zup))
		{
			/* DMA declined the job: fall back to IRQ-driven PIO and
			 * stuff the FIFO until it fills or TXIS fires. */
			zup->imr |= UART_TXIM;
			UART_PUT_IMSC(port, zup->imr);
			if(!(UART_GET_RIS(port)&UART_TXIS)){
				if((UART_GET_FR(port) & UART_FR_TXFF))
					return;
				count = uart_circ_chars_pending(xmit);
				while (count > 0) {
					UART_PUT_CHAR(&zup->port, xmit->buf[xmit->tail]);
					xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
					zup->port.icount.tx++;
					if (uart_circ_empty(xmit) || (UART_GET_RIS(port)&UART_TXIS) ||
						(UART_GET_FR(port) & UART_FR_TXFF))
						break;
				}
			}
		}
	}
	else
	{
		/* Console: interrupt-driven PIO only. */
		zup->imr |= UART_TXIM;
		UART_PUT_IMSC(port, zup->imr);
	}
#else
	zup->imr |= UART_TXIM;
	UART_PUT_IMSC(port, zup->imr);
#endif
}
+
+static void uart_write_wakeup_task(unsigned long _port)
+{
+ struct uart_port *port = (void *)_port;
+struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
+ struct platform_device *pdev=port->private_data;
+ //printk("wakeup_task,port:%d, rxd_wakeup:%d\n", port->line, zup->rxd_wakeup);
+
+ if(zup->rxd_wakeup){
+ //rxd wake
+ printk("wakeup_task,port:%d, rxd_wakeup:%d\n", port->line, zup->rxd_wakeup);
+ pm_wakeup_dev_event(&pdev->dev, 5000, false);
+ disable_irq(zup->rxd_irq);
+ zup->rxd_wakeup = false;
+ } else {
+ uart_write_wakeup(port);
+ }
+
+}
+
+#if CONFIG_SERIAL_ZX29_DMA
/*
 * dma_complete_thread_use_dma_cyclic - kthread draining cyclic RX DMA slots.
 *
 * Woken via zup->sema_cyclic once per completed ring slot (indexed by
 * flg_enter_th). Each iteration pushes the 4 KiB slot to the TTY, advances
 * the ring index and re-arms the RX hrtimer. Exits when the port closes or
 * the current slot was never DMA-mapped. The port lock is dropped around
 * tty_insert_flip_string(), which may sleep.
 */
int dma_complete_thread_use_dma_cyclic(void *ptr)
{
	unsigned long flags;
	struct zx29_uart_port *zup = (struct zx29_uart_port *)ptr;
	size_t pending;
	int dma_count = 0;
	struct device *dev = NULL;
	dev = zup->dmarx.chan->device->dev;
	int uart_id = zup->port.line;
	while(down_interruptible(&zup->sema_cyclic) == 0)
	{
		if(zup->port_close || !uart_dma_cycle[uart_id].sgbuf[uart_dma_cycle[uart_id].flg_enter_th].dma_addr)
			break;
		spin_lock_irqsave(&zup->port.lock, flags);
		uart_dma_cycle[uart_id].cnt_th_total++;
		uart_dma_cycle[uart_id].cnt_th++;
		zup->sg2tty = &uart_dma_cycle[uart_id].sgbuf[uart_dma_cycle[uart_id].flg_enter_th];
		zup->sg2tty_len = 4096;	/* fixed cyclic slot size */
		pending = zup->sg2tty_len;
		/* Report and clear line-error flags noted by the DMA callback. */
		if(uart_dma_cycle[uart_id].flg_be || uart_dma_cycle[uart_id].flg_fe|| uart_dma_cycle[uart_id].flg_pe){
			printk("error in uart%d: fe %u ,be %u pe %u.\n",zup->port.line,zup->port.icount.frame,
					zup->port.icount.brk,zup->port.icount.parity);
			uart_dma_cycle[uart_id].flg_be = 0;
			uart_dma_cycle[uart_id].flg_fe = 0;
			uart_dma_cycle[uart_id].flg_pe = 0;
		}
		dma_sync_sg_for_cpu(dev, &zup->sg2tty->sg, 1, DMA_FROM_DEVICE);
		/* Drop the lock: TTY insertion may sleep. */
		spin_unlock_irqrestore(&zup->port.lock, flags);
		dma_count = tty_insert_flip_string(&zup->port.state->port,
				zup->sg2tty->buf, pending);
		tty_flip_buffer_push(&zup->port.state->port);
		spin_lock_irqsave(&zup->port.lock, flags);
		/* Hand the buffer back to the device side. */
		dma_sync_sg_for_device(dev, &zup->sg2tty->sg, 1, DMA_FROM_DEVICE);
		zup->sg2tty = NULL;
		zup->sg2tty_len = 0;
		zup->port.icount.rx += dma_count;
		if (dma_count < pending)
			dev_info(zup->port.dev,
				"couldn't insert all characters (TTY is full?)\n");
		uart_dma_cycle[uart_id].flg_enter_th = (uart_dma_cycle[uart_id].flg_enter_th+1)%UART_DMA_CYCLE_RX_CONFIG_COUNT;
		uart_dma_cycle[uart_id].cnt_callback--;
		if(!hrtimer_active(&zup->rx_dma_hrtimer))
			hrtimer_restart(&zup->rx_dma_hrtimer);
		spin_unlock_irqrestore(&zup->port.lock, flags);
	}
	return 0;
}
+int dma_complete_thread(void *ptr)
+{
+ unsigned long flags;
+ struct zx29_uart_port *zup = (struct zx29_uart_port *)ptr;
+
+ size_t pending;
+ struct dma_tx_state state;
+ struct zx29_dmarx_data *dmarx = &zup->dmarx;
+ struct dma_chan *rxchan = dmarx->chan;
+ bool lastbuf;
+ int dma_count = 0;
+ struct zx29_sgbuf *sgbuf = NULL;
+ struct device *dev = NULL;
+ dma_peripheral_id rx_id = uart_get_rx_dma_peripheral_id(zup);
+ dev = zup->dmarx.chan->device->dev;
+
+ while(down_interruptible(&zup->sema) == 0)
+ {
+ if(zup->port_close)
+ break;
+ spin_lock_irqsave(&zup->port.lock, flags);
+ // tty = zup->port.state->port.tty;
+ if(!zup->sg2tty)
+ panic("dma_complete_thread, buffer 2 tty is invalid\n");
+ // dev = zup->dmarx.chan->device->dev;
+ pending = zup->sg2tty_len;
+ if(zx29_dma_rx_running(zup)){
+
+ test_uart_static(zup->port.line, NULL, 0, 10);
+ //uart_mod_timer(zup, &flags);
+ if(!hrtimer_active(&zup->rx_dma_hrtimer))
+ hrtimer_forward_now(&zup->rx_dma_hrtimer, g_hr_interval);
+ }
+ /* Pick everything from the DMA first */
+ if (pending) {
+ /* Sync in buffer */
+ dma_sync_sg_for_cpu(dev, &zup->sg2tty->sg, 1, DMA_FROM_DEVICE);
+ //BUG();
+
+ spin_unlock_irqrestore(&zup->port.lock, flags);
+ dma_count = tty_insert_flip_string(&zup->port.state->port,
+ zup->sg2tty->buf, pending);
+ test_uart_static(zup->port.line, zup->sg2tty->buf, pending, 11);
+ tty_flip_buffer_push(&zup->port.state->port);
+
+ spin_lock_irqsave(&zup->port.lock, flags);
+ /* Return buffer to device */
+ dma_sync_sg_for_device(dev, &zup->sg2tty->sg, 1, DMA_FROM_DEVICE);
+
+ zup->sg2tty = NULL;
+ zup->sg2tty_len = 0;
+ zup->port.icount.rx += dma_count;
+
+ //if(zup->port.line == 0)
+ //printk("yanming dma_complete_thread, dma2tty:%d\n", dma_count);
+ if (dma_count < pending){
+ sc_debug_info_record(MODULE_ID_CAP_UART, "uart%d couldn't insert all characters \n",zup->port.line);
+ dev_info(zup->port.dev,
+ "couldn't insert all characters (TTY is full?)\n");
+ }
+
+
+ }
+#if 0
+ zup->work_state = false;
+ zup->pre_pending = 0;
+ zup->imr |= UART_RXIM;
+ UART_PUT_IMSC(&zup->port, zup->imr);
+#endif
+ spin_unlock_irqrestore(&zup->port.lock, flags);
+ }
+
+ return 0;
+}
+#endif
+
+/****************************************************************************/
/*
 * zx29_uart_stop_tx - uart_ops.stop_tx: mask the TX interrupt, pause TX DMA
 * and release the cpuidle busy flag. On the console a final write-wakeup is
 * scheduled when TX was stopped by the serial core.
 */
static void zx29_uart_stop_tx(struct uart_port *port)
{
	struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
	zup->imr &= ~UART_TXIM;
	UART_PUT_IMSC(port, zup->imr);
#ifdef CONFIG_SERIAL_ZX29_UART_CONSOLE
	if((port->line == DEBUG_CONSOLE) && uart_tx_stopped(port))
	{
		//uart_write_wakeup(port);
		tasklet_schedule(&zup->write_wakeup);
	}
#endif

#if CONFIG_SERIAL_ZX29_DMA
	zx29_dma_tx_stop(zup);
#endif

	/* TX idle: allow cpuidle to enter low-power states again. */
	zx_cpuidle_set_free(IDLE_FLAG_UART);

}
+
+/****************************************************************************/
/*
 * zx29_uart_stop_rx - mask every RX and line-error interrupt source and stop
 * RX DMA. Also called directly from the sleep path (uart_wakeup_callback).
 */
void zx29_uart_stop_rx(struct uart_port *port)
{
	struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);

	zup->imr &= ~(UART_RXIM|UART_RTIM|UART_FEIM|UART_PEIM|UART_BEIM|UART_OEIM);
	UART_PUT_IMSC(port, zup->imr);
#if CONFIG_SERIAL_ZX29_DMA
	zx29_dma_rx_stop(zup);
#endif
}
+
+/****************************************************************************/
+static void zx29_uart_break_ctl(struct uart_port *port, int break_state)
+{
+ struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
+ unsigned long flags;
+ unsigned int lcr_h;
+ spin_lock_irqsave(&zup->port.lock, flags);
+ lcr_h = UART_GET_LCRH(port);
+ if (break_state == -1)
+ lcr_h |= UART_LCRH_BRK;
+ else
+ lcr_h &= ~UART_LCRH_BRK;
+ UART_PUT_LCRH(port, lcr_h);
+ spin_unlock_irqrestore(&zup->port.lock, flags);
+}
+
+/****************************************************************************/
+static void zx29_uart_enable_ms(struct uart_port *port)
+{
+ struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
+ zup->imr |= UART_RIMIM|UART_CTSMIM|UART_DCDMIM|UART_DSRMIM;
+ UART_PUT_IMSC(port, zup->imr);
+}
+
+/****************************************************************************/
+/*--------------------------------------------------------------------
+ * Reads up to 256 characters from the FIFO or until it's empty and
+ * inserts them into the TTY layer. Returns the number of characters
+ * read from the FIFO.
+ --------------------------------------------------------------------*/
+static int zx29_uart_fifo_to_tty(struct zx29_uart_port *zup)
+{
+ struct uart_port *port = &zup->port;
+ u32 status, ch, i = 0;
+ unsigned int flag, max_count = 256;
+ int fifotaken = 0;
+ u8 uart_poll_char[16] ={0};
+
+ while (max_count--) {
+ status = UART_GET_FR(port);
+ if (status & UART_FR_RXFE)
+ break;
+
+ /* Take chars from the FIFO and update status */
+ ch = UART_GET_CHAR(port) | UART_DUMMY_DR_RX;
+
+#if 0
+ if(g_console_open_flag == 0 &&
+ port->line == DEBUG_CONSOLE){
+ if((ch&0xff) == 't'){
+ memset(uart_poll_char, 0, sizeof(uart_poll_char));
+ uart_poll_char[0] = 't';
+ i = 0;
+ printk("ch = %c i = %d\n",ch,i);
+ }else if ((ch&0xff) == 'y' && (i == 1)){
+ uart_poll_char[1] = 'y';
+ printk("ch = %c i = %d\n",ch,i);
+ }else if ((ch&0xff) == 'o' && (i == 2)){
+ uart_poll_char[2] = 'o';
+ printk("ch = %c i = %d\n",ch,i);
+ }else if ((ch&0xff) == 'p' && (i == 3)){
+ uart_poll_char[3] = 'p';
+ printk("ch = %c i = %d\n",ch,i);
+
+ }else if ((ch&0xff) == 'e' && (i == 4)){
+ uart_poll_char[4] = 'e';
+ printk("ch = %c i = %d\n",ch,i);
+
+ }else if ((ch&0xff) == 'n' && (i == 5)){
+ uart_poll_char[5] = 'n';
+ printk("ch = %c i = %d\n",ch,i);
+ g_console_open_flag = 1;
+ printk("ch = %c i = %d,g_console_open_flag:%d\n",ch,i,g_console_open_flag);
+ }else {
+ i = 10;
+ }
+ i++;
+ }
+#endif
+ flag = TTY_NORMAL;
+ if(zup->autobaud_state == UART_PORT_AUTOBAUD_ON)
+ {
+ if(zup->port.icount.rx < UART_PORT_AUTOBAUD_BYTE)
+ {
+ uart_port_autobaud_buffer[zup->port.icount.rx] = ch;
+ }
+ else
+ {
+ uart_port_autobaud_gtflag = 1 ;
+ }
+ }
+ zup->port.icount.rx++;
+ if(zup->autobaud_state == UART_PORT_AUTOBAUD_OFF)
+ {
+ if(fifotaken < 16){
+ uart_poll_char[fifotaken] = ch & 0xFF;
+ }
+ fifotaken++;
+
+ if (unlikely(ch & UART_DR_ERROR)) {
+ if (ch & UART_DR_BE) {
+ ch &= ~(UART_DR_FE | UART_DR_PE);
+ zup->port.icount.brk++;
+ if (uart_handle_break(&zup->port))
+ continue;
+ } else if (ch & UART_DR_PE)
+ zup->port.icount.parity++;
+ else if (ch & UART_DR_FE)
+ zup->port.icount.frame++;
+ else if (ch & UART_DR_OE){
+ zup->port.icount.overrun++;
+ //if(!uart_console(&zup->port))
+ // BUG_ON(1);
+ }
+ ch &= zup->port.read_status_mask;
+
+ if (ch & UART_DR_BE)
+ flag = TTY_BREAK;
+ else if (ch & UART_DR_PE)
+ flag = TTY_PARITY;
+ else if (ch & UART_DR_FE)
+ flag = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(&zup->port, ch & 255))
+ continue;
+ if(g_console_open_flag || port->line != DEBUG_CONSOLE){
+ uart_insert_char(&zup->port, ch, UART_DR_OE, ch, flag);
+ }
+ }
+ }
+
+ test_uart_static(zup->port.line, uart_poll_char, fifotaken, 3);
+
+ return fifotaken;
+}
+
+/****************************************************************************/
+static void zx29_uart_rx_chars(struct zx29_uart_port *zup)
+{
+ unsigned long flags;
+
+ //struct tty_struct *tty = zup->port.state->port.tty;
+
+ zx29_uart_fifo_to_tty(zup);
+ spin_unlock(&zup->port.lock);
+
+ tty_flip_buffer_push(&zup->port.state->port);
+
+#if CONFIG_SERIAL_ZX29_DMA
+ if(!uart_console(&zup->port)){//console doesn't use dma rcv data
+ if (zx29_dma_rx_available(zup)) {
+ if (zx29_dma_rx_trigger_dma(zup)) {
+ dev_dbg(zup->port.dev, "could not trigger RX DMA job "
+ "fall back to interrupt mode again\n");
+ zup->imr |= UART_RXIM;
+ } else{
+ zup->imr &= ~UART_RXIM;
+ }
+ UART_PUT_IMSC(&zup->port,zup->imr);
+ }
+ }
+#endif
+RX_END:
+ spin_lock(&zup->port.lock);
+
+}
+
+/****************************************************************************/
+static void zx29_uart_tx_chars(struct zx29_uart_port *zup)
+{
+ struct circ_buf *xmit = &zup->port.state->xmit;
+ unsigned long flags;
+ int count;
+
+ if (zup->port.x_char) {
+ UART_PUT_CHAR(&zup->port, zup->port.x_char);
+ zup->port.icount.tx++;
+ zup->port.x_char = 0;
+ return;
+ }
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&zup->port)) {
+ zx29_uart_stop_tx(&zup->port);
+ return;
+ }
+#if CONFIG_SERIAL_ZX29_DMA
+ /* If we are using DMA mode, try to send some characters. */
+ if(!uart_console(&(zup->port)))
+ {
+ if (zx29_dma_tx_irq(zup))
+ return;
+ }
+#endif
+ count = zup->port.fifosize >> 1;
+ do {
+ zx29_uart_putc(&zup->port, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ zup->port.icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ } while (--count > 0);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ {
+ spin_unlock(&zup->port.lock);
+ //uart_write_wakeup(&zup->port);
+ tasklet_schedule(&zup->write_wakeup);
+ spin_lock(&zup->port.lock);
+ }
+
+ if (uart_circ_empty(xmit))
+ zx29_uart_stop_tx(&zup->port);
+}
+
+#if CONFIG_SERIAL_ZX29_DMA
+
+dma_peripheral_id uart_get_rx_dma_peripheral_id(struct zx29_uart_port *zup)
+{
+ struct uart_port *port = &zup->port;
+ if(port->line < UART0 || port->line > UART4){
+ printk("get_rx_dma_peripheral_id,fail, invalid port->line:%d\n", port->line);
+ }
+ if(port->line == UART0){
+ return DMA_CH_UART0_RX;
+ } else if(port->line == UART1){
+ return DMA_CH_UART1_RX;
+ }else if(port->line == UART2){
+ return DMA_CH_UART2_RX;
+ }
+// else if(port->line == UART3){
+// return DMA_CH_UART3_RX;
+// }else if(port->line == UART4){
+// return DMA_CH_UART4_RX;
+// }
+
+ return DMA_CH_NUM;
+}
+
+/*
+ * We received a transmit interrupt without a pending X-char but with
+ * pending characters.
+ * Locking: called with port lock held and IRQs disabled.
+ * Returns:
+ * false if we want to use PIO to transmit
+ * true if we queued a DMA buffer
+ */
+static bool zx29_dma_tx_irq(struct zx29_uart_port *zup)
+{
+ if (!zup->using_tx_dma)
+ return false;
+
+ /*
+ * If we already have a TX buffer queued, but received a
+ * TX interrupt, it will be because we've just sent an X-char.
+ * Ensure the TX DMA is enabled and the TX IRQ is disabled.
+ */
+ if (zup->dmatx.queued) {
+ zup->dmacr |= UART_TXDMAE;
+ UART_PUT_DMACR(&zup->port, zup->dmacr);
+ zup->imr &= ~UART_TXIM;
+ UART_PUT_IMSC(&zup->port,zup->imr);
+ return true;
+ }
+
+ /*
+ * We don't have a TX buffer queued, so try to queue one.
+ * If we successfully queued a buffer, mask the TX IRQ.
+ */
+ if (zx29_uart_dma_tx_chars(zup) > 0) {
+ zup->imr &= ~UART_TXIM;
+ UART_PUT_IMSC(&zup->port,zup->imr);
+ return true;
+ }
+ return false;
+}
+
+
+/*
+ * Stop the DMA transmit (eg, due to received XOFF).
+ * Locking: called with port lock held and IRQs disabled.
+ */
+static inline void zx29_dma_tx_stop(struct zx29_uart_port *zup)
+{
+ if (zup->dmatx.queued) {
+ zup->dmacr &= ~UART_TXDMAE;
+ UART_PUT_DMACR(&zup->port, zup->dmacr);
+ }
+}
+
+
+/*
+ * Try to start a DMA transmit, or in the case of an XON/OFF
+ * character queued for send, try to get that character out ASAP.
+ * Locking: called with port lock held and IRQs disabled.
+ * Returns:
+ * false if we want the TX IRQ to be enabled
+ * true if we have a buffer queued
+ */
+static inline bool zx29_dma_tx_start(struct zx29_uart_port *zup)
+{
+ u16 dmacr;
+
+ if (!zup->using_tx_dma)
+ return false;
+
+ if (!zup->port.x_char) {
+ /* no X-char, try to push chars out in DMA mode */
+ bool ret = true;
+
+ if (!zup->dmatx.queued) {
+ if (zx29_uart_dma_tx_chars(zup) > 0) {
+ zup->imr &= ~UART_TXIM;
+ ret = true;
+ } else {
+ zup->imr |= UART_TXIM;
+ ret = false;
+ }
+ UART_PUT_IMSC(&zup->port,zup->imr);
+ } else if (!(zup->dmacr & UART_TXDMAE)) {
+ zup->dmacr |= UART_TXDMAE;
+ UART_PUT_DMACR(&zup->port, zup->dmacr);
+ }
+ return ret;
+ }
+
+ /*
+ * We have an X-char to send. Disable DMA to prevent it loading
+ * the TX fifo, and then see if we can stuff it into the FIFO.
+ */
+ dmacr = zup->dmacr;
+ zup->dmacr &= ~UART_TXDMAE;
+ UART_PUT_DMACR(&zup->port, zup->dmacr);
+
+ if (UART_GET_FR(&zup->port) & UART_FR_TXFF) {
+ /*
+ * No space in the FIFO, so enable the transmit interrupt
+ * so we know when there is space. Note that once we've
+ * loaded the character, we should just re-enable DMA.
+ */
+ return false;
+ }
+
+ UART_PUT_CHAR(&zup->port, zup->port.x_char);
+ //writew(uap->port.x_char, uap->port.membase + UART01x_DR);
+ zup->port.icount.tx++;
+ zup->port.x_char = 0;
+
+ /* Success - restore the DMA state */
+ zup->dmacr = dmacr;
+ UART_PUT_DMACR(&zup->port, zup->dmacr);
+ //writew(dmacr, uap->port.membase + UART011_DMACR);
+
+ return true;
+}
+
+/****************************************************************************/
+
+//#if CONFIG_SERIAL_ZX29_DMA
+/*
+ * Flush the transmit buffer.
+ * Locking: called with port lock held and IRQs disabled.
+ */
+static void zx29_dma_flush_buffer(struct uart_port *port)
+{
+ struct zx29_uart_port *zup = (struct zx29_uart_port *)port;
+ if (!zup->using_tx_dma)
+ return;
+
+ /* Avoid deadlock with the DMA engine callback */
+ //dmaengine_terminate_all(zup->dmatx.chan);
+ if (zup->dmatx.queued) {
+
+ //printk(KERN_INFO "zx29_dma_flush_buffer enter[%s][%d] Port[%d]\n",__func__,__LINE__,port->line);
+ dma_unmap_sg(zup->dmatx.chan->device->dev, &zup->dmatx.sg, 1,
+ DMA_TO_DEVICE);
+ zup->dmatx.queued = false;
+ zup->dmacr &= ~UART_TXDMAE;
+ UART_PUT_DMACR(&zup->port, zup->dmacr);
+ }
+}
+
/*
 * Arm one RX DMA transfer into the currently-free buffer of the a/b
 * pair. On success the RX and RX-timeout interrupts are masked (DMA
 * owns reception) and the UART RX DMA request is enabled.
 * Returns 0 on success, -EIO if there is no channel, -EBUSY if the
 * DMA engine could not prepare a descriptor (caller falls back to
 * interrupt-driven RX).
 */
static int zx29_dma_rx_trigger_dma(struct zx29_uart_port *zup)
{
	struct dma_chan *rxchan = zup->dmarx.chan;
	struct zx29_dmarx_data *dmarx = &zup->dmarx;
	struct dma_async_tx_descriptor *desc;
	struct zx29_sgbuf *sgbuf;

	/* NOTE(review): rx_id is only referenced by commented-out code below. */
	dma_peripheral_id rx_id = uart_get_rx_dma_peripheral_id(zup);
	if (!rxchan)
	{
		printk("[%s][%d]\n",__func__,__LINE__);
		return -EIO;
	}

	/* Start the RX DMA job */

	/* Select the buffer of the double-buffer pair that is free. */
	sgbuf = zup->dmarx.use_buf_b ?
		&zup->dmarx.sgbuf_b : &zup->dmarx.sgbuf_a;
	/*

	sgbuf = zup->dmarx.use_buf_b ?
		&zup->dmarx.sgbuf_a : &zup->dmarx.sgbuf_b;
	*/
	/* Fill the vendor DMA descriptor for the current ring slot. */
	zup->dmarx.rx_def[zup->dmarx.rx_index].link_addr=0;
	zup->dmarx.rx_def[zup->dmarx.rx_index].dest_addr=(unsigned int)(sgbuf->dma_addr);
	zup->dmarx.rx_def[zup->dmarx.rx_index].count=ZX29_DMA_BUFFER_SIZE;//fifo or max buffer?
	wmb();

	dmaengine_slave_config(rxchan, (struct dma_slave_config*)&zup->dmarx.rx_def[zup->dmarx.rx_index]);
	desc = rxchan->device->device_prep_interleaved_dma(rxchan,NULL,0);


	/*
	 * If the DMA engine is busy and cannot prepare a
	 * channel, no big deal, the driver will fall back
	 * to interrupt mode as a result of this error code.
	 */
	if (!desc) {
		printk(KERN_INFO "!!ERROR DESC !!![%s][%d]Port:[%d]\n",__func__,__LINE__,zup->port.line);
		sc_debug_info_record(MODULE_ID_CAP_UART, "uart%d ERROR DESC \n",zup->port.line);
		zup->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		//zx29_dma_force_stop(rx_id);
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = uart_dma_rx_callback;
	desc->callback_param = zup;
	zup->curr_sg = sgbuf;
	wmb();

	/* Submit, kick the engine, and advance the descriptor ring index. */
	dmarx->cookie = dmaengine_submit(desc);
	dma_async_issue_pending(rxchan);
	atomic_inc(&zup->dmarx.count);
	zup->dmarx.rx_index = (zup->dmarx.rx_index +1)%UART_DMA_RX_MAX_COUNT;
	zup->dmacr |= UART_RXDMAE;
	UART_PUT_DMACR(&zup->port, zup->dmacr);
	zup->dmarx.running = true;
	zup->dmarx.used = true;
	/* DMA now owns RX: mask the RX and RX-timeout interrupts. */
	zup->imr &= ~(UART_RXIM | UART_RTIM);
	UART_PUT_IMSC(&zup->port,zup->imr);


	return 0;
}
/*
 * Arm cyclic RX DMA: a ring of 5 * ZX29_DMA_BUFFER_SIZE bytes with a
 * callback every ZX29_DMA_BUFFER_SIZE period (used in power mode 1).
 * On success the RX/RX-timeout interrupts are masked and the UART RX
 * DMA request is enabled.
 * Returns 0 on success, -EIO if there is no channel, -EBUSY if no
 * descriptor could be prepared.
 */
static int zx29_dma_rx_trigger_dma_use_dma_cyclic(struct zx29_uart_port *zup)
{
	struct dma_chan *rxchan = zup->dmarx.chan;
	struct zx29_dmarx_data *dmarx = &zup->dmarx;
	struct dma_async_tx_descriptor *desc;
	/* NOTE(review): rx_id is unused in this function. */
	dma_peripheral_id rx_id = uart_get_rx_dma_peripheral_id(zup);
	int uart_id = zup->port.line;
	if (!rxchan)
	{
		printk("[%s][%d]\n",__func__,__LINE__);
		return -EIO;
	}
	dmaengine_slave_config(rxchan, (struct dma_slave_config*)&uart_dma_cycle[uart_id].rxdef);
	desc = rxchan->device->device_prep_dma_cyclic(rxchan,NULL,(ZX29_DMA_BUFFER_SIZE *5) , ZX29_DMA_BUFFER_SIZE,0,0);
	if (!desc) {
		printk(KERN_INFO "!!ERROR DESC !!![%s][%d]Port:[%d]\n",__func__,__LINE__,zup->port.line);
		zup->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		return -EBUSY;
	}
	desc->callback = uart_dma_rx_callback_use_dma_cyclic;
	desc->callback_param = zup;
	wmb();
	dmarx->cookie = dmaengine_submit(desc);
	dma_async_issue_pending(rxchan);
	zup->dmacr |= UART_RXDMAE;
	UART_PUT_DMACR(&zup->port, zup->dmacr);
	uart_dma_cycle[uart_id].flg_enter_th = 0;
	zup->dmarx.running = true;
	zup->dmarx.used = true;
	/* DMA now owns RX: mask the RX and RX-timeout interrupts. */
	zup->imr &= ~(UART_RXIM | UART_RTIM);
	UART_PUT_IMSC(&zup->port,zup->imr);
	return 0;
}
+
+void uart_dma_rx_callback(void *data)
+{
+ unsigned long flags;
+
+ struct zx29_uart_port *zup = (struct zx29_uart_port *)data;
+ int uart_id = zup->port.line;
+ struct zx29_dmarx_data *dmarx = &zup->dmarx;
+ struct dma_chan *rxchan = dmarx->chan;
+ struct device *dev = NULL;
+// struct dma_tx_state state;
+ unsigned int ris_status;
+
+ bool lastbuf;
+ int dma_count = 0;
+ struct zx29_sgbuf *sgbuf = zup->curr_sg;
+ size_t pending;
+
+ spin_lock_irqsave(&zup->port.lock, flags);
+ ris_status = UART_GET_RIS(&zup->port);
+ if(ris_status & (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS)){
+ if(ris_status & UART_OEIS){
+ zup->port.icount.overrun++;
+ g_uart_overrun[uart_id] = 4;
+ test_uart_static(zup->port.line, NULL, 0, 20);
+ //if(!uart_console(&zup->port))
+ // BUG_ON(1);
+ }
+ if(ris_status & UART_BEIS)
+ zup->port.icount.brk++;
+ if(ris_status & UART_PEIS)
+ zup->port.icount.parity++;
+ if(ris_status & UART_FEIS)
+ zup->port.icount.frame++;
+ UART_PUT_ICR(&zup->port, (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS));
+ }
+ dma_peripheral_id rx_id = uart_get_rx_dma_peripheral_id(zup);
+ zx29_dma_stop(rx_id);
+
+ dev = zup->dmarx.chan->device->dev;
+ zup->dmacr &= ~UART_RXDMAE;
+ UART_PUT_DMACR(&zup->port,zup->dmacr);
+
+ //spin_lock_irqsave(&zup->port.lock, flags);
+ zup->sg2tty = sgbuf;
+// rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
+ zup->sg2tty_len = zup->sg2tty->sg.length - zx29_dma_get_transfer_num(rx_id);
+ //zx29_dma_force_stop(rx_id);
+ // dmaengine_terminate_all(rxchan);
+ dmarx->use_buf_b = ! dmarx->use_buf_b;
+ wmb();
+ //BUG_ON(pending > ZX29_DMA_BUFFER_SIZE);
+ /* Then we terminate the transfer - we now know our residue */
+ //dmaengine_terminate_all(rxchan);
+
+ zup->dmarx.running = false;
+ zup->dmarx.used = false;
+ test_uart_static(zup->port.line, NULL, 0, 9);
+ if (zx29_dma_rx_trigger_dma(zup)) {
+ printk("rx_dma_chars RXDMA start fail\n");
+ zup->imr |= UART_RXIM;
+ UART_PUT_IMSC(&zup->port,zup->imr);
+ }else{
+ zup->pre_pending = 0;
+ zup->dmarx.used = true;
+ zup->work_state = true;
+ }
+ spin_unlock_irqrestore(&zup->port.lock, flags);
+
+ up(&zup->sema);
+}
+
+void uart_dma_rx_callback_use_dma_cyclic(void *data)
+{
+ unsigned long flags;
+ struct zx29_uart_port *zup = (struct zx29_uart_port *)data;
+ unsigned int ris_status;
+ int uart_id = zup->port.line;
+ spin_lock_irqsave(&zup->port.lock, flags);
+ uart_dma_cycle[uart_id].cnt_callback_total++;
+ uart_dma_cycle[uart_id].cnt_callback++;
+ ris_status = UART_GET_RIS(&zup->port);
+ if(ris_status & (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS)){
+ if(ris_status & UART_OEIS){
+ zup->port.icount.overrun++;
+ uart_dma_cycle[uart_id].flg_overrun = 1;
+ }
+ if(ris_status & UART_BEIS){
+ uart_dma_cycle[uart_id].flg_be = 1;
+ zup->port.icount.brk++;
+ }
+ if(ris_status & UART_PEIS){
+ uart_dma_cycle[uart_id].flg_pe = 1;
+ zup->port.icount.parity++;
+ }
+ if(ris_status & UART_FEIS){
+ uart_dma_cycle[uart_id].flg_fe = 1;
+ zup->port.icount.frame++;
+ }
+ UART_PUT_ICR(&zup->port, (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS));
+ }
+ spin_unlock_irqrestore(&zup->port.lock, flags);
+ test_uart_static(zup->port.line, NULL, 0, 26);
+ up(&zup->sema_cyclic);
+}
+static inline void zx29_dma_rx_stop(struct zx29_uart_port *zup)
+{
+ dma_peripheral_id rx_id = uart_get_rx_dma_peripheral_id(zup);
+ //zx29_dma_force_stop(rx_id);
+ //dmaengine_terminate_all(zup->dmarx.chan);
+ /* FIXME. Just disable the DMA enable */
+ zup->dmacr &= ~UART_RXDMAE;
+ UART_PUT_DMACR(&zup->port,zup->dmacr);
+ zx29_dma_stop(rx_id);
+#if 0
+ //do we need check data received?
+ if(zup->pre_pending){
+ printk("pre_pending :%d\n ", zup->pre_pending);
+ }
+#endif
+ zup->curr_sg = NULL;
+}
+
+static void zx29_dma_remove(struct zx29_uart_port *zup)
+{
+ /* TODO: remove the initcall if it has not yet executed */
+ if (zup->dmatx.chan)
+ dma_release_channel(zup->dmatx.chan);
+ if (zup->dmarx.chan)
+ dma_release_channel(zup->dmarx.chan);
+}
+
+
+static void zx29_dma_shutdown(struct zx29_uart_port *zup)
+{
+ unsigned long flags;
+ dma_peripheral_id rx_id = uart_get_rx_dma_peripheral_id(zup);
+
+ if (!(zup->using_tx_dma || zup->using_rx_dma))
+ return;
+ /* Disable RX and TX DMA */
+ while(UART_GET_FR(&zup->port) & (UART_FR_TXBUSY | UART_FR_TXBUSY))
+ barrier();
+
+ spin_lock_irqsave(&zup->port.lock, flags);
+ //zx29_dma_force_stop(rx_id);
+ // dmaengine_terminate_all(zup->dmarx.chan);
+ zup->dmacr &= ~(UART_DMAONERR | UART_RXDMAE | UART_TXDMAE);
+ UART_PUT_DMACR(&zup->port,zup->dmacr);
+ zx29_dma_stop(rx_id);
+ zup->curr_sg = NULL;
+ spin_unlock_irqrestore(&zup->port.lock, flags);
+ if (zup->using_tx_dma) {
+ /* In theory, this should already be done by zx29_dma_flush_buffer */
+ dmaengine_terminate_all(zup->dmatx.chan);
+ if (zup->dmatx.queued) {
+ dma_unmap_sg(zup->dmatx.chan->device->dev, &zup->dmatx.sg, 1,
+ DMA_TO_DEVICE);
+ zup->dmatx.queued = false;
+ }
+ if(!zup->dmatx.buf)
+ kfree(zup->dmatx.buf);
+ zup->dmatx.buf = NULL;
+ zup->using_tx_dma = false;
+ }
+ if (zup->using_rx_dma) {
+ //dmaengine_terminate_all(zup->dmarx.chan);
+ /* Clean up the RX DMA */
+ if(!zup->uart_power_mode){
+ zx29_sgbuf_free(zup->dmarx.chan, &zup->dmarx.sgbuf_a, DMA_FROM_DEVICE);
+ zx29_sgbuf_free(zup->dmarx.chan, &zup->dmarx.sgbuf_b, DMA_FROM_DEVICE);
+ }else if(zup->uart_power_mode == 1){
+ uart_dma_cycle_deinit(zup);
+ }else
+ printk("uart%d dma shutdown fail.\n",zup->port.line);
+ zup->using_rx_dma = false;
+ zup->dmarx.used = false;
+ zup->dmarx.running = false;
+ zup->dmarx.use_buf_b = false;
+ zup->dmarx.rx_index = 0;
+ }
+ zup->pre_pending = 0;
+ zup->work_state = false;
+
+}
+
/*
 * Disable break generation and the FIFOs in the line-control register.
 * NOTE(review): the @lcrh parameter is unused; the register value is
 * read back from the port directly.
 */
static void zx29_shutdown_channel(struct zx29_uart_port *zup,
					unsigned int lcrh)
{
	unsigned long val;

	/* Clear BRK (break) and FEN (FIFO enable). */
	val = UART_GET_LCRH(&zup->port);
	val &= ~(UART_LCRH_BRK | UART_LCRH_FEN);
	UART_PUT_LCRH(&zup->port, val);
}
+
+
/* True when RX DMA support has been set up for this port. */
static inline bool zx29_dma_rx_available(struct zx29_uart_port *zup)
{
	return zup->using_rx_dma;
}
+
/* True when an RX DMA transfer is currently in flight. */
static inline bool zx29_dma_rx_running(struct zx29_uart_port *zup)
{
	return zup->using_rx_dma && zup->dmarx.running;
}
+
/* True when the RX DMA path currently owns reception (dmarx.used). */
static inline bool zx29_dma_rx_used(struct zx29_uart_port *zup)
{
	return zup->using_rx_dma && zup->dmarx.used;
}
+
/* True when the RX-DMA timeout work/timer is armed (work_state set). */
static inline bool zx29_dma_rx_work_scheduled(struct zx29_uart_port *zup)
{
	return zup->using_rx_dma && zup->work_state;
}
+
+
/*
 * TX DMA completion callback: unmap the finished buffer, then either
 * queue the next chunk of the circular buffer or hand transmission
 * back to the TX IRQ / mark the DMA idle.
 */
void uart_dma_tx_callback(void *data)
{
	struct zx29_uart_port *zup = data;
	struct zx29_dmatx_data *dmatx = &zup->dmatx;

	unsigned long flags;
	u16 dmacr;
	spin_lock_irqsave(&zup->port.lock, flags);
	if (zup->dmatx.queued)
		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
			     DMA_TO_DEVICE);

	/* Snapshot dmacr before clearing TXDMAE; see race note below. */
	dmacr = zup->dmacr;
	zup->dmacr = dmacr & ~UART_TXDMAE;
	UART_PUT_DMACR(&zup->port,zup->dmacr);

	/*
	 * If TX DMA was disabled, it means that we've stopped the DMA for
	 * some reason (eg, XOFF received, or we want to send an X-char.)
	 *
	 * Note: we need to be careful here of a potential race between DMA
	 * and the rest of the driver - if the driver disables TX DMA while
	 * a TX buffer completing, we must update the tx queued status to
	 * get further refills (hence we check dmacr).
	 */
	if (!(dmacr & UART_TXDMAE) || uart_tx_stopped(&zup->port) ||
	    uart_circ_empty(&zup->port.state->xmit)) {
		zup->dmatx.queued = false;

		/* No more TX work: allow the CPU idle state again. */
		zx_cpuidle_set_free(IDLE_FLAG_UART);


		spin_unlock_irqrestore(&zup->port.lock, flags);
		return;
	}

	if (zx29_uart_dma_tx_chars(zup) <= 0) {
		/*
		 * We didn't queue a DMA buffer for some reason, but we
		 * have data pending to be sent. Re-enable the TX IRQ.
		 */
		zup->imr |= UART_TXIM;
		UART_PUT_IMSC(&zup->port, zup->imr);
	}
	spin_unlock_irqrestore(&zup->port.lock, flags);
}
+
/*
 * Copy pending bytes from the circular TX buffer into the DMA bounce
 * buffer and submit one TX DMA transfer.
 * Locking: called with port lock held.
 * Returns 1 if a transfer was queued, 0 if there was too little data
 * (PIO is cheaper), negative errno on mapping/descriptor failure.
 */
static int zx29_uart_dma_tx_chars(struct zx29_uart_port *zup)
{
	struct zx29_dmatx_data *dmatx = &zup->dmatx;
	struct dma_chan *tx_chan = dmatx->chan;
	struct dma_device *dma_dev = tx_chan->device;
	struct dma_async_tx_descriptor *desc;
	struct circ_buf *xmit = &zup->port.state->xmit;
	unsigned int count;

	/*
	 * Try to avoid the overhead involved in using DMA if the
	 * transaction fits in the first half of the FIFO, by using
	 * the standard interrupt handling.  This ensures that we
	 * issue a uart_write_wakeup() at the appropriate time.
	 */

	/* (16 >> 1): half of the 16-byte TX FIFO. */
	count = uart_circ_chars_pending(xmit);
	if (count < (16 >> 1)) {
		zup->dmatx.queued = false;
		return 0;
	}

	/*
	 * Linearize the circular buffer into dmatx->buf.
	 * NOTE(review): assumes dmatx->buf is at least UART_XMIT_SIZE
	 * bytes (count can approach UART_XMIT_SIZE) - confirm against
	 * the allocation site.
	 */
	if (xmit->tail < xmit->head)
		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
	else {
		size_t first = UART_XMIT_SIZE - xmit->tail;
		size_t second ;//= xmit->head;

		if (first > count)
			first = count;
		second = count - first;
		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
		if (second)
			memcpy(&dmatx->buf[first], &xmit->buf[0], second);
	}
	dmatx->sg.length = count;

	if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
		zup->dmatx.queued = false;
		dev_dbg(zup->port.dev, "unable to map TX DMA\n");
		return -EBUSY;
	}

	/* Fill the vendor TX descriptor and submit it. */
	zup->dmatx.tx_def.link_addr=0;
	zup->dmatx.tx_def.src_addr=(unsigned int)(dmatx->sg.dma_address);
	zup->dmatx.tx_def.count=count;
	wmb();
	dmaengine_slave_config(tx_chan, (struct dma_slave_config*)&zup->dmatx.tx_def);
	desc = tx_chan->device->device_prep_interleaved_dma(tx_chan,NULL,0);

	if (!desc) {
		printk(KERN_INFO "!!!!!ERROR TX DESC[%s][%d]\n",__func__,__LINE__);
		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
		zup->dmatx.queued = false;
		/*
		 * If DMA cannot be used right now, we complete this
		 * transaction via IRQ and let the TTY layer retry.
		 */
		dev_dbg(zup->port.dev, "TX DMA busy\n");
		return -EBUSY;
	}
	desc->callback = (dma_async_tx_callback)uart_dma_tx_callback;
	desc->callback_param = (void *)zup;
	dmaengine_submit(desc);
	dma_async_issue_pending(tx_chan);
	atomic_inc(&zup->dmatx.count);
	zup->dmacr |= UART_TXDMAE;
	UART_PUT_DMACR(&zup->port,zup->dmacr);
	zup->dmatx.queued = true;

	/*
	 * Now we know that DMA will fire, so advance the ring buffer
	 * with the stuff we just dispatched.
	 */
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	zup->port.icount.tx += count;

	/* Wake writers from tasklet context rather than under this lock. */
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		//uart_write_wakeup(&zup->port);
		tasklet_schedule(&zup->write_wakeup);

	return 1;
}
+
/*
 * Push @pending DMA-received bytes from @sgbuf into the TTY layer and
 * optionally drain whatever is left in the RX FIFO.
 * Locking: called with port lock held; the lock is dropped around the
 * TTY insert/push calls (which may sleep-ish work) and re-taken, so
 * callers must not rely on state across this call.
 */
static void zx29_uart_dma_rx_chars(struct zx29_uart_port *zup,
			 //u32 pending, bool use_buf_b,
			 u32 pending, struct zx29_sgbuf *sgbuf,
			 bool readfifo, unsigned long *flags)
{
	struct tty_struct *tty = zup->port.state->port.tty;
#if 0
	struct zx29_sgbuf *sgbuf = use_buf_b ?
		&zup->dmarx.sgbuf_b : &zup->dmarx.sgbuf_a;
#endif
	struct device *dev = zup->dmarx.chan->device->dev;
	int dma_count = 0;
	u32 fifotaken = 0; /* only used for vdbg() */
	//unsigned long flags;

	/* Pick everything from the DMA first */
	if (pending) {
		/* Sync in buffer */

		dma_sync_sg_for_cpu(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);

		/*
		 * First take all chars in the DMA pipe, then look in the FIFO.
		 * Note that tty_insert_flip_buf() tries to take as many chars
		 * as it can.
		 */

		/* Drop the lock while copying into the TTY flip buffer. */
		spin_unlock_irqrestore(&zup->port.lock, *flags);

		dma_count = tty_insert_flip_string(&zup->port.state->port,
					sgbuf->buf, pending);

		test_uart_static(zup->port.line, sgbuf->buf, pending, 6);
		spin_lock_irqsave(&zup->port.lock, *flags);
		/* Return buffer to device */
		dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);

		zup->port.icount.rx += dma_count;
		if (dma_count < pending)
			dev_info(zup->port.dev,
				 "couldn't insert all characters (TTY is full?)\n");
	}

	/*
	 * Only continue with trying to read the FIFO if all DMA chars have
	 * been taken first.
	 */
	//if (dma_count == pending && readfifo) {
	if (readfifo) {
		/* Clear any error flags */
		//UART_PUT_ICR(&zup->port,UART_OEIC | UART_BEIC | UART_PEIC | UART_FEIC);
		/*
		 * If we read all the DMA'd characters, and we had an
		 * incomplete buffer, that could be due to an rx error, or
		 * maybe we just timed out. Read any pending chars and check
		 * the error status.
		 *
		 * Error conditions will only occur in the FIFO, these will
		 * trigger an immediate interrupt and stop the DMA job, so we
		 * will always find the error in the FIFO, never in the DMA
		 * buffer.
		 */
		test_uart_static(zup->port.line, NULL, 0, 7);
		fifotaken = zx29_uart_fifo_to_tty(zup);
	}
	/* Anything inserted? Push it up, unlocked. */
	if((pending > 0) || (fifotaken > 0)) {
		spin_unlock(&zup->port.lock);
		tty_flip_buffer_push(&zup->port.state->port);
		spin_lock(&zup->port.lock);
	}
}
/*
 * Cyclic-mode variant: push DMA bytes and manually-drained FIFO bytes
 * into the TTY layer. Called with the port lock held; the lock is
 * dropped around each TTY call and re-taken before returning.
 * NOTE(review): the "pending != 4096" guard appears to skip a
 * full-period value - presumably a wrap marker; confirm against the
 * cyclic ring bookkeeping.
 */
static void zx29_uart_deal_dma_fifo_rx_chars_cyclic(struct zx29_uart_port *zup,
				u32 pending, struct zx29_sgbuf *sgbuf,
				unsigned long *flags, char *fifo_buf, int fifo_len)
{
	struct tty_struct *tty = zup->port.state->port.tty;
	struct device *dev = zup->dmarx.chan->device->dev;
	int dma_count = 0;
	int fifo_count = 0;
	u32 fifotaken = 0; /* only used for vdbg() */
	if ((pending) && (pending != 4096)) {
		/* Hand the DMA buffer to the CPU and copy it out unlocked. */
		dma_sync_sg_for_cpu(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
		spin_unlock_irqrestore(&zup->port.lock, *flags);
		dma_count = tty_insert_flip_string(&zup->port.state->port,
					sgbuf->buf, pending);
		test_uart_static(zup->port.line, sgbuf->buf, pending, 6);
		spin_lock_irqsave(&zup->port.lock, *flags);
		dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
		zup->port.icount.rx += dma_count;
		if (dma_count < pending)
			dev_info(zup->port.dev,
				 "couldn't insert all characters (TTY is full?)\n");
	}
	if(fifo_len){
		/* Insert residual FIFO bytes, then scrub the scratch buffer. */
		spin_unlock_irqrestore(&zup->port.lock, *flags);
		fifo_count = tty_insert_flip_string(&zup->port.state->port,
					fifo_buf, fifo_len);
		fifo_buf[0] = '\0';
		fifo_buf[1] = '\0';
		fifo_buf[2] = '\0';
		spin_lock_irqsave(&zup->port.lock, *flags);
	}
	zup->port.icount.rx += fifo_count;
	/* Anything inserted? Push it up to the line discipline, unlocked. */
	if(((pending) && (pending != 4096)) || (fifo_len > 0)){
		spin_unlock(&zup->port.lock);
		tty_flip_buffer_push(&zup->port.state->port);
		spin_lock(&zup->port.lock);
	}
}
+
+static void zx29_uart_deal_dma_fifo_rx_chars(struct zx29_uart_port *zup,
+ u32 pending, struct zx29_sgbuf *sgbuf,
+ unsigned long *flags, char *fifo_buf, int fifo_len)
+{
+ struct tty_struct *tty = zup->port.state->port.tty;
+
+ struct device *dev = zup->dmarx.chan->device->dev;
+ int dma_count = 0;
+ int fifo_count = 0;
+ u32 fifotaken = 0; /* only used for vdbg() */
+ if (pending) {
+ dma_sync_sg_for_cpu(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
+ spin_unlock_irqrestore(&zup->port.lock, *flags);
+ dma_count = tty_insert_flip_string(&zup->port.state->port,
+ sgbuf->buf, pending);
+ test_uart_static(zup->port.line, sgbuf->buf, pending, 6);
+ spin_lock_irqsave(&zup->port.lock, *flags);
+ dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
+ spin_unlock_irqrestore(&zup->port.lock, *flags);
+ zup->port.icount.rx += dma_count;
+ if (dma_count < pending)
+ dev_info(zup->port.dev,
+ "couldn't insert all characters (TTY is full?)\n");
+ }
+
+ if(fifo_len){
+ //printk("qq >> fifo len %d.\n",fifo_len);
+ fifo_count = tty_insert_flip_string(&zup->port.state->port,
+ fifo_buf, fifo_len);
+ //printk("qq >>fifo count %d,buf is %x %x %x .\n",fifo_count, fifo_buf[0],fifo_buf[1],fifo_buf[2]);
+ fifo_buf[0] = '\0';
+ fifo_buf[1] = '\0';
+ fifo_buf[2] = '\0';
+ //memset(fifo_buf, '\0', 4);
+ }
+
+ zup->port.icount.rx += fifo_count;
+ test_uart_static(zup->port.line, fifo_buf, fifo_count, 18);
+ if(pending > 0 || (fifo_len > 0)){
+ tty_flip_buffer_push(&zup->port.state->port);
+ spin_lock_irqsave(&zup->port.lock, *flags);
+ }
+}
+
/*
 * Dead code: legacy RX-DMA IRQ path, compiled out via #if 0 and
 * superseded by uart_dma_rx_callback()/zx29_uart_rt_dma(). Kept for
 * reference only.
 */
#if 0
static void zx29_dma_rx_irq(struct zx29_uart_port *zup, unsigned long *flags)
{
	struct zx29_dmarx_data *dmarx = &zup->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	struct zx29_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	enum dma_status dmastat;
	dma_peripheral_id rx_id = uart_get_rx_dma_peripheral_id(zup);

	uint32_t ris_status = UART_GET_RIS(&zup->port);
	//printk("rx irq\n");
	if(ris_status & (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS)){
		if(ris_status & UART_OEIS){
			zup->port.icount.overrun++;
			//if(!uart_console(&zup->port))
			//	BUG_ON(1);
		}
		if(ris_status & UART_BEIS)
			zup->port.icount.brk++;
		if(ris_status & UART_PEIS)
			zup->port.icount.parity++;
		if(ris_status & UART_FEIS)
			zup->port.icount.frame++;

		UART_PUT_ICR(&zup->port, (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS));
	}

	if(zx29_dma_rx_running(zup)){
		/*
		 * Pause the transfer so we can trust the current counter,
		 * do this before we pause the block, else we may
		 * overflow the FIFO.
		 */
		// if(zx29_dma_stop(rx_id))
		//	printk( "uart%d unable to pause DMA transfer\n", zup->port.line);
		//rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
		//zx29_dma_force_stop(rx_id);
		//dmaengine_terminate_all(rxchan);

		//dmastat = zx29_dma_get_status();//Normally,this value is insignificance.

		/* Disable RX DMA - incoming data will wait in the FIFO */
		zup->dmacr &= ~UART_RXDMAE;
		UART_PUT_DMACR(&zup->port,zup->dmacr);
		zx29_dma_stop(rx_id);
		zup->dmarx.running = false;
		zup->dmarx.used = false;
		pending = sgbuf->sg.length - zx29_dma_get_transfer_num(rx_id);//state.residue;
		BUG_ON(pending > ZX29_DMA_BUFFER_SIZE);
		/* Then we terminate the transfer - we now know our residue */
		//dmaengine_terminate_all(rxchan);

		dmarx->use_buf_b = !dmarx->use_buf_b;
		/*
		 * This will take the chars we have so far and insert
		 * into the framework.
		 */
		zx29_uart_dma_rx_chars(zup, pending, sgbuf, false, flags);
	}

	/* Switch buffer & re-trigger DMA job */
	if (zx29_dma_rx_trigger_dma(zup)) {
		printk("uart%d could not retrigger RX DMA job\n",zup->port.line);
		zup->imr |= UART_RXIM;
		UART_PUT_IMSC(&zup->port, zup->imr);
	}
#if RX_DMA_WORK
	//printk("add timer\n");
	else{
		// mod_timer(&(zup->rx_dma_timer), jiffies + msecs_to_jiffies(RX_DMA_TIMEOUT));
		uart_mod_timer(zup, flags);
		zup->pre_pending = 0;
		zup->work_state = true;
	}
#endif

}
#endif
+/****************************************************************************/
/*
 * (Re)start RX DMA if it is available; on failure fall back to the
 * RX/RX-timeout interrupts, on success arm the stall-detect timer.
 * NOTE(review): the local 'tty' is unused.
 */
static void zx29_uart_rx_dma_chars(struct zx29_uart_port *zup, unsigned long *flags)
{

	struct tty_struct *tty = zup->port.state->port.tty;
	//zx29_uart_fifo_to_tty(zup);
//	spin_unlock(&zup->port.lock);
	if (zx29_dma_rx_available(zup)) {
		if (zx29_dma_rx_trigger_dma(zup)) {
			/* DMA could not start: re-enable interrupt-driven RX. */
			printk("rx_dma_chars RXDMA start fail\n");
			zup->imr |= UART_RXIM | UART_RTIM;
			UART_PUT_IMSC(&zup->port,zup->imr);
		}
#if RX_DMA_WORK
//printk("add timer\n");
		else{
			//mod_timer(&(zup->rx_dma_timer), jiffies + msecs_to_jiffies(RX_DMA_TIMEOUT));
			uart_mod_timer(zup, flags);
			zup->pre_pending = 0;
			zup->work_state = true;
		}
#endif
	}

	//tty_flip_buffer_push(tty);
	//spin_lock(&zup->port.lock);
}
+
+
+/****************************************************************************/
/*
 * Drain the RX FIFO into the TTY on a receive-timeout interrupt
 * (PIO path). Called with the port lock held.
 * NOTE(review): 'fr' is read but never used; the register read itself
 * has no documented side effect here.
 */
static void zx29_uart_rx_timeout_chars(struct zx29_uart_port *zup, unsigned long *flags)
{
	int rt_cnt = 0;
//	unsigned long flags;

	int fr = UART_GET_FR(&zup->port);
	//printk("rx_timeout_chars\n");

	rt_cnt = zx29_uart_fifo_to_tty(zup);
	if(rt_cnt){
		/* Don't push to the debug console TTY before it is opened. */
		if(g_console_open_flag == 1 || zup->port.line != DEBUG_CONSOLE){
			spin_unlock(&zup->port.lock);
			tty_flip_buffer_push(&zup->port.state->port);
			spin_lock(&zup->port.lock);
		}
	}
}
+
/*
 * Receive-timeout handling while RX DMA is active: stop the DMA
 * transfer, compute how many bytes it delivered, hand those plus any
 * FIFO residue to the TTY layer, and return reception to the RX/RT
 * interrupts. Called with the port lock held.
 */
static void zx29_uart_rt_dma(struct zx29_uart_port *zup, unsigned long *flags)
{
	struct zx29_dmarx_data *dmarx = &zup->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	struct zx29_sgbuf *sgbuf = zup->curr_sg;
	size_t pending;
	struct dma_tx_state state;
	enum dma_status dmastat;

	dma_peripheral_id rx_id = uart_get_rx_dma_peripheral_id(zup);
	uint32_t ris_status = UART_GET_RIS(&zup->port);

	//rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	/* First estimate of bytes delivered (recomputed after the stop). */
	pending = sgbuf->sg.length - zx29_dma_get_transfer_num(rx_id);
	//printk("---zx29_uart_rt_dma, pending:%d, residue:%d\n", pending, state.residue);
	if(ris_status & (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS)){
		if(ris_status & UART_OEIS){
			zup->port.icount.overrun++;
			// if(!uart_console(&zup->port))
			//BUG_ON(1);
		}
		if(ris_status & UART_BEIS)
			zup->port.icount.brk++;
		if(ris_status & UART_PEIS)
			zup->port.icount.parity++;
		if(ris_status & UART_FEIS)
			zup->port.icount.frame++;

		UART_PUT_ICR(&zup->port, (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS));
	}

	if(zx29_dma_rx_running(zup)){
		/*
		 * Pause the transfer so we can trust the current counter,
		 * do this before we pause the block, else we may
		 * overflow the FIFO.
		 */
		zup->dmacr &= ~UART_RXDMAE;
		UART_PUT_DMACR(&zup->port,zup->dmacr);
		zx29_dma_stop(rx_id);
		//rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
		//printk( "uart%d unable to pause DMA transfer\n", zup->port.line);
		//dmastat = rxchan->device->device_tx_status(rxchan,
		//			dmarx->cookie, &state);
		// dmastat = zx29_dma_get_status();//Normally,this value is insignificance.

		//zx29_dma_force_stop(rx_id);
		//dmaengine_terminate_all(rxchan);

		/* Disable RX DMA - incoming data will wait in the FIFO */
		zup->dmarx.running = false;
		zup->dmarx.used = false;
		zup->curr_sg = zup->sg2tty = NULL;
		zup->sg2tty_len = 0;
		/* Hand reception back to the RX/RX-timeout interrupts. */
		zup->imr |= (UART_RTIM|UART_RXIM);
		UART_PUT_IMSC(&zup->port, zup->imr);
		/* Final byte count now that the channel is stopped. */
		pending = sgbuf->sg.length - zx29_dma_get_transfer_num(rx_id);//state.residue;

		//printk("---zx29_uart_rt_dma, after stop pending:%d, residue:%d\n", pending, state.residue);
		BUG_ON(pending > ZX29_DMA_BUFFER_SIZE);
		/* Then we terminate the transfer - we now know our residue */
		//dmaengine_terminate_all(rxchan);

		dmarx->use_buf_b = !dmarx->use_buf_b;
		wmb();
		/*
		 * This will take the chars we have so far and insert
		 * into the framework.
		 */
		test_uart_static(zup->port.line, NULL, 0, 5);
		zx29_uart_dma_rx_chars(zup, pending, sgbuf, true, flags);
	}

#if 0
//printk("rt dma\n");
	/* Switch buffer & re-trigger DMA job */
	if (zx29_dma_rx_trigger_dma(zup)) {
		printk("zx29_dma_rx_trigger_dma fail,uart:%d\n", zup->port.line);
		zup->imr |= UART_RXIM;
		UART_PUT_IMSC(&zup->port, zup->imr);
	}
#if RX_DMA_WORK
	//printk("add timer\n");
	else{
		//mod_timer(&(zup->rx_dma_timer), jiffies + msecs_to_jiffies(RX_DMA_TIMEOUT));
		uart_mod_timer(zup, flags);
		zup->pre_pending = 0;
		zup->work_state = true;
		zup->dmarx.used = true;
	}
#endif

#endif

}
/* Debug capture of residual FIFO bytes drained in the RX-DMA timeout
 * path, indexed by uart id (up to 5 ports). */
char g_fifo_residue_buf[5][4];	/* bytes pulled from the FIFO in one pass */
char g_fifo_residue_all[5][20];	/* rolling history of drained bytes + counts */
unsigned char g_fifo_cnt[5];	/* write cursor into g_fifo_residue_all */
/*
 * Stall-detect timer for RX DMA. Fires every RX_DMA_TIMEOUT jiffies
 * while a transfer is armed. If the transfer count has not advanced
 * since the previous tick and the line is idle, the DMA is stopped,
 * the partially-filled buffer (plus any FIFO residue) is pushed to the
 * TTY, and DMA is re-armed (or reception handed back to interrupts).
 * If the count is still moving, the timer is simply re-armed.
 */
static void zx29_uart_rx_dma_timeout(struct timer_list *t)
{
	struct zx29_uart_port *zup = from_timer(zup, t, rx_dma_timer);

	struct zx29_dmarx_data *dmarx = &zup->dmarx;
	static bool dma_timeout_flag = false;
	size_t pending, tmp_len;
	uint32_t ris_status = 0;
	int cancel_timer = 0;
	int sg_idx = (dmarx->use_buf_b ? 1 : 0);

	unsigned long flags;
	struct zx29_sgbuf *sgbuf = NULL;
	int uart_id = zup->port.line;
	/* Brace-less if: the return below is its body. */
	if(!zx29_dma_rx_running(zup))
		//printk("---uart_rx_dma_timeout enter, dma stopped\n");
		return;
	raw_spin_lock_irqsave(&zup->port.lock, flags);
	if(zup->port_close || (zup->curr_sg == NULL)){
		raw_spin_unlock_irqrestore(&zup->port.lock, flags);
		return;
	}
	//rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	if(zup->sg2tty) {//dma complete now, later check again
		raw_spin_unlock_irqrestore(&zup->port.lock, flags);
		test_uart_static(zup->port.line, NULL, 0, 14);
		mod_timer(&(zup->rx_dma_timer), jiffies + RX_DMA_TIMEOUT);
		return;
	}
	sgbuf = zup->curr_sg;
	dma_peripheral_id rx_id = uart_get_rx_dma_peripheral_id(zup);
	/* Bytes DMA has written into the current buffer so far. */
	pending = sgbuf->sg.length - zx29_dma_get_transfer_num(rx_id);
	//pending = sgbuf->sg.length - zx29_dma_get_transfer_num(rx_id);
	//printk("---uart_rx_dma_timeout enter,sg.length:%d, pending:%d, state.residue:%d\n", sgbuf->sg.length, pending, state.residue);
	if(pending == zup->pre_pending){
		int fr = UART_GET_FR(&zup->port);
		//if RXBUSY,means data come again

		if((fr & UART_FR_RXBUSY)){
			/* Line active again: not a stall, just re-arm the timer. */
			uart_mod_timer(zup, &flags);
			test_uart_static(zup->port.line, NULL, 0, 12);
			goto deal_end;

		}

		ris_status = UART_GET_RIS(&zup->port);

		/* Record and clear any line errors before tearing DMA down. */
		if(ris_status & (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS)){
			if(ris_status & UART_OEIS){
				zup->port.icount.overrun++;
				g_uart_overrun[uart_id] = 1;
				test_uart_static(zup->port.line, NULL, 0, 19);
				//if(!uart_console(&zup->port))
				//	BUG_ON(1);
			}
			if(ris_status & UART_BEIS)
				zup->port.icount.brk++;
			if(ris_status & UART_PEIS)
				zup->port.icount.parity++;
			if(ris_status & UART_FEIS)
				zup->port.icount.frame++;

			UART_PUT_ICR(&zup->port, (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS));
		}

		/* Stall confirmed: stop DMA and take the final byte count. */
		zup->dmacr &= ~UART_RXDMAE;
		UART_PUT_DMACR(&zup->port,zup->dmacr);
		zx29_dma_stop(rx_id);
		zup->dmarx.running = false;
		zup->dmarx.used = false;
		tmp_len = sgbuf->sg.length - zx29_dma_get_transfer_num(rx_id);
		if(tmp_len != pending){
			pending = tmp_len;
		}
		dmarx->use_buf_b = !dmarx->use_buf_b;
		wmb();
		if(zup->uart_power_mode){
			/* Power mode: drain up to 3 residual FIFO bytes into the
			 * debug capture buffers, restart DMA (unless sleeping),
			 * then push everything to the TTY. */
			int i;
			for(i= 0;i < 3;i++){
				fr = UART_GET_FR(&zup->port);
				if((fr & UART_FR_RXFE) == 0){
					g_fifo_residue_buf[uart_id][i] = UART_GET_CHAR(&zup->port) | UART_DUMMY_DR_RX;
					g_fifo_residue_all[uart_id][g_fifo_cnt[uart_id]++] = g_fifo_residue_buf[uart_id][i];
					if(g_fifo_cnt[uart_id] >= 20)	g_fifo_cnt[uart_id] = 0;
				}
				else
					break;
			}
			if(i){
				g_fifo_residue_all[uart_id][g_fifo_cnt[uart_id]++]=i;
				if(g_fifo_cnt[uart_id] >= 20)	g_fifo_cnt[uart_id] = 0;
			}

			//zup->sg2tty = sgbuf;
			//when app ctrl sleep ,always start dma receive
			if(zup->sleep_state == 0){
				//now start dma again
				if (zx29_dma_rx_trigger_dma(zup)) {
					printk("rx_dma_chars RXDMA start fail\n");
					zup->imr |= UART_RXIM;
					UART_PUT_IMSC(&zup->port,zup->imr);
				}else{
					uart_mod_timer(zup, &flags);
					zup->pre_pending = 0;
					zup->dmarx.used = true;
					zup->work_state = true;
				}
			}
			if(pending || (i > 0)){
				test_uart_static(zup->port.line, NULL, 0, 13);
				zx29_uart_deal_dma_fifo_rx_chars(zup, pending, sgbuf, &flags, g_fifo_residue_buf[uart_id],i);
			}

		}else{
			//for normal mode, dma start only on rx busy after timeout came
			if(pending || (( fr & UART_FR_RXFE) == 0)){
				test_uart_static(zup->port.line, NULL, 0, 13);
				zx29_uart_dma_rx_chars(zup, pending, sgbuf, true, &flags);
			}
			/* Hand reception back to the RX/RX-timeout interrupts. */
			zup->imr |= (UART_RTIM|UART_RXIM);
			UART_PUT_IMSC(&zup->port, zup->imr);
			zup->pre_pending = 0;
			zup->work_state = false;
			/* If data arrived meanwhile, switch straight back to DMA. */
			if((UART_GET_RIS(&zup->port) & (UART_RXIS | UART_RTIS)) ||
				(UART_GET_FR(&zup->port) & UART_FR_RXBUSY)){
				zup->imr &= ~(UART_RXIM);
				UART_PUT_IMSC(&zup->port, zup->imr);

				if (zx29_dma_rx_trigger_dma(zup)) {
					printk("rx_dma_chars RXDMA start fail\n");
					zup->imr |= (UART_RTIM|UART_RXIM);
					UART_PUT_IMSC(&zup->port,zup->imr);
				}else{
					uart_mod_timer(zup, &flags);
					zup->pre_pending = 0;
					zup->dmarx.used = true;
					zup->work_state = true;
					UART_PUT_ICR(&zup->port,(UART_RTIS|UART_RXIS));
				}
			}

		}
deal_end:

		raw_spin_unlock_irqrestore(&zup->port.lock, flags);
	}else{
		/* Transfer count still advancing: remember it and re-check later. */
		raw_spin_unlock_irqrestore(&zup->port.lock, flags);
		zup->pre_pending = pending;
		mod_timer(&(zup->rx_dma_timer), jiffies + RX_DMA_TIMEOUT);
		//uart_mod_timer(zup, &flags);
	}


}
+enum hrtimer_restart zx29_uart_rx_dma_hrtimeout(struct hrtimer *t)
+{
+ struct zx29_uart_port *zup = from_timer(zup, t, rx_dma_hrtimer);
+ struct zx29_dmarx_data *dmarx = &zup->dmarx;
+ static bool dma_timeout_flag = false;
+ size_t pending, tmp_len;
+ uint32_t ris_status = 0;
+ int cancel_timer = 0;
+ int sg_idx = (dmarx->use_buf_b ? 1 : 0);
+ int uart_id = zup->port.line;
+ unsigned long flags;
+ struct zx29_sgbuf *sgbuf = NULL;
+ if(!zx29_dma_rx_running(zup))
+ return HRTIMER_NORESTART;
+ raw_spin_lock_irqsave(&zup->port.lock, flags);
+ if(zup->port_close || (zup->curr_sg == NULL)){
+ raw_spin_unlock_irqrestore(&zup->port.lock, flags);
+ return HRTIMER_NORESTART;
+ }
+ if(zup->sg2tty) {//dma complete now, later check again
+ raw_spin_unlock_irqrestore(&zup->port.lock, flags);
+ test_uart_static(zup->port.line, NULL, 0, 14);
+ hrtimer_forward_now(&zup->rx_dma_hrtimer, g_hr_interval);
+ return HRTIMER_RESTART;
+ }
+ if(zup->enter_suspend){
+ raw_spin_unlock_irqrestore(&zup->port.lock, flags);
+ test_uart_static(zup->port.line, NULL, 0, 15);
+ hrtimer_forward_now(&zup->rx_dma_hrtimer, g_hr_interval);
+ return HRTIMER_RESTART;
+ }
+ sgbuf = zup->curr_sg;
+ dma_peripheral_id rx_id = uart_get_rx_dma_peripheral_id(zup);
+ pending = sgbuf->sg.length - zx29_dma_get_transfer_num(rx_id);
+ if((pending == zup->pre_pending)) {
+ int fr = UART_GET_FR(&zup->port);
+ if((fr & UART_FR_RXBUSY)){
+ hrtimer_forward_now(&zup->rx_dma_hrtimer, g_hr_interval);
+ test_uart_static(zup->port.line, NULL, 0, 12);
+ goto deal_end;
+ }
+ ris_status = UART_GET_RIS(&zup->port);
+ if(ris_status & (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS)){
+ if(ris_status & UART_OEIS){
+ zup->port.icount.overrun++;
+ g_uart_overrun[uart_id] = 1;
+ test_uart_static(zup->port.line, NULL, 0, 19);
+ }
+ if(ris_status & UART_BEIS)
+ zup->port.icount.brk++;
+ if(ris_status & UART_PEIS)
+ zup->port.icount.parity++;
+ if(ris_status & UART_FEIS)
+ zup->port.icount.frame++;
+ UART_PUT_ICR(&zup->port, (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS));
+ }
+ zup->dmacr &= ~UART_RXDMAE;
+ UART_PUT_DMACR(&zup->port,zup->dmacr);
+ zx29_dma_stop(rx_id);
+ zup->dmarx.running = false;
+ zup->dmarx.used = false;
+ tmp_len = sgbuf->sg.length - zx29_dma_get_transfer_num(rx_id);
+ if(tmp_len != pending){
+ pending = tmp_len;
+ }
+ dmarx->use_buf_b = !dmarx->use_buf_b;
+ wmb();
+ if(zup->uart_power_mode){
+ int i;
+ for(i= 0;i < 3;i++){
+ fr = UART_GET_FR(&zup->port);
+ if((fr & UART_FR_RXFE) == 0){
+ g_fifo_residue_buf[uart_id][i] = UART_GET_CHAR(&zup->port) | UART_DUMMY_DR_RX;
+ g_fifo_residue_all[uart_id][g_fifo_cnt[uart_id]++] = g_fifo_residue_buf[uart_id][i];
+ if(g_fifo_cnt[uart_id] >= 20) g_fifo_cnt[uart_id] = 0;
+ }
+ else
+ break;
+ }
+ if(i){
+ g_fifo_residue_all[uart_id][g_fifo_cnt[uart_id]++]=i;
+ if(g_fifo_cnt[uart_id] >= 20) g_fifo_cnt[uart_id] = 0;
+ }
+ if(zup->sleep_state == 0){
+ if (zx29_dma_rx_trigger_dma(zup)) {
+ printk("rx_dma_chars RXDMA start fail\n");
+ zup->imr |= UART_RXIM;
+ UART_PUT_IMSC(&zup->port,zup->imr);
+ }else{
+ hrtimer_forward_now(&zup->rx_dma_hrtimer, g_hr_interval);
+ zup->pre_pending = 0;
+ zup->dmarx.used = true;
+ zup->work_state = true;
+ }
+ }
+ if(pending || (i > 0)){
+ test_uart_static(zup->port.line, NULL, 0, 13);
+ zx29_uart_deal_dma_fifo_rx_chars(zup, pending, sgbuf, &flags, g_fifo_residue_buf[uart_id],i);
+ }
+ }else{
+ if(pending || (( fr & UART_FR_RXFE) == 0)){
+ test_uart_static(zup->port.line, NULL, 0, 13);
+ zx29_uart_dma_rx_chars(zup, pending, sgbuf, true, &flags);
+ printk("at pending %d.\n",pending);
+ }
+ zup->imr |= (UART_RTIM|UART_RXIM);
+ UART_PUT_IMSC(&zup->port, zup->imr);
+ zup->pre_pending = 0;
+ zup->work_state = false;
+ if((UART_GET_RIS(&zup->port) & (UART_RXIS | UART_RTIS)) ||
+ (UART_GET_FR(&zup->port) & UART_FR_RXBUSY)){
+ zup->imr &= ~(UART_RXIM);
+ UART_PUT_IMSC(&zup->port, zup->imr);
+ if (zx29_dma_rx_trigger_dma(zup)) {
+ printk("rx_dma_chars RXDMA start fail\n");
+ zup->imr |= (UART_RTIM|UART_RXIM);
+ UART_PUT_IMSC(&zup->port,zup->imr);
+ }else{
+ hrtimer_forward_now(&zup->rx_dma_hrtimer, g_hr_interval);
+ zup->pre_pending = 0;
+ zup->dmarx.used = true;
+ zup->work_state = true;
+ UART_PUT_ICR(&zup->port,(UART_RTIS|UART_RXIS));
+ }
+ }
+ }
+deal_end:
+ raw_spin_unlock_irqrestore(&zup->port.lock, flags);
+ return HRTIMER_RESTART;
+ }else{
+ raw_spin_unlock_irqrestore(&zup->port.lock, flags);
+ zup->pre_pending = pending;
+ hrtimer_forward_now(&zup->rx_dma_hrtimer, g_hr_interval);
+ test_uart_static(zup->port.line, NULL, zup->pre_pending, 22);
+ return HRTIMER_RESTART;
+ }
+}
/*
 * hrtimer poll handler for UART RX DMA in cyclic (ring of sgbufs) mode.
 *
 * Like zx29_uart_rx_dma_hrtimeout() but for the cyclic-descriptor setup:
 * it watches the residue of the current ring buffer and, when the count
 * stalls with the line idle (or after a resume), terminates the cyclic
 * transfer, drains the RX FIFO residue, restarts the cyclic DMA and
 * pushes the data to the TTY layer.
 *
 * Runs in hrtimer (softirq) context under zup->port.lock (note: this
 * variant uses spin_lock_irqsave, not raw_, unlike the non-cyclic one).
 */
enum hrtimer_restart zx29_uart_rx_dma_hrtimeout_cyclic(struct hrtimer *t)
{
	struct zx29_uart_port *zup = from_timer(zup, t, rx_dma_hrtimer);
	struct zx29_dmarx_data *dmarx = &zup->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	size_t pending, tmp_len;	/* tmp_len is currently unused here */
	uint32_t ris_status = 0;
	unsigned long flags;
	struct zx29_sgbuf *sgbuf = NULL;
	int uart_id = zup->port.line;
	/* DMA torn down, or a DMA-complete callback is already queued:
	 * let that path handle the data instead of polling. */
	if(!zx29_dma_rx_running(zup))
		return HRTIMER_NORESTART;
	if(uart_dma_cycle[uart_id].cnt_callback > 0){
		return HRTIMER_NORESTART;
	}

	spin_lock_irqsave(&zup->port.lock, flags);
	/* Current ring slot being filled by DMA. */
	sgbuf = &uart_dma_cycle[uart_id].sgbuf[uart_dma_cycle[uart_id].flg_enter_th];
	if(zup->port_close || (sgbuf == NULL)){
		spin_unlock_irqrestore(&zup->port.lock, flags);
		return HRTIMER_RESTART;
	}
	/* Worker thread still has an un-consumed threshold event pending. */
	if(zup->sema_cyclic.count > 0){
		printk("uart has th not deal.\n");
		spin_unlock_irqrestore(&zup->port.lock, flags);
		return HRTIMER_RESTART;
	}
	if((zup->sg2tty)){//dma not complete now, later check again
		printk("dmath_cyclic not end.\n");
		spin_unlock_irqrestore(&zup->port.lock, flags);
		test_uart_static(zup->port.line, NULL, 0, 14);
		hrtimer_forward_now(&zup->rx_dma_hrtimer, g_hr_interval);
		return HRTIMER_RESTART;
	}
	if(zup->enter_suspend){
		/* Suspending: leave the hardware alone and re-poll later. */
		spin_unlock_irqrestore(&zup->port.lock, flags);
		test_uart_static(zup->port.line, NULL, 0, 15);
		hrtimer_forward_now(&zup->rx_dma_hrtimer, g_hr_interval);
		return HRTIMER_RESTART;
	}
	dma_peripheral_id rx_id = uart_get_rx_dma_peripheral_id(zup);
	/* Bytes received into the current slot = size minus DMA residue. */
	pending = sgbuf->sg.length - zx29_dma_get_transfer_num(rx_id);
	/* Stalled with data present, or first poll after resume. */
	if(((pending == zup->pre_pending) && pending) || uart_dma_cycle[uart_id].from_resume){
		uart_dma_cycle[uart_id].from_resume = 0;
#if 0
		/* NOTE(review): disabled lost-DMA-interrupt self-recovery; the
		 * bare "return;" below would not even compile in this non-void
		 * handler if re-enabled — rework before turning it back on. */
		if(uart_dma_cycle[uart_id].flg_enter_th == 0)
			uart_dma_cycle[uart_id].flg_enter_to = 4;
		else
			uart_dma_cycle[uart_id].flg_enter_to = uart_dma_cycle[uart_id].flg_enter_th - 1;
		struct zx29_sgbuf *sgbuf_tmp = NULL;
		sgbuf_tmp = &uart_dma_cycle[uart_id].sgbuf[uart_dma_cycle[uart_id].flg_enter_to];
		test_uart_static(zup->port.line, NULL, 0, 61);
		if (sgbuf->sg.dma_address != (zx29_dma_cur_dst(rx_id)&0xfffff000)){
			if(sgbuf_tmp->sg.dma_address != ((zx29_dma_cur_dst(rx_id)&0xfffff000)-0x1000)){
				printk("uart lose dma isr enter self resume.\n");
				up(&zup->sema_cyclic);
				spin_unlock_irqrestore(&zup->port.lock, flags);
				return;
			}
		}
	#endif
		int fr = UART_GET_FR(&zup->port);
		if((fr & UART_FR_RXBUSY)){
			/* Receiver still busy: not a real stall, check again. */
			hrtimer_forward_now(&zup->rx_dma_hrtimer, g_hr_interval);
			test_uart_static(zup->port.line, NULL, 0, 12);
			goto deal_end;
		}
		/* Record and clear any pending line-error interrupts. */
		ris_status = UART_GET_RIS(&zup->port);
		if(ris_status & (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS)){
			if(ris_status & UART_OEIS){
				zup->port.icount.overrun++;
				uart_dma_cycle[uart_id].flg_overrun = 1;
			}
			if(ris_status & UART_BEIS)
				zup->port.icount.brk++;
			if(ris_status & UART_PEIS)
				zup->port.icount.parity++;
			if(ris_status & UART_FEIS)
				zup->port.icount.frame++;
			UART_PUT_ICR(&zup->port, (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS));
			printk("error in uart%d: fe %u ,be %u pe %u.\n",zup->port.line,zup->port.icount.frame,
							zup->port.icount.brk,zup->port.icount.parity);
		}
		/* Stop RX DMA and kill the cyclic descriptor chain. */
		zup->dmacr &= ~UART_RXDMAE;
		UART_PUT_DMACR(&zup->port,zup->dmacr);
		dmaengine_terminate_all(rxchan);
		test_uart_static(zup->port.line, NULL, 0, 60);
		zup->dmarx.running = false;
		zup->dmarx.used = false;
		wmb();
		/* Drain up to 3 residual bytes still sitting in the RX FIFO. */
		int i = 0;
		for(i= 0;i < 3;i++){
			fr = UART_GET_FR(&zup->port);
			if((fr & UART_FR_RXFE) == 0){
				g_fifo_residue_buf[uart_id][i] = UART_GET_CHAR(&zup->port) | UART_DUMMY_DR_RX;
				g_fifo_residue_all[uart_id][g_fifo_cnt[uart_id]++] = g_fifo_residue_buf[uart_id][i];
				if(g_fifo_cnt[uart_id] >= 20) g_fifo_cnt[uart_id] = 0;
			}
			else
				break;
		}
		if(i){
			/* Log the residue count into the circular trace buffer. */
			g_fifo_residue_all[uart_id][g_fifo_cnt[uart_id]++]=i;
			if(g_fifo_cnt[uart_id] >= 20) g_fifo_cnt[uart_id] = 0;
		}
		/* Restart the cyclic RX DMA; on failure fall back to RX IRQ mode. */
		if (zx29_dma_rx_trigger_dma_use_dma_cyclic(zup)) {
			printk("rx_dma_chars RXDMA start fail\n");
			zup->imr |= UART_RXIM;
			UART_PUT_IMSC(&zup->port,zup->imr);
		}else{
			hrtimer_forward_now(&zup->rx_dma_hrtimer, g_hr_interval);
			zup->pre_pending = 0;
			zup->dmarx.used = true;
			zup->work_state = true;
		}
		/* Push data unless the slot is exactly full (4096 = handled by
		 * the DMA-complete callback path instead). */
		if((pending && (pending != 4096)) || (i > 0)){
			zx29_uart_deal_dma_fifo_rx_chars_cyclic(zup, pending, sgbuf, &flags, g_fifo_residue_buf[uart_id],i);
		}
		uart_dma_cycle[uart_id].cnt_th = 0;
		uart_dma_cycle[uart_id].cnt_callback=0;
deal_end:
		spin_unlock_irqrestore(&zup->port.lock, flags);
		return HRTIMER_RESTART;
	}else{
		/* Residue still moving (or empty): remember it and poll again. */
		spin_unlock_irqrestore(&zup->port.lock, flags);
		zup->pre_pending = pending;
		hrtimer_forward_now(&zup->rx_dma_hrtimer, g_hr_interval);
		test_uart_static(zup->port.line, NULL, zup->pre_pending, 22);
		return HRTIMER_RESTART;
	}
}
+#endif
+
+
+static void zx29_uart_modem_status(struct zx29_uart_port *zup)
+{
+ unsigned int status, delta;
+
+ status = UART_GET_FR(&zup->port)& UART_FR_MODEM_ANY;
+
+ delta = status ^ zup->old_status;
+ zup->old_status = status;
+
+ if (!delta)
+ return;
+
+ if (delta & UART_FR_DCD)
+ uart_handle_dcd_change(&zup->port, status & UART_FR_DCD);
+
+ if (delta & UART_FR_DSR)
+ zup->port.icount.dsr++;
+
+ if (delta & UART_FR_CTS)
+ uart_handle_cts_change(&zup->port, status & UART_FR_CTS);
+
+ wake_up_interruptible(&zup->port.state->port.delta_msr_wait);
+}
+
+/****************************************************************************/
/*
 * Main UART interrupt handler.
 *
 * Dispatches RX / RX-timeout / modem-status / TX interrupts.  For
 * non-console ports with DMA enabled, RX interrupts are turned into DMA
 * starts (or timer re-arms when DMA is already in use); console ports
 * always use PIO.  Loops on the masked interrupt status up to 256 times
 * to drain back-to-back events, all under port.lock.
 */
static irqreturn_t zx29_uart_interrupt(int irq, void *dev_id)
{
	struct uart_port *port = (struct uart_port *)dev_id;
	struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
	unsigned long flags;
	unsigned int status,ris, pass_counter = 256;
	int handled = 0;
	int uart_id = zup->port.line;
	spin_lock_irqsave(&zup->port.lock, flags);
	status = UART_GET_MIS(port) & zup->imr;
	ris = UART_GET_RIS(port);
	if (status) {
		do {
			/* Ack everything except TX/RX/RX-timeout, which are
			 * acked by their specific handlers below. */
			UART_PUT_ICR(port,(status & ~(UART_TXIS|UART_RTIS|UART_RXIS)));
			if(uart_console(&zup->port)){
				/* Console: plain PIO receive. */
				if (status & (UART_RTIS|UART_RXIS))
					zx29_uart_rx_chars(zup);
			}else{
#ifdef CONFIG_CPU_IDLE
				zup->rxd_int_depth = 0;
#endif
				if (status & (UART_RXIS)){
#if CONFIG_SERIAL_ZX29_DMA
					/* Count overruns seen while entering DMA mode. */
					if(ris & UART_OEIS){
						zup->port.icount.overrun++;
						g_uart_overrun[uart_id] = 8;
						test_uart_static(zup->port.line, NULL, 0, 21);
						//if(!uart_console(&zup->port))
						//	BUG_ON(1);
					}
					if (zx29_dma_rx_used(zup)){
						/* DMA already owns RX: just ack, make sure the
						 * RX-timeout interrupt stays enabled, and push
						 * the stall-detect timer out. */
						UART_PUT_ICR(port,UART_RXIS);
						if(!(zup->imr & UART_RTIM)){
							zup->imr |= UART_RTIM;
							UART_PUT_IMSC(port,zup->imr);
						}

						test_uart_static(port->line, NULL, 0, 8);
						uart_mod_timer(zup, &flags);

					}else{
						/* First RX of a burst: mask RX IRQ and hand the
						 * stream over to DMA. */
						test_uart_static(port->line, NULL, 0, 1);

						zup->imr &= ~UART_RXIM;
						UART_PUT_IMSC(&zup->port,zup->imr);
						zx29_uart_rx_dma_chars(zup, &flags);

						zup->dmarx.used = true;
						//when RX&RT comes both, we trigger dma and add timer,so clear RT,waiting the timer
						if(status & (UART_RTIS))
							status &= ~UART_RTIS;
					}
#else
					zx29_uart_rx_chars(zup);
#endif
				}

				if (status & (UART_RTIS)){
#if CONFIG_SERIAL_ZX29_DMA
					if(!zx29_dma_rx_running(zup)){
						/* Idle timeout with DMA stopped: drain by PIO. */
						test_uart_static(port->line, NULL, 0, 2);
						zx29_uart_rx_timeout_chars(zup, &flags);
					}else{
						/* Idle timeout mid-DMA: let the DMA path finish. */
						UART_PUT_ICR(port, UART_RTIS);
						test_uart_static(port->line, NULL, 0, 4);
						zx29_uart_rt_dma(zup, &flags);
					}
#else
					zx29_uart_rx_chars(zup);
#endif
				}
			}

			if (status & (UART_DSRMIS|UART_DCDMIS|UART_CTSMIS|UART_RIMIS))
				zx29_uart_modem_status(zup);

			if (status & UART_TXIS)
				zx29_uart_tx_chars(zup);

			/* Guard against a stuck interrupt source. */
			if (pass_counter-- == 0)
				break;

			status = UART_GET_MIS(port);
		} while (status != 0);
		handled = IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&zup->port.lock, flags);

	return IRQ_RETVAL(handled);
}
+
+#if CONFIG_SERIAL_ZX29_DMA
+extern bool zx29_dma_filter_fn(struct dma_chan *chan, void *param);
+static void uart_dma_init(struct zx29_uart_port *zup)
+{
+ int i=0;
+ struct dma_chan *chan = NULL;
+
+ atomic_set(&zup->dmarx.count, 1);
+ atomic_set(&zup->dmatx.count, 1);
+#if 1
+ if(zup->port.line == UART0)
+ {
+ zup->dmatx.tx_def.dest_addr = (unsigned int)(ZX_UART0_BASE +zx29_UART_DR);
+ }
+ else if(zup->port.line == UART1)
+ {
+ zup->dmatx.tx_def.dest_addr = (unsigned int)(ZX_UART1_BASE+zx29_UART_DR);
+ }
+ else if(zup->port.line == UART2)
+ {
+ zup->dmatx.tx_def.dest_addr = (unsigned int)(ZX_UART2_BASE+zx29_UART_DR);
+ }
+
+ zup->dmatx.tx_def.dma_control.tran_mode = TRAN_MEM_TO_PERI;
+ zup->dmatx.tx_def.dma_control.irq_mode = DMA_ALL_IRQ_ENABLE;
+ zup->dmatx.tx_def.dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
+ zup->dmatx.tx_def.dma_control.src_burst_len = DMA_BURST_LEN_4;
+ zup->dmatx.tx_def.dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
+ zup->dmatx.tx_def.dma_control.dest_burst_len = DMA_BURST_LEN_4;
+
+ dma_cap_mask_t mask;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ if(zup->port.line == UART0)
+ {
+ chan = dma_request_channel(mask, zx29_dma_filter_fn, (void*)DMA_CH_UART0_TX);
+ }
+ else if(zup->port.line == UART1)
+ {
+ chan = dma_request_channel(mask, zx29_dma_filter_fn, (void*)DMA_CH_UART1_TX);
+ }
+ else if(zup->port.line == UART2)
+ {
+ chan = dma_request_channel(mask, zx29_dma_filter_fn, (void*)DMA_CH_UART2_TX);
+ }
+ if(!chan){
+ printk("UART%d DMA TX channel request fail.\n", zup->port.line);
+ return;
+ }
+ zup->dmatx.chan = chan;
+
+
+
+ for(i=0;i<UART_DMA_RX_MAX_COUNT;i++)
+ {
+ if(zup->port.line == UART0)
+ {
+ zup->dmarx.rx_def[i].src_addr = ZX_UART0_BASE+zx29_UART_DR;
+ }
+ else if(zup->port.line == UART1)
+ {
+ zup->dmarx.rx_def[i].src_addr = ZX_UART1_BASE+zx29_UART_DR;
+ }
+ else if(zup->port.line == UART2)
+ {
+ zup->dmarx.rx_def[i].src_addr = ZX_UART2_BASE+zx29_UART_DR;
+ }
+
+ zup->dmarx.rx_def[i].dma_control.tran_mode = TRAN_PERI_TO_MEM;
+ zup->dmarx.rx_def[i].dma_control.irq_mode = DMA_ALL_IRQ_ENABLE;
+ zup->dmarx.rx_def[i].dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
+ zup->dmarx.rx_def[i].dma_control.src_burst_len = DMA_BURST_LEN_4;
+ zup->dmarx.rx_def[i].dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
+ zup->dmarx.rx_def[i].dma_control.dest_burst_len = DMA_BURST_LEN_4;
+ }
+
+ zup->dmarx.rx_index = 0;
+ chan = NULL;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ if(zup->port.line == UART0)
+ {
+ chan = dma_request_channel(mask, zx29_dma_filter_fn, (void*)DMA_CH_UART0_RX);
+ }
+ else if(zup->port.line == UART1)
+ {
+ chan = dma_request_channel(mask, zx29_dma_filter_fn, (void*)DMA_CH_UART1_RX);
+ }
+ else if(zup->port.line == UART2)
+ {
+ chan = dma_request_channel(mask, zx29_dma_filter_fn, (void*)DMA_CH_UART2_RX);
+ }
+ if(!chan){
+ printk("UART%d DMA RX channel request fail.\n", zup->port.line);
+ return;
+ }
+ zup->dmarx.chan = chan;
+#endif
+}
+
+static int uart_dma_cycle_init(struct zx29_uart_port *zup)
+{
+ int ret;
+ int uart_id = zup->port.line;
+ uart_dma_cycle[uart_id].id = zup->port.line;
+ int i,j;
+ for(i=0;i<UART_DMA_CYCLE_RX_CONFIG_COUNT;i++){
+ ret = zx29_sgbuf_init(zup->dmarx.chan, &uart_dma_cycle[uart_id].sgbuf[i],DMA_FROM_DEVICE);
+ if(ret){
+ printk( "init uart_dma_cycle sgbuf failed,uart: %d,ret:%d\n", zup->port.line, ret);
+ for(j=0;j<i;j++){
+ zx29_sgbuf_free(zup->dmarx.chan, &uart_dma_cycle[uart_id].sgbuf[j],DMA_FROM_DEVICE);
+ }
+ return -1;
+ }
+ }
+ for(i=0;i<UART_DMA_CYCLE_RX_CONFIG_COUNT;i++){
+ if(zup->port.line == UART0)
+ uart_dma_cycle[uart_id].rxdef[i].src_addr = ZX_UART0_BASE+zx29_UART_DR;
+ else if(zup->port.line == UART1)
+ uart_dma_cycle[uart_id].rxdef[i].src_addr = ZX_UART1_BASE+zx29_UART_DR;
+ else{
+ uart_dma_cycle[uart_id].rxdef[i].src_addr = ZX_UART2_BASE+zx29_UART_DR;
+ }
+ uart_dma_cycle[uart_id].rxdef[i].dest_addr = (unsigned int)(uart_dma_cycle[uart_id].sgbuf[i].dma_addr);
+ uart_dma_cycle[uart_id].rxdef[i].dma_control.tran_mode = TRAN_PERI_TO_MEM;
+ uart_dma_cycle[uart_id].rxdef[i].dma_control.src_burst_len = DMA_BURST_LEN_4;
+ uart_dma_cycle[uart_id].rxdef[i].count = ZX29_DMA_BUFFER_SIZE;
+ uart_dma_cycle[uart_id].rxdef[i].dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
+ uart_dma_cycle[uart_id].rxdef[i].dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
+ uart_dma_cycle[uart_id].rxdef[i].dma_control.dest_burst_len = DMA_BURST_LEN_4;
+ uart_dma_cycle[uart_id].rxdef[i].dma_control.irq_mode = DMA_ALL_IRQ_ENABLE;
+ uart_dma_cycle[uart_id].rxdef[i].link_addr = 1;
+ }
+ return 0;
+}
+static void uart_dma_cycle_deinit(struct zx29_uart_port *zup)
+{
+ int i;
+ int uart_id = zup->port.line;
+ for(i=0;i<UART_DMA_CYCLE_RX_CONFIG_COUNT;i++){
+ zx29_sgbuf_free(zup->dmarx.chan, &uart_dma_cycle[uart_id].sgbuf[i],DMA_FROM_DEVICE);
+ }
+ memset(uart_dma_cycle[uart_id].rxdef, 0, sizeof(uart_dma_cycle[uart_id].rxdef));
+}
/*
 * Bring up DMA for a UART port when it is opened.
 *
 * TX always gets a single bounce buffer.  RX setup depends on
 * zup->uart_power_mode:
 *   - 0: classic double-buffer (sgbuf_a/sgbuf_b) RX DMA with a jiffies
 *        timer + hrtimer stall detector and a completion kthread;
 *   - 1: cyclic ring RX DMA with its own hrtimer and kthread.
 */
static void uart_dma_startup(struct zx29_uart_port *zup)
{
	int ret = 0;
	if (!zup->dmatx.chan)
	{
		printk("tx_chan is error[%s][%d]\n",__func__,__LINE__);
		return;
	}

	/* TX bounce buffer: DMA-capable, one buffer per port. */
	zup->dmatx.buf = kmalloc(ZX29_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
	if (!zup->dmatx.buf) {
		printk("tx_buf is error[%s][%d]\n",__func__,__LINE__);
		return;
	}

	sg_init_one(&zup->dmatx.sg, zup->dmatx.buf, ZX29_DMA_BUFFER_SIZE);

	/* The DMA buffer is now the FIFO the TTY subsystem can use */
	zup->port.fifosize = 16;//ZX29_DMA_BUFFER_SIZE;
	zup->using_tx_dma = true;

	if(!zup->uart_power_mode)
	{
		/* Mode 0: ping-pong double-buffer RX DMA. */
		if (!zup->dmarx.chan)
		{
			printk(KERN_INFO "[%s][%d]uart_%d rx_chan is error\n",__func__,__LINE__, zup->port.line);
			goto skip_rx;
		}

		/* Allocate and map DMA RX buffers */
		ret = zx29_sgbuf_init(zup->dmarx.chan, &zup->dmarx.sgbuf_a,
					DMA_FROM_DEVICE);
		if (ret) {
			printk(KERN_INFO "[%s][%d] uart_%d rx_buf_a is error\n",__func__,__LINE__, zup->port.line);
			//dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			//	"RX buffer A", ret);
			goto skip_rx;
		}

		ret = zx29_sgbuf_init(zup->dmarx.chan, &zup->dmarx.sgbuf_b,
					DMA_FROM_DEVICE);
		if (ret) {
			printk( "failed to init DMA uart: %d RX buffer B ,ret:%d\n", zup->port.line, ret);
			zx29_sgbuf_free(zup->dmarx.chan, &zup->dmarx.sgbuf_a,
					DMA_FROM_DEVICE);
			goto skip_rx;
		}

		zup->using_rx_dma = true;
		zup->sg2tty = NULL;
		zup->sg2tty_len = 0;
		zup->curr_sg = NULL;
#if RX_DMA_WORK
		/* Stall detector (timer + hrtimer) and completion kthread. */
		timer_setup(&(zup->rx_dma_timer), zx29_uart_rx_dma_timeout, 0);
		hrtimer_init(&zup->rx_dma_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		zup->rx_dma_hrtimer.function = zx29_uart_rx_dma_hrtimeout;
		g_hr_interval = ktime_set(0, 1500000);
		zup->dmarx.running = false;
		zup->dmarx.used = false;
		zup->dmarx.use_buf_b = false;
		zup->dmarx.rx_index = 0;

		zup->pre_pending = 0;
		zup->work_state = false;

		zup->dma_compl_th = kthread_run(dma_complete_thread, zup, "uart_dma_compl");
		BUG_ON(IS_ERR(zup->dma_compl_th));
#endif

skip_rx:

		/* Turn on DMA error (RX/TX will be enabled on demand) */
		printk("uart_dma_startup, port:%d, ret:%d\n", zup->port.line,ret );
		zup->dmacr &= ~UART_DMAONERR;
		//zup->dmacr |= UART_DMAONERR;
		UART_PUT_DMACR(&zup->port, zup->dmacr);
		/* NOTE(review): uart_power_mode is 0 throughout this branch, so
		 * this block is unreachable and RX DMA is never triggered here —
		 * confirm whether the condition should be on using_rx_dma. */
		if(zup->uart_power_mode){
			if (zup->using_rx_dma) {
				//printk(KERN_INFO "[%s][%d]\n",__func__,__LINE__);
				if (zx29_dma_rx_trigger_dma(zup)){
					dev_dbg(zup->port.dev, "could not trigger initial "
						"RX DMA job, fall back to interrupt mode\n");
				}else{
					mod_timer(&(zup->rx_dma_timer), jiffies + RX_DMA_TIMEOUT);
					zup->pre_pending = 0;
					zup->work_state = true;
				}
			}
		}
	}
	else if(zup->uart_power_mode == 1)
	{
		/* Mode 1: cyclic ring RX DMA. */
		ret = uart_dma_cycle_init(zup);
		if(ret){
			printk("uart%d dma cycle init failed,ret %d.\n",zup->port.line,ret);
			return;
		}
		zup->using_rx_dma = true;
		zup->sg2tty = NULL;
		zup->sg2tty_len = 0;
		zup->curr_sg = NULL;
#if RX_DMA_WORK
		hrtimer_init(&zup->rx_dma_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		zup->rx_dma_hrtimer.function = zx29_uart_rx_dma_hrtimeout_cyclic;
		g_hr_interval = ktime_set(0, 1500000);
		zup->dmarx.running = false;
		zup->dmarx.used = false;
		zup->dmarx.use_buf_b = false;
		zup->dmarx.rx_index = 0;
		zup->pre_pending = 0;
		zup->work_state = false;
		sema_init(&zup->sema_cyclic, 0);
		zup->dma_compl_th = kthread_run(dma_complete_thread_use_dma_cyclic, zup, "uart_dma_th_cyc");
		BUG_ON(IS_ERR(zup->dma_compl_th));
#endif
		printk("uart_dma_startup, port:%d, ret:%d\n", zup->port.line,ret );
		zup->dmacr &= ~UART_DMAONERR;
		UART_PUT_DMACR(&zup->port, zup->dmacr);
		/* NOTE(review): uart_power_mode == 1 is guaranteed here, so this
		 * check is redundant (always true). */
		if(zup->uart_power_mode){
			if (zup->using_rx_dma) {
				if (zx29_dma_rx_trigger_dma_use_dma_cyclic(zup)){
					dev_dbg(zup->port.dev, "could not trigger initial "
						"RX DMA job, fall back to interrupt mode\n");
				}else{
					hrtimer_start(&zup->rx_dma_hrtimer, g_hr_interval, HRTIMER_MODE_REL);
					//mod_timer(&(zup->rx_dma_timer), jiffies + RX_DMA_TIMEOUT);
					zup->pre_pending = 0;
					zup->work_state = true;
				}
			}
		}
	}else
		printk("uart%d power mode set error,dma dont startup.\n",zup->port.line);
}
+
+
+#endif
+
/*
 * Wake interrupt on the UART0 RXD pad (used while the SoC is in a
 * low-power state).  Counts the wake event, flags the port as woken by
 * RXD activity, and schedules the write-wakeup tasklet to resume work.
 * Always reports the interrupt as handled.
 */
static irqreturn_t zx29_uart_rxd_irq(int irq, void *dev_id)
{
	struct zx29_uart_port *zup = (struct zx29_uart_port *)dev_id;

	rxd_wake_cnt++;			/* global wake-event statistic */
	zup->rxd_wakeup = true;
	tasklet_schedule(&zup->write_wakeup);
	zup->rxd_int_depth = 0;		/* re-arm the idle-depth tracking */
	return IRQ_HANDLED;//IRQ_RETVAL(retval);
}
+
+/****************************************************************************/
+static int zx29_uart_startup(struct uart_port *port)
+{
+ struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
+ unsigned long flags = 0;
+ unsigned long control = 0;
+ int retval = 0;
+ struct platform_device *pdev=port->private_data;
+// struct zx29_uart_platdata *pdata = pdev->dev.platform_data;
+ int i = 0,j = 0,iflag = 0;
+ struct tty_struct *tty = port->state->port.tty;
+ unsigned int ibrd, fbrd,lcr_h, old_cr;
+ int ret=0;
+ printk("-----zx29_uart_startup, port:%d\n", port->line);
+ #if 0//def CONFIG_ARCH_ZX297520V3_WATCH
+
+ if(port->line == 0)
+ {
+ gpio_free(pdata->uart_txd.gpionum);
+ gpio_free(pdata->uart_rxd.gpionum);
+ //printk("gpio_free err err!\n");
+ }
+
+ wmb();
+
+ #endif
+ if(DEBUG_CONSOLE != pdev->id){
+ char temp_buf[TASK_COMM_LEN]= {0};
+ int th_ctrl = 0;
+ if((strlen(get_task_comm(temp_buf,get_current())) > 0) && (strcmp(get_task_comm(temp_buf,get_current()),"at_ctl") != 0))
+ th_ctrl = 1;
+
+ //app ctrl or kernel ctrl set this
+ int kernel_ctrl = xp2xp_enable_4line();
+ zup->uart_power_mode = (kernel_ctrl | zup->app_ctrl | th_ctrl);
+ printk("zx29_uart%d open task is %s,power_mode is %d.\n",pdev->id, get_task_comm(temp_buf,get_current()),zup->uart_power_mode);
+ if(zup->uart_power_mode){
+ //pm_stay_awake(&pdev->dev);
+ }
+ }
+ //when open, clear last statistic info
+ port->icount.brk = port->icount.buf_overrun = port->icount.frame = 0;
+ port->icount.overrun = port->icount.parity = port->icount.rng = 0;
+ port->icount.rx = port->icount.tx = 0;
+ /*
+ *enable uart clock
+ *if uart is used for console, don't need do these, these was done before
+ */
+ if (DEBUG_CONSOLE != port->line) {
+ /* config uart apb_clk */
+ clk_prepare_enable(zup->busclk);
+ /* enable uart work clock */
+ clk_prepare_enable(zup->wclk);
+ }
+
+ /* Clear all pending error and receive interrupts */
+ UART_PUT_ICR(port, 0xfff);
+
+ /* Allocate the IRQ */
+ retval = request_irq(port->irq, zx29_uart_interrupt, 0, "uart-zx29", zup);
+ if (retval){
+ printk("[UART]unable to attach zx29 UART %d "
+ "interrupt vector=%d\n", port->line, port->irq);
+ return retval;
+ }
+
+ /* set interrupt fifo level RX:1/2 Full, TX:1/2 Full */
+#if 0//CONFIG_SERIAL_ZX29_DMA
+ UART_PUT_IFLS(port, UART_IFLS_RX2_8|UART_IFLS_TX6_8);
+#else
+ UART_PUT_IFLS(port, UART_IFLS_RX2_8|UART_IFLS_TX4_8);
+#endif
+
+#if 0
+ /* Provoke TX FIFO interrupt into asserting. */
+ control = UART_CR_UARTEN | UART_CR_TXE | UART_CR_LBE;
+ UART_PUT_CR(port, control);
+ UART_PUT_FBRD(port, 0);
+ UART_PUT_IBRD(port, 1);
+ UART_PUT_LCRH(port, 0);
+ UART_PUT_CHAR(port, 0);
+ while (UART_GET_FR(port) & UART_FR_TXBUSY)
+ barrier();
+#endif
+ control = UART_CR_UARTEN | UART_CR_RXE | UART_CR_TXE;
+ //console & lp_uart don't need dma
+ if ((DEBUG_CONSOLE != port->line) && (port->line != 4)) {
+#if CONFIG_SERIAL_ZX29_DMA
+ UART_PUT_DMACR(port, UART_TXDMAE | UART_RXDMAE);
+ uart_dma_startup(zup);
+#endif
+ }
+
+ tasklet_init(&zup->write_wakeup, uart_write_wakeup_task, (unsigned long) port);
+ if((pdev->id == 0) && (zup->irq_state == 0) && (zup->uart_power_mode == 0)){
+ ret = request_irq(zup->rxd_irq,
+ zx29_uart_rxd_irq,
+ 0,
+ "uart0_rxd_wake",
+ zup);
+ if(ret<0){
+ panic("request uart0 rxd wake irq fail\n");
+ }
+ printk("--------rxd wake up interrupt ok\n");
+ enable_irq_wake(zup->rxd_irq);
+ zup->irq_state = 1;
+ zup->rxd_int_depth = 1;
+ }
+#if 0
+ /*configure gpio pin to UART*/
+ if((pdata->uart_use)/*&&(port->line == UART0 )*/)
+ {
+ retval=gpio_request(pdata->uart_rxd.gpionum,pdata->uart_rxd.gpioname);
+ if(retval)
+ BUG();
+ retval=gpio_request(pdata->uart_txd.gpionum,pdata->uart_txd.gpioname);
+ if(retval)
+ BUG();
+ /*uart rxd*/
+ zx29_gpio_config(pdata->uart_rxd.gpionum, pdata->uart_rxd.gpiofnc);
+ if(pdata->uart_rxd.gpionum == ZX29_GPIO_121 ) {
+ //pull up gpio121
+ *(volatile unsigned int *)0xf843c82c |= 0xf0;
+ }
+ /*uart txd*/
+ zx29_gpio_config(pdata->uart_txd.gpionum, pdata->uart_txd.gpiofnc);
+#ifdef CONFIG_ARCH_ZX297520V3
+ if((pdev->id != DEBUG_CONSOLE) && (pdata->uart_wakeup_enable == 1) && (zup->irq_state == 0)){
+ zup->irq = platform_get_irq_byname(pdev, "zx29_uart_rxd_wakeup");
+ printk(KERN_INFO"zx29_uart_startup,irq:%d,%s.%d\n",zup->irq,pdata->uart_cts.gpioname,zup->irq_state);
+ if(zup->irq >= 0){
+
+ pcu_int_set_type(PCU_UART0_RXD_INT, IRQF_TRIGGER_FALLING);
+ pcu_int_clear(PCU_UART0_RXD_INT);
+ ret = request_irq(zup->irq, zx29_uart_rxd_irq,
+ IRQF_ONESHOT , "uart_rxd_irq",
+ zup);
+ printk(KERN_INFO"zx29_uart_startup, retval:%d\n",ret);
+ irq_set_irq_wake(zup->irq,1);
+#ifdef CONFIG_CPU_IDLE
+ zup->rxd_int_depth = rxd_wake_cnt = 0;
+ zx_pm_register_callback(uart_0_pm_enter, uart_0_pm_exit);
+ disable_irq_nosync(UART0_RXD_INT);
+#endif
+ zup->irq_state = 1;
+ }else{
+ printk("uart_startup, request wake irq fail:%d\n",zup->irq);
+ }
+ }
+#endif
+ if(pdata->uart_ctsrtsuse)
+ {
+ retval=gpio_request(pdata->uart_cts.gpionum,pdata->uart_cts.gpioname);
+ if(retval)
+ BUG();
+
+ retval=gpio_request(pdata->uart_rts.gpionum,pdata->uart_rts.gpioname);
+ if(retval)
+ BUG();
+/*uart cts*/
+ zx29_gpio_config(pdata->uart_cts.gpionum, pdata->uart_cts.gpiofnc);
+/*uart rts*/
+ zx29_gpio_config(pdata->uart_rts.gpionum, pdata->uart_rts.gpiofnc);
+
+ control |= (UART_CR_RTSEN |UART_CR_CTSEN );
+ control |= UART_CR_RTS; //wl write1 for allow send
+ }
+ zup->autobaud = pdata->uart_abauduse ;
+ }
+#if 0
+ if((pdata->uart_use)&&(port->line == UART1 ))
+ {
+ retval=gpio_request(pdata->uart_rx.gpionum,pdata->uart_rx.gpioname);
+ if(retval)
+ BUG();
+ retval=gpio_request(pdata->uart_txd.gpionum,pdata->uart_tx.gpioname);
+ if(retval)
+ BUG();
+/*uart rxd*/
+ zx29_gpio_config(pdata->uart_rxd.gpionum, pdata->uart_rxd.gpiofnc);
+/*uart txd*/
+ zx29_gpio_config(pdata->uart_txdnum, pdata->uart_txdfnc);
+
+ if(pdata->uart_ctsrtsuse)
+ {
+ retval=gpio_request(pdata->uart_ctsnum,"uart1_cts");
+ if(retval)
+ BUG();
+ retval=gpio_request(pdata->uart_rtsnum,"uart1_rts");
+ if(retval)
+ BUG();
+/*uart cts*/
+ zx29_gpio_config(pdata->uart_ctsnum, pdata->uart_ctsfnc);
+/*uart rts*/
+ zx29_gpio_config(pdata->uart_rtsnum, pdata->uart_rtsfnc);
+
+ control |= (UART_CR_RTSEN |UART_CR_CTSEN );
+ control |= UART_CR_RTS; //wl write1 for allow send
+ }
+ zup->autobaud = pdata->uart_abauduse;
+ }
+ if((pdata->uart_use)&&(port->line == UART2 ))
+ {
+ retval=gpio_request(pdata->uart_rxdnum,"uart2_rxd");
+ if(retval)
+ BUG();
+ retval=gpio_request(pdata->uart_txdnum,"uart2_txd");
+ if(retval)
+ BUG();
+
+/*uart rxd*/
+ zx29_gpio_config(pdata->uart_rxdnum, pdata->uart_rxdfnc);
+ if(pdata->uart_rxdnum == ZX29_GPIO_121 ) {
+ //pull up gpio121
+ *(volatile unsigned int *)0xf843c82c |= 0xf0;
+ }
+/*uart txd*/
+ zx29_gpio_config(pdata->uart_txdnum, pdata->uart_txdfnc);
+
+ if(pdata->uart_ctsrtsuse)
+ {
+ retval=gpio_request(pdata->uart_ctsnum,"uart2_cts");
+ if(retval)
+ BUG();
+
+ retval=gpio_request(pdata->uart_rtsnum,"uart2_rts");
+ if(retval)
+ BUG();
+/*uart cts*/
+ zx29_gpio_config(pdata->uart_ctsnum, pdata->uart_ctsfnc);
+/*uart rts*/
+ zx29_gpio_config(pdata->uart_rtsnum, pdata->uart_rtsfnc);
+
+ control |= (UART_CR_RTSEN |UART_CR_CTSEN );
+ control |= UART_CR_RTS; //wl write1 for allow send
+ }
+ zup->autobaud = pdata->uart_abauduse ;
+ }
+#endif
+#endif
+ zup->autobaud_state = UART_PORT_AUTOBAUD_OFF;
+ UART_PUT_CR(port, control);
+
+ /*
+ * Finally, enable interrupts, only timeouts when using DMA
+ * if initial RX DMA job failed, start in interrupt mode
+ * as well.
+ */
+ spin_lock_irqsave(&zup->port.lock, flags);
+ /* Clear out any spuriously appearing RX interrupts */
+ UART_PUT_ICR(port, (UART_RTIS | UART_RXIS));
+ //when dma not running,set UART_RTIM | UART_RXIM
+ if(!zx29_dma_rx_running(zup)){
+ zup->imr = UART_RTIM | UART_RXIM;
+ UART_PUT_IMSC(port, zup->imr);
+ }
+#if CONFIG_SERIAL_ZX29_DMA
+ zup->port_close = false;
+#endif
+ spin_unlock_irqrestore(&zup->port.lock, flags);
+
+
+ return 0;
+}
+
+/****************************************************************************/
+static void zx29_uart_shutdown(struct uart_port *port)
+{
+ printk("zx29_uart%d_shutdown.\n",port->line);
+ struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
+ unsigned long flags;
+ uint32_t val;
+ int retval = 0;
+ struct platform_device *pdev=port->private_data;
+ //struct zx29_uart_platdata *pdata = pdev->dev.platform_data;
+#if CONFIG_SERIAL_ZX29_DMA
+ zup->port_close = true;
+ up(&zup->sema);
+#endif
+ int ret;
+ tasklet_kill(&zup->write_wakeup);
+#if RX_DMA_WORK
+ if(zx29_dma_rx_work_scheduled(zup)){
+ ret = del_timer_sync(&(zup->rx_dma_timer));
+ ret = hrtimer_cancel(&zup->rx_dma_hrtimer);
+ zup->work_state = 0;
+ }
+#endif
+ /* Disable and clear all interrupts now */
+ spin_lock_irqsave(&port->lock, flags);
+ zup->imr = 0;
+ UART_PUT_IMSC(port, zup->imr);
+ UART_PUT_ICR(port, 0xFFFF);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+#if CONFIG_SERIAL_ZX29_DMA
+ zx29_dma_shutdown(zup);
+#endif
+ /* Free the interrupt */
+ free_irq(zup->port.irq, zup);
+
+ /* Disable UART transmitter and receiver */
+ zup->autorts = false;
+ val = UART_GET_CR(port);
+ if (val & UART_CR_RTS) {
+ zup->rts_state = true;
+ val = UART_CR_RTS;
+ } else
+ zup->rts_state = false;
+ val = UART_CR_UARTEN | UART_CR_TXE;
+ UART_PUT_CR(port, val);
+
+ /* disable break condition and fifos */
+ val = UART_GET_LCRH(port);
+ val &= ~(UART_LCRH_BRK | UART_LCRH_FEN);
+ UART_PUT_LCRH(port, val);
+ if(zup->uart_power_mode){
+ //pm_relax(&pdev->dev);
+ zup->app_ctrl = 0;
+ zup->uart_power_mode = 0;
+ }
+
+ if((pdev->id == 0) && (zup->irq_state == 1) && (zup->uart_power_mode == 0)){
+ free_irq(zup->rxd_irq, zup);
+ disable_irq_wake(zup->rxd_irq);
+ zup->irq_state = 0;
+ }
+
+#if 0
+ if(pdata->uart_use)
+ {
+ if(pdata->uart_ctsrtsuse)
+ {
+ gpio_free(pdata->uart_cts.gpionum);
+ gpio_free(pdata->uart_rts.gpionum);
+ }
+#ifdef CONFIG_ARCH_ZX297520V3
+ if((pdev->id != DEBUG_CONSOLE) && (pdata->uart_wakeup_enable == 1) && (zup->irq_state == 1)){
+ printk(KERN_INFO"zx29_uart_shutdown,irq:%d,%s\n",zup->irq,pdata->uart_cts.gpioname);
+ if(zup->irq){
+ free_irq(zup->irq, zup);
+ pcu_int_clear(PCU_UART0_RXD_INT);
+ irq_set_irq_wake(zup->irq, 0);
+ zup->irq_state = 0;
+ zup->rxd_int_depth = 0;
+ }
+ }
+#endif
+ gpio_free(pdata->uart_rxd.gpionum);
+ gpio_free(pdata->uart_txd.gpionum);
+
+#ifdef CONFIG_ARCH_ZX297520V3_WATCH
+ if(port->line == 0)
+ {
+ retval = gpio_request(pdata->uart_txd.gpionum, pdata->uart_txd.gpioname);
+ if(retval)
+ {
+ BUG();
+ }
+ zx29_gpio_config(pdata->uart_txd.gpionum, GPIO30_GPIO30);
+ gpio_direction_input(pdata->uart_txd.gpionum);
+
+ retval = gpio_request(pdata->uart_rxd.gpionum, pdata->uart_rxd.gpioname);
+ if(retval)
+ {
+ BUG();
+ }
+ zx29_gpio_config(pdata->uart_rxd.gpionum, GPIO29_GPIO29);
+ gpio_direction_input(pdata->uart_rxd.gpionum);
+ }
+#endif
+
+ }
+#endif
+ /* Shutdown uart clock */
+}
+
+/****************************************************************************/
+/*
+ * zx29_uart_set_termios() - apply a new termios configuration to the port.
+ *
+ * Programs the baud-rate divisors, word length, stop bits, parity and
+ * hardware flow control, updates the serial core's status/ignore masks,
+ * and replays the autobaud acknowledgement when autobaud just finished.
+ * The register sequence runs under port->lock with IRQs off.
+ */
+static void zx29_uart_set_termios(struct uart_port *port, struct ktermios *termios,
+				struct ktermios *old)
+{
+	struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
+	unsigned int lcr_h, old_cr;
+	unsigned long flags;
+	unsigned int baud, ibrd, fbrd,j;
+
+	//temple change,using setting from cmm script
+	//if(port->line == DEBUG_CONSOLE)
+	//return;
+
+	/* Set baud rate */
+	/* Ask the core to calculate the divisor for us. */
+	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
+	printk("uart port %d baud is %d.\n",port->line,baud);
+
+	//this should not hapend
+	if(baud == 0)
+		BUG_ON(1);
+	zup->baudrate = baud;
+	/*
+	 * 16x oversampling divisors (PL011-style): integer part plus a
+	 * fractional part rounded to nearest in 1/64 steps.
+	 */
+	ibrd = port->uartclk / (baud<<4);
+	fbrd = ((port->uartclk % (baud<<4) )*8 + baud)/(2*baud);
+	UART_PUT_FBRD(port, fbrd);
+	UART_PUT_IBRD(port, ibrd);
+
+printk("-------zx29_uart_set_termios,line:%d, new baud:%d, uartclk:%d,ibrd:%d, fbrd:%d \n", port->line,
+	baud, port->uartclk, ibrd, fbrd);
+
+	/* Word length from termios CSIZE. */
+	switch (termios->c_cflag & CSIZE) {
+	case CS5:
+		lcr_h = UART_LCRH_WLEN_5;
+		break;
+	case CS6:
+		lcr_h = UART_LCRH_WLEN_6;
+		break;
+	case CS7:
+		lcr_h = UART_LCRH_WLEN_7;
+		break;
+	default: // CS8
+		lcr_h = UART_LCRH_WLEN_8;
+		break;
+	}
+	if (termios->c_cflag & CSTOPB)
+		lcr_h |= UART_LCRH_STP2;
+	if (termios->c_cflag & PARENB) {
+		lcr_h |= UART_LCRH_PEN;
+		if (!(termios->c_cflag & PARODD))
+			lcr_h |= UART_LCRH_EPS;
+	}
+	/* Enable FIFOs whenever the port has them. */
+	if (port->fifosize > 1)
+		lcr_h |= UART_LCRH_FEN;
+
+	spin_lock_irqsave(&port->lock, flags);
+
+	/*
+	 * Update the per-port timeout.
+	 */
+	uart_update_timeout(port, termios->c_cflag, baud);
+
+	port->read_status_mask = UART_DR_OE | 255;
+	if (termios->c_iflag & INPCK)
+		port->read_status_mask |= UART_DR_FE | UART_DR_PE;
+	if (termios->c_iflag & (BRKINT | PARMRK))
+		port->read_status_mask |= UART_DR_BE;
+
+	/*
+	 * Characters to ignore
+	 */
+	port->ignore_status_mask = 0;
+	if (termios->c_iflag & IGNPAR)
+		port->ignore_status_mask |= UART_DR_FE | UART_DR_PE;
+	if (termios->c_iflag & IGNBRK) {
+		port->ignore_status_mask |= UART_DR_BE;
+		/*
+		 * If we're ignoring parity and break indicators,
+		 * ignore overruns too (for real raw support).
+		 */
+		if (termios->c_iflag & IGNPAR)
+			port->ignore_status_mask |= UART_DR_OE;
+	}
+
+	/*
+	 * Ignore all characters if CREAD is not set.
+	 */
+	if ((termios->c_cflag & CREAD) == 0)
+		port->ignore_status_mask |= UART_DUMMY_DR_RX;
+
+	if (UART_ENABLE_MS(port, termios->c_cflag))
+		zx29_uart_enable_ms(port);
+
+	/* first, disable everything */
+	old_cr = UART_GET_CR(port);
+	UART_PUT_CR(port, 0);
+
+	if (termios->c_cflag & CRTSCTS) {
+		if (old_cr & UART_CR_RTS)
+			old_cr |= UART_CR_RTSEN;
+
+		old_cr |= UART_CR_CTSEN;
+		port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
+		zup->autorts = true;
+	} else {
+		old_cr &= ~(UART_CR_CTSEN | UART_CR_RTSEN);
+		zup->autorts = false;
+		port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
+	}
+
+	/*
+	 * ----------v----------v----------v----------v-----
+	 * NOTE: MUST BE WRITTEN AFTER UARTLCR_M & UARTLCR_L
+	 * ----------^----------^----------^----------^-----
+	 */
+	UART_PUT_LCRH(port, lcr_h);
+	UART_PUT_CR(port, old_cr);
+
+	spin_unlock_irqrestore(&port->lock, flags);
+	if( zup->autobaud_state == UART_PORT_AUTOBAUD_ON)
+	{
+		msleep(50);
+		zup->port.icount.rx = 0;
+
+		/*
+		 * Autobaud just locked: transmit the canned reply bytes
+		 * (UART_AT_send_ok — presumably the AT "OK" string; confirm
+		 * against its definition) by busy-waiting on TX FIFO space.
+		 */
+		for( j = 0; j<UART_AT_SENDOK_NUM; j++)
+		{
+			while (UART_GET_FR(port) & UART_FR_TXFF)
+				barrier();
+			UART_PUT_CHAR(&zup->port, UART_AT_send_ok[j]);
+		}
+
+		zup->autobaud_state = UART_PORT_AUTOBAUD_OFF;
+	}
+}
+
+/****************************************************************************/
+/* Report the driver's port name; NULL when the port is not ours. */
+static const char *zx29_uart_type(struct uart_port *port)
+{
+	if (port->type == PORT_ZX29)
+		return "zx29_UART";
+	return NULL;
+}
+
+/****************************************************************************/
+
+/*
+ * The UART register window is always present on this SoC, so there is
+ * nothing to claim; the historical request_mem_region() call was removed.
+ */
+static int zx29_uart_request_port(struct uart_port *port)
+{
+	return 0;
+}
+
+/****************************************************************************/
+/* Autoconfiguration hook: claim the port as PORT_ZX29 when asked to probe. */
+static void zx29_uart_config_port(struct uart_port *port, int flags)
+{
+	if (!(flags & UART_CONFIG_TYPE))
+		return;
+
+	port->type = PORT_ZX29;
+	zx29_uart_request_port(port);
+}
+
+/****************************************************************************/
+
+/* Counterpart of zx29_uart_request_port(): nothing to release (the
+ * release_mem_region() call is intentionally disabled). */
+static void zx29_uart_release_port(struct uart_port *port)
+{
+//	release_mem_region(port->mapbase, SZ_4K);
+}
+
+/****************************************************************************/
+
+/* Validate a userspace TIOCSSERIAL request: only "unknown" or our own
+ * port type is acceptable. */
+static int zx29_uart_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+	if (ser->type == PORT_UNKNOWN || ser->type == PORT_ZX29)
+		return 0;
+	return -EINVAL;
+}
+
+
+/* Blocking single-character transmit: spin until the TX FIFO has room,
+ * then write the byte to the data register. */
+void zx29_uart_putc(struct uart_port *port, int c)
+{
+	for (;;) {
+		if (!(UART_GET_FR(port) & UART_FR_TXFF))
+			break;
+		barrier();
+	}
+	UART_PUT_CHAR(port, c);
+}
+
+
+#ifdef CONFIG_CONSOLE_POLL
+/****************************************************************************/
+/*
+ * Polled (kgdb/kdb) single-character read: NO_POLL_CHAR when the RX FIFO
+ * is empty, otherwise the next received byte.
+ *
+ * BUGFIX: this returned UART_PUT_CHAR(port) — the TX accessor, called with
+ * the wrong arity — instead of reading the RX data register.
+ */
+static int zx29_get_poll_char(struct uart_port *port)
+{
+	if (UART_GET_FR(port) & UART_FR_RXFE)
+		return NO_POLL_CHAR;
+
+	return UART_GET_CHAR(port);
+}
+
+/****************************************************************************/
+/* Polled (kgdb/kdb) single-character write: busy-wait for TX FIFO space,
+ * then emit the byte. */
+static void zx29_put_poll_char(struct uart_port *port, unsigned char ch)
+{
+	for (;;) {
+		if (!(UART_GET_FR(port) & UART_FR_TXFF))
+			break;
+		barrier();
+	}
+	UART_PUT_CHAR(port, ch);
+}
+#endif /* CONFIG_CONSOLE_POLL */
+/*
+ * Throttle RX (tty buffer full): wait for the in-flight RX DMA to go idle,
+ * then disable RX DMA under the port lock.
+ *
+ * NOTE(review): may sleep (msleep), so this relies on being called from
+ * process context; 4096 appears to be the programmed DMA transfer size, a
+ * remaining count of 4096 meaning "nothing in flight" — TODO confirm both
+ * against the DMA setup code.
+ */
+static void zx29_uart_throttle_rx(struct uart_port *port)
+{
+	unsigned long flags;
+	struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
+	dma_peripheral_id rx_id = uart_get_rx_dma_peripheral_id(zup);
+	/* Spin (sleeping) until the channel reports a full, untouched buffer. */
+	while(zx29_dma_get_transfer_num(rx_id) != 4096)
+		msleep(1);
+	spin_lock_irqsave(&port->lock, flags);
+	/* Mask RX DMA requests in the UART, then stop the channel itself. */
+	zup->dmacr &= ~UART_RXDMAE;
+	UART_PUT_DMACR(&zup->port,zup->dmacr);
+	zx29_dma_stop(rx_id);
+	zup->dmarx.running = false;
+	zup->dmarx.used = false;
+	spin_unlock_irqrestore(&port->lock, flags);
+}
+/*
+ * Unthrottle RX: restart DMA reception; if the DMA trigger fails, fall
+ * back to PIO by unmasking the RX and RX-timeout interrupts.
+ */
+static void zx29_uart_unthrottle_rx(struct uart_port *port)
+{
+	struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
+	unsigned long flags;
+	spin_lock_irqsave(&port->lock, flags);
+	if (zx29_dma_rx_trigger_dma(zup)) {
+		/* DMA unavailable: re-enable interrupt-driven RX instead. */
+		printk("rx_dma_chars RXDMA start fail\n");
+		zup->imr |= (UART_RTIM|UART_RXIM);
+		UART_PUT_IMSC(&zup->port,zup->imr);
+	}else{
+		/* DMA restarted: rearm the poll timer and clear stale
+		 * RX/timeout interrupt status. */
+		uart_mod_timer(zup, &flags);
+		zup->pre_pending = 0;
+		zup->dmarx.used = true;
+		zup->work_state = true;
+		UART_PUT_ICR(&zup->port,(UART_RTIS|UART_RXIS));
+	}
+	spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/****************************************************************************/
+/*
+ * Define the basic serial functions we support.
+ */
+/* serial_core callback table for the zx29 UART.
+ * NOTE(review): guarded with "#if CONFIG_SERIAL_ZX29_DMA" rather than
+ * #ifdef — an undefined macro evaluates to 0 here, but #ifdef/IS_ENABLED()
+ * is the usual kernel convention. */
+static const struct uart_ops zx29_uart_ops = {
+	.tx_empty	= zx29_uart_tx_empty,
+	.set_mctrl	= zx29_uart_set_mctrl,
+	.get_mctrl	= zx29_uart_get_mctrl,
+	.start_tx	= zx29_uart_start_tx,
+	.stop_tx	= zx29_uart_stop_tx,
+	.stop_rx	= zx29_uart_stop_rx,
+	.throttle	= zx29_uart_throttle_rx,
+	.unthrottle	= zx29_uart_unthrottle_rx,
+	.enable_ms	= zx29_uart_enable_ms,
+	.break_ctl	= zx29_uart_break_ctl,
+	.startup	= zx29_uart_startup,
+	.shutdown	= zx29_uart_shutdown,
+	.set_termios	= zx29_uart_set_termios,
+#if CONFIG_SERIAL_ZX29_DMA
+	.flush_buffer	= zx29_dma_flush_buffer,
+#endif
+	.type		= zx29_uart_type,
+	.request_port	= zx29_uart_request_port,
+	.release_port	= zx29_uart_release_port,
+	.config_port	= zx29_uart_config_port,
+	.verify_port	= zx29_uart_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+	.poll_get_char	= zx29_get_poll_char,
+	.poll_put_char	= zx29_put_poll_char,
+#endif
+
+};
+
+
+/****************************************************************************/
+/*
+ * zx29_init_ports() - one-time setup of a uart_port from platform data.
+ *
+ * Resolves register resources, clocks, IRQs and the per-port clock rate,
+ * and fills in the uart_port fields. Returns 0 or a negative errno.
+ *
+ * Fixes vs. the original:
+ *  - devm_platform_ioremap_resource() returns ERR_PTR(), never NULL, so
+ *    the error check used IS_ERR() now (the old NULL test could not fire);
+ *  - max_bus_clk is only used when the DT property read succeeds (it was
+ *    previously passed to clk_set_rate() uninitialized on failure);
+ *  - membase is printed with %p instead of %08x (pointer vs. int mismatch);
+ *  - removed unused locals (np, baud, ibrd, fbrd).
+ */
+static int zx29_init_ports(struct zx29_uart_port *zx29_port,
+				struct platform_device *pdev)
+{
+	int ret = 0;
+	struct uart_port *port = &zx29_port->port;
+	unsigned int offset = (unsigned int)(pdev->id);
+	/*cz.li add for uart1 2 change baudrate to 4M on 2023/8/15 start*/
+	unsigned int max_bus_clk = 0;
+	/*cz.li add for uart1 2 change baudrate to 4M on 2023/8/15 end*/
+	struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	if (!regs) {
+		dev_err(&pdev->dev, "zx29_init_ports, get resource fail,\n");
+		return -ENODEV;
+	}
+
+	/* get apb clock */
+	zx29_port->busclk = devm_clk_get(&pdev->dev, UART_APBCLK_NAME);
+	if (IS_ERR(zx29_port->busclk)) {
+		ret = PTR_ERR(zx29_port->busclk);
+		printk("failed to get zx29_port->busclk: %d\n", ret);
+		return ret;
+	}
+
+	/* get work clock */
+	zx29_port->wclk = devm_clk_get(&pdev->dev, UART_WCLK_NAME);
+	if (IS_ERR(zx29_port->wclk)) {
+		ret = PTR_ERR(zx29_port->wclk);
+		printk("failed to get zx29_port->wclk: %d\n", ret);
+		return ret;
+	}
+
+	/* UART0/UART2 may run from a faster bus clock (4M baud support). */
+	if (0 == pdev->id || 2 == pdev->id) {
+		if (!device_property_read_u32(&pdev->dev, "uart-max-bus-freq",
+					      &max_bus_clk))
+			clk_set_rate(zx29_port->wclk, max_bus_clk);
+	}
+	if (offset == 0)
+		clk_set_rate(zx29_port->wclk, 104 * 1000000);
+
+	port->line = offset;
+	port->type = PORT_ZX29;
+	port->fifosize = UART_TXFIFO_SIZE;
+	port->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	if (pdev->id == 0) {
+		/* UART0 has an extra RXD wakeup interrupt. */
+		zx29_port->rxd_irq = irq_of_parse_and_map(pdev->dev.of_node, 1);
+	}
+
+	port->membase = devm_platform_ioremap_resource(pdev, 0);
+	/* BUGFIX: returns ERR_PTR() on failure, never NULL. */
+	if (IS_ERR(port->membase))
+		return PTR_ERR(port->membase);
+	port->mapbase = regs->start;
+	port->mapsize = resource_size(regs);
+
+	port->ops = &zx29_uart_ops;
+	port->uartclk = clk_get_rate(zx29_port->wclk);
+
+	port->private_data = pdev;
+	/* Fallback when the clock framework reports no rate. */
+	if (port->uartclk == 0) {
+		printk("---zx29_init_ports, uartclk hard set to 26M\n");
+		if (0 == pdev->id || 2 == pdev->id)
+			port->uartclk = 104000000;
+		else
+			port->uartclk = 26000000;
+	}
+	printk("---zx29_init_ports, line:%d, irq:%d, membase:%p, uartclk:%d\n",
+	       port->line, port->irq, port->membase, port->uartclk);
+	/*
+	 * Only clocks are configured here; pin muxing relies on the
+	 * bootloader defaults (gpio driver not available at this point).
+	 */
+	if (DEBUG_CONSOLE == pdev->id) {
+		/* config uart apb_clk */
+		clk_prepare_enable(zx29_port->busclk);
+
+		/* enable uart work clock */
+		clk_prepare_enable(zx29_port->wclk);
+	}
+	return 0;
+}
+
+
+#ifdef CONFIG_SERIAL_ZX29_UART_CONSOLE
+
+#if VEHICLE_USE_ONE_UART_LOG
+/*
+ * Console character output when the UART is shared between cores: skip
+ * output while the PS core owns the UART, and serialize with the
+ * inter-core software spinlock.
+ *
+ * Fix: the 'ret' declaration was below executable statements
+ * (declaration-after-statement); hoisted to the top of the block.
+ */
+static void zx29_uart_console_putc(struct uart_port *port, int c)
+{
+	int ret;
+
+	if (g_core_id_occupy_uart == SYMB_PS_CORE_ID)
+		return;
+	ret = soft_spin_lock_printf(UART_SFLOCK);
+	if (ret)
+		return;		/* lock unavailable: drop the character */
+	while (UART_GET_FR(port) & UART_FR_TXFF)
+		barrier();
+	UART_PUT_CHAR(port, c);
+	soft_spin_unlock(UART_SFLOCK);
+}
+#else
+/* Console character output: spin until the TX FIFO accepts a byte. */
+static void zx29_uart_console_putc(struct uart_port *port, int c)
+{
+	for (;;) {
+		if (!(UART_GET_FR(port) & UART_FR_TXFF))
+			break;
+		barrier();
+	}
+	UART_PUT_CHAR(port, c);
+}
+
+#endif
+
+
+
+/****************************************************************************/
+/* Console write: emit the buffer, expanding '\n' to "\n\r".
+ * Runs lockless by design (see the disabled port->lock calls upstream). */
+static void zx29_uart_console_write(struct console *co, const char *s, unsigned int count)
+{
+	struct uart_port *port = &zx29_uart_ports[co->index].port;
+	unsigned int i;
+
+	for (i = 0; i < count; i++) {
+		zx29_uart_console_putc(port, s[i]);
+		if (s[i] == '\n')
+			zx29_uart_console_putc(port, '\r');
+	}
+}
+
+/***************************************************************************
+ * If the port was already initialised (eg, by a boot loader),
+ * try to determine the current setup.
+ ****************************************************************************/
+/* Recover baud/parity/word-length from the hardware registers, but only
+ * if the bootloader already enabled the UART; outputs are left untouched
+ * otherwise. */
+static void __init zx29_console_get_options(struct uart_port *port, int *baud,
+						int *parity, int *bits)
+{
+	if (UART_GET_CR(port) & UART_CR_UARTEN) {
+		unsigned int lcr_h, ibrd, fbrd;
+
+		lcr_h = UART_GET_LCRH(port);
+		*parity = 'n';
+		if (lcr_h & UART_LCRH_PEN) {
+			if (lcr_h & UART_LCRH_EPS)
+				*parity = 'e';
+			else
+				*parity = 'o';
+		}
+		/* Only 7- and 8-bit words are distinguished here. */
+		if ((lcr_h & 0x60) == UART_LCRH_WLEN_7)
+			*bits = 7;
+		else
+			*bits = 8;
+
+		ibrd = UART_GET_IBRD(port);
+		fbrd = UART_GET_FBRD(port);
+
+		/* Inverse of the 16x divisor formula, scaled by 8 with a -1
+		 * rounding tweak: baud ~= clk / (16 * (ibrd + fbrd/64)).
+		 * NOTE(review): the "-1" rounding bias looks intentional —
+		 * confirm against zx29_uart_set_termios(). */
+		*baud = port->uartclk * 8 / (16*8 * ibrd + 2*fbrd-1);
+	}
+}
+
+/****************************************************************************/
+/*
+ * Console setup: pick the console port, force the UART/TX enable bits on,
+ * then apply either the "console=" options or the settings inherited from
+ * the bootloader.
+ *
+ * Fix: the printk preceded the declarations (declaration-after-statement,
+ * not valid for kernel C); moved below them.
+ */
+static int __init zx29_uart_console_setup(struct console *co, char *options)
+{
+	struct uart_port *port;
+	int baud = 115200;
+	int bits = 8;
+	int parity = 'n';
+	int flow = 'n';
+	unsigned int uart_cr = 0;
+
+	printk("zx29_uart_console_setup.\n");
+
+	if ((co->index < 0) || (co->index >= zx29_MAXPORTS))
+		co->index = CONFIG_UART_CONSOLE_ID;
+
+	port = &zx29_uart_ports[co->index].port;
+	if (port->membase == NULL)
+		return -ENODEV;
+
+	/* Make sure the UART and its transmitter are enabled. */
+	uart_cr = UART_GET_CR(port);
+	uart_cr |= UART_CR_UARTEN | UART_CR_TXE;
+	UART_PUT_CR(port, uart_cr);
+
+	if (options)
+		uart_parse_options(options, &baud, &parity, &bits, &flow);
+	else
+		zx29_console_get_options(port, &baud, &parity, &bits);
+
+	return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+/****************************************************************************/
+
+static struct uart_driver zx29_uart_driver;
+/*
+ * The console port index is fixed at build time. The legacy device-table
+ * scan that used to live here was dead (#if 0) code and has been dropped.
+ */
+int zx29_get_console_index(void)
+{
+	return CONFIG_UART_CONSOLE_ID;
+}
+/* Console descriptor: registers /dev/ttyS* as a printk console; index -1
+ * lets the core bind it via "console=" or the preferred-console list. */
+static struct console zx29_uart_console = {
+	.name		= "ttyS",
+	.write		= zx29_uart_console_write,
+	.device		= uart_console_device,
+	.setup		= zx29_uart_console_setup,
+	.flags		= CON_PRINTBUFFER,
+	.index		= -1,
+	.data		= &zx29_uart_driver,
+};
+/* Register the zx29 console (invoked manually; the console_initcall is
+ * disabled below). Returns 0 on success, -1 on a bad console index. */
+static int __init zx29_uart_console_init(void)
+{
+	int idx = zx29_get_console_index();
+
+	if (idx < 0) {
+		printk("console init fail, uart config fail, console_dev_id is: %d", idx);
+		return -1;
+	}
+
+	register_console(&zx29_uart_console);
+	pr_info("[UART]register_console: zx29 console registered!\n");
+	return 0;
+}
+
+//console_initcall(zx29_uart_console_init);
+
+#define zx29_UART_CONSOLE (&zx29_uart_console)
+
+
+/* Earlycon write hook: hand the buffer to the generic helper, emitting
+ * each character through zx29_uart_console_putc(). */
+static void zx29_uart_early_write(struct console *con, const char *s, unsigned n)
+{
+	struct earlycon_device *dev = con->data;
+
+	uart_console_write(&dev->port, s, n, zx29_uart_console_putc);
+}
+
+/* Earlycon setup: requires the fixed mapping supplied on the command line
+ * / DT; just installs the write hook. */
+static int __init zx29_uart_early_console_setup(struct earlycon_device *device,
+						const char *opt)
+{
+	if (!device->port.membase)
+		return -ENODEV;
+
+	device->con->write = zx29_uart_early_write;
+
+	return 0;
+}
+
+OF_EARLYCON_DECLARE(zx29_uart, "zxic,zx29-uart", zx29_uart_early_console_setup);
+#else
+#define zx29_UART_CONSOLE NULL
+#endif /* CONFIG_zx29_UART_CONSOLE */
+/*
+ * Select the pinctrl state for the given UART. UART0/UART1 keep the
+ * default pin configuration; UART2 explicitly selects its "default"
+ * pinctrl state.
+ *
+ * BUGFIX: on pinctrl_lookup_state() failure the original still passed the
+ * ERR_PTR state to pinctrl_select_state(); it now bails out instead.
+ */
+static void zx29_uart_pin_ctrl(struct platform_device *pdev)
+{
+	struct pinctrl *pin_ctrl;
+	struct pinctrl_state *state0;
+
+	pin_ctrl = devm_pinctrl_get(&pdev->dev);
+	switch (pdev->id) {
+	case 0:
+		printk("zx29_uart %d use default pinctrl.",pdev->id);
+		break;
+	case 1:
+		printk("zx29_uart %d use default pinctrl.",pdev->id);
+		break;
+
+	case 2:
+		if (IS_ERR(pin_ctrl)) {
+			dev_warn(&pdev->dev, "fail to get uart2 pins.");
+			return;
+		}
+		state0 = pinctrl_lookup_state(pin_ctrl, "default");
+		if (IS_ERR(state0)) {
+			dev_err(&pdev->dev, "uart2 pinstate get fail.\n");
+			break;	/* do not select an ERR_PTR state */
+		}
+		if (pinctrl_select_state(pin_ctrl, state0))
+			dev_err(&pdev->dev, "uart2 select pinstate fail.\n");
+		break;
+	}
+}
+
+/*
+ * Define the zx29 UART driver structure.
+ */
+/* serial_core driver descriptor: owns the ttyS device space for all
+ * zx29_MAXPORTS ports and carries the optional console. */
+static struct uart_driver zx29_uart_driver = {
+	.owner		= THIS_MODULE,
+	.driver_name	= "zx29_uart",
+	.dev_name	= "ttyS",
+	.major		= SERIAL_zx29_MAJOR,
+	.minor		= SERIAL_MINOR_START,
+	.nr		= zx29_MAXPORTS,
+	.cons		= zx29_UART_CONSOLE,
+};
+
+/* Per-port wakelock name storage; only used by the (currently commented
+ * out) wake_lock_init() code in zx29_uart_probe(). */
+unsigned char uart_wakelock_name[zx29_MAXPORTS][20]={{0}};
+/****************************************************************************/
+
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * System-sleep suspend: switch pins to their sleep state, stop the RX/TX
+ * DMA channels under the port lock, and rearm the UART0 RXD wakeup IRQ.
+ * UART1 is skipped entirely.
+ *
+ * Fixes vs. the original:
+ *  - 'flags' was unsigned int; raw_spin_lock_irqsave() stores an
+ *    unsigned long (truncation on 64-bit) — corrected;
+ *  - removed the unused 'ret' local and the dead #if 0 block.
+ */
+static int zx29_uart_suspend(struct device *dev)
+{
+	struct zx29_uart_port *zup = dev_get_drvdata(dev);
+	unsigned long flags;
+
+	if (!zup)
+		return -EINVAL;
+	if (zup->port.line == UART1)
+		return 0;
+
+	pinctrl_pm_select_sleep_state(dev);
+
+	printk("zx29_uart%d suspend.\n",zup->port.line);
+
+	raw_spin_lock_irqsave(&zup->port.lock, flags);
+	zup->enter_suspend = 1;
+	if (zup->port.line == UART2) {
+		zx29_dma_stop(DMA_CH_UART2_RX);
+		zx29_dma_stop(DMA_CH_UART2_TX);
+		raw_spin_unlock_irqrestore(&zup->port.lock, flags);
+		return 0;
+	}
+
+	zx29_dma_stop(DMA_CH_UART0_RX);
+	zx29_dma_stop(DMA_CH_UART0_TX);
+	raw_spin_unlock_irqrestore(&zup->port.lock, flags);
+
+	//pcu_int_clear(PCU_UART0_RXD_INT);
+	/* Re-enable the RXD wakeup interrupt if it was left disabled. */
+	if (zup->irq_state && (zup->rxd_int_depth == 0)) {
+		struct irq_data *data_rxd;
+
+		data_rxd = irq_get_irq_data(zup->rxd_irq);
+		if (data_rxd) {
+			if (irqd_irq_disabled(data_rxd))
+				enable_irq(zup->rxd_irq);
+		}
+		zup->rxd_int_depth = 1;
+	}
+
+	return 0;
+}
+
+/*
+ * System-sleep resume: restore the default pin state, clear the suspend
+ * flag and tell the DMA cycle logic that we just resumed. UART1 is
+ * skipped, mirroring zx29_uart_suspend().
+ *
+ * Fix: 'int uart_id' was declared after executable statements
+ * (declaration-after-statement); hoisted to the top of the block.
+ */
+static int zx29_uart_resume(struct device *dev)
+{
+	struct zx29_uart_port *zup = dev_get_drvdata(dev);
+	int uart_id;
+
+	if (!zup)
+		return -EINVAL;
+	uart_id = zup->port.line;
+	if (zup->port.line == UART1)
+		return 0;
+
+	pinctrl_pm_select_default_state(dev);
+
+	printk("zx29_uart%d resume.\n",zup->port.line);
+
+	zup->enter_suspend = 0;
+	uart_dma_cycle[uart_id].from_resume = 1;
+	return 0;
+}
+
+
+#endif
+
+/* System-sleep PM ops only (no runtime PM callbacks are provided). */
+static SIMPLE_DEV_PM_OPS(zx29_uart_dev_pm_ops, zx29_uart_suspend, zx29_uart_resume);
+
+
+
+/*
+ * zx29_uart_probe() - bind one UART instance.
+ *
+ * Resolves the port index from the DT "uart" alias, initializes the port,
+ * configures pins and (for non-console, non-UART4 ports) DMA, registers
+ * the port with serial_core and creates the sysfs attributes.
+ *
+ * NOTE(review): the results of device_create_file() ('error') are never
+ * checked, and zx29_init_ports() failure only logs before continuing —
+ * both look deliberate best-effort, but worth confirming.
+ */
+static int zx29_uart_probe(struct platform_device *pdev)
+{
+	int ret=0;
+	int error;
+	char wakelock_name[20];	/* only used by the disabled wakelock code below */
+	struct device_node *np = pdev->dev.of_node;
+	ret = of_alias_get_id(np, "uart");
+
+	if(ret < 0){
+		printk("-----zx29_uart_probe,of_alias_get_id fail ret:%d\n", ret);
+		return -ENODEV;
+	}
+	/* The DT alias index is the canonical port number. */
+	pdev->id = ret;
+	printk("-----zx29_uart_probe,ret:%d\n", ret);
+	// struct zx29_uart_platdata *pdata = pdev->dev.platform_data;
+	struct zx29_uart_port *port = &zx29_uart_ports[pdev->id];
+
+
+#if 0//def CONFIG_SERIAL_ZX29_UART_CONSOLE
+	if(DEBUG_CONSOLE != pdev->id){
+		ret = zx29_init_ports(port,pdev);
+	}
+#else
+	ret = zx29_init_ports(port,pdev);
+
+#endif
+	if(ret < 0){
+		printk("-----zx29_uart_probe,zx29_init_ports fail ret:%d\n", ret);
+	}
+	zx29_uart_pin_ctrl(pdev);
+#if CONFIG_SERIAL_ZX29_DMA
+	/* DMA is used by every port except the debug console and UART4. */
+	if((DEBUG_CONSOLE != pdev->id ) && (pdev->id != 4))
+	{
+		uart_dma_init(port);
+		printk(KERN_INFO "[%s][%d]UART_%d DMA is OPENED\n",__func__,__LINE__,pdev->id);
+	}
+#endif
+	ret = 0;
+	ret=uart_add_one_port(&zx29_uart_driver, &port->port);
+
+	if(ret)
+	{
+#if CONFIG_SERIAL_ZX29_DMA
+		/* Undo uart_dma_init() before bailing out. */
+		zx29_dma_remove(port);
+#endif
+		return ret;
+	}
+#if CONFIG_SERIAL_ZX29_DMA
+	sema_init(&port->sema, 0);
+#endif
+
+	platform_set_drvdata(pdev, port);
+
+	if(pdev->id == DEBUG_CONSOLE){
+		//g_console_open_flag = pdata->uart_input_enable ? pdata->uart_input_enable : 0;
+		error = device_create_file(&pdev->dev, &dev_attr_console_input);
+#if VEHICLE_USE_ONE_UART_LOG
+		error = device_create_file(&pdev->dev, &dev_attr_console_uart_toggle);
+		error = device_create_file(&pdev->dev, &dev_attr_coreid_occupy_uart);
+		/* NOTE(review): this 'ret' shadows the outer one — harmless
+		 * here (outer ret is 0 already), but confusing. */
+		int ret;
+		/* Inter-core channel used to hand the console UART between cores. */
+		ret = rpmsgCreateChannel(CORE_PS0, ICP_CHANNEL_CONSOLE_UART, ICP_BUFFERSIZE_CONSOLE_TOGGLE);
+		if(ret){
+			printk("linux5 request icp channel for uart fail %d.\n",ret);
+		}
+		rpmsgRegCallBack(CORE_PS0, ICP_CHANNEL_CONSOLE_UART, icp_callback_ps2cap);
+#endif
+	}
+
+	if(pdev->id != DEBUG_CONSOLE){
+		error = device_create_file(&pdev->dev, &dev_attr_ctsrts_input);
+		error = device_create_file(&pdev->dev, &dev_attr_wakeup_enable);
+		error = device_create_file(&pdev->dev, &dev_attr_sleep_state);
+		error = device_create_file(&pdev->dev, &dev_attr_app_ctrl);
+
+	}
+	error = device_create_file(&pdev->dev, &dev_attr_statics);
+	device_init_wakeup(&pdev->dev, true);
+/*
+	strcpy(wakelock_name, "uart_wakelock_x");
+	wakelock_name[14] = '0' + pdev->id;
+	strcpy(uart_wakelock_name[pdev->id], wakelock_name);
+	wake_lock_init(&(port->port.port_wakelock),WAKE_LOCK_SUSPEND,uart_wakelock_name[pdev->id]);
+*/
+
+	printk(KERN_INFO "TSP zx29 UART_%d probe OK\n",pdev->id);
+	return 0;
+}
+
+/****************************************************************************/
+/*
+ * Platform remove: drop the sysfs attributes for this device, then
+ * unregister every port (and its DMA resources).
+ *
+ * BUGFIX: 'zup' was initialized with container_of(NULL, ...), i.e.
+ * pointer arithmetic on a null pointer; it is now simply NULL until
+ * assigned in the loop.
+ *
+ * NOTE(review): the loop tears down ALL ports on the first device's
+ * remove, not just pdev's own port — preserved as-is, but verify this is
+ * intentional (it matches a "driver unload only" usage).
+ */
+static int /*__devexit*/ zx29_uart_remove(struct platform_device *pdev)
+{
+	struct uart_port *port = NULL;
+#if CONFIG_SERIAL_ZX29_DMA
+	struct zx29_uart_port *zup = NULL;
+#endif
+	int i;
+
+	if (pdev->id == DEBUG_CONSOLE) {
+		device_remove_file(&pdev->dev, &dev_attr_console_input);
+	}
+
+	if (pdev->id != DEBUG_CONSOLE) {
+		device_remove_file(&pdev->dev, &dev_attr_ctsrts_input);
+		device_remove_file(&pdev->dev, &dev_attr_wakeup_enable);
+	}
+
+	for (i = 0; (i < zx29_MAXPORTS); i++) {
+		port = &zx29_uart_ports[i].port;
+		if (port) {
+			uart_remove_one_port(&zx29_uart_driver, port);
+
+#if CONFIG_SERIAL_ZX29_DMA
+			zup = container_of(port, struct zx29_uart_port, port);
+			zx29_dma_remove(zup);
+#endif
+		}
+	}
+	return 0;
+}
+
+/* DT match table: binds to "zxic,zx29-uart" nodes. */
+static const struct of_device_id zx29_uart_of_match[] = {
+	{ .compatible = "zxic,zx29-uart"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, zx29_uart_of_match);
+
+/* Platform glue for the zx29 UART.
+ * NOTE(review): .owner is set by platform_driver_register() anyway on
+ * modern kernels; the explicit assignment here is redundant but harmless. */
+static struct platform_driver zx29_uart_platform_driver = {
+	.probe		= zx29_uart_probe,
+	//.remove		= __devexit_p(zx29_uart_remove),
+	.remove		= zx29_uart_remove,
+	.driver		= {
+		.name	= "zx29_uart",
+		.pm	= &zx29_uart_dev_pm_ops,
+		.of_match_table	= of_match_ptr(zx29_uart_of_match),
+		.owner	= THIS_MODULE,
+	},
+};
+
+/* Module init: register the uart_driver first, then the platform driver;
+ * unwind the uart_driver registration if the latter fails. */
+static int __init zx29_uart_init(void)
+{
+	int err = uart_register_driver(&zx29_uart_driver);
+
+	if (err)
+		return err;
+
+	err = platform_driver_register(&zx29_uart_platform_driver);
+	if (!err) {
+		printk(KERN_INFO "zx29 UART driver registered\n");
+		return 0;
+	}
+
+	uart_unregister_driver(&zx29_uart_driver);
+	return err;
+}
+
+/* Module exit: unregister in reverse order of init (console first, then
+ * the platform driver, then the uart_driver). */
+static void __exit zx29_uart_exit(void)
+{
+#ifdef CONFIG_SERIAL_ZX29_UART_CONSOLE
+	unregister_console(&zx29_uart_console);
+#endif
+	platform_driver_unregister(&zx29_uart_platform_driver);
+	uart_unregister_driver(&zx29_uart_driver);
+}
+//arch_initcall(zx29_uart_init);
+
+//subsys_initcall(zx29_uart_init);
+module_init(zx29_uart_init);
+module_exit(zx29_uart_exit);
+
diff --git a/upstream/linux-5.10/include/linux/spi/spi.h b/upstream/linux-5.10/include/linux/spi/spi.h
new file mode 100755
index 0000000..569ba92
--- /dev/null
+++ b/upstream/linux-5.10/include/linux/spi/spi.h
@@ -0,0 +1,1581 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Copyright (C) 2005 David Brownell
+ */
+
+#ifndef __LINUX_SPI_H
+#define __LINUX_SPI_H
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/completion.h>
+#include <linux/scatterlist.h>
+#include <linux/gpio/consumer.h>
+#include <linux/ptp_clock_kernel.h>
+
+struct dma_chan;
+struct property_entry;
+struct spi_controller;
+struct spi_transfer;
+struct spi_controller_mem_ops;
+
+/*
+ * INTERFACES between SPI master-side drivers and SPI slave protocol handlers,
+ * and SPI infrastructure.
+ */
+extern struct bus_type spi_bus_type;
+
+/**
+ * struct spi_statistics - statistics for spi transfers
+ * @lock: lock protecting this structure
+ *
+ * @messages: number of spi-messages handled
+ * @transfers: number of spi_transfers handled
+ * @errors: number of errors during spi_transfer
+ * @timedout: number of timeouts during spi_transfer
+ *
+ * @spi_sync: number of times spi_sync is used
+ * @spi_sync_immediate:
+ * number of times spi_sync is executed immediately
+ * in calling context without queuing and scheduling
+ * @spi_async: number of times spi_async is used
+ *
+ * @bytes: number of bytes transferred to/from device
+ * @bytes_tx: number of bytes sent to device
+ * @bytes_rx: number of bytes received from device
+ *
+ * @transfer_bytes_histo:
+ * transfer bytes histogramm
+ *
+ * @transfers_split_maxsize:
+ * number of transfers that have been split because of
+ * maxsize limit
+ */
+struct spi_statistics {
+	spinlock_t		lock; /* lock for the whole structure */
+
+	unsigned long		messages;
+	unsigned long		transfers;
+	unsigned long		errors;
+	unsigned long		timedout;
+
+	unsigned long		spi_sync;
+	unsigned long		spi_sync_immediate;
+	unsigned long		spi_async;
+
+	unsigned long long	bytes;
+	unsigned long long	bytes_rx;
+	unsigned long long	bytes_tx;
+
+	/* 17 buckets; bucket assignment is done by
+	 * spi_statistics_add_transfer_stats() in spi.c. */
+#define SPI_STATISTICS_HISTO_SIZE 17
+	unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE];
+
+	unsigned long transfers_split_maxsize;
+};
+
+/* Fold one transfer's byte counts into @stats (implemented in spi.c). */
+void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
+				       struct spi_transfer *xfer,
+				       struct spi_controller *ctlr);
+
+/* Add @count to one statistics field under the stats lock.
+ * NOTE: @count is evaluated once but not parenthesized; callers pass
+ * simple expressions. */
+#define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count)	\
+	do {							\
+		unsigned long flags;				\
+		spin_lock_irqsave(&(stats)->lock, flags);	\
+		(stats)->field += count;			\
+		spin_unlock_irqrestore(&(stats)->lock, flags);	\
+	} while (0)
+
+/* Convenience wrapper: bump a counter field by one. */
+#define SPI_STATISTICS_INCREMENT_FIELD(stats, field)	\
+	SPI_STATISTICS_ADD_TO_FIELD(stats, field, 1)
+
+/**
+ * struct spi_delay - SPI delay information
+ * @value: Value for the delay
+ * @unit: Unit for the delay
+ */
+struct spi_delay {
+#define SPI_DELAY_UNIT_USECS	0
+#define SPI_DELAY_UNIT_NSECS	1
+#define SPI_DELAY_UNIT_SCK	2	/* delay measured in SCK clock cycles */
+	u16	value;
+	u8	unit;
+};
+
+/* Convert a delay to nanoseconds / execute it (implemented in spi.c). */
+extern int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer);
+extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer);
+
+/**
+ * struct spi_device - Controller side proxy for an SPI slave device
+ * @dev: Driver model representation of the device.
+ * @controller: SPI controller used with the device.
+ * @master: Copy of controller, for backwards compatibility.
+ * @max_speed_hz: Maximum clock rate to be used with this chip
+ * (on this board); may be changed by the device's driver.
+ * The spi_transfer.speed_hz can override this for each transfer.
+ * @chip_select: Chipselect, distinguishing chips handled by @controller.
+ * @mode: The spi mode defines how data is clocked out and in.
+ * This may be changed by the device's driver.
+ * The "active low" default for chipselect mode can be overridden
+ * (by specifying SPI_CS_HIGH) as can the "MSB first" default for
+ * each word in a transfer (by specifying SPI_LSB_FIRST).
+ * @bits_per_word: Data transfers involve one or more words; word sizes
+ * like eight or 12 bits are common. In-memory wordsizes are
+ * powers of two bytes (e.g. 20 bit samples use 32 bits).
+ * This may be changed by the device's driver, or left at the
+ * default (0) indicating protocol words are eight bit bytes.
+ * The spi_transfer.bits_per_word can override this for each transfer.
+ * @rt: Make the pump thread real time priority.
+ * @irq: Negative, or the number passed to request_irq() to receive
+ * interrupts from this device.
+ * @controller_state: Controller's runtime state
+ * @controller_data: Board-specific definitions for controller, such as
+ * FIFO initialization parameters; from board_info.controller_data
+ * @modalias: Name of the driver to use with this device, or an alias
+ * for that name. This appears in the sysfs "modalias" attribute
+ * for driver coldplugging, and in uevents used for hotplugging
+ * @driver_override: If the name of a driver is written to this attribute, then
+ * the device will bind to the named driver and only the named driver.
+ * @cs_gpio: LEGACY: gpio number of the chipselect line (optional, -ENOENT when
+ * not using a GPIO line) use cs_gpiod in new drivers by opting in on
+ * the spi_master.
+ * @cs_gpiod: gpio descriptor of the chipselect line (optional, NULL when
+ * not using a GPIO line)
+ * @word_delay: delay to be inserted between consecutive
+ * words of a transfer
+ *
+ * @statistics: statistics for the spi_device
+ *
+ * A @spi_device is used to interchange data between an SPI slave
+ * (usually a discrete chip) and CPU memory.
+ *
+ * In @dev, the platform_data is used to hold information about this
+ * device that's meaningful to the device's protocol driver, but not
+ * to its controller. One example might be an identifier for a chip
+ * variant with slightly different functionality; another might be
+ * information about how this particular board wires the chip's pins.
+ */
+struct spi_device {
+	struct device		dev;
+	struct spi_controller	*controller;
+	struct spi_controller	*master;	/* compatibility layer */
+	u32			max_speed_hz;
+	u8			chip_select;
+	u8			bits_per_word;
+	bool			rt;
+	u32			mode;
+#define	SPI_CPHA	0x01			/* clock phase */
+#define	SPI_CPOL	0x02			/* clock polarity */
+#define	SPI_MODE_0	(0|0)			/* (original MicroWire) */
+#define	SPI_MODE_1	(0|SPI_CPHA)
+#define	SPI_MODE_2	(SPI_CPOL|0)
+#define	SPI_MODE_3	(SPI_CPOL|SPI_CPHA)
+#define	SPI_CS_HIGH	0x04			/* chipselect active high? */
+#define	SPI_LSB_FIRST	0x08			/* per-word bits-on-wire */
+#define	SPI_3WIRE	0x10			/* SI/SO signals shared */
+#define	SPI_LOOP	0x20			/* loopback mode */
+#define	SPI_NO_CS	0x40			/* 1 dev/bus, no chipselect */
+#define	SPI_READY	0x80			/* slave pulls low to pause */
+#define	SPI_TX_DUAL	0x100			/* transmit with 2 wires */
+#define	SPI_TX_QUAD	0x200			/* transmit with 4 wires */
+#define	SPI_RX_DUAL	0x400			/* receive with 2 wires */
+#define	SPI_RX_QUAD	0x800			/* receive with 4 wires */
+#define	SPI_CS_WORD	0x1000			/* toggle cs after each word */
+#define	SPI_TX_OCTAL	0x2000			/* transmit with 8 wires */
+#define	SPI_RX_OCTAL	0x4000			/* receive with 8 wires */
+#define	SPI_3WIRE_HIZ	0x8000			/* high impedance turnaround */
+	int			irq;
+	void			*controller_state;
+	void			*controller_data;
+	char			modalias[SPI_NAME_SIZE];
+	const char		*driver_override;
+	int			cs_gpio;	/* LEGACY: chip select gpio */
+	struct gpio_desc	*cs_gpiod;	/* chip select gpio desc */
+	struct spi_delay	word_delay; /* inter-word delay */
+
+	/* the statistics */
+	struct spi_statistics	statistics;
+
+	/*
+	 * likely need more hooks for more protocol options affecting how
+	 * the controller talks to each chip, like:
+	 *  - memory packing (12 bit samples into low bits, others zeroed)
+	 *  - priority
+	 *  - chipselect delays
+	 *  - ...
+	 */
+	/* NOTE(review): the fields below are vendor (ZXIC) additions, not
+	 * upstream; their exact semantics live in the zx29 SPI controller
+	 * driver — confirm there before relying on them. */
+	u16	error;
+	u8	dma_used;
+	u8	trans_gaped;
+	u8	trans_gap_num;
+	/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme start */
+	u32 rd_pos;		/* reader position in the kernel RX buffer */
+	u32 recv_pos;		/* writer (receive) position */
+	u8 * rx_buf;
+	u8 * cyc_buf;		/* cyclic DMA buffer */
+	u8 cyc_index;
+	dma_addr_t rx_dma;
+	wait_queue_head_t rd_wait;	/* readers sleep here until recv_done */
+	int recv_done;
+	bool is_rd_waiting;
+	/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme end */
+};
+
+/* Upcast a struct device embedded in a spi_device; NULL-safe. */
+static inline struct spi_device *to_spi_device(struct device *dev)
+{
+	if (!dev)
+		return NULL;
+	return container_of(dev, struct spi_device, dev);
+}
+
+/* most drivers won't need to care about device refcounting */
+/* Take a reference on the device; returns @spi, or NULL if @spi is NULL
+ * or the reference could not be taken. */
+static inline struct spi_device *spi_dev_get(struct spi_device *spi)
+{
+	if (!spi)
+		return NULL;
+	if (!get_device(&spi->dev))
+		return NULL;
+	return spi;
+}
+
+/* Drop a reference taken with spi_dev_get(); NULL-safe. */
+static inline void spi_dev_put(struct spi_device *spi)
+{
+	if (!spi)
+		return;
+	put_device(&spi->dev);
+}
+
+/* ctldata is for the bus_controller driver's runtime state */
+/* Fetch the controller driver's per-device runtime state. */
+static inline void *spi_get_ctldata(struct spi_device *spi)
+{
+	void *state = spi->controller_state;
+
+	return state;
+}
+
+/* Install the controller driver's per-device runtime state. */
+static inline void spi_set_ctldata(struct spi_device *spi, void *state)
+{
+	spi->controller_state = state;
+}
+
+/* device driver data */
+
+/* Protocol-driver private data: thin wrapper over dev_set_drvdata(). */
+static inline void spi_set_drvdata(struct spi_device *spi, void *data)
+{
+	struct device *d = &spi->dev;
+
+	dev_set_drvdata(d, data);
+}
+
/* Retrieve protocol-driver private data set by spi_set_drvdata(). */
static inline void *spi_get_drvdata(struct spi_device *spi)
{
	return dev_get_drvdata(&spi->dev);
}
+
+struct spi_message;
+struct spi_transfer;
+
/**
 * struct spi_driver - Host side "protocol" driver
 * @id_table: List of SPI devices supported by this driver
 * @probe: Binds this driver to the spi device. Drivers can verify
 *	that the device is actually present, and may need to configure
 *	characteristics (such as bits_per_word) which weren't needed for
 *	the initial configuration done during system setup.
 * @remove: Unbinds this driver from the spi device
 * @shutdown: Standard shutdown callback used during system state
 *	transitions such as powerdown/halt and kexec
 * @driver: SPI device drivers should initialize the name and owner
 *	field of this structure.
 *
 * This represents the kind of device driver that uses SPI messages to
 * interact with the hardware at the other end of a SPI link. It's called
 * a "protocol" driver because it works through messages rather than talking
 * directly to SPI hardware (which is what the underlying SPI controller
 * driver does to pass those messages). These protocols are defined in the
 * specification for the device(s) supported by the driver.
 *
 * As a rule, those device protocols represent the lowest level interface
 * supported by a driver, and it will support upper level interfaces too.
 * Examples of such upper levels include frameworks like MTD, networking,
 * MMC, RTC, filesystem character device nodes, and hardware monitoring.
 */
struct spi_driver {
	const struct spi_device_id *id_table;	/* devices this driver supports */
	int (*probe)(struct spi_device *spi);	/* bind driver to device */
	int (*remove)(struct spi_device *spi);	/* unbind driver from device */
	void (*shutdown)(struct spi_device *spi);	/* system shutdown/kexec hook */
	struct device_driver driver;		/* embedded generic driver; set .name/.owner */
};
+
+static inline struct spi_driver *to_spi_driver(struct device_driver *drv)
+{
+ return drv ? container_of(drv, struct spi_driver, driver) : NULL;
+}
+
+extern int __spi_register_driver(struct module *owner, struct spi_driver *sdrv);
+
+/**
+ * spi_unregister_driver - reverse effect of spi_register_driver
+ * @sdrv: the driver to unregister
+ * Context: can sleep
+ */
+static inline void spi_unregister_driver(struct spi_driver *sdrv)
+{
+ if (sdrv)
+ driver_unregister(&sdrv->driver);
+}
+
/* use a define to avoid include chaining to get THIS_MODULE */

/**
 * spi_register_driver() - register a SPI protocol driver
 * @driver: the &struct spi_driver to register
 *
 * Expands to __spi_register_driver() with the calling module's
 * THIS_MODULE as owner.
 */
#define spi_register_driver(driver) \
	__spi_register_driver(THIS_MODULE, driver)

/**
 * module_spi_driver() - Helper macro for registering a SPI driver
 * @__spi_driver: spi_driver struct
 *
 * Helper macro for SPI drivers which do not do anything special in module
 * init/exit. This eliminates a lot of boilerplate. Each module may only
 * use this macro once, and calling it replaces module_init() and module_exit()
 */
#define module_spi_driver(__spi_driver) \
	module_driver(__spi_driver, spi_register_driver, \
			spi_unregister_driver)
+
+/**
+ * struct spi_controller - interface to SPI master or slave controller
+ * @dev: device interface to this driver
+ * @list: link with the global spi_controller list
+ * @bus_num: board-specific (and often SOC-specific) identifier for a
+ * given SPI controller.
+ * @num_chipselect: chipselects are used to distinguish individual
+ * SPI slaves, and are numbered from zero to num_chipselects.
+ * each slave has a chipselect signal, but it's common that not
+ * every chipselect is connected to a slave.
+ * @dma_alignment: SPI controller constraint on DMA buffers alignment.
+ * @mode_bits: flags understood by this controller driver
+ * @buswidth_override_bits: flags to override for this controller driver
+ * @bits_per_word_mask: A mask indicating which values of bits_per_word are
+ * supported by the driver. Bit n indicates that a bits_per_word n+1 is
+ * supported. If set, the SPI core will reject any transfer with an
+ * unsupported bits_per_word. If not set, this value is simply ignored,
+ * and it's up to the individual driver to perform any validation.
+ * @min_speed_hz: Lowest supported transfer speed
+ * @max_speed_hz: Highest supported transfer speed
+ * @flags: other constraints relevant to this driver
+ * @slave: indicates that this is an SPI slave controller
+ * @max_transfer_size: function that returns the max transfer size for
+ * a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
+ * @max_message_size: function that returns the max message size for
+ * a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
+ * @io_mutex: mutex for physical bus access
+ * @bus_lock_spinlock: spinlock for SPI bus locking
+ * @bus_lock_mutex: mutex for exclusion of multiple callers
+ * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
+ * @setup: updates the device mode and clocking records used by a
+ * device's SPI controller; protocol code may call this. This
+ * must fail if an unrecognized or unsupported mode is requested.
+ * It's always safe to call this unless transfers are pending on
+ * the device whose settings are being modified.
+ * @set_cs_timing: optional hook for SPI devices to request SPI master
+ * controller for configuring specific CS setup time, hold time and inactive
 * delay in terms of clock counts
+ * @transfer: adds a message to the controller's transfer queue.
+ * @cleanup: frees controller-specific state
+ * @can_dma: determine whether this controller supports DMA
+ * @queued: whether this controller is providing an internal message queue
+ * @kworker: pointer to thread struct for message pump
+ * @pump_messages: work struct for scheduling work to the message pump
 * @queue_lock: spinlock to synchronise access to message queue
+ * @queue: message queue
+ * @idling: the device is entering idle state
+ * @cur_msg: the currently in-flight message
+ * @cur_msg_prepared: spi_prepare_message was called for the currently
+ * in-flight message
+ * @cur_msg_mapped: message has been mapped for DMA
+ * @last_cs_enable: was enable true on the last call to set_cs.
+ * @last_cs_mode_high: was (mode & SPI_CS_HIGH) true on the last call to set_cs.
+ * @xfer_completion: used by core transfer_one_message()
+ * @busy: message pump is busy
+ * @running: message pump is running
+ * @rt: whether this queue is set to run as a realtime task
+ * @auto_runtime_pm: the core should ensure a runtime PM reference is held
+ * while the hardware is prepared, using the parent
+ * device for the spidev
+ * @max_dma_len: Maximum length of a DMA transfer for the device.
+ * @prepare_transfer_hardware: a message will soon arrive from the queue
+ * so the subsystem requests the driver to prepare the transfer hardware
+ * by issuing this call
+ * @transfer_one_message: the subsystem calls the driver to transfer a single
+ * message while queuing transfers that arrive in the meantime. When the
+ * driver is finished with this message, it must call
+ * spi_finalize_current_message() so the subsystem can issue the next
+ * message
+ * @unprepare_transfer_hardware: there are currently no more messages on the
+ * queue so the subsystem notifies the driver that it may relax the
+ * hardware by issuing this call
+ *
+ * @set_cs: set the logic level of the chip select line. May be called
+ * from interrupt context.
+ * @prepare_message: set up the controller to transfer a single message,
+ * for example doing DMA mapping. Called from threaded
+ * context.
+ * @transfer_one: transfer a single spi_transfer.
+ *
+ * - return 0 if the transfer is finished,
+ * - return 1 if the transfer is still in progress. When
+ * the driver is finished with this transfer it must
+ * call spi_finalize_current_transfer() so the subsystem
+ * can issue the next transfer. Note: transfer_one and
+ * transfer_one_message are mutually exclusive; when both
+ * are set, the generic subsystem does not call your
+ * transfer_one callback.
+ * @handle_err: the subsystem calls the driver to handle an error that occurs
+ * in the generic implementation of transfer_one_message().
+ * @mem_ops: optimized/dedicated operations for interactions with SPI memory.
+ * This field is optional and should only be implemented if the
+ * controller has native support for memory like operations.
+ * @unprepare_message: undo any work done by prepare_message().
+ * @slave_abort: abort the ongoing transfer request on an SPI slave controller
+ * @cs_setup: delay to be introduced by the controller after CS is asserted
+ * @cs_hold: delay to be introduced by the controller before CS is deasserted
+ * @cs_inactive: delay to be introduced by the controller after CS is
+ * deasserted. If @cs_change_delay is used from @spi_transfer, then the
+ * two delays will be added up.
+ * @cs_gpios: LEGACY: array of GPIO descs to use as chip select lines; one per
+ * CS number. Any individual value may be -ENOENT for CS lines that
+ * are not GPIOs (driven by the SPI controller itself). Use the cs_gpiods
+ * in new drivers.
+ * @cs_gpiods: Array of GPIO descs to use as chip select lines; one per CS
+ * number. Any individual value may be NULL for CS lines that
+ * are not GPIOs (driven by the SPI controller itself).
+ * @use_gpio_descriptors: Turns on the code in the SPI core to parse and grab
+ * GPIO descriptors rather than using global GPIO numbers grabbed by the
+ * driver. This will fill in @cs_gpiods and @cs_gpios should not be used,
+ * and SPI devices will have the cs_gpiod assigned rather than cs_gpio.
+ * @unused_native_cs: When cs_gpiods is used, spi_register_controller() will
+ * fill in this field with the first unused native CS, to be used by SPI
+ * controller drivers that need to drive a native CS when using GPIO CS.
+ * @max_native_cs: When cs_gpiods is used, and this field is filled in,
+ * spi_register_controller() will validate all native CS (including the
+ * unused native CS) against this value.
+ * @statistics: statistics for the spi_controller
+ * @dma_tx: DMA transmit channel
+ * @dma_rx: DMA receive channel
+ * @dummy_rx: dummy receive buffer for full-duplex devices
+ * @dummy_tx: dummy transmit buffer for full-duplex devices
 * @fw_translate_cs: If the boot firmware uses a different numbering scheme
 *	than what Linux expects, this optional hook can be used to translate
 *	between the two.
+ * @ptp_sts_supported: If the driver sets this to true, it must provide a
+ * time snapshot in @spi_transfer->ptp_sts as close as possible to the
+ * moment in time when @spi_transfer->ptp_sts_word_pre and
+ * @spi_transfer->ptp_sts_word_post were transmitted.
+ * If the driver does not set this, the SPI core takes the snapshot as
+ * close to the driver hand-over as possible.
+ * @irq_flags: Interrupt enable state during PTP system timestamping
+ * @fallback: fallback to pio if dma transfer return failure with
+ * SPI_TRANS_FAIL_NO_START.
+ *
+ * Each SPI controller can communicate with one or more @spi_device
+ * children. These make a small bus, sharing MOSI, MISO and SCK signals
+ * but not chip select signals. Each device may be configured to use a
+ * different clock rate, since those shared signals are ignored unless
+ * the chip is selected.
+ *
+ * The driver for an SPI controller manages access to those devices through
+ * a queue of spi_message transactions, copying data between CPU memory and
+ * an SPI slave device. For each such message it queues, it calls the
+ * message's completion function when the transaction completes.
+ */
struct spi_controller {
	struct device dev;

	struct list_head list;

	/* other than negative (== assign one dynamically), bus_num is fully
	 * board-specific. usually that simplifies to being SOC-specific.
	 * example: one SOC has three SPI controllers, numbered 0..2,
	 * and one board's schematics might show it using SPI-2. software
	 * would normally use bus_num=2 for that controller.
	 */
	s16 bus_num;

	/* chipselects will be integral to many controllers; some others
	 * might use board-specific GPIOs.
	 */
	u16 num_chipselect;

	/* some SPI controllers pose alignment requirements on DMAable
	 * buffers; let protocol drivers know about these requirements.
	 */
	u16 dma_alignment;

	/* spi_device.mode flags understood by this controller driver */
	u32 mode_bits;

	/* spi_device.mode flags override flags for this controller */
	u32 buswidth_override_bits;

	/* bitmask of supported bits_per_word for transfers */
	u32 bits_per_word_mask;
#define SPI_BPW_MASK(bits) BIT((bits) - 1)
#define SPI_BPW_RANGE_MASK(min, max) GENMASK((max) - 1, (min) - 1)

	/* limits on transfer speed */
	u32 min_speed_hz;
	u32 max_speed_hz;

	/* other constraints relevant to this driver */
	u16 flags;
#define SPI_CONTROLLER_HALF_DUPLEX BIT(0) /* can't do full duplex */
#define SPI_CONTROLLER_NO_RX BIT(1) /* can't do buffer read */
#define SPI_CONTROLLER_NO_TX BIT(2) /* can't do buffer write */
#define SPI_CONTROLLER_MUST_RX BIT(3) /* requires rx */
#define SPI_CONTROLLER_MUST_TX BIT(4) /* requires tx */

#define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */

	/* flag indicating this is a non-devres managed controller */
	bool devm_allocated;

	/* flag indicating this is an SPI slave controller */
	bool slave;

	/*
	 * on some hardware transfer / message size may be constrained
	 * the limit may depend on device transfer settings
	 */
	size_t (*max_transfer_size)(struct spi_device *spi);
	size_t (*max_message_size)(struct spi_device *spi);

	/* I/O mutex */
	struct mutex io_mutex;

	/* lock and mutex for SPI bus locking */
	spinlock_t bus_lock_spinlock;
	struct mutex bus_lock_mutex;

	/* flag indicating that the SPI bus is locked for exclusive use */
	bool bus_lock_flag;

	/* Setup mode and clock, etc (spi driver may call many times).
	 *
	 * IMPORTANT: this may be called when transfers to another
	 * device are active. DO NOT UPDATE SHARED REGISTERS in ways
	 * which could break those transfers.
	 */
	int (*setup)(struct spi_device *spi);

	/*
	 * set_cs_timing() method is for SPI controllers that support
	 * configuring CS timing.
	 *
	 * This hook allows SPI client drivers to request SPI controllers
	 * to configure specific CS timing through spi_set_cs_timing() after
	 * spi_setup().
	 */
	int (*set_cs_timing)(struct spi_device *spi, struct spi_delay *setup,
			     struct spi_delay *hold, struct spi_delay *inactive);

	/* bidirectional bulk transfers
	 *
	 * + The transfer() method may not sleep; its main role is
	 *   just to add the message to the queue.
	 * + For now there's no remove-from-queue operation, or
	 *   any other request management
	 * + To a given spi_device, message queueing is pure fifo
	 *
	 * + The controller's main job is to process its message queue,
	 *   selecting a chip (for masters), then transferring data
	 * + If there are multiple spi_device children, the i/o queue
	 *   arbitration algorithm is unspecified (round robin, fifo,
	 *   priority, reservations, preemption, etc)
	 *
	 * + Chipselect stays active during the entire message
	 *   (unless modified by spi_transfer.cs_change != 0).
	 * + The message transfers use clock and SPI mode parameters
	 *   previously established by setup() for this device
	 */
	int (*transfer)(struct spi_device *spi,
			struct spi_message *mesg);

	/* called on release() to free memory provided by spi_controller */
	void (*cleanup)(struct spi_device *spi);

	/*
	 * Used to enable core support for DMA handling, if can_dma()
	 * exists and returns true then the transfer will be mapped
	 * prior to transfer_one() being called. The driver should
	 * not modify or store xfer and dma_tx and dma_rx must be set
	 * while the device is prepared.
	 */
	bool (*can_dma)(struct spi_controller *ctlr,
			struct spi_device *spi,
			struct spi_transfer *xfer);

	/*
	 * These hooks are for drivers that want to use the generic
	 * controller transfer queueing mechanism. If these are used, the
	 * transfer() function above must NOT be specified by the driver.
	 * Over time we expect SPI drivers to be phased over to this API.
	 */
	bool queued;
	struct kthread_worker *kworker;
	struct kthread_work pump_messages;
	spinlock_t queue_lock;
	struct list_head queue;
	struct spi_message *cur_msg;
	bool idling;
	bool busy;
	bool running;
	bool rt;
	bool auto_runtime_pm;
	bool cur_msg_prepared;
	bool cur_msg_mapped;
	bool last_cs_enable;
	bool last_cs_mode_high;
	bool fallback;
	struct completion xfer_completion;
	size_t max_dma_len;

	int (*prepare_transfer_hardware)(struct spi_controller *ctlr);
	int (*transfer_one_message)(struct spi_controller *ctlr,
				    struct spi_message *mesg);
	int (*unprepare_transfer_hardware)(struct spi_controller *ctlr);
	int (*prepare_message)(struct spi_controller *ctlr,
			       struct spi_message *message);
	int (*unprepare_message)(struct spi_controller *ctlr,
				 struct spi_message *message);
	int (*slave_abort)(struct spi_controller *ctlr);

	/*
	 * These hooks are for drivers that use a generic implementation
	 * of transfer_one_message() provided by the core.
	 */
	void (*set_cs)(struct spi_device *spi, bool enable);
	int (*transfer_one)(struct spi_controller *ctlr, struct spi_device *spi,
			    struct spi_transfer *transfer);
	void (*handle_err)(struct spi_controller *ctlr,
			   struct spi_message *message);

	/* Optimized handlers for SPI memory-like operations. */
	const struct spi_controller_mem_ops *mem_ops;

	/* CS delays */
	struct spi_delay cs_setup;
	struct spi_delay cs_hold;
	struct spi_delay cs_inactive;

	/* gpio chip select */
	int *cs_gpios;
	struct gpio_desc **cs_gpiods;
	bool use_gpio_descriptors;
	s8 unused_native_cs;
	s8 max_native_cs;

	/* statistics */
	struct spi_statistics statistics;

	/* DMA channels for use with core dmaengine helpers */
	struct dma_chan *dma_tx;
	struct dma_chan *dma_rx;

	/* dummy data for full duplex devices */
	void *dummy_rx;
	void *dummy_tx;

	int (*fw_translate_cs)(struct spi_controller *ctlr, unsigned cs);

	/*
	 * Driver sets this field to indicate it is able to snapshot SPI
	 * transfers (needed e.g. for reading the time of POSIX clocks)
	 */
	bool ptp_sts_supported;

	/* Interrupt enable state during PTP system timestamping */
	unsigned long irq_flags;

	/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme start */
	/* NOTE(review): vendor extension, not upstream ABI — presumably start/stop
	 * the slave-read kernel-buffer path added for T106BUG-641; confirm with
	 * the controller driver implementing these hooks. */
	int (*spi_slave_rd_start)(struct spi_device *spi);
	int (*spi_slave_rd_stop)(struct spi_device *spi);
	/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme end */
};
+
/* Return driver-private data attached to the controller's device. */
static inline void *spi_controller_get_devdata(struct spi_controller *ctlr)
{
	return dev_get_drvdata(&ctlr->dev);
}
+
/* Attach driver-private data to the controller's device. */
static inline void spi_controller_set_devdata(struct spi_controller *ctlr,
					      void *data)
{
	dev_set_drvdata(&ctlr->dev, data);
}
+
+static inline struct spi_controller *spi_controller_get(struct spi_controller *ctlr)
+{
+ if (!ctlr || !get_device(&ctlr->dev))
+ return NULL;
+ return ctlr;
+}
+
+static inline void spi_controller_put(struct spi_controller *ctlr)
+{
+ if (ctlr)
+ put_device(&ctlr->dev);
+}
+
/*
 * True when @ctlr was registered as an SPI slave controller; compile-time
 * false when CONFIG_SPI_SLAVE is disabled, so slave-only code paths can
 * be optimized away.
 */
static inline bool spi_controller_is_slave(struct spi_controller *ctlr)
{
	return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->slave;
}
+
+/* PM calls that need to be issued by the driver */
+extern int spi_controller_suspend(struct spi_controller *ctlr);
+extern int spi_controller_resume(struct spi_controller *ctlr);
+
+/* Calls the driver make to interact with the message queue */
+extern struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr);
+extern void spi_finalize_current_message(struct spi_controller *ctlr);
+extern void spi_finalize_current_transfer(struct spi_controller *ctlr);
+
+/* Helper calls for driver to timestamp transfer */
+void spi_take_timestamp_pre(struct spi_controller *ctlr,
+ struct spi_transfer *xfer,
+ size_t progress, bool irqs_off);
+void spi_take_timestamp_post(struct spi_controller *ctlr,
+ struct spi_transfer *xfer,
+ size_t progress, bool irqs_off);
+
+/* the spi driver core manages memory for the spi_controller classdev */
+extern struct spi_controller *__spi_alloc_controller(struct device *host,
+ unsigned int size, bool slave);
+
/*
 * Allocate an SPI master (host) controller with @size bytes of
 * driver-private data; thin wrapper around __spi_alloc_controller().
 */
static inline struct spi_controller *spi_alloc_master(struct device *host,
						unsigned int size)
{
	return __spi_alloc_controller(host, size, false);
}
+
+static inline struct spi_controller *spi_alloc_slave(struct device *host,
+ unsigned int size)
+{
+ if (!IS_ENABLED(CONFIG_SPI_SLAVE))
+ return NULL;
+
+ return __spi_alloc_controller(host, size, true);
+}
+
+struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
+ unsigned int size,
+ bool slave);
+
/* devm-managed variant of spi_alloc_master(); lifetime tied to @dev. */
static inline struct spi_controller *devm_spi_alloc_master(struct device *dev,
							   unsigned int size)
{
	return __devm_spi_alloc_controller(dev, size, false);
}
+
+static inline struct spi_controller *devm_spi_alloc_slave(struct device *dev,
+ unsigned int size)
+{
+ if (!IS_ENABLED(CONFIG_SPI_SLAVE))
+ return NULL;
+
+ return __devm_spi_alloc_controller(dev, size, true);
+}
+
+extern int spi_register_controller(struct spi_controller *ctlr);
+extern int devm_spi_register_controller(struct device *dev,
+ struct spi_controller *ctlr);
+extern void spi_unregister_controller(struct spi_controller *ctlr);
+
+extern struct spi_controller *spi_busnum_to_master(u16 busnum);
+
+/*
+ * SPI resource management while processing a SPI message
+ */
+
/* Release callback invoked for each spi_res when its message is processed. */
typedef void (*spi_res_release_t)(struct spi_controller *ctlr,
				  struct spi_message *msg,
				  void *res);

/**
 * struct spi_res - spi resource management structure
 * @entry: list entry
 * @release: release code called prior to freeing this resource
 * @data: extra data allocated for the specific use-case
 *
 * this is based on ideas from devres, but focused on life-cycle
 * management during spi_message processing
 */
struct spi_res {
	struct list_head entry;
	spi_res_release_t release;
	unsigned long long data[]; /* guarantee ull alignment */
};
+
+extern void *spi_res_alloc(struct spi_device *spi,
+ spi_res_release_t release,
+ size_t size, gfp_t gfp);
+extern void spi_res_add(struct spi_message *message, void *res);
+extern void spi_res_free(void *res);
+
+extern void spi_res_release(struct spi_controller *ctlr,
+ struct spi_message *message);
+
+/*---------------------------------------------------------------------------*/
+
+/*
+ * I/O INTERFACE between SPI controller and protocol drivers
+ *
+ * Protocol drivers use a queue of spi_messages, each transferring data
+ * between the controller and memory buffers.
+ *
+ * The spi_messages themselves consist of a series of read+write transfer
+ * segments. Those segments always read the same number of bits as they
+ * write; but one or the other is easily ignored by passing a null buffer
+ * pointer. (This is unlike most types of I/O API, because SPI hardware
+ * is full duplex.)
+ *
+ * NOTE: Allocation of spi_transfer and spi_message memory is entirely
+ * up to the protocol driver, which guarantees the integrity of both (as
+ * well as the data buffers) for as long as the message is queued.
+ */
+
+/**
+ * struct spi_transfer - a read/write buffer pair
+ * @tx_buf: data to be written (dma-safe memory), or NULL
+ * @rx_buf: data to be read (dma-safe memory), or NULL
+ * @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped
+ * @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped
+ * @tx_nbits: number of bits used for writing. If 0 the default
+ * (SPI_NBITS_SINGLE) is used.
+ * @rx_nbits: number of bits used for reading. If 0 the default
+ * (SPI_NBITS_SINGLE) is used.
+ * @len: size of rx and tx buffers (in bytes)
+ * @speed_hz: Select a speed other than the device default for this
+ * transfer. If 0 the default (from @spi_device) is used.
+ * @bits_per_word: select a bits_per_word other than the device default
+ * for this transfer. If 0 the default (from @spi_device) is used.
+ * @cs_change: affects chipselect after this transfer completes
+ * @cs_change_delay: delay between cs deassert and assert when
+ * @cs_change is set and @spi_transfer is not the last in @spi_message
+ * @delay: delay to be introduced after this transfer before
+ * (optionally) changing the chipselect status, then starting
+ * the next transfer or completing this @spi_message.
+ * @delay_usecs: microseconds to delay after this transfer before
+ * (optionally) changing the chipselect status, then starting
+ * the next transfer or completing this @spi_message.
+ * @word_delay: inter word delay to be introduced after each word size
+ * (set by bits_per_word) transmission.
+ * @effective_speed_hz: the effective SCK-speed that was used to
+ * transfer this transfer. Set to 0 if the spi bus driver does
+ * not support it.
+ * @transfer_list: transfers are sequenced through @spi_message.transfers
+ * @tx_sg: Scatterlist for transmit, currently not for client use
+ * @rx_sg: Scatterlist for receive, currently not for client use
+ * @ptp_sts_word_pre: The word (subject to bits_per_word semantics) offset
+ * within @tx_buf for which the SPI device is requesting that the time
+ * snapshot for this transfer begins. Upon completing the SPI transfer,
+ * this value may have changed compared to what was requested, depending
+ * on the available snapshotting resolution (DMA transfer,
+ * @ptp_sts_supported is false, etc).
 * @ptp_sts_word_post: See @ptp_sts_word_pre. The two can be equal (meaning
 *	that a single byte should be snapshotted).
+ * If the core takes care of the timestamp (if @ptp_sts_supported is false
+ * for this controller), it will set @ptp_sts_word_pre to 0, and
+ * @ptp_sts_word_post to the length of the transfer. This is done
+ * purposefully (instead of setting to spi_transfer->len - 1) to denote
+ * that a transfer-level snapshot taken from within the driver may still
+ * be of higher quality.
+ * @ptp_sts: Pointer to a memory location held by the SPI slave device where a
+ * PTP system timestamp structure may lie. If drivers use PIO or their
+ * hardware has some sort of assist for retrieving exact transfer timing,
+ * they can (and should) assert @ptp_sts_supported and populate this
+ * structure using the ptp_read_system_*ts helper functions.
+ * The timestamp must represent the time at which the SPI slave device has
+ * processed the word, i.e. the "pre" timestamp should be taken before
+ * transmitting the "pre" word, and the "post" timestamp after receiving
+ * transmit confirmation from the controller for the "post" word.
+ * @timestamped: true if the transfer has been timestamped
+ * @error: Error status logged by spi controller driver.
+ *
+ * SPI transfers always write the same number of bytes as they read.
+ * Protocol drivers should always provide @rx_buf and/or @tx_buf.
+ * In some cases, they may also want to provide DMA addresses for
+ * the data being transferred; that may reduce overhead, when the
+ * underlying driver uses dma.
+ *
+ * If the transmit buffer is null, zeroes will be shifted out
+ * while filling @rx_buf. If the receive buffer is null, the data
+ * shifted in will be discarded. Only "len" bytes shift out (or in).
+ * It's an error to try to shift out a partial word. (For example, by
+ * shifting out three bytes with word size of sixteen or twenty bits;
+ * the former uses two bytes per word, the latter uses four bytes.)
+ *
+ * In-memory data values are always in native CPU byte order, translated
+ * from the wire byte order (big-endian except with SPI_LSB_FIRST). So
+ * for example when bits_per_word is sixteen, buffers are 2N bytes long
+ * (@len = 2N) and hold N sixteen bit words in CPU byte order.
+ *
+ * When the word size of the SPI transfer is not a power-of-two multiple
+ * of eight bits, those in-memory words include extra bits. In-memory
+ * words are always seen by protocol drivers as right-justified, so the
+ * undefined (rx) or unused (tx) bits are always the most significant bits.
+ *
+ * All SPI transfers start with the relevant chipselect active. Normally
+ * it stays selected until after the last transfer in a message. Drivers
+ * can affect the chipselect signal using cs_change.
+ *
+ * (i) If the transfer isn't the last one in the message, this flag is
+ * used to make the chipselect briefly go inactive in the middle of the
+ * message. Toggling chipselect in this way may be needed to terminate
 * a chip command, letting a single spi_message perform all of a group of
+ * chip transactions together.
+ *
+ * (ii) When the transfer is the last one in the message, the chip may
+ * stay selected until the next transfer. On multi-device SPI busses
+ * with nothing blocking messages going to other devices, this is just
+ * a performance hint; starting a message to another device deselects
+ * this one. But in other cases, this can be used to ensure correctness.
+ * Some devices need protocol transactions to be built from a series of
+ * spi_message submissions, where the content of one message is determined
+ * by the results of previous messages and where the whole transaction
 * ends when the chipselect goes inactive.
+ *
+ * When SPI can transfer in 1x,2x or 4x. It can get this transfer information
+ * from device through @tx_nbits and @rx_nbits. In Bi-direction, these
+ * two should both be set. User can set transfer mode with SPI_NBITS_SINGLE(1x)
+ * SPI_NBITS_DUAL(2x) and SPI_NBITS_QUAD(4x) to support these three transfer.
+ *
+ * The code that submits an spi_message (and its spi_transfers)
+ * to the lower layers is responsible for managing its memory.
+ * Zero-initialize every field you don't set up explicitly, to
+ * insulate against future API updates. After you submit a message
+ * and its transfers, ignore them until its completion callback.
+ */
struct spi_transfer {
	/* it's ok if tx_buf == rx_buf (right?)
	 * for MicroWire, one buffer must be null
	 * buffers must work with dma_*map_single() calls, unless
	 * spi_message.is_dma_mapped reports a pre-existing mapping
	 */
	const void *tx_buf;
	void *rx_buf;
	unsigned len;		/* size of rx and tx buffers, in bytes */

	/* DMA addresses, only valid if spi_message.is_dma_mapped */
	dma_addr_t tx_dma;
	dma_addr_t rx_dma;
	/* scatterlists for core use; not for client drivers */
	struct sg_table tx_sg;
	struct sg_table rx_sg;

	unsigned cs_change:1;	/* toggle chipselect after this transfer */
	unsigned tx_nbits:3;	/* 0 means SPI_NBITS_SINGLE */
	unsigned rx_nbits:3;	/* 0 means SPI_NBITS_SINGLE */
#define SPI_NBITS_SINGLE 0x01 /* 1bit transfer */
#define SPI_NBITS_DUAL 0x02 /* 2bits transfer */
#define SPI_NBITS_QUAD 0x04 /* 4bits transfer */
	u8 bits_per_word;	/* 0 means use spi_device default */
	u16 delay_usecs;
	struct spi_delay delay;
	struct spi_delay cs_change_delay;
	struct spi_delay word_delay;
	u32 speed_hz;		/* 0 means use spi_device default */

	u32 effective_speed_hz;	/* set by the bus driver; 0 if unsupported */

	unsigned int ptp_sts_word_pre;
	unsigned int ptp_sts_word_post;

	struct ptp_system_timestamp *ptp_sts;

	bool timestamped;

	struct list_head transfer_list;	/* chained on spi_message.transfers */

#define SPI_TRANS_FAIL_NO_START BIT(0)
	u16 error;		/* error status logged by the controller driver */
};
+
+/**
+ * struct spi_message - one multi-segment SPI transaction
+ * @transfers: list of transfer segments in this transaction
+ * @spi: SPI device to which the transaction is queued
+ * @is_dma_mapped: if true, the caller provided both dma and cpu virtual
+ * addresses for each transfer buffer
+ * @complete: called to report transaction completions
+ * @context: the argument to complete() when it's called
+ * @frame_length: the total number of bytes in the message
+ * @actual_length: the total number of bytes that were transferred in all
+ * successful segments
+ * @status: zero for success, else negative errno
+ * @queue: for use by whichever driver currently owns the message
+ * @state: for use by whichever driver currently owns the message
+ * @resources: for resource management when the spi message is processed
+ *
+ * A @spi_message is used to execute an atomic sequence of data transfers,
+ * each represented by a struct spi_transfer. The sequence is "atomic"
+ * in the sense that no other spi_message may use that SPI bus until that
+ * sequence completes. On some systems, many such sequences can execute as
+ * a single programmed DMA transfer. On all systems, these messages are
+ * queued, and might complete after transactions to other devices. Messages
+ * sent to a given spi_device are always executed in FIFO order.
+ *
+ * The code that submits an spi_message (and its spi_transfers)
+ * to the lower layers is responsible for managing its memory.
+ * Zero-initialize every field you don't set up explicitly, to
+ * insulate against future API updates. After you submit a message
+ * and its transfers, ignore them until its completion callback.
+ */
+struct spi_message {
+ struct list_head transfers;
+
+ struct spi_device *spi;
+
+ unsigned is_dma_mapped:1;
+
+ /* REVISIT: we might want a flag affecting the behavior of the
+ * last transfer ... allowing things like "read 16 bit length L"
+ * immediately followed by "read L bytes". Basically imposing
+ * a specific message scheduling algorithm.
+ *
+ * Some controller drivers (message-at-a-time queue processing)
+ * could provide that as their default scheduling algorithm. But
+ * others (with multi-message pipelines) could need a flag to
+ * tell them about such special cases.
+ */
+
+ /* completion is reported through a callback */
+ void (*complete)(void *context);
+ void *context;
+ unsigned frame_length;
+ unsigned actual_length;
+ int status;
+
+ /* for optional use by whatever driver currently owns the
+ * spi_message ... between calls to spi_async and then later
+ * complete(), that's the spi_controller controller driver.
+ */
+ struct list_head queue;
+ void *state;
+
+ /* list of spi_res resources when the spi message is processed */
+ struct list_head resources;
+};
+
+/* Initialize only the list heads of @m.  Used when the caller has already
+ * zeroed (or deliberately preserved) the rest of the structure, e.g. from
+ * spi_message_init() or spi_message_alloc().
+ */
+static inline void spi_message_init_no_memset(struct spi_message *m)
+{
+ INIT_LIST_HEAD(&m->transfers);
+ INIT_LIST_HEAD(&m->resources);
+}
+
+/* Fully (re)initialize @m: clear every field, then set up its
+ * transfer and resource lists.
+ */
+static inline void spi_message_init(struct spi_message *m)
+{
+ memset(m, 0, sizeof(*m));
+ spi_message_init_no_memset(m);
+}
+
+/* Queue transfer @t as the last segment of message @m.  Transfers are
+ * executed in the order they are added.
+ */
+static inline void
+spi_message_add_tail(struct spi_transfer *t, struct spi_message *m)
+{
+ list_add_tail(&t->transfer_list, &m->transfers);
+}
+
+/* Remove transfer @t from the message it was queued on.  The caller
+ * must ensure the message is not currently being processed.
+ */
+static inline void
+spi_transfer_del(struct spi_transfer *t)
+{
+ list_del(&t->transfer_list);
+}
+
+/* Execute the post-transfer delay of @t.
+ *
+ * The legacy @delay_usecs field takes precedence over the structured
+ * @delay member when non-zero, so old users keep their exact timing;
+ * new users should set @delay instead.
+ */
+static inline int
+spi_transfer_delay_exec(struct spi_transfer *t)
+{
+ struct spi_delay d;
+
+ if (t->delay_usecs) {
+  d.value = t->delay_usecs;
+  d.unit = SPI_DELAY_UNIT_USECS;
+  return spi_delay_exec(&d, NULL);
+ }
+
+ return spi_delay_exec(&t->delay, t);
+}
+
+/**
+ * spi_message_init_with_transfers - Initialize spi_message and append transfers
+ * @m: spi_message to be initialized
+ * @xfers: An array of spi transfers
+ * @num_xfers: Number of items in the xfer array
+ *
+ * This function initializes the given spi_message and adds each spi_transfer in
+ * the given array to the message.
+ */
+static inline void
+spi_message_init_with_transfers(struct spi_message *m,
+struct spi_transfer *xfers, unsigned int num_xfers)
+{
+ unsigned int i;
+
+ spi_message_init(m);
+ for (i = 0; i < num_xfers; ++i)
+ spi_message_add_tail(&xfers[i], m);
+}
+
+/* It's fine to embed message and transaction structures in other data
+ * structures so long as you don't free them while they're in use.
+ */
+
+/*
+ * Allocate a message plus @ntrans trailing transfers in a single
+ * kzalloc() and queue each transfer on the message.
+ *
+ * Return: the message, or NULL on allocation failure or if the size
+ * computation would overflow.
+ */
+static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags)
+{
+ struct spi_message *m;
+
+ /* Guard the size arithmetic against unsigned wrap-around. */
+ if (ntrans > (SIZE_MAX - sizeof(*m)) / sizeof(struct spi_transfer))
+  return NULL;
+
+ m = kzalloc(sizeof(struct spi_message)
+   + ntrans * sizeof(struct spi_transfer),
+   flags);
+ if (m) {
+  unsigned i;
+  struct spi_transfer *t = (struct spi_transfer *)(m + 1);
+
+  spi_message_init_no_memset(m);
+  for (i = 0; i < ntrans; i++, t++)
+   spi_message_add_tail(t, m);
+ }
+ return m;
+}
+
+/* Free a message obtained from spi_message_alloc(); NULL is a no-op,
+ * as with kfree().
+ */
+static inline void spi_message_free(struct spi_message *m)
+{
+ kfree(m);
+}
+
+extern int spi_set_cs_timing(struct spi_device *spi,
+ struct spi_delay *setup,
+ struct spi_delay *hold,
+ struct spi_delay *inactive);
+
+extern int spi_setup(struct spi_device *spi);
+extern int spi_async(struct spi_device *spi, struct spi_message *message);
+extern int spi_async_locked(struct spi_device *spi,
+ struct spi_message *message);
+extern int spi_slave_abort(struct spi_device *spi);
+
+/* Largest message (in bytes) the controller behind @spi accepts;
+ * SIZE_MAX when the controller declares no limit.
+ */
+static inline size_t
+spi_max_message_size(struct spi_device *spi)
+{
+ struct spi_controller *ctlr = spi->controller;
+
+ return ctlr->max_message_size ? ctlr->max_message_size(spi) : SIZE_MAX;
+}
+
+/* Largest single transfer the controller behind @spi supports, clamped
+ * to its overall message size limit.
+ */
+static inline size_t
+spi_max_transfer_size(struct spi_device *spi)
+{
+ struct spi_controller *ctlr = spi->controller;
+ size_t msg_max = spi_max_message_size(spi);
+ size_t tr_max = ctlr->max_transfer_size ? ctlr->max_transfer_size(spi)
+     : SIZE_MAX;
+
+ /* transfer size limit must not be greater than message size limit */
+ return min(tr_max, msg_max);
+}
+
+/**
+ * spi_is_bpw_supported - Check if bits per word is supported
+ * @spi: SPI device
+ * @bpw: Bits per word
+ *
+ * This function checks to see if the SPI controller supports @bpw.
+ *
+ * Returns:
+ * True if @bpw is supported, false otherwise.
+ */
+static inline bool spi_is_bpw_supported(struct spi_device *spi, u32 bpw)
+{
+ u32 bpw_mask = spi->master->bits_per_word_mask;
+
+ if (bpw == 8 || (bpw <= 32 && bpw_mask & SPI_BPW_MASK(bpw)))
+ return true;
+
+ return false;
+}
+
+/*---------------------------------------------------------------------------*/
+
+/* SPI transfer replacement methods which make use of spi_res */
+
+struct spi_replaced_transfers;
+typedef void (*spi_replaced_release_t)(struct spi_controller *ctlr,
+ struct spi_message *msg,
+ struct spi_replaced_transfers *res);
+/**
+ * struct spi_replaced_transfers - structure describing the spi_transfer
+ * replacements that have occurred
+ * so that they can get reverted
+ * @release: some extra release code to get executed prior to
+ * releasing this structure
+ * @extradata: pointer to some extra data if requested or NULL
+ * @replaced_transfers: transfers that have been replaced and which need
+ * to get restored
+ * @replaced_after: the transfer after which the @replaced_transfers
+ * are to get re-inserted
+ * @inserted: number of transfers inserted
+ * @inserted_transfers: array of spi_transfers of array-size @inserted,
+ * that have been replacing replaced_transfers
+ *
+ * note: that @extradata will point to @inserted_transfers[@inserted]
+ * if some extra allocation is requested, so alignment will be the same
+ * as for spi_transfers
+ */
+struct spi_replaced_transfers {
+ spi_replaced_release_t release;
+ void *extradata;
+ struct list_head replaced_transfers;
+ struct list_head *replaced_after;
+ size_t inserted;
+ struct spi_transfer inserted_transfers[];
+};
+
+extern struct spi_replaced_transfers *spi_replace_transfers(
+ struct spi_message *msg,
+ struct spi_transfer *xfer_first,
+ size_t remove,
+ size_t insert,
+ spi_replaced_release_t release,
+ size_t extradatasize,
+ gfp_t gfp);
+
+/*---------------------------------------------------------------------------*/
+
+/* SPI transfer transformation methods */
+
+extern int spi_split_transfers_maxsize(struct spi_controller *ctlr,
+ struct spi_message *msg,
+ size_t maxsize,
+ gfp_t gfp);
+
+/*---------------------------------------------------------------------------*/
+
+/* All these synchronous SPI transfer routines are utilities layered
+ * over the core async transfer primitive. Here, "synchronous" means
+ * they will sleep uninterruptibly until the async transfer completes.
+ */
+
+extern int spi_sync(struct spi_device *spi, struct spi_message *message);
+extern int spi_sync_locked(struct spi_device *spi, struct spi_message *message);
+extern int spi_bus_lock(struct spi_controller *ctlr);
+extern int spi_bus_unlock(struct spi_controller *ctlr);
+
+/**
+ * spi_sync_transfer - synchronous SPI data transfer
+ * @spi: device with which data will be exchanged
+ * @xfers: An array of spi_transfers
+ * @num_xfers: Number of items in the xfer array
+ * Context: can sleep
+ *
+ * Does a synchronous SPI data transfer of the given spi_transfer array.
+ *
+ * For more specific semantics see spi_sync().
+ *
+ * Return: zero on success, else a negative error code.
+ */
+static inline int
+spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers,
+ unsigned int num_xfers)
+{
+ struct spi_message msg;
+
+ spi_message_init_with_transfers(&msg, xfers, num_xfers);
+
+ return spi_sync(spi, &msg);
+}
+
+/**
+ * spi_write - SPI synchronous write
+ * @spi: device to which data will be written
+ * @buf: data buffer
+ * @len: data buffer size
+ * Context: can sleep
+ *
+ * This function writes the buffer @buf.
+ * Callable only from contexts that can sleep.
+ *
+ * Return: zero on success, else a negative error code.
+ */
+static inline int
+spi_write(struct spi_device *spi, const void *buf, size_t len)
+{
+ struct spi_transfer t = {
+ .tx_buf = buf,
+ .len = len,
+ };
+
+ return spi_sync_transfer(spi, &t, 1);
+}
+
+/**
+ * spi_read - SPI synchronous read
+ * @spi: device from which data will be read
+ * @buf: data buffer
+ * @len: data buffer size
+ * Context: can sleep
+ *
+ * This function reads the buffer @buf.
+ * Callable only from contexts that can sleep.
+ *
+ * Return: zero on success, else a negative error code.
+ */
+static inline int
+spi_read(struct spi_device *spi, void *buf, size_t len)
+{
+ struct spi_transfer t = {
+ .rx_buf = buf,
+ .len = len,
+ };
+
+ return spi_sync_transfer(spi, &t, 1);
+}
+
+/* this copies txbuf and rxbuf data; for small transfers only! */
+extern int spi_write_then_read(struct spi_device *spi,
+ const void *txbuf, unsigned n_tx,
+ void *rxbuf, unsigned n_rx);
+
+/**
+ * spi_w8r8 - SPI synchronous 8 bit write followed by 8 bit read
+ * @spi: device with which data will be exchanged
+ * @cmd: command to be written before data is read back
+ * Context: can sleep
+ *
+ * Callable only from contexts that can sleep.
+ *
+ * Return: the (unsigned) eight bit number returned by the
+ * device, or else a negative error code.
+ */
+static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd)
+{
+ ssize_t status;
+ u8 result;
+
+ status = spi_write_then_read(spi, &cmd, 1, &result, 1);
+
+ /* return negative errno or unsigned value */
+ return (status < 0) ? status : result;
+}
+
+/**
+ * spi_w8r16 - SPI synchronous 8 bit write followed by 16 bit read
+ * @spi: device with which data will be exchanged
+ * @cmd: command to be written before data is read back
+ * Context: can sleep
+ *
+ * The number is returned in wire-order, which is at least sometimes
+ * big-endian.
+ *
+ * Callable only from contexts that can sleep.
+ *
+ * Return: the (unsigned) sixteen bit number returned by the
+ * device, or else a negative error code.
+ */
+static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd)
+{
+ ssize_t status;
+ u16 result;
+
+ status = spi_write_then_read(spi, &cmd, 1, &result, 2);
+
+ /* return negative errno or unsigned value */
+ return (status < 0) ? status : result;
+}
+
+/**
+ * spi_w8r16be - SPI synchronous 8 bit write followed by 16 bit big-endian read
+ * @spi: device with which data will be exchanged
+ * @cmd: command to be written before data is read back
+ * Context: can sleep
+ *
+ * This function is similar to spi_w8r16, with the exception that it will
+ * convert the read 16 bit data word from big-endian to native endianness.
+ *
+ * Callable only from contexts that can sleep.
+ *
+ * Return: the (unsigned) sixteen bit number returned by the device in cpu
+ * endianness, or else a negative error code.
+ */
+static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
+
+{
+ ssize_t status;
+ __be16 result;
+
+ status = spi_write_then_read(spi, &cmd, 1, &result, 2);
+ if (status < 0)
+ return status;
+
+ return be16_to_cpu(result);
+}
+
+/*---------------------------------------------------------------------------*/
+
+/*
+ * INTERFACE between board init code and SPI infrastructure.
+ *
+ * No SPI driver ever sees these SPI device table segments, but
+ * it's how the SPI core (or adapters that get hotplugged) grows
+ * the driver model tree.
+ *
+ * As a rule, SPI devices can't be probed. Instead, board init code
+ * provides a table listing the devices which are present, with enough
+ * information to bind and set up the device's driver. There's basic
+ * support for nonstatic configurations too; enough to handle adding
+ * parport adapters, or microcontrollers acting as USB-to-SPI bridges.
+ */
+
+/**
+ * struct spi_board_info - board-specific template for a SPI device
+ * @modalias: Initializes spi_device.modalias; identifies the driver.
+ * @platform_data: Initializes spi_device.platform_data; the particular
+ * data stored there is driver-specific.
+ * @properties: Additional device properties for the device.
+ * @controller_data: Initializes spi_device.controller_data; some
+ * controllers need hints about hardware setup, e.g. for DMA.
+ * @irq: Initializes spi_device.irq; depends on how the board is wired.
+ * @max_speed_hz: Initializes spi_device.max_speed_hz; based on limits
+ * from the chip datasheet and board-specific signal quality issues.
+ * @bus_num: Identifies which spi_controller parents the spi_device; unused
+ * by spi_new_device(), and otherwise depends on board wiring.
+ * @chip_select: Initializes spi_device.chip_select; depends on how
+ * the board is wired.
+ * @mode: Initializes spi_device.mode; based on the chip datasheet, board
+ * wiring (some devices support both 3WIRE and standard modes), and
+ * possibly presence of an inverter in the chipselect path.
+ *
+ * When adding new SPI devices to the device tree, these structures serve
+ * as a partial device template. They hold information which can't always
+ * be determined by drivers. Information that probe() can establish (such
+ * as the default transfer wordsize) is not included here.
+ *
+ * These structures are used in two places. Their primary role is to
+ * be stored in tables of board-specific device descriptors, which are
+ * declared early in board initialization and then used (much later) to
+ * populate a controller's device tree after the that controller's driver
+ * initializes. A secondary (and atypical) role is as a parameter to
+ * spi_new_device() call, which happens after those controller drivers
+ * are active in some dynamic board configuration models.
+ */
+struct spi_board_info {
+ /* the device name and module name are coupled, like platform_bus;
+ * "modalias" is normally the driver name.
+ *
+ * platform_data goes to spi_device.dev.platform_data,
+ * controller_data goes to spi_device.controller_data,
+ * device properties are copied and attached to spi_device,
+ * irq is copied too
+ */
+ char modalias[SPI_NAME_SIZE];
+ const void *platform_data;
+ const struct property_entry *properties;
+ void *controller_data;
+ int irq;
+
+ /* slower signaling on noisy or low voltage boards */
+ u32 max_speed_hz;
+
+
+ /* bus_num is board specific and matches the bus_num of some
+ * spi_controller that will probably be registered later.
+ *
+ * chip_select reflects how this chip is wired to that master;
+ * it's less than num_chipselect.
+ */
+ u16 bus_num;
+ u16 chip_select;
+
+ /* mode becomes spi_device.mode, and is essential for chips
+ * where the default of SPI_CS_HIGH = 0 is wrong.
+ */
+ u32 mode;
+
+ /* ... may need additional spi_device chip config data here.
+ * avoid stuff protocol drivers can set; but include stuff
+ * needed to behave without being bound to a driver:
+ * - quirks like clock rate mattering when not selected
+ */
+};
+
+#ifdef CONFIG_SPI
+extern int
+spi_register_board_info(struct spi_board_info const *info, unsigned n);
+#else
+/* board init code may ignore whether SPI is configured or not */
+static inline int
+spi_register_board_info(struct spi_board_info const *info, unsigned n)
+ { return 0; }
+#endif
+
+/* If you're hotplugging an adapter with devices (parport, usb, etc)
+ * use spi_new_device() to describe each device. You can also call
+ * spi_unregister_device() to start making that device vanish, but
+ * normally that would be handled by spi_unregister_controller().
+ *
+ * You can also use spi_alloc_device() and spi_add_device() to use a two
+ * stage registration sequence for each spi_device. This gives the caller
+ * some more control over the spi_device structure before it is registered,
+ * but requires that caller to initialize fields that would otherwise
+ * be defined using the board info.
+ */
+extern struct spi_device *
+spi_alloc_device(struct spi_controller *ctlr);
+
+extern int
+spi_add_device(struct spi_device *spi);
+
+extern struct spi_device *
+spi_new_device(struct spi_controller *, struct spi_board_info *);
+
+extern void spi_unregister_device(struct spi_device *spi);
+
+extern const struct spi_device_id *
+spi_get_device_id(const struct spi_device *sdev);
+
+/* True iff @xfer is the final transfer of the message currently being
+ * processed by @ctlr.  NOTE(review): dereferences ctlr->cur_msg without
+ * a NULL check — presumably only called while a message is in flight;
+ * confirm at call sites.
+ */
+static inline bool
+spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer)
+{
+ return list_is_last(&xfer->transfer_list, &ctlr->cur_msg->transfers);
+}
+
+/* OF support code */
+#if IS_ENABLED(CONFIG_OF)
+
+/* must call put_device() when done with returned spi_device device */
+extern struct spi_device *
+of_find_spi_device_by_node(struct device_node *node);
+
+#else
+
+static inline struct spi_device *
+of_find_spi_device_by_node(struct device_node *node)
+{
+ return NULL;
+}
+
+#endif /* IS_ENABLED(CONFIG_OF) */
+
+/* Compatibility layer */
+#define spi_master spi_controller
+
+#define SPI_MASTER_HALF_DUPLEX SPI_CONTROLLER_HALF_DUPLEX
+#define SPI_MASTER_NO_RX SPI_CONTROLLER_NO_RX
+#define SPI_MASTER_NO_TX SPI_CONTROLLER_NO_TX
+#define SPI_MASTER_MUST_RX SPI_CONTROLLER_MUST_RX
+#define SPI_MASTER_MUST_TX SPI_CONTROLLER_MUST_TX
+
+#define spi_master_get_devdata(_ctlr) spi_controller_get_devdata(_ctlr)
+#define spi_master_set_devdata(_ctlr, _data) \
+ spi_controller_set_devdata(_ctlr, _data)
+#define spi_master_get(_ctlr) spi_controller_get(_ctlr)
+#define spi_master_put(_ctlr) spi_controller_put(_ctlr)
+#define spi_master_suspend(_ctlr) spi_controller_suspend(_ctlr)
+#define spi_master_resume(_ctlr) spi_controller_resume(_ctlr)
+
+#define spi_register_master(_ctlr) spi_register_controller(_ctlr)
+#define devm_spi_register_master(_dev, _ctlr) \
+ devm_spi_register_controller(_dev, _ctlr)
+#define spi_unregister_master(_ctlr) spi_unregister_controller(_ctlr)
+
+#endif /* __LINUX_SPI_H */
diff --git a/upstream/linux-5.10/sound/soc/codecs/es8311.c b/upstream/linux-5.10/sound/soc/codecs/es8311.c
new file mode 100755
index 0000000..0e9dab6
--- /dev/null
+++ b/upstream/linux-5.10/sound/soc/codecs/es8311.c
@@ -0,0 +1,1848 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * es8311.c -- ES8311/ES8312 ALSA SoC Audio Codec
+ *
+ * Copyright (C) 2018 Everest Semiconductor Co., Ltd
+ *
+ * Authors: David Yang(yangxiaohua@everest-semi.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/tlv.h>
+#include <sound/soc.h>
+
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/clk-provider.h>
+
+#include "es8311.h"
+#ifndef CONFIG_OF
+#define CONFIG_OF
+#endif
+
+/* Per-device (component) private data for the ES8311/ES8312 codec driver. */
+
+struct es8311_private {
+ struct snd_soc_component *component;
+ struct regmap *regmap; /* I2C register map */
+ struct device *dev;
+
+ struct clk *mclk; /* master clock */
+ unsigned int mclk_rate;
+ int mastermode; /* codec is clock master vs. slave */
+ bool sclkinv; /* invert serial (bit) clock */
+ bool mclkinv; /* invert master clock */
+ bool dmic_enable;
+ unsigned int mclk_src;
+ enum snd_soc_bias_level bias_level;
+ /* platform-specific additions (pinctrl + clock handles) */
+ struct pinctrl *pctrl;
+ struct pinctrl_state *state0;
+ struct clk *clk;
+
+
+};
+
+/* Removed: dead '#if 0' reference copy of struct aic31xx_priv borrowed
+ * from the tlv320aic31xx driver; it was never compiled and is unrelated
+ * to the ES8311.
+ */
+
+struct es8311_private *es8311_data;
+struct snd_soc_component *es8311_component;
+
+/* regmap callback: treat the whole 8-bit register space (0x00..0xff)
+ * as volatile so reads always go to hardware rather than the cache.
+ */
+static bool es8311_volatile_register(struct device *dev,
+     unsigned int reg)
+{
+ return reg <= 0xff;
+}
+
+/* regmap callback: every register in 0x00..0xff is readable. */
+static bool es8311_readable_register(struct device *dev,
+     unsigned int reg)
+{
+ return reg <= 0xff;
+}
+
+/* regmap callback: every register in 0x00..0xff is writable. */
+static bool es8311_writable_register(struct device *dev,
+     unsigned int reg)
+{
+ return reg <= 0xff;
+}
+
+static const DECLARE_TLV_DB_SCALE(vdac_tlv, -9550, 50, 1);
+static const DECLARE_TLV_DB_SCALE(vadc_tlv, -9550, 50, 1);
+static const DECLARE_TLV_DB_SCALE(mic_pga_tlv, 0, 300, 1);
+static const DECLARE_TLV_DB_SCALE(adc_scale_tlv, 0, 600, 0);
+static const DECLARE_TLV_DB_SCALE(alc_winsize_tlv, 0, 25, 0);
+static const DECLARE_TLV_DB_SCALE(alc_maxlevel_tlv, -3600, 200, 0);
+static const DECLARE_TLV_DB_SCALE(alc_minlevel_tlv, -3600, 200, 0);
+static const DECLARE_TLV_DB_SCALE(alc_noisegate_tlv, -9600, 600, 0);
+static const DECLARE_TLV_DB_SCALE(alc_noisegate_winsize_tlv, 2048, 2048, 0);
+static const DECLARE_TLV_DB_SCALE(alc_automute_gain_tlv, 0, -400, 0);
+static const DECLARE_TLV_DB_SCALE(adc_ramprate_tlv, 0, 25, 0);
+
+static const char * const dmic_type_txt[] = {
+ "dmic at high level",
+ "dmic at low level"
+};
+static const struct soc_enum dmic_type =
+SOC_ENUM_SINGLE(ES8311_ADC_REG15, 0, 2, dmic_type_txt);
+
+static const char * const automute_type_txt[] = {
+ "automute disabled",
+ "automute enable"
+};
+static const struct soc_enum alc_automute_type =
+SOC_ENUM_SINGLE(ES8311_ADC_REG18, 6, 2, automute_type_txt);
+
+static const char * const dacdsm_mute_type_txt[] = {
+ "mute to 8",
+ "mute to 7/9"
+};
+static const struct soc_enum dacdsm_mute_type =
+SOC_ENUM_SINGLE(ES8311_DAC_REG31, 7, 2, dacdsm_mute_type_txt);
+
+static const char * const aec_type_txt[] = {
+ "adc left, adc right",
+ "adc left, null right",
+ "null left, adc right",
+ "null left, null right",
+ "dac left, adc right",
+ "adc left, dac right",
+ "dac left, dac right",
+ "N/A"
+};
+static const struct soc_enum aec_type =
+SOC_ENUM_SINGLE(ES8311_GPIO_REG44, 4, 7, aec_type_txt);
+
+static const char * const adc2dac_sel_txt[] = {
+ "disable",
+ "adc data to dac",
+};
+static const struct soc_enum adc2dac_sel =
+SOC_ENUM_SINGLE(ES8311_GPIO_REG44, 7, 2, adc2dac_sel_txt);
+
+static const char * const mclk_sel_txt[] = {
+ "from mclk pin",
+ "from bclk",
+};
+static const struct soc_enum mclk_src =
+SOC_ENUM_SINGLE(ES8311_CLK_MANAGER_REG01, 7, 2, mclk_sel_txt);
+
+/*
+ * es8311 Controls
+ */
+static const struct snd_kcontrol_new es8311_snd_controls[] = {
+ SOC_SINGLE_TLV("MIC PGA GAIN", ES8311_SYSTEM_REG14,
+ 0, 10, 0, mic_pga_tlv),
+ SOC_SINGLE_TLV("ADC SCALE", ES8311_ADC_REG16,
+ 0, 7, 0, adc_scale_tlv),
+ SOC_ENUM("DMIC TYPE", dmic_type),
+ SOC_SINGLE_TLV("ADC RAMP RATE", ES8311_ADC_REG15,
+ 4, 15, 0, adc_ramprate_tlv),
+ SOC_SINGLE("ADC SDP MUTE", ES8311_SDPOUT_REG0A, 6, 1, 0),
+ SOC_SINGLE("ADC INVERTED", ES8311_ADC_REG16, 4, 1, 0),
+ SOC_SINGLE("ADC SYNC", ES8311_ADC_REG16, 5, 1, 1),
+ SOC_SINGLE("ADC RAM CLR", ES8311_ADC_REG16, 3, 1, 0),
+ SOC_SINGLE_TLV("ADC VOLUME", ES8311_ADC_REG17,
+ 0, 255, 0, vadc_tlv),
+ SOC_SINGLE("ALC ENABLE", ES8311_ADC_REG18, 7, 1, 0),
+ SOC_ENUM("ALC AUTOMUTE TYPE", alc_automute_type),
+ SOC_SINGLE_TLV("ALC WIN SIZE", ES8311_ADC_REG18,
+ 0, 15, 0, alc_winsize_tlv),
+ SOC_SINGLE_TLV("ALC MAX LEVEL", ES8311_ADC_REG19,
+ 4, 15, 0, alc_maxlevel_tlv),
+ SOC_SINGLE_TLV("ALC MIN LEVEL", ES8311_ADC_REG19,
+ 0, 15, 0, alc_minlevel_tlv),
+ SOC_SINGLE_TLV("ALC AUTOMUTE WINSIZE", ES8311_ADC_REG1A,
+ 4, 15, 0, alc_noisegate_winsize_tlv),
+ SOC_SINGLE_TLV("ALC AUTOMUTE GATE THRESHOLD", ES8311_ADC_REG1A,
+ 0, 15, 0, alc_noisegate_tlv),
+ SOC_SINGLE_TLV("ALC AUTOMUTE VOLUME", ES8311_ADC_REG1B,
+ 5, 7, 0, alc_automute_gain_tlv),
+ SOC_SINGLE("ADC FS MODE", ES8311_CLK_MANAGER_REG03, 6, 1, 0),
+ SOC_SINGLE("DAC SDP MUTE", ES8311_SDPIN_REG09, 6, 1, 0),
+ SOC_SINGLE("DAC DEM MUTE", ES8311_DAC_REG31, 5, 1, 0),
+ SOC_SINGLE("DAC INVERT", ES8311_DAC_REG31, 4, 1, 0),
+ SOC_SINGLE("DAC RAM CLR", ES8311_DAC_REG31, 3, 1, 0),
+ SOC_ENUM("DAC DSM MUTE", dacdsm_mute_type),
+ SOC_SINGLE("DAC OFFSET", ES8311_DAC_REG33, 0, 255, 0),
+ SOC_SINGLE_TLV("DAC VOLUME", ES8311_DAC_REG32,
+ 0, 255, 0, vdac_tlv),
+ SOC_SINGLE("DRC ENABLE", ES8311_DAC_REG34, 7, 1, 0),
+ SOC_SINGLE_TLV("DRC WIN SIZE", ES8311_DAC_REG34,
+ 0, 15, 0, alc_winsize_tlv),
+ SOC_SINGLE_TLV("DRC MAX LEVEL", ES8311_DAC_REG35,
+ 4, 15, 0, alc_maxlevel_tlv),
+ SOC_SINGLE_TLV("DRC MIN LEVEL", ES8311_DAC_REG35,
+ 0, 15, 0, alc_minlevel_tlv),
+ SOC_SINGLE_TLV("DAC RAMP RATE", ES8311_DAC_REG37,
+ 4, 15, 0, adc_ramprate_tlv),
+ SOC_ENUM("AEC MODE", aec_type),
+ SOC_ENUM("ADC DATA TO DAC TEST MODE", adc2dac_sel),
+ SOC_SINGLE("MCLK INVERT", ES8311_CLK_MANAGER_REG01, 6, 1, 0),
+ SOC_SINGLE("BCLK INVERT", ES8311_CLK_MANAGER_REG06, 5, 1, 0),
+ SOC_ENUM("MCLK SOURCE", mclk_src),
+};
+
+/*
+ * DAPM Controls
+ */
+static const char * const es8311_dmic_mux_txt[] = {
+ "DMIC DISABLE",
+ "DMIC ENABLE"
+};
+static const unsigned int es8311_dmic_mux_values[] = {
+ 0, 1
+};
+static const struct soc_enum es8311_dmic_mux_enum =
+ SOC_VALUE_ENUM_SINGLE(ES8311_SYSTEM_REG14, 6, 1,
+ ARRAY_SIZE(es8311_dmic_mux_txt),
+ es8311_dmic_mux_txt,
+ es8311_dmic_mux_values);
+static const struct snd_kcontrol_new es8311_dmic_mux_controls =
+ SOC_DAPM_ENUM("DMIC ROUTE", es8311_dmic_mux_enum);
+static const char * const es8311_adc_sdp_mux_txt[] = {
+ "FROM EQUALIZER",
+ "FROM ADC OUT",
+};
+static const unsigned int es8311_adc_sdp_mux_values[] = {
+ 0, 1
+};
+static const struct soc_enum es8311_adc_sdp_mux_enum =
+ SOC_VALUE_ENUM_SINGLE(ES8311_ADC_REG1C, 6, 1,
+ ARRAY_SIZE(es8311_adc_sdp_mux_txt),
+ es8311_adc_sdp_mux_txt,
+ es8311_adc_sdp_mux_values);
+static const struct snd_kcontrol_new es8311_adc_sdp_mux_controls =
+ SOC_DAPM_ENUM("ADC SDP ROUTE", es8311_adc_sdp_mux_enum);
+
+/*
+ * DAC data soure
+ */
+static const char * const es8311_dac_data_mux_txt[] = {
+ "SELECT SDP LEFT DATA",
+ "SELECT SDP RIGHT DATA",
+};
+static const unsigned int es8311_dac_data_mux_values[] = {
+ 0, 1
+};
+static const struct soc_enum es8311_dac_data_mux_enum =
+ SOC_VALUE_ENUM_SINGLE(ES8311_SDPIN_REG09, 7, 1,
+ ARRAY_SIZE(es8311_dac_data_mux_txt),
+ es8311_dac_data_mux_txt,
+ es8311_dac_data_mux_values);
+static const struct snd_kcontrol_new es8311_dac_data_mux_controls =
+ SOC_DAPM_ENUM("DAC SDP ROUTE", es8311_dac_data_mux_enum);
+
+static const struct snd_soc_dapm_widget es8311_dapm_widgets[] = {
+ /* Input*/
+ SND_SOC_DAPM_INPUT("DMIC"),
+ SND_SOC_DAPM_INPUT("AMIC"),
+
+ //SND_SOC_DAPM_PGA("INPUT PGA", ES8311_SYSTEM_REG0E,
+ // 6, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("INPUT PGA", SND_SOC_NOPM,
+ 0, 0, NULL, 0),
+ /* ADCs */
+ //SND_SOC_DAPM_ADC("MONO ADC", NULL, ES8311_SYSTEM_REG0E, 5, 0),
+
+ SND_SOC_DAPM_ADC("MONO ADC", NULL, SND_SOC_NOPM, 0, 0),
+ /* Dmic MUX */
+ SND_SOC_DAPM_MUX("DMIC MUX", SND_SOC_NOPM, 0, 0,
+ &es8311_dmic_mux_controls),
+ /* sdp MUX */
+ SND_SOC_DAPM_MUX("SDP OUT MUX", SND_SOC_NOPM, 0, 0,
+ &es8311_adc_sdp_mux_controls),
+ /* Digital Interface */
+ //SND_SOC_DAPM_AIF_OUT("I2S OUT", "I2S1 Capture", 1,
+ SND_SOC_DAPM_AIF_OUT("I2S OUT", "I2S1 Capture", 0,
+ SND_SOC_NOPM, 0, 0),
+ /* Render path */
+ SND_SOC_DAPM_AIF_IN("I2S IN", "I2S1 Playback", 0,
+ SND_SOC_NOPM, 0, 0),
+ /*DACs SDP DATA SRC MUX */
+ SND_SOC_DAPM_MUX("DAC SDP SRC MUX", ES8311_SDPIN_REG09, 7, 2,
+ &es8311_dac_data_mux_controls),
+ SND_SOC_DAPM_DAC("MONO DAC", NULL, SND_SOC_NOPM, 0, 0),
+ /* Output Lines */
+ SND_SOC_DAPM_OUTPUT("DIFFERENTIAL OUT"),
+
+};
+
+
+static const struct snd_soc_dapm_route es8311_dapm_routes[] = {
+ /* record route map */
+ {"INPUT PGA", NULL, "AMIC"},
+ {"MONO ADC", NULL, "INPUT PGA"},
+ {"DMIC MUX", "DMIC DISABLE", "MONO ADC"},
+ {"DMIC MUX", "DMIC ENABLE", "DMIC"},
+ {"SDP OUT MUX", "FROM ADC OUT", "DMIC MUX"},
+ {"SDP OUT MUX", "FROM EQUALIZER", "DMIC MUX"},
+ {"I2S OUT", NULL, "SDP OUT MUX"},
+ /* playback route map */
+ {"DAC SDP SRC MUX", "SELECT SDP LEFT DATA", "I2S IN"},
+ {"DAC SDP SRC MUX", "SELECT SDP RIGHT DATA", "I2S IN"},
+ {"MONO DAC", NULL, "DAC SDP SRC MUX"},
+ {"DIFFERENTIAL OUT", NULL, "MONO DAC"},
+};
+
+/* One row of the clock-divider lookup table: for a given (mclk, rate)
+ * pair it holds every divider/multiplier/OSR value the ES8311 clock
+ * manager registers need.
+ */
+struct _coeff_div {
+ u32 mclk; /* mclk frequency */
+ u32 rate; /* sample rate */
+ u8 prediv; /* the pre divider with range from 1 to 8 */
+ u8 premulti; /* the pre multiplier with x1, x2, x4 and x8 selection */
+ u8 adcdiv; /* adcclk divider */
+ u8 dacdiv; /* dacclk divider */
+ u8 fsmode; /* double speed or single speed, =0, ss, =1, ds */
+ u8 lrck_h; /* adclrck divider and daclrck divider */
+ u8 lrck_l;
+ u8 bclkdiv; /* sclk divider */
+ u8 adcosr; /* adc osr */
+ u8 dacosr; /* dac osr */
+ u8 adcscale; /* ADC input scale selection */
+};
+
+
+/* component hifi mclk clock divider coefficients */
+static const struct _coeff_div coeff_div[] = {
+ //mclk rate prediv mult adcdiv dacdiv fsmode lrch lrcl bckdiv osr adcscale
+ /* 8k */
+ {12288000, 8000, 0x06, 0x01, 0x01, 0x01, 0x00, 0x05, 0xff, 0x04, 0x10, 0x20, 0x04}, //1536
+ {18432000, 8000, 0x03, 0x01, 0x03, 0x03, 0x00, 0x08, 0xff, 0x18, 0x10, 0x20, 0x04}, //2304
+ {16384000, 8000, 0x08, 0x01, 0x01, 0x01, 0x00, 0x07, 0xff, 0x04, 0x10, 0x20, 0x04}, //2048
+ {8192000, 8000, 0x04, 0x01, 0x01, 0x01, 0x00, 0x03, 0xff, 0x04, 0x10, 0x20, 0x04}, //1024
+ {6144000, 8000, 0x03, 0x01, 0x01, 0x01, 0x00, 0x02, 0xff, 0x04, 0x10, 0x20, 0x04}, //768
+ {4096000, 8000, 0x02, 0x01, 0x01, 0x01, 0x00, 0x01, 0xff, 0x04, 0x10, 0x20, 0x04}, //512
+ {3072000, 8000, 0x03, 0x02, 0x01, 0x01, 0x00, 0x01, 0x7f, 0x04, 0x10, 0x20, 0x04}, //384
+ {2048000, 8000, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0xff, 0x04, 0x10, 0x20, 0x04}, //256
+ {1536000, 8000, 0x01, 0x04, 0x03, 0x03, 0x00, 0x00, 0xbf, 0x04, 0x10, 0x20, 0x04}, //192
+ {1024000, 8000, 0x01, 0x02, 0x01, 0x01, 0x00, 0x00, 0x7f, 0x04, 0x10, 0x20, 0x04}, //128
+ {12000000, 8000, 0x05, 0x04, 0x03, 0x03, 0x00, 0x05, 0xDB, 0x04, 0x19, 0x19, 0x01}, //1500
+ {26000000, 8000 , 0x04, 0x03, 0x0C, 0x0C, 0x00, 0x06, 0x58, 0x18, 0x1B, 0x1B, 0x01},
+
+ /* 11.025k */
+ {11289600, 11025, 0x04, 0x01, 0x01, 0x01, 0x00, 0x03, 0xff, 0x04, 0x10, 0x20, 0x04}, //1024
+ {5644800, 11025, 0x02, 0x01, 0x01, 0x01, 0x00, 0x01, 0xff, 0x04, 0x10, 0x20, 0x04}, //512
+ {2822400, 11025, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0xff, 0x04, 0x10, 0x20, 0x04}, //256
+ {1411200, 11025, 0x01, 0x02, 0x01, 0x01, 0x00, 0x00, 0x7f, 0x04, 0x10, 0x20, 0x04}, //128
+
+ /* 12k */
+ {12288000, 12000, 0x04, 0x01, 0x01, 0x01, 0x00, 0x03, 0xff, 0x04, 0x10, 0x20, 0x04}, //1024
+ {6144000, 12000, 0x02, 0x01, 0x01, 0x01, 0x00, 0x01, 0xff, 0x04, 0x10, 0x20, 0x04}, //512
+ {3072000, 12000, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0xff, 0x04, 0x10, 0x20, 0x04}, //256
+ {1536000, 12000, 0x01, 0x02, 0x01, 0x01, 0x00, 0x00, 0x7f, 0x04, 0x10, 0x20, 0x04}, //128
+
+ /* 16k */
+ //{24576000, 16000, 0x06, 0x01, 0x01, 0x01, 0x00, 0x05, 0xff, 0x04, 0x10, 0x20, 0x04}, //1536
+ {12288000, 16000, 0x03, 0x01, 0x01, 0x01, 0x00, 0x02, 0xff, 0x04, 0x10, 0x20, 0x04}, //768
+ {18432000, 16000, 0x03, 0x02, 0x03, 0x03, 0x00, 0x04, 0x7f, 0x0c, 0x10, 0x20, 0x04}, //1152
+ {16384000, 16000, 0x04, 0x01, 0x01, 0x01, 0x00, 0x03, 0xff, 0x04, 0x10, 0x20, 0x04}, //1024
+ {8192000, 16000, 0x02, 0x01, 0x01, 0x01, 0x00, 0x01, 0xff, 0x04, 0x10, 0x20, 0x04}, //512
+ {6144000, 16000, 0x03, 0x02, 0x01, 0x01, 0x00, 0x01, 0x7f, 0x04, 0x10, 0x20, 0x04}, //384
+ {4096000, 16000, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0xff, 0x04, 0x10, 0x20, 0x04}, //256
+ {3072000, 16000, 0x03, 0x04, 0x01, 0x01, 0x00, 0x00, 0xbf, 0x04, 0x10, 0x20, 0x04}, //192
+ {2048000, 16000, 0x01, 0x02, 0x01, 0x01, 0x00, 0x00, 0x7f, 0x04, 0x10, 0x20, 0x04}, //128
+ {1536000, 16000, 0x01, 0x08, 0x03, 0x03, 0x00, 0x00, 0x5f, 0x02, 0x10, 0x20, 0x04}, //96
+ {1024000, 16000, 0x01, 0x04, 0x01, 0x01, 0x00, 0x00, 0x3f, 0x02, 0x10, 0x20, 0x04}, //64
+ {12000000, 16000, 0x05, 0x08, 0x03, 0x03, 0x00, 0x02, 0xED, 0x04, 0x19, 0x19, 0x01}, //750
+ {26000000, 16000 , 0x04, 0x03, 0x06, 0x06, 0x00, 0x06, 0x58, 0x18, 0x1B, 0x1B, 0x01},
+
+
+ /* 22.05k */
+ {11289600, 22050, 0x02, 0x01, 0x01, 0x01, 0x00, 0x01, 0xff, 0x04, 0x10, 0x20, 0x04}, //512
+ {5644800, 22050, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0xff, 0x04, 0x10, 0x20, 0x04}, //256
+ {2822400, 22050, 0x01, 0x02, 0x01, 0x01, 0x00, 0x00, 0x7f, 0x04, 0x10, 0x20, 0x04}, //128
+ {1411200, 22050, 0x01, 0x04, 0x01, 0x01, 0x00, 0x00, 0x3f, 0x02, 0x10, 0x20, 0x04}, //64
+
+ /* 24k */
+ //{24576000, 24000, 0x04, 0x01, 0x01, 0x01, 0x00, 0x03, 0xff, 0x04, 0x10, 0x20, 0x04}, //1024
+ {12288000, 24000, 0x02, 0x01, 0x01, 0x01, 0x00, 0x01, 0xff, 0x04, 0x10, 0x20, 0x04}, //512
+ {18432000, 24000, 0x03, 0x01, 0x01, 0x01, 0x00, 0x02, 0xff, 0x04, 0x10, 0x20, 0x04}, //768
+ {6144000, 24000, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0xff, 0x04, 0x10, 0x20, 0x04}, //256
+ {3072000, 24000, 0x01, 0x02, 0x01, 0x01, 0x00, 0x00, 0x7f, 0x04, 0x10, 0x20, 0x04}, //128
+ {1536000, 24000, 0x01, 0x04, 0x01, 0x01, 0x00, 0x00, 0x3f, 0x02, 0x10, 0x20, 0x04}, //64
+ {12000000, 24000, 0x05, 0x04, 0x01, 0x01, 0x00, 0x01, 0xF3, 0x04, 0x19, 0x19, 0x01}, //500
+
+ /* 32k */
+ //{24576000, 32000, 0x03, 0x01, 0x01, 0x01, 0x00, 0x02, 0xff, 0x04, 0x10, 0x10, 0x04}, //768
+ {12288000, 32000, 0x03, 0x02, 0x01, 0x01, 0x00, 0x01, 0x7f, 0x04, 0x10, 0x10, 0x04}, //384
+ {18432000, 32000, 0x03, 0x04, 0x03, 0x03, 0x00, 0x02, 0x3f, 0x0c, 0x10, 0x10, 0x04}, //576
+ {16384000, 32000, 0x02, 0x01, 0x01, 0x01, 0x00, 0x01, 0xff, 0x04, 0x10, 0x10, 0x04}, //512
+ {8192000, 32000, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0xff, 0x04, 0x10, 0x10, 0x04}, //256
+ {6144000, 32000, 0x03, 0x04, 0x01, 0x01, 0x00, 0x00, 0xbf, 0x04, 0x10, 0x10, 0x04}, //192
+ {4096000, 32000, 0x01, 0x02, 0x01, 0x01, 0x00, 0x00, 0x7f, 0x04, 0x10, 0x10, 0x04}, //128
+ {3072000, 32000, 0x03, 0x08, 0x01, 0x01, 0x00, 0x00, 0x5f, 0x02, 0x10, 0x10, 0x04}, //96
+ {2048000, 32000, 0x01, 0x04, 0x01, 0x01, 0x00, 0x00, 0x3f, 0x02, 0x10, 0x10, 0x04}, //64
+ {1536000, 32000, 0x01, 0x08, 0x03, 0x03, 0x01, 0x00, 0x2f, 0x02, 0x10, 0x10, 0x04}, //48
+ {1024000, 32000, 0x01, 0x08, 0x01, 0x01, 0x00, 0x00, 0x1f, 0x02, 0x10, 0x10, 0x04}, //32
+
+ /* 44.1k */
+ //{22579200, 44100, 0x02, 0x01, 0x01, 0x01, 0x00, 0x01, 0xff, 0x04, 0x10, 0x10, 0x04}, //512
+ {11289600, 44100, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0xff, 0x04, 0x10, 0x10, 0x04}, //256
+ {5644800, 44100, 0x01, 0x02, 0x01, 0x01, 0x00, 0x00, 0x7f, 0x04, 0x10, 0x10, 0x04}, //128
+ {2822400, 44100, 0x01, 0x04, 0x01, 0x01, 0x00, 0x00, 0x3f, 0x02, 0x10, 0x10, 0x04}, //64
+ {1411200, 44100, 0x01, 0x08, 0x01, 0x01, 0x00, 0x00, 0x1f, 0x02, 0x10, 0x10, 0x04}, //32
+
+ /* 48k */
+ {24576000, 48000, 0x02, 0x01, 0x01, 0x01, 0x00, 0x01, 0xff, 0x04, 0x10, 0x10, 0x04}, //512
+ {12288000, 48000, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0xff, 0x04, 0x10, 0x10, 0x04}, //256
+ {18432000, 48000, 0x03, 0x02, 0x01, 0x01, 0x00, 0x01, 0x7f, 0x04, 0x10, 0x10, 0x04}, //384
+ {6144000, 48000, 0x01, 0x02, 0x01, 0x01, 0x00, 0x00, 0x7f, 0x04, 0x10, 0x10, 0x04}, //128
+ {3072000, 48000, 0x01, 0x04, 0x01, 0x01, 0x00, 0x00, 0x3f, 0x02, 0x10, 0x10, 0x04}, //64
+ {1536000, 48000, 0x01, 0x08, 0x01, 0x01, 0x00, 0x00, 0x1f, 0x02, 0x10, 0x10, 0x04}, //32
+ {12000000, 48000, 0x05, 0x08, 0x01, 0x01, 0x00, 0x00, 0xF9, 0x04, 0x19, 0x19, 0x01}, //250
+
+ /* 64k */
+ {12288000, 64000, 0x03, 0x04, 0x01, 0x01, 0x00, 0x00, 0xbf, 0x04, 0x10, 0x10, 0x04}, //192
+ {18432000, 64000, 0x01, 0x01, 0x01, 0x01, 0x00, 0x01, 0x1f, 0x06, 0x12, 0x12, 0x03}, //288
+ {16384000, 64000, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0xff, 0x04, 0x10, 0x10, 0x04}, //256
+ {8192000, 64000, 0x01, 0x02, 0x01, 0x01, 0x00, 0x00, 0x7f, 0x04, 0x10, 0x10, 0x04}, //128
+ {6144000, 64000, 0x03, 0x08, 0x01, 0x01, 0x01, 0x00, 0x5f, 0x02, 0x10, 0x10, 0x04}, //96
+ {4096000, 64000, 0x01, 0x04, 0x01, 0x01, 0x00, 0x00, 0x3f, 0x02, 0x10, 0x10, 0x04}, //64
+ {3072000, 64000, 0x03, 0x08, 0x01, 0x01, 0x01, 0x00, 0x2f, 0x02, 0x10, 0x10, 0x04}, //48
+ {2048000, 64000, 0x01, 0x08, 0x01, 0x01, 0x00, 0x00, 0x1f, 0x02, 0x10, 0x10, 0x04}, //32
+
+ /* 88.2k */
+ {11289600, 88200, 0x01, 0x02, 0x01, 0x01, 0x00, 0x00, 0x7f, 0x04, 0x10, 0x10, 0x04}, //128
+ {5644800, 88200, 0x01, 0x04, 0x01, 0x01, 0x00, 0x00, 0x3f, 0x02, 0x10, 0x10, 0x04}, //64
+ {2822400, 88200, 0x01, 0x08, 0x01, 0x01, 0x00, 0x00, 0x1f, 0x02, 0x10, 0x10, 0x04}, //32
+
+ /* 96k */
+ {12288000, 96000, 0x01, 0x02, 0x01, 0x01, 0x00, 0x00, 0x7f, 0x04, 0x10, 0x10, 0x04}, //128
+ {18432000, 96000, 0x03, 0x04, 0x01, 0x01, 0x00, 0x00, 0xbf, 0x04, 0x10, 0x10, 0x04}, //192
+ {6144000, 96000, 0x01, 0x04, 0x01, 0x01, 0x00, 0x00, 0x3f, 0x02, 0x10, 0x10, 0x04}, //64
+ {3072000, 96000, 0x01, 0x08, 0x01, 0x01, 0x00, 0x00, 0x1f, 0x02, 0x10, 0x10, 0x04}, //32
+};
+static inline int get_coeff(int mclk, int rate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(coeff_div); i++) {
+ if (coeff_div[i].rate == rate && coeff_div[i].mclk == mclk)
+ return i;
+ }
+ return -EINVAL;
+}
+
+/*
+ * if PLL not be used, use internal clk1 for mclk,otherwise, use internal clk2 for PLL source.
+ */
+static int es8311_set_dai_sysclk(struct snd_soc_dai *dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_component *component = dai->component;
+ struct es8311_private *es8311 = snd_soc_component_get_drvdata(component);
+
+ printk("Enter into %s(), freq:%d\n", __func__,freq);
+ switch (freq) {
+ case 11289600:
+ case 22579200:
+ case 5644800:
+ case 2822400:
+ case 1411200:
+ case 12288000:
+ case 16384000:
+ case 18432000:
+ case 24576000:
+ case 8192000:
+ case 6144000:
+ case 4096000:
+ case 2048000:
+ case 3072000:
+ case 1536000:
+ case 1024000:
+ case 12000000:
+ case 26000000:
+
+ es8311->mclk_rate = freq;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
/*
 * Configure the digital audio interface from a SND_SOC_DAIFMT_* mask:
 * master/slave role (REG00), frame format (SDP in/out registers 0x09/0x0A)
 * and clock polarity (REG06 plus the SDP registers).
 * Returns 0 on success, -EINVAL for unsupported format combinations
 * (right-justified mode is rejected).
 */
static int es8311_set_dai_fmt(struct snd_soc_dai *component_dai, unsigned int fmt)
{
	struct snd_soc_component *component = component_dai->component;
	struct es8311_private *es8311 = snd_soc_component_get_drvdata(component);
	u8 iface = 0;
	u8 adciface = 0;
	u8 daciface = 0;
	printk("Enter into %s()\n", __func__);

	dev_dbg(component->dev, "Enter into %s()\n", __func__);
	/* Read current values so unrelated bits are preserved below. */
	iface = snd_soc_component_read(component, ES8311_RESET_REG00);
	adciface = snd_soc_component_read(component, ES8311_SDPOUT_REG0A);
	daciface = snd_soc_component_read(component, ES8311_SDPIN_REG09);

	/* set master/slave audio interface (bit6 of REG00) */
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBM_CFM:    /* MASTER MODE */
		pr_info("%s mastermode\n", __func__);
		es8311->mastermode = 1;
		dev_dbg(component->dev, "ES8311 in Master mode\n");
		iface |= 0x40;
		break;
	case SND_SOC_DAIFMT_CBS_CFS:    /* SLAVE MODE */
		es8311->mastermode = 0;
		dev_dbg(component->dev, "ES8311 in Slave mode\n");
		iface &= 0xBF;
		break;
	default:
		return -EINVAL;
	}
	snd_soc_component_write(component, ES8311_RESET_REG00, iface);


	/* interface format: low bits of the ADC/DAC serial port registers */
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		dev_dbg(component->dev, "ES8311 in I2S Format\n");
		adciface &= 0xFC;
		daciface &= 0xFC;
		break;
	case SND_SOC_DAIFMT_RIGHT_J:
		/* right-justified is not supported by this driver */
		return -EINVAL;
	case SND_SOC_DAIFMT_LEFT_J:
		dev_dbg(component->dev, "ES8311 in LJ Format\n");
		adciface &= 0xFC;
		daciface &= 0xFC;
		adciface |= 0x01;
		daciface |= 0x01;
		break;
	case SND_SOC_DAIFMT_DSP_A:
		dev_dbg(component->dev, "ES8311 in DSP-A Format\n");
		adciface &= 0xDC;
		daciface &= 0xDC;
		adciface |= 0x03;
		daciface |= 0x03;
		break;
	case SND_SOC_DAIFMT_DSP_B:
		dev_dbg(component->dev, "ES8311 in DSP-B Format\n");
		adciface &= 0xDC;
		daciface &= 0xDC;
		adciface |= 0x23;
		daciface |= 0x23;
		break;
	default:
		return -EINVAL;
	}

	iface = snd_soc_component_read(component, ES8311_CLK_MANAGER_REG06);
	/* clock inversion (bit5); only applied for I2S and left-justified */
	if (((fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_I2S) ||
	    ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_LEFT_J)) {
		switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
		case SND_SOC_DAIFMT_NB_NF:
			/* normal bit clock, normal frame */
			iface &= 0xDF;
			adciface &= 0xDF;
			daciface &= 0xDF;
			break;
		case SND_SOC_DAIFMT_IB_IF:
			iface |= 0x20;
			adciface |= 0x20;
			daciface |= 0x20;
			break;
		case SND_SOC_DAIFMT_IB_NF:
			iface |= 0x20;
			adciface &= 0xDF;
			daciface &= 0xDF;
			break;
		case SND_SOC_DAIFMT_NB_IF:
			iface &= 0xDF;
			adciface |= 0x20;
			daciface |= 0x20;
			break;
		default:
			return -EINVAL;
		}
	}

	/* commit the assembled register images */
	snd_soc_component_write(component, ES8311_CLK_MANAGER_REG06, iface);
	snd_soc_component_write(component, ES8311_SDPOUT_REG0A, adciface);
	snd_soc_component_write(component, ES8311_SDPIN_REG09, daciface);
	return 0;
}
/* DAI startup callback: nothing to configure here, trace entry only. */
static int es8311_pcm_startup(struct snd_pcm_substream *substream,
		struct snd_soc_dai *dai)
{
	printk("Enter into %s()\n", __func__);
	return 0;
}
+
/*
 * hw_params callback: program the clock-divider tree (from the
 * coeff_div[] table) and the serial-port word length for the requested
 * rate/format.  A missing table entry is logged but deliberately not
 * fatal (the "return -EINVAL" is commented out); the divider section is
 * simply skipped and the fixed register writes at the end still run.
 */
static int es8311_pcm_hw_params(struct snd_pcm_substream *substream,
		struct snd_pcm_hw_params *params,
		struct snd_soc_dai *dai)
{
	struct snd_soc_component *component = dai->component;
	struct es8311_private *es8311 = snd_soc_component_get_drvdata(component);
	u16 iface;
	int coeff;
	u8 regv, datmp;
	int rate;

	printk("Enter into %s()\n", __func__);
	/* we need mclk rate to configure registers. Set MCLK here if failed
	 * to get mclk from set_sysclk.
	 *
	 * If the es8311->mclk_rate is a constant value, for example 12.288M,
	 * set es8311->mclk_rate = 12288000;
	 * else if es8311->mclk_rate is dynamic, for example 128Fs,
	 * set es8311->mclk_rate = 128 * params_rate(params);
	 */
	if (es8311->mclk_src == ES8311_BCLK_PIN) {
		/*
		 * Here 64 is ratio of BCLK/LRCK.
		 * If BCLK/LRCK isn't 64, please change it according to actual ratio.
		 */
		snd_soc_component_update_bits(component,
				ES8311_CLK_MANAGER_REG01, 0x80, 0x80);
		es8311->mclk_rate = 64 * params_rate(params);
	}

	pr_info("%s, mclk = %d, lrck = %d\n", __func__,
			es8311->mclk_rate, params_rate(params));

	rate = params_rate(params);
	/* NOTE(review): leftover developer debug print */
	printk("yanming rate:%d\n",rate);
	coeff = get_coeff(es8311->mclk_rate, params_rate(params));
	if (coeff < 0) {
		pr_info("Unable to configure sample rate %dHz with %dHz MCLK\n",
				params_rate(params), es8311->mclk_rate);
		//return -EINVAL;
	}
	/*
	 * set clock parameters from the matching coeff_div[] entry
	 */
	if (coeff >= 0) {
		/* REG02: MCLK pre-divider (bits 7:5) and pre-multiplier (bits 4:3) */
		regv = snd_soc_component_read(component,
				ES8311_CLK_MANAGER_REG02) & 0x07;

		regv |= (coeff_div[coeff].prediv - 1) << 5;
		datmp = 0;
		/* encode premulti 1/2/4/8 as 0..3 */
		switch (coeff_div[coeff].premulti) {
		case 1:
			datmp = 0;
			break;
		case 2:
			datmp = 1;
			break;
		case 4:
			datmp = 2;
			break;
		case 8:
			datmp = 3;
			break;
		default:
			break;
		}
		regv |= (datmp) << 3;
		snd_soc_component_write(component, ES8311_CLK_MANAGER_REG02, regv);

		/* REG05: ADC divider (high nibble) and DAC divider (low nibble) */
		regv = snd_soc_component_read(component,
				ES8311_CLK_MANAGER_REG05) & 0x00;
		regv |= (coeff_div[coeff].adcdiv - 1) << 4;
		regv |= (coeff_div[coeff].dacdiv - 1) << 0;
		snd_soc_component_write(component, ES8311_CLK_MANAGER_REG05, regv);

		/* REG03: FS mode (bit 6) and ADC oversample rate */
		regv = snd_soc_component_read(component,
				ES8311_CLK_MANAGER_REG03) & 0x80;
		regv |= coeff_div[coeff].fsmode << 6;
		regv |= coeff_div[coeff].adcosr << 0;
		snd_soc_component_write(component, ES8311_CLK_MANAGER_REG03, regv);

		/* REG04: DAC oversample rate */
		regv = snd_soc_component_read(component,
				ES8311_CLK_MANAGER_REG04) & 0x80;
		regv |= coeff_div[coeff].dacosr << 0;
		snd_soc_component_write(component, ES8311_CLK_MANAGER_REG04, regv);

		/* REG07/REG08: LRCK divider, high and low bytes */
		regv = snd_soc_component_read(component,
				ES8311_CLK_MANAGER_REG07) & 0xf0;
		regv |= coeff_div[coeff].lrck_h << 0;
		snd_soc_component_write(component, ES8311_CLK_MANAGER_REG07, regv);

		regv = snd_soc_component_read(component,
				ES8311_CLK_MANAGER_REG08) & 0x00;
		regv |= coeff_div[coeff].lrck_l << 0;
		snd_soc_component_write(component, ES8311_CLK_MANAGER_REG08, regv);

		/*
		 * REG06: BCLK divider. Values below 19 appear to be stored as
		 * (div - 1); larger values are written verbatim — presumably a
		 * different register encoding. TODO confirm against datasheet.
		 */
		regv = snd_soc_component_read(component,
				ES8311_CLK_MANAGER_REG06) & 0xE0;
		if (coeff_div[coeff].bclkdiv < 19)
			regv |= (coeff_div[coeff].bclkdiv - 1) << 0;
		else
			regv |= coeff_div[coeff].bclkdiv << 0;

		snd_soc_component_write(component, ES8311_CLK_MANAGER_REG06, regv);

		/* REG16 low bits: ADC scale */
		regv = snd_soc_component_read(component, ES8311_ADC_REG16) & 0x38;
		regv |= (coeff_div[coeff].adcscale) << 0;
		snd_soc_component_write(component, ES8311_ADC_REG16, regv);
	}

	/* word length: bits 4:2 of the SDP register for the active direction */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		iface = snd_soc_component_read(component,
				ES8311_SDPIN_REG09) & 0xE3;
		/* bit size */
		switch (params_format(params)) {
		case SNDRV_PCM_FORMAT_S16_LE:
			iface |= 0x0c;
			break;
		case SNDRV_PCM_FORMAT_S20_3LE:
			iface |= 0x04;
			break;
		case SNDRV_PCM_FORMAT_S24_LE:
			/* 24-bit is the register default (0) */
			break;
		case SNDRV_PCM_FORMAT_S32_LE:
			iface |= 0x10;
			break;
		}
		/* set iface */
		snd_soc_component_write(component, ES8311_SDPIN_REG09, iface);
	} else {
		iface = snd_soc_component_read(component,
				ES8311_SDPOUT_REG0A) & 0xE3;
		/* bit size */
		switch (params_format(params)) {
		case SNDRV_PCM_FORMAT_S16_LE:
			iface |= 0x0c;
			break;
		case SNDRV_PCM_FORMAT_S20_3LE:
			iface |= 0x04;
			break;
		case SNDRV_PCM_FORMAT_S24_LE:
			break;
		case SNDRV_PCM_FORMAT_S32_LE:
			iface |= 0x10;
			break;
		}
		/* set iface */
		snd_soc_component_write(component, ES8311_SDPOUT_REG0A, iface);
	}
/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes start */
#ifdef CONFIG_USE_TOP_TDM
#else
	/*
	 * NOTE(review): in non-TDM builds these fixed writes override the
	 * table-derived REG02/REG03/REG04 values programmed above.
	 */
	snd_soc_component_write(component, ES8311_CLK_MANAGER_REG02, 0x98);
	snd_soc_component_write(component, ES8311_CLK_MANAGER_REG03, 0x1b);
	snd_soc_component_write(component, ES8311_CLK_MANAGER_REG04, 0x1b);
	/*
	   if(rate == 8000){
	   snd_soc_component_write(component, ES8311_CLK_MANAGER_REG05, 0xbb);
	   pr_info("%s rate:%d\n",__FUNCTION__,rate);

	   }
	   if(rate == 16000){
	   snd_soc_component_write(component, ES8311_CLK_MANAGER_REG05, 0x55);
	   pr_info("%s rate:%d\n",__FUNCTION__,rate);

	   }
	 */
	if(rate == 44100){

		snd_soc_component_write(component, ES8311_CLK_MANAGER_REG02, 0x90);
		snd_soc_component_write(component, ES8311_CLK_MANAGER_REG03, 0x1d);
		snd_soc_component_write(component, ES8311_CLK_MANAGER_REG04, 0x1d);
		snd_soc_component_write(component, ES8311_CLK_MANAGER_REG05, 0x00);
		pr_info("%s rate:%d\n",__FUNCTION__,rate);

	}
#endif
/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes end */
	snd_soc_component_write(component, ES8311_CLK_MANAGER_REG06, 0x18);
	snd_soc_component_write(component, ES8311_CLK_MANAGER_REG07, 0x06);

	snd_soc_component_write(component, ES8311_CLK_MANAGER_REG08, 0x58);


	return 0;
}
+
/*
 * Bias-level transitions: full power-up sequence for BIAS_ON, and the
 * power-down sequence when dropping from PREPARE->STANDBY or
 * STANDBY->OFF.  The write order follows a fixed bring-up/shutdown
 * sequence; do not reorder the register writes.
 */
static int es8311_set_bias_level(struct snd_soc_component *component,
		enum snd_soc_bias_level level)
{
	int regv;
	struct es8311_private *es8311 = snd_soc_component_get_drvdata(component);

	printk("Enter into %s(), level = %d\n", __func__, level);
	switch (level) {
	case SND_SOC_BIAS_ON:
		printk("%s on\n", __func__);
		snd_soc_component_write(component, ES8311_GP_REG45, 0x00);
		/* select clock source: 0x30 = MCLK pin, 0xB0 = BCLK pin */
		if (es8311->mclk_src == ES8311_MCLK_PIN) {
			snd_soc_component_write(component,
					ES8311_CLK_MANAGER_REG01, 0x30);
		} else {
			snd_soc_component_write(component,
					ES8311_CLK_MANAGER_REG01, 0xB0);
		}
		//snd_soc_component_write(component, ES8311_ADC_REG16, 0x24);
		snd_soc_component_write(component, ES8311_SYSTEM_REG0B, 0x00);
		snd_soc_component_write(component, ES8311_SYSTEM_REG0C, 0x00);
		/* analog bias settings depend on the AVDD supply voltage */
		if (ES8311_AVDD == ES8311_1V8) {
			snd_soc_component_write(component,
					ES8311_SYSTEM_REG10, 0x61);
			snd_soc_component_write(component,
					ES8311_SYSTEM_REG11, 0x7B);
		} else {
			snd_soc_component_write(component,
					ES8311_SYSTEM_REG10, 0x03);
			snd_soc_component_write(component,
					ES8311_SYSTEM_REG11, 0x57);
		}

		/* enable all internal clocks (low nibble = 0xF) */
		if (es8311->mclk_src == ES8311_MCLK_PIN) {
			snd_soc_component_write(component,
					ES8311_CLK_MANAGER_REG01, 0x3F);
		} else {
			snd_soc_component_write(component,
					ES8311_CLK_MANAGER_REG01, 0xBF);
		}
		/* apply MCLK / SCLK polarity inversion from platform data */
		if (es8311->mclkinv == true) {
			snd_soc_component_update_bits(component,
					ES8311_CLK_MANAGER_REG01, 0x40, 0x40);
		} else {
			snd_soc_component_update_bits(component,
					ES8311_CLK_MANAGER_REG01, 0x40, 0x00);
		}
		if (es8311->sclkinv == true) {
			snd_soc_component_update_bits(component,
					ES8311_CLK_MANAGER_REG06, 0x20, 0x20);
		} else {
			snd_soc_component_update_bits(component,
					ES8311_CLK_MANAGER_REG06, 0x20, 0x00);
		}

		//digital reset
		snd_soc_component_write(component, ES8311_RESET_REG00, 0x1f);
		usleep_range(1000, 2000);
		/* release reset; bit6 additionally selects master mode */
		if (es8311->mastermode == 1) {
			snd_soc_component_write(component,
					ES8311_RESET_REG00, 0xC0);
		} else {
			snd_soc_component_write(component,
					ES8311_RESET_REG00, 0x80);
		}
		usleep_range(1500, 3000);

		snd_soc_component_write(component, ES8311_SYSTEM_REG0D, 0x01);

		regv = snd_soc_component_read(component, ES8311_SYSTEM_REG14) & 0xCF;
		regv |= 0x1A;
		snd_soc_component_write(component, ES8311_SYSTEM_REG14, regv);

		/* bit6 of REG14 selects the digital microphone input */
		if (es8311->dmic_enable == true) {
			snd_soc_component_update_bits(component, ES8311_SYSTEM_REG14,
					0x40, 0x40);
		} else {
			snd_soc_component_update_bits(component, ES8311_SYSTEM_REG14,
					0x40, 0x00);
		}
		snd_soc_component_write(component, ES8311_SYSTEM_REG12, 0x00);
		snd_soc_component_write(component, ES8311_SYSTEM_REG13, 0x10);
		snd_soc_component_write(component, ES8311_SYSTEM_REG0E, 0x02);
		printk("%s biason REG0E=0X%X\n", __func__,snd_soc_component_read(component, ES8311_SYSTEM_REG0E));

		snd_soc_component_write(component, ES8311_SYSTEM_REG0F, 0x7F);
		snd_soc_component_write(component, ES8311_ADC_REG15, 0x40);
		snd_soc_component_write(component, ES8311_ADC_REG1B, 0x0A);
		snd_soc_component_write(component, ES8311_ADC_REG1C, 0x6A);
		snd_soc_component_write(component, ES8311_DAC_REG37, 0x48);
		//snd_soc_component_write(component, ES8311_ADC_REG17, 0xBF);
		//snd_soc_component_write(component, ES8311_DAC_REG32, 0xBF);
		break;
	case SND_SOC_BIAS_PREPARE:
		printk("%s prepare\n", __func__);
		break;
	case SND_SOC_BIAS_STANDBY:
		printk("%s standby\n", __func__);
		/* power down only when coming from the PREPARE state */
		if (es8311->bias_level == SND_SOC_BIAS_PREPARE) {
			//snd_soc_component_write(component, ES8311_DAC_REG32, 0x00);
			//snd_soc_component_write(component, ES8311_ADC_REG17, 0x00);
			snd_soc_component_write(component, ES8311_SYSTEM_REG0E, 0xFF);
			snd_soc_component_write(component, ES8311_SYSTEM_REG12, 0x02);
			snd_soc_component_write(component, ES8311_SYSTEM_REG14, 0x00);
			snd_soc_component_write(component, ES8311_SYSTEM_REG0D, 0xF9);
			snd_soc_component_write(component, ES8311_ADC_REG15, 0x00);
			snd_soc_component_write(component, ES8311_DAC_REG37, 0x08);
			snd_soc_component_write(component, ES8311_CLK_MANAGER_REG02, 0x10);
			snd_soc_component_write(component, ES8311_RESET_REG00, 0x00);
			snd_soc_component_write(component, ES8311_RESET_REG00, 0x1F);
/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes start */
#ifdef CONFIG_USE_TOP_TDM
			snd_soc_component_write(component, ES8311_CLK_MANAGER_REG01, 0xB0);
#else
			snd_soc_component_write(component, ES8311_CLK_MANAGER_REG01, 0x30);
#endif
			snd_soc_component_write(component, ES8311_CLK_MANAGER_REG01, 0x00);
			snd_soc_component_write(component, ES8311_GP_REG45, 0x00);
		}
		break;
	case SND_SOC_BIAS_OFF:
		printk("%s off\n", __func__);
		/* full shutdown only when coming from STANDBY */
		if (es8311->bias_level == SND_SOC_BIAS_STANDBY) {
			//snd_soc_component_write(component, ES8311_DAC_REG32, 0x00);
			//snd_soc_component_write(component, ES8311_ADC_REG17, 0x00);
			snd_soc_component_write(component, ES8311_SYSTEM_REG0E, 0xFF);
			snd_soc_component_write(component, ES8311_SYSTEM_REG12, 0x02);
			snd_soc_component_write(component, ES8311_SYSTEM_REG14, 0x00);
			snd_soc_component_write(component, ES8311_SYSTEM_REG0D, 0xF9);
			snd_soc_component_write(component, ES8311_ADC_REG15, 0x00);
			snd_soc_component_write(component, ES8311_DAC_REG37, 0x08);
			snd_soc_component_write(component, ES8311_CLK_MANAGER_REG02, 0x10);
			snd_soc_component_write(component, ES8311_RESET_REG00, 0x00);
			snd_soc_component_write(component, ES8311_RESET_REG00, 0x1F);
#ifdef CONFIG_USE_TOP_TDM
			snd_soc_component_write(component, ES8311_CLK_MANAGER_REG01, 0xB0);
#else
			snd_soc_component_write(component, ES8311_CLK_MANAGER_REG01, 0x30);
#endif
/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes end */
			snd_soc_component_write(component, ES8311_CLK_MANAGER_REG01, 0x00);
			snd_soc_component_write(component, ES8311_GP_REG45, 0x00);
			snd_soc_component_write(component, ES8311_SYSTEM_REG0D, 0xFC);
			snd_soc_component_write(component, ES8311_CLK_MANAGER_REG02, 0x00);
		}
		break;
	}
	/* remember the new level so the next transition knows where it came from */
	es8311->bias_level = level;
	printk("%s END bias_level(%d),REG0E=0X%X\n", __func__,level,snd_soc_component_read(component, ES8311_SYSTEM_REG0E));

	return 0;
}
+
+static int es8311_set_tristate(struct snd_soc_dai *dai, int tristate)
+{
+ struct snd_soc_component *component = dai->component;
+
+ printk("Enter into %s(), tristate = %d\n", __func__, tristate);
+ if (tristate) {
+ snd_soc_component_update_bits(component,
+ ES8311_CLK_MANAGER_REG07, 0x30, 0x30);
+ } else {
+ snd_soc_component_update_bits(component,
+ ES8311_CLK_MANAGER_REG07, 0x30, 0x00);
+ }
+ return 0;
+}
+
+static int es8311_mute(struct snd_soc_dai *dai, int mute, int direction)
+{
+ struct snd_soc_component *component = dai->component;
+
+ printk("Enter into %s(), mute = %d\n", __func__, mute);
+
+ if (mute) {
+ snd_soc_component_write(component, ES8311_SYSTEM_REG12,
+ 0x02);
+ snd_soc_component_update_bits(component, ES8311_DAC_REG31,
+ 0x60, 0x60);
+ //snd_soc_component_write(component, ES8311_DAC_REG32, 0x00);
+ } else {
+ snd_soc_component_update_bits(component, ES8311_DAC_REG31,
+ 0x60, 0x00);
+ snd_soc_component_write(component, ES8311_SYSTEM_REG12,
+ 0x00);
+ //snd_soc_component_write(component, ES8311_DAC_REG32, 0xbf);
+ }
+ printk("%s mute=%d biason REG0E=0X%X\n", __func__,mute,snd_soc_component_read(component, ES8311_SYSTEM_REG0E));
+
+ return 0;
+}
+
/* Supported sample rates: all standard rates from 8 kHz to 96 kHz. */
#define es8311_RATES SNDRV_PCM_RATE_8000_96000

/* Supported sample formats: 16/20/24/32-bit little-endian. */
#define es8311_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
		SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE)

/* DAI operation callbacks registered with the ASoC core. */
static const struct snd_soc_dai_ops es8311_ops = {
	.startup = es8311_pcm_startup,
	.hw_params = es8311_pcm_hw_params,
	.set_fmt = es8311_set_dai_fmt,
	.set_sysclk = es8311_set_dai_sysclk,
	.mute_stream = es8311_mute,
	.set_tristate = es8311_set_tristate,
};
+
/*
 * DAI descriptor: one bidirectional interface, 1-2 channels in each
 * direction, sharing the same rate/format capabilities.
 */
static struct snd_soc_dai_driver es8311_dai = {
	.name = "ES8311 HiFi",
	.playback = {
		.stream_name = "Playback",
		.channels_min = 1,
		.channels_max = 2,
		.rates = es8311_RATES,
		.formats = es8311_FORMATS,
	},
	.capture = {
		.stream_name = "Capture",
		.channels_min = 1,
		.channels_max = 2,
		.rates = es8311_RATES,
		.formats = es8311_FORMATS,
	},
	.ops = &es8311_ops,
	/* playback and capture must run at the same sample rate */
	.symmetric_rates = 1,
};
+
/*
 * Suspend: power the codec down. This is the same write sequence as the
 * STANDBY->OFF path in es8311_set_bias_level() (mute analog blocks,
 * reset the digital core, gate the clocks); keep the order intact.
 */
static int es8311_suspend(struct snd_soc_component *component)
{
	printk("Enter into %s()\n", __func__);

	//snd_soc_component_write(component, ES8311_DAC_REG32, 0x00);
	//snd_soc_component_write(component, ES8311_ADC_REG17, 0x00);
	snd_soc_component_write(component, ES8311_SYSTEM_REG0E, 0xFF);
	snd_soc_component_write(component, ES8311_SYSTEM_REG12, 0x02);
	snd_soc_component_write(component, ES8311_SYSTEM_REG14, 0x00);
	snd_soc_component_write(component, ES8311_SYSTEM_REG0D, 0xF9);
	snd_soc_component_write(component, ES8311_ADC_REG15, 0x00);
	snd_soc_component_write(component, ES8311_DAC_REG37, 0x08);
	snd_soc_component_write(component, ES8311_CLK_MANAGER_REG02, 0x10);
	/* hold digital core in reset */
	snd_soc_component_write(component, ES8311_RESET_REG00, 0x00);
	snd_soc_component_write(component, ES8311_RESET_REG00, 0x1F);
	/* gate internal clocks */
	snd_soc_component_write(component, ES8311_CLK_MANAGER_REG01, 0x30);
	snd_soc_component_write(component, ES8311_CLK_MANAGER_REG01, 0x00);
	snd_soc_component_write(component, ES8311_GP_REG45, 0x00);
	snd_soc_component_write(component, ES8311_SYSTEM_REG0D, 0xFC);
	snd_soc_component_write(component, ES8311_CLK_MANAGER_REG02, 0x00);

	return 0;
}
+
/*
 * Resume is intentionally empty: per the ZXW-268 change, the codec is
 * re-initialised after power-up elsewhere rather than here.
 */
static int es8311_resume(struct snd_soc_component *component)
{
	printk("Enter into %s()\n", __func__);
	return 0;
}
+
+static int es8311_probe(struct snd_soc_component *component)
+{
+ int ret = 0;
+ struct es8311_private *es8311 = es8311_data;
+
+ printk("Enter into %s()\n", __func__);
+
+ snd_soc_component_set_drvdata(component, es8311);
+ if (component == NULL) {
+ dev_err(component->dev, "Codec device not registered\n");
+ return -ENODEV;
+ }
+ es8311_component = component;
+ es8311->component = component;
+
+ es8311->mastermode = 0;
+/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes start */
+#ifdef CONFIG_USE_TOP_TDM
+ es8311->mclk_src = ES8311_BCLK_PIN;
+#else
+ es8311->mclk_src = ES8311_MCLK_SOURCE;
+#endif
+/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes end */
+ /* Enable the following code if there is no mclk.
+ * a clock named "mclk" need to be defined in the dts (see sample dts)
+ *
+ * No need to enable the following code to get mclk if:
+ * 1. sclk/bclk is used as mclk
+ * 2. mclk is controled by soc I2S
+ */
+ //if (es8311->mclk_src == ES8311_MCLK_PIN) {
+ if (0) {
+
+ es8311->mclk = devm_clk_get(component->dev, "mclk");
+ if (IS_ERR(es8311->mclk)) {
+ dev_err(component->dev, "%s,unable to get mclk\n", __func__);
+ return PTR_ERR(es8311->mclk);
+ }
+ if (!es8311->mclk)
+ dev_err(component->dev, "%s, assuming static mclk\n", __func__);
+
+ ret = clk_prepare_enable(es8311->mclk);
+ if (ret) {
+ dev_err(component->dev, "%s, unable to enable mclk\n", __func__);
+ return ret;
+ }
+ }
+/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes start */
+ snd_soc_component_write(component, ES8311_GP_REG45, 0x00);
+#ifdef CONFIG_USE_TOP_TDM
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG01, 0xB0);
+#else
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG01, 0x30);
+#endif
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG02, 0x00);
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG03, 0x10);
+ //snd_soc_component_write(component, ES8311_ADC_REG16, 0x24);
+ snd_soc_component_write(component, ES8311_ADC_REG16, 0x21);
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG04, 0x10);
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG05, 0x00);
+ snd_soc_component_write(component, ES8311_SDPIN_REG09, 0x00);
+ snd_soc_component_write(component, ES8311_SDPOUT_REG0A, 0x00);
+ snd_soc_component_write(component, ES8311_SYSTEM_REG0B, 0x00);
+ snd_soc_component_write(component, ES8311_SYSTEM_REG0C, 0x00);
+ if (ES8311_AVDD == ES8311_1V8) {
+ snd_soc_component_write(component, ES8311_SYSTEM_REG10, 0x61);
+ snd_soc_component_write(component, ES8311_SYSTEM_REG11, 0x7B);
+ } else {
+ snd_soc_component_write(component, ES8311_SYSTEM_REG10, 0x03);
+ snd_soc_component_write(component, ES8311_SYSTEM_REG11, 0x57);
+ }
+
+ if (es8311->mclk_src == ES8311_MCLK_PIN)
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG01, 0x3F);
+ else
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG01, 0xBF);
+
+ if (es8311->mastermode == 1)
+ snd_soc_component_write(component, ES8311_RESET_REG00, 0xC0);
+ else
+ snd_soc_component_write(component, ES8311_RESET_REG00, 0x80);
+
+ usleep_range(1500, 3000);
+ snd_soc_component_write(component, ES8311_SYSTEM_REG0D, 0x01);
+
+ if (es8311->mclkinv == true) {
+ snd_soc_component_update_bits(component, ES8311_CLK_MANAGER_REG01,
+ 0x40, 0x40);
+ } else {
+ snd_soc_component_update_bits(component, ES8311_CLK_MANAGER_REG01,
+ 0x40, 0x00);
+ }
+ if (es8311->sclkinv == true) {
+ snd_soc_component_update_bits(component, ES8311_CLK_MANAGER_REG06,
+ 0x20, 0x20);
+ } else {
+ snd_soc_component_update_bits(component, ES8311_CLK_MANAGER_REG06,
+ 0x20, 0x00);
+ }
+ snd_soc_component_write(component, ES8311_SYSTEM_REG14, 0x1A);
+ if (es8311->dmic_enable == true) {
+ snd_soc_component_update_bits(component, ES8311_SYSTEM_REG14,
+ 0x40, 0x40);
+ } else {
+ snd_soc_component_update_bits(component, ES8311_SYSTEM_REG14,
+ 0x40, 0x00);
+ }
+ snd_soc_component_write(component, ES8311_SYSTEM_REG12, 0x00);
+ snd_soc_component_write(component, ES8311_SYSTEM_REG13, 0x10);
+ snd_soc_component_write(component, ES8311_SYSTEM_REG0E, 0x02);
+ snd_soc_component_write(component, ES8311_SYSTEM_REG0F, 0x7F);
+ snd_soc_component_write(component, ES8311_ADC_REG15, 0x40);
+ snd_soc_component_write(component, ES8311_ADC_REG1B, 0x0A);
+ snd_soc_component_write(component, ES8311_ADC_REG1C, 0x6A);
+ snd_soc_component_write(component, ES8311_DAC_REG37, 0x48);
+ snd_soc_component_write(component, ES8311_ADC_REG17, 0xBF);
+ snd_soc_component_write(component, ES8311_DAC_REG32, 0xBF);
+#ifdef CONFIG_USE_TOP_TDM
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG02, 0x1A);
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG03, 0x1b);
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG04, 0x1b);
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG05, 0x00);
+ snd_soc_component_write(component, ES8311_SDPIN_REG09,0x0F);
+ snd_soc_component_write(component, ES8311_SDPOUT_REG0A,0x0F);
+#else
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG02, 0x98);
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG03, 0x1b);
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG04, 0x1b);
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG05, 0xbb);
+#endif
+/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes end */
+ msleep(100);
+ es8311_set_bias_level(component, SND_SOC_BIAS_STANDBY);
+
+ printk("%s end\n", __func__);
+
+
+ return ret;
+}
+
+static void es8311_remove(struct snd_soc_component *component)
+{
+ printk("Enter into %s()\n", __func__);
+
+ es8311_set_bias_level(component, SND_SOC_BIAS_OFF);
+}
+static int clkout_init_pinctrl(struct device *dev)
+{
+
+ struct pinctrl *pctrl;
+ struct pinctrl_state *state0;
+ struct clk *clk;
+ int ret;
+ struct es8311_private *info = dev_get_drvdata(dev);
+ struct device_node *np = dev->of_node;
+
+ //yu.dong@20240416[T106BUG-551][codec] codec 8311 sleep power consumption does not go down
+
+ dev_info(dev, "%s:start!\n", __func__);
+
+ if(dev == NULL){
+ dev_err(dev, "%s:dev is null ,return\n",__func__);
+ return -EINVAL;
+
+ }
+ dev_info(dev, "%s: get clk pinctrl\n", __func__);
+
+ pctrl = devm_pinctrl_get(dev);
+ if (IS_ERR(pctrl)) {
+ dev_warn(dev, "Failed to get clk_test pins\n");
+ pctrl = NULL;
+ return -EINVAL;
+ }
+
+
+ state0 = pinctrl_lookup_state(pctrl, "clk_out2");
+ if (IS_ERR(state0)) {
+ devm_pinctrl_put(pctrl);
+ dev_err(dev, "missing clk_out\n");
+ return -EINVAL;
+ }
+
+ dev_info(dev, "%s: select pinctrl\n", __func__);
+
+ if ( pinctrl_select_state(pctrl, state0) < 0) {
+ //devm_pinctrl_put(pctrl);
+ dev_err(dev, "setting clk_out failed\n");
+ ret = -EINVAL;
+ goto err_put_pinctrl;
+ }
+
+
+ dev_info(dev, "%s: get clk\n", __func__);
+
+
+
+
+ clk = of_clk_get_by_name(np, "clk_out2");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "Could not get clk_out\n");
+ ret = PTR_ERR(clk);
+ goto err_put_pinctrl;
+ }
+
+
+
+#if 1
+ dev_info(dev, "%s: clk prepare\n", __func__);
+
+ ret = clk_prepare(clk);
+ if (ret) {
+ dev_err(dev, "failed to clk prepare\n");
+ goto err_put_clk;
+
+ }
+
+#else
+ dev_info(dev, "%s: clk enable\n", __func__);
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clkout");
+ goto err_put_clk;
+
+ }
+
+#endif
+ if(info != NULL){
+
+ dev_info(dev, "%s: set drvdata\n", __func__);
+ info->pctrl = pctrl;
+ info->state0 = state0;
+ info->clk = clk;
+ }
+ else{
+ dev_info(dev, "%s: info is null\n", __func__);
+
+ }
+ dev_info(dev, "%s: init clkout end!\n",__func__);
+ return 0;
+err_put_clk:
+ clk_put(clk);
+err_put_pinctrl:
+ devm_pinctrl_put(pctrl);
+
+
+ return ret;
+
+
+}
+
+//yu.dong@20240416[ZXW-268]Added codec re-initialization for power down and I2S default configuration adjustment start
+/*
+ * es8311_reinit - reprogram the whole ES8311 register set after the codec
+ * has been powered down (clocks, serial data port, system/ADC/DAC blocks),
+ * then move the part to STANDBY bias.  The register values and the write
+ * order follow the vendor init sequence; do not reorder the writes.
+ * Called from component_open() so each stream start re-initializes the chip.
+ */
+static void es8311_reinit(struct snd_soc_component *component, struct es8311_private *es8311)
+{
+ pr_info("%s:begin!\n", __func__);
+
+/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes start */
+ snd_soc_component_write(component, ES8311_GP_REG45, 0x00);
+#ifdef CONFIG_USE_TOP_TDM
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG01, 0xB0);
+#else
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG01, 0x30);
+#endif
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG02, 0x00);
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG03, 0x10);
+ //snd_soc_component_write(component, ES8311_ADC_REG16, 0x24);
+ snd_soc_component_write(component, ES8311_ADC_REG16, 0x21);
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG04, 0x10);
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG05, 0x00);
+ snd_soc_component_write(component, ES8311_SDPIN_REG09, 0x00);
+ snd_soc_component_write(component, ES8311_SDPOUT_REG0A, 0x00);
+ snd_soc_component_write(component, ES8311_SYSTEM_REG0B, 0x00);
+ snd_soc_component_write(component, ES8311_SYSTEM_REG0C, 0x00);
+
+ /* Analog-supply dependent bias settings (1.8V vs. 3.3V AVDD builds). */
+ if (ES8311_AVDD == ES8311_1V8) {
+ snd_soc_component_write(component, ES8311_SYSTEM_REG10, 0x61);
+ snd_soc_component_write(component, ES8311_SYSTEM_REG11, 0x7B);
+ } else {
+ snd_soc_component_write(component, ES8311_SYSTEM_REG10, 0x03);
+ snd_soc_component_write(component, ES8311_SYSTEM_REG11, 0x57);
+ }
+
+ /* Clock source select: external MCLK pin vs. internally derived clock. */
+ if (es8311->mclk_src == ES8311_MCLK_PIN) {
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG01, 0x3F);
+ } else {
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG01, 0xBF);
+ }
+
+ /* Master vs. slave serial-port mode, then wait for the part to settle. */
+ if (es8311->mastermode == 1) {
+ snd_soc_component_write(component, ES8311_RESET_REG00, 0xC0);
+ } else {
+ snd_soc_component_write(component, ES8311_RESET_REG00, 0x80);
+ }
+
+ usleep_range(1500, 3000);
+ snd_soc_component_write(component, ES8311_SYSTEM_REG0D, 0x01);
+
+ /* Optional MCLK polarity inversion (bit 0x40 of REG01). */
+ if (es8311->mclkinv == true) {
+ snd_soc_component_update_bits(component, ES8311_CLK_MANAGER_REG01, 0x40, 0x40);
+ } else {
+ snd_soc_component_update_bits(component, ES8311_CLK_MANAGER_REG01, 0x40, 0x00);
+ }
+
+ /* Optional SCLK polarity inversion (bit 0x20 of REG06). */
+ if (es8311->sclkinv == true) {
+ snd_soc_component_update_bits(component, ES8311_CLK_MANAGER_REG06, 0x20, 0x20);
+ } else {
+ snd_soc_component_update_bits(component, ES8311_CLK_MANAGER_REG06, 0x20, 0x00);
+ }
+
+ snd_soc_component_write(component, ES8311_SYSTEM_REG14, 0x1A);
+
+ /* Digital-microphone input enable bit (0x40 of REG14). */
+ if (es8311->dmic_enable == true) {
+ snd_soc_component_update_bits(component, ES8311_SYSTEM_REG14, 0x40, 0x40);
+ } else {
+ snd_soc_component_update_bits(component, ES8311_SYSTEM_REG14, 0x40, 0x00);
+ }
+ snd_soc_component_write(component, ES8311_SYSTEM_REG12, 0x00);
+ snd_soc_component_write(component, ES8311_SYSTEM_REG13, 0x10);
+ snd_soc_component_write(component, ES8311_SYSTEM_REG0E, 0x02);
+ snd_soc_component_write(component, ES8311_SYSTEM_REG0F, 0x7F);
+ snd_soc_component_write(component, ES8311_ADC_REG15, 0x40);
+ snd_soc_component_write(component, ES8311_ADC_REG1B, 0x0A);
+ snd_soc_component_write(component, ES8311_ADC_REG1C, 0x6A);
+ snd_soc_component_write(component, ES8311_DAC_REG37, 0x48);
+ snd_soc_component_write(component, ES8311_ADC_REG17, 0xBF);
+ snd_soc_component_write(component, ES8311_DAC_REG32, 0xBF);
+#ifdef CONFIG_USE_TOP_TDM
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG02, 0x1A);
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG03, 0x1b);
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG04, 0x1b);
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG05, 0x00);
+ snd_soc_component_write(component, ES8311_SDPIN_REG09,0x0F);
+ snd_soc_component_write(component, ES8311_SDPOUT_REG0A,0x0F);
+#else
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG02, 0x98);
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG03, 0x1b);
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG04, 0x1b);
+ snd_soc_component_write(component, ES8311_CLK_MANAGER_REG05, 0xbb);
+#endif
+/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes end */
+ msleep(100);
+ es8311_set_bias_level(component, SND_SOC_BIAS_STANDBY);
+ pr_info("%s:end!\n", __func__);
+}
+
+extern int zx29_i2s_config_sleep_pin(void);
+extern int zx29_i2s_config_default_pin(void);
+
+/*
+ * component_open - ASoC component .open callback: enable the previously
+ * prepared clkout, restore the default (active) I2S pin state in non-TDM
+ * builds, and fully re-initialize the codec registers.
+ * Returns 0 on success; in non-TDM builds the return value is the pinctrl
+ * select result (a clk_enable() failure is logged but then overwritten —
+ * NOTE(review): looks intentional best-effort, confirm before changing).
+ */
+static int component_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+
+ int ret = 0;
+ struct es8311_private *info = snd_soc_component_get_drvdata(component);
+ printk("Enter into %s()\n", __func__);
+
+ /* info->clk is NULL when clkout_init_pinctrl() was skipped or failed. */
+ if(info->clk != NULL){
+
+ ret = clk_enable(info->clk);
+ if (ret) {
+ pr_err( "failed to enable clkout");
+ }
+ }
+/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes start */
+#ifdef CONFIG_USE_TOP_TDM
+#else
+ ret = zx29_i2s_config_default_pin();
+ if(ret < 0) {
+ pr_err("%s select state failure %d !! \n", __func__, ret);
+ }
+#endif
+ es8311_reinit(component, info);
+/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes end */
+ pr_info("%s:clk en end!\n",__func__);
+ return ret;
+
+
+
+
+}
+/*
+ * component_close - ASoC component .close callback: mirror of
+ * component_open().  Gates the clkout off and, in non-TDM builds, switches
+ * the I2S pins to their low-power sleep state.
+ * Returns 0, or the pinctrl sleep-select result in non-TDM builds.
+ */
+static int component_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+
+ int ret = 0;
+ struct es8311_private *info = snd_soc_component_get_drvdata(component);
+ printk("Enter into %s()\n", __func__);
+
+
+ /* Balances the clk_enable() done in component_open(). */
+ if(info->clk != NULL){
+
+ clk_disable(info->clk);
+
+ }
+ pr_info("%s:clk dis end!\n",__func__);
+/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes start */
+#ifdef CONFIG_USE_TOP_TDM
+#else
+ ret = zx29_i2s_config_sleep_pin();
+ if(ret < 0) {
+ pr_err("%s select state failure %d !! \n", __func__, ret);
+ }
+#endif
+/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes end */
+ return ret;
+}
+
+//yu.dong@20240416[ZXW-268]Added codec re-initialization for power down and I2S default configuration adjustment end
+/*
+ * ASoC component driver for the ES8311: lifecycle callbacks, bias control,
+ * kcontrols, DAPM widgets/routes, and the open/close hooks that gate the
+ * clkout and pin states around each PCM stream.
+ */
+static const struct snd_soc_component_driver soc_component_dev_es8311 = {
+ .probe = es8311_probe,
+ .remove = es8311_remove,
+ .suspend = es8311_suspend,
+ .resume = es8311_resume,
+ .set_bias_level = es8311_set_bias_level,
+ .suspend_bias_off = 1,
+ .idle_bias_on = 1,
+
+ .controls = es8311_snd_controls,
+ .num_controls = ARRAY_SIZE(es8311_snd_controls),
+ .dapm_widgets = es8311_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(es8311_dapm_widgets),
+ .dapm_routes = es8311_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(es8311_dapm_routes),
+ .open = component_open,
+ .close = component_close,
+};
+
+/*
+ * I2C regmap description: 8-bit register addresses, 8-bit values, with an
+ * rbtree cache (volatile registers are declared via es8311_volatile_register).
+ */
+static struct regmap_config es8311_regmap = {
+ .name= "ES8311",
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = ES8311_MAX_REGISTER,
+
+ .volatile_reg = es8311_volatile_register,
+ .writeable_reg = es8311_writable_register,
+ .readable_reg = es8311_readable_register,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+#ifdef CONFIG_OF
+/* Device-tree match table: binds to "everest,es8311" nodes. */
+static const struct of_device_id es8311_if_dt_ids[] = {
+ {.compatible = "everest,es8311", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, es8311_if_dt_ids);
+#endif
+
+/* I2C .shutdown hook: intentionally a no-op, kept only for the trace log. */
+static void es8311_i2c_shutdown(struct i2c_client *i2c)
+{
+ printk("Enter into %s()\n", __func__);
+
+}
+
+/* NOTE(review): cur_reg is never read or written anywhere in this file. */
+static u32 cur_reg;
+
+/*
+ * es8311_show - sysfs "es8311" read: dumps registers 0x00..0xFF to the
+ * kernel log (not to _buf), so a userspace read sees an empty file and the
+ * values appear in dmesg.  Always returns 0; `ret` is declared but unused.
+ */
+static ssize_t es8311_show(struct device *dev,
+ struct device_attribute *attr, char *_buf)
+{
+ int ret;
+ int i ;
+ int reg_max = 256;
+ for( i = 0;i < reg_max;i++){
+ //sprintf(_buf, "%s(): get reg0x%04x=0x%04x\n", __func__, i,
+ //snd_soc_component_read(es8311_component, i));
+ printk("%s(): get reg0x%04x=0x%04x\n", __func__, i,
+ snd_soc_component_read(es8311_component, i));
+ }
+ return 0;
+}
+
+/*
+ * es8311_store - sysfs "es8311" write.  The hex input word is decoded as
+ * 0xFFRRVV: byte2 = count/flag, byte1 = start register, byte0 = value.
+ * flag != 0  -> write `value` to `flag` consecutive registers from `reg`;
+ * flag == 0  -> read `num` (byte0) consecutive registers from `reg`.
+ * NOTE(review): the read loop is do/while, so num == 0 still reads one
+ * register — confirm whether that is intended.
+ */
+static ssize_t es8311_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int val = 0, flag = 0;
+ u8 i = 0, reg, num, value_w, value_r;
+
+ val = simple_strtol(buf, NULL, 16);
+ flag = (val >> 16) & 0xFF;
+
+ if (flag) {
+ reg = (val >> 8) & 0xFF;
+ value_w = val & 0xFF;
+ pr_info("\nWrite: start REG:0x%02x,val:0x%02x,count:0x%02x\n",
+ reg, value_w, flag);
+ while (flag--) {
+ snd_soc_component_write(es8311_component, reg, value_w);
+ pr_info("Write 0x%02x to REG:0x%02x\n", value_w, reg);
+ reg++;
+ }
+ } else {
+ reg = (val >> 8) & 0xFF;
+ num = val & 0xff;
+ pr_info("\nRead: start REG:0x%02x,count:0x%02x\n", reg, num);
+ do {
+ value_r = 0;
+ value_r = snd_soc_component_read(es8311_component, reg);
+ pr_info("REG[0x%02x]: 0x%02x;\n", reg, value_r);
+ reg++;
+ i++;
+ } while (i < num);
+ }
+
+ return count;
+}
+
+/* /sys/.../es8311_debug/es8311 attribute (rw for owner/group, r for other). */
+static DEVICE_ATTR(es8311, 0664, es8311_show, es8311_store);
+
+/*
+ * codec_info_show - sysfs "codec_info" read: dumps registers 0x00..0x44
+ * via regmap to the kernel log.  Like es8311_show(), nothing is written to
+ * `buf` and the function always returns 0 (empty sysfs read).
+ * cmd_str/ret are declared but unused here.
+ */
+static ssize_t codec_info_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+
+ ssize_t count = 0;
+
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct es8311_private *info;
+ char cmd_str[16] = {0};
+
+ int ret,i;
+ int regs, rege;
+ unsigned int val;
+ struct snd_soc_component *component;
+
+
+ info = (struct es8311_private *) dev_get_drvdata(dev);
+
+ /* The lookup only verifies the component is registered for this dev. */
+ component = snd_soc_lookup_component(dev,NULL);
+ if (!component){
+ pr_err(" %s:(%d), snd_soc_lookup_component fail !\n", __func__, __LINE__ );
+
+ return count;
+ }
+
+ dev_info(dev," %s:(%d),snd_soc_lookup_component ,name=%s\n", __func__, __LINE__, component->name);
+ //info->component = component;
+
+
+ regs = 0x0;
+ rege = 0x45;
+
+ for (i = regs; i < rege; i++) {
+
+ //val = snd_soc_component_read(info->component, i);
+ regmap_read(info->regmap, i, &val);
+
+ dev_info(dev,"cocec reg read ,Reg(0x%x)=0x%x \n",i, val);
+ }
+
+ return count;
+
+
+}
+
+
+/*
+ * codec_info_store - sysfs "codec_info" write: debug command interface.
+ *   "reg_read <reg>"        - read one codec register, result in dmesg.
+ *   "reg_write <reg> <val>" - write one codec register.
+ * Register numbers above 0xff are rejected.  Always consumes the whole
+ * input (@n) on success.
+ *
+ * Fixes vs. original: the regmap_read()/regmap_write() return values were
+ * ignored, and the subsequent `if (ret)` tested the stale strcmp() result
+ * (always 0 on that path), so I2C failures were silently reported as
+ * success; sscanf() was also unchecked.
+ */
+static ssize_t codec_info_store(struct kobject *kobj, struct kobj_attribute *attr,
+		const char *buf, size_t n)
+
+{
+	ssize_t ret = 0;
+	unsigned int val = 0;
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct es8311_private *info;
+	char cmd_str[17] = {0};
+
+	u32 param1 = 0, param2 = 0, param3 = 0;
+
+	struct snd_soc_component *component;
+
+	info = (struct es8311_private *) dev_get_drvdata(dev);
+
+	component = snd_soc_lookup_component(dev, NULL);
+	if (!component){
+		pr_err(" %s:(%d), snd_soc_lookup_component fail !\n", __func__, __LINE__ );
+
+		return n;
+	}
+
+	dev_info(dev," %s:(%d),snd_soc_lookup_component ,name=%s\n", __func__, __LINE__, component->name);
+
+	/* Require at least the command word; extra params default to 0. */
+	if (sscanf(buf, "%16s %x %x %x", cmd_str, &param1, &param2, &param3) < 1) {
+		dev_err(dev, "%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+	dev_info(dev, "cmd_str:%s,param1:%x,param2:%x,param3:%x\n",cmd_str,param1,param2,param3);
+
+	dev_info(dev, "%s:cmd_str=%s \n",__func__,cmd_str);
+
+	if (strcmp(cmd_str, "reg_read") == 0)
+	{
+		dev_info(dev, "reg_read start\n");
+		if(param1 > 0xff){
+			dev_err(dev, "reg_read param invalidate fail,param1=%d \n",param1);
+			return -1;
+		}
+
+		/* Propagate the bus result instead of ignoring it. */
+		ret = regmap_read(info->regmap, param1, &val);
+		if (ret) {
+			pr_err(" %s:(%d), cocec reg read fail - ret=%d\n", __func__, __LINE__, (int)ret);
+			return ret;
+		}
+
+		dev_info(dev, "reg_read reg(%d)=0x%x \n",param1,val);
+	}
+
+	if (strcmp(cmd_str, "reg_write") == 0)
+	{
+		dev_info(dev, "reg_write start\n");
+
+		if(param1 > 0xff){
+			dev_err(dev, "reg_write param invalidate fail,param1=%d \n",param1);
+			return -1;
+		}
+		val = param2;
+		/* Capture the regmap result so the error check below is real. */
+		ret = regmap_write(info->regmap, param1, val);
+		if (ret){
+			pr_err(" %s:(%d), cocec reg write fail - ret=%d\n", __func__, __LINE__ ,(int)ret);
+			return ret;
+		}
+
+		dev_info(dev, "reg_write reg(%d)=0x%x \n",param1,val);
+	}
+
+	return n;
+
+}
+/*
+ * CODEC_ATTR(name) - declare a 0644 kobj_attribute wired to
+ * name##_show / name##_store (analogous to __ATTR_RW).
+ */
+#define CODEC_ATTR(_name) \
+static struct kobj_attribute _name##_attr = { \
+	.attr	= {				\
+		.name = __stringify(_name),	\
+		.mode = 0644,			\
+	},					\
+	.show	= _name##_show,			\
+	.store	= _name##_store,		\
+}
+
+
+
+CODEC_ATTR(codec_info);
+
+/* Attributes exposed under the "es8311_debug" sysfs group. */
+static struct attribute *es8311_debug_attrs[] = {
+	&dev_attr_es8311.attr,
+	&codec_info_attr.attr,
+	NULL,
+};
+
+static struct attribute_group es8311_debug_attr_group = {
+	.name = "es8311_debug",
+	.attrs = es8311_debug_attrs,
+};
+
+/*
+ * es8311_i2c_probe - I2C probe: allocate the private state (devm), create
+ * the regmap, verify the chip ID registers (0xFD==0x83, 0xFE==0x11),
+ * initialize the clkout pinctrl in non-TDM builds, register the ASoC
+ * component and create the debug sysfs group.
+ *
+ * Fixes vs. original: es8311 is devm_kzalloc'd, so the kfree() on the
+ * component-registration error path caused a double free when devres
+ * released the device; the second chip-ID regmap_read() result was also
+ * unchecked before comparing `val`.
+ */
+static int es8311_i2c_probe(struct i2c_client *i2c_client,
+		const struct i2c_device_id *id)
+{
+	struct es8311_private *es8311;
+	int ret = -1;
+	unsigned int val;
+
+	pr_info("Enter into %s\n", __func__);
+	es8311 = devm_kzalloc(&i2c_client->dev,
+			sizeof(*es8311), GFP_KERNEL);
+	if (es8311 == NULL)
+		return -ENOMEM;
+
+	es8311->dmic_enable = false; // dmic interface disabled
+	/* the edge of lrck is always at the falling edge of mclk */
+	es8311->mclkinv = false;
+	/* the edge of lrck is always at the falling edge of sclk */
+	es8311->sclkinv = false;
+
+	i2c_set_clientdata(i2c_client, es8311);
+	es8311->regmap = devm_regmap_init_i2c(i2c_client, &es8311_regmap);
+	if (IS_ERR(es8311->regmap)) {
+		ret = PTR_ERR(es8311->regmap);
+		dev_err(&i2c_client->dev, "regmap_init() failed: %d\n", ret);
+		return ret;
+	}
+	es8311->dev = &i2c_client->dev;
+	/* verify that we have an es8311 */
+	ret = regmap_read(es8311->regmap, ES8311_CHD1_REGFD, &val);
+	if (ret < 0) {
+		dev_err(&i2c_client->dev, "failed to read i2c at addr %X\n",
+				i2c_client->addr);
+		return ret;
+	}
+	/* The first ID should be 0x83 */
+	if (val != 0x83) {
+		dev_err(&i2c_client->dev, "device at addr %X is not an es8311\n",
+				i2c_client->addr);
+		return -ENODEV;
+	}
+	ret = regmap_read(es8311->regmap, ES8311_CHD2_REGFE, &val);
+	if (ret < 0) {
+		/* Check the bus result before trusting `val`. */
+		dev_err(&i2c_client->dev, "failed to read i2c at addr %X\n",
+				i2c_client->addr);
+		return ret;
+	}
+	/* The NEXT ID should be 0x11 */
+	if (val != 0x11) {
+		dev_err(&i2c_client->dev, "device at addr %X is not an es8311\n",
+				i2c_client->addr);
+		return -ENODEV;
+	}
+	es8311_data = es8311;
+
+	es8311->pctrl = NULL;
+	es8311->state0 = NULL;
+	es8311->clk = NULL;
+/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes start */
+#ifndef CONFIG_USE_TOP_TDM
+	clkout_init_pinctrl(&i2c_client->dev);
+#endif
+/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes end */
+	ret = snd_soc_register_component(&i2c_client->dev,
+			&soc_component_dev_es8311,
+			&es8311_dai,
+			1);
+	if (ret < 0) {
+		/* es8311 is devm-managed: no kfree() here (was a double free). */
+		return ret;
+	}
+
+	pr_info("Enter into %s-----4\n", __func__);
+	ret = sysfs_create_group(&i2c_client->dev.kobj,
+			&es8311_debug_attr_group);
+	if (ret)
+		pr_err("failed to create attr group\n");
+
+
+	printk("%s end\n", __func__);
+
+
+	return ret;
+}
+
+/* Legacy (non-DT) I2C id table. */
+static const struct i2c_device_id es8311_i2c_id[] = {
+	{"es8311", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, es8311_i2c_id);
+
+/* I2C driver glue: matches by DT compatible or by the id table above. */
+static struct i2c_driver es8311_i2c_driver = {
+	.driver = {
+		.name	= "es8311",
+		.owner	= THIS_MODULE,
+		.of_match_table = es8311_if_dt_ids,
+	},
+	.shutdown = es8311_i2c_shutdown,
+	.probe = es8311_i2c_probe,
+	.id_table = es8311_i2c_id,
+};
+
+/*
+ * Module init: register the I2C driver.  Runs at late_initcall time (see
+ * below) so the I2C adapter is available first.  Failure is logged and the
+ * error code propagated.
+ */
+static int __init es8311_init(void)
+{
+	int ret;
+	printk("Enter into %s()\n", __func__);
+
+	ret = i2c_add_driver(&es8311_i2c_driver);
+	if (ret != 0)
+		pr_info("Failed to register es8311 i2c driver\n");
+	return ret;
+}
+
+/*
+ * Module exit: unregister the I2C driver.
+ * Fix vs. original: `return i2c_del_driver(...)` returned a void expression
+ * from a void function, which is a constraint violation in ISO C (accepted
+ * only as a compiler extension).  Call the function, then return.
+ */
+static void __exit es8311_exit(void)
+{
+	printk("Enter into %s()\n", __func__);
+
+	i2c_del_driver(&es8311_i2c_driver);
+}
+
+late_initcall(es8311_init);
+module_exit(es8311_exit);
+
+MODULE_DESCRIPTION("ASoC es8311 driver");
+MODULE_AUTHOR("David Yang <yangxiaohua@everest-semi.com>");
+MODULE_LICENSE("GPL");
+
+
diff --git a/upstream/linux-5.10/sound/soc/sanechips/zx29_es83xx.c b/upstream/linux-5.10/sound/soc/sanechips/zx29_es83xx.c
new file mode 100755
index 0000000..3c755a6
--- /dev/null
+++ b/upstream/linux-5.10/sound/soc/sanechips/zx29_es83xx.c
@@ -0,0 +1,1367 @@
+/*
+ * zx29_es83xx.c -- zx29-es83xx ALSA SoC Audio board driver
+ *
+ * Copyright (C) 2022, ZTE Corporation.
+ *
+ * Based on smdk_wm8994.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifdef CONFIG_SND_SOC_ES8311
+
+#include "../codecs/es8311.h"
+#endif
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+
+
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+//#include <sound/tlv.h>
+//#include <sound/soc.h>
+//#include <sound/jack.h>
+//#include <sound/zx29_snd_platform.h>
+//#include <mach/iomap.h>
+//#include <mach/board.h>
+#include <linux/of_gpio.h>
+
+#include <linux/i2c.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+
+
+#include "i2s.h"
+
+/* Offset of the I2S top-level loopback control register. */
+#define ZX29_I2S_TOP_LOOP_REG 0x60
+//#define NAU_CLK_ID 0
+
+#if 1
+
+/* Reference clock fed to the codec, in Hz (26 MHz). */
+#define ZXIC_MCLK 26000000
+
+#define ZXIC_PLL_CLKIN_MCLK 0
+/* System Clock Source */
+enum {
+ CODEC_SCLK_MCLK,
+ CODEC_SCLK_PLL,
+};
+
+
+/* Thin MMIO accessors around ioread32/iowrite32. */
+#define zx_reg_sync_write(v, a) \
+ do { \
+ iowrite32(v, a); \
+ } while (0)
+
+#define zx_read_reg(addr) \
+ ioread32(addr)
+
+#define zx_write_reg(addr, val) \
+ zx_reg_sync_write(val, addr)
+
+
+
+/* Per-board state: codec reference clock, power/powerdown GPIOs, mapped
+ * syscon base, and active/sleep pinctrl states. */
+struct zx29_board_data {
+ const char *name;
+ struct device *dev;
+
+ int codec_refclk;
+ int gpio_pwen;
+ int gpio_pdn;
+ void __iomem *sys_base_va;
+
+ struct pinctrl *p;
+ struct pinctrl_state *s;
+ struct pinctrl_state *s_sleep;
+
+};
+
+
+struct zx29_board_data *s_board = 0;
+
+//#define AON_WIFI_BT_CLK_CFG2 ((volatile unsigned int *)(ZX_TOP_CRM_BASE + 0x94))
+ /* Default ZX29s */
+static struct zx29_board_data zx29_platform_data = {
+ .codec_refclk = ZXIC_MCLK,
+};
+ static struct platform_device *zx29_snd_device;
+
+ static DEFINE_RAW_SPINLOCK(codec_pa_lock);
+
+ /* Forward declarations for the path-status kcontrol callbacks. */
+ static int set_path_stauts_switch(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+ static int get_path_stauts_switch(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+
+
+ /*
+ * zx29startup - .startup for the voice link: looks up playback devices
+ * pcmC0D0p..pcmC0D3p by their fixed ALSA minors (16..19) and BUG()s if any
+ * two of them are open at the same time (only one playback device may be
+ * active at once on this board).  Returns -EINVAL if a device is missing.
+ */
+ static int zx29startup(struct snd_pcm_substream *substream)
+ {
+ // int ret = 0;
+ print_audio("Alsa Entered func %s\n", __func__);
+ //CPPS_FUNC(cpps_callbacks, zDrv_Audio_Printf)("Alsa: zx29_startup device=%d,stream=%d\n", substream->pcm->device, substream->stream);
+
+ struct snd_pcm *pcmC0D0p = snd_lookup_minor_data(16, SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
+ struct snd_pcm *pcmC0D1p = snd_lookup_minor_data(17, SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
+ struct snd_pcm *pcmC0D2p = snd_lookup_minor_data(18, SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
+ struct snd_pcm *pcmC0D3p = snd_lookup_minor_data(19, SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
+ if ((pcmC0D0p == NULL) || (pcmC0D1p == NULL) || (pcmC0D2p == NULL) || (pcmC0D3p == NULL))
+ return -EINVAL;
+ /* Any pairwise-open combination is a programming error on this board. */
+ if ((pcmC0D0p->streams[0].substream_opened && pcmC0D1p->streams[0].substream_opened) ||
+ (pcmC0D0p->streams[0].substream_opened && pcmC0D2p->streams[0].substream_opened) ||
+ (pcmC0D0p->streams[0].substream_opened && pcmC0D3p->streams[0].substream_opened) ||
+ (pcmC0D1p->streams[0].substream_opened && pcmC0D2p->streams[0].substream_opened) ||
+ (pcmC0D1p->streams[0].substream_opened && pcmC0D3p->streams[0].substream_opened) ||
+ (pcmC0D2p->streams[0].substream_opened && pcmC0D3p->streams[0].substream_opened))
+ BUG();
+
+
+ return 0;
+ }
+
+ /*
+ * zx29_shutdown - .shutdown for the default links.  Currently only bails
+ * out early while the cpu DAI is still active; the playback branch is an
+ * intentionally empty placeholder.
+ */
+ static void zx29_shutdown(struct snd_pcm_substream *substream)
+ {
+ //CPPS_FUNC(cpps_callbacks, zDrv_Audio_Printf)("Alsa: zx297520xx_shutdown device=%d, stream=%d\n", substream->pcm->device, substream->stream);
+ // print_audio("Alsa Entered func %s, stream=%d\n", __func__, substream->stream);
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ }
+
+ if (snd_soc_dai_active(cpu_dai))
+ return;
+
+
+
+ }
+
+ /*
+ * zx29_shutdown2 - .shutdown for the voice links.  The voice-path teardown
+ * (zDrvVp_Loop(VP_PATH_OFF)) is currently commented out; like
+ * zx29_shutdown() this is effectively a no-op once the cpu DAI is idle.
+ */
+ static void zx29_shutdown2(struct snd_pcm_substream *substream)
+ {
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ //CPPS_FUNC(cpps_callbacks, zDrv_Audio_Printf)("Alsa: zx29_shutdown2 device=%d, stream=%d\n", substream->pcm->device, substream->stream);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+#ifdef USE_ALSA_VOICE_FUNC
+ //CPPS_FUNC(cpps_callbacks, zDrvVp_Loop)(VP_PATH_OFF);
+#endif
+
+
+ }
+
+ if (snd_soc_dai_active(cpu_dai))
+ return;
+
+
+ }
+ /*
+ * zx29_init_paiftx - dai_link .init hook.  All DAPM pin setup is commented
+ * out; kept as a no-op placeholder that always succeeds.
+ */
+ static int zx29_init_paiftx(struct snd_soc_pcm_runtime *rtd)
+ {
+ //struct snd_soc_codec *codec = rtd->codec;
+ //struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+ //snd_soc_dapm_enable_pin(dapm, "HPOL");
+ //snd_soc_dapm_enable_pin(dapm, "HPOR");
+
+ /* Other pins NC */
+ // snd_soc_dapm_nc_pin(dapm, "HPOUT2P");
+
+ // print_audio("Alsa Entered func %s\n", __func__);
+
+ return 0;
+ }
+
+/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes start */
+ /*
+ * zx29_hw_params_tdm - .hw_params for the main link in TDM builds.
+ * Validates the sample rate, puts both DAIs in DSP-A (TDM) slave format,
+ * sets the codec sysclk to rate*256 from the MCLK pin and the cpu DAI work
+ * clock to 104 MHz.  Returns 0 or a negative errno.
+ */
+ static int zx29_hw_params_tdm(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+ {
+ print_audio("Alsa: Entered func %s\n", __func__);
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ int ret;
+ int rfs = 0, frq_out = 0;
+ /* Whitelist of supported rates; everything else is rejected. */
+ switch (params_rate(params)) {
+ case 8000:
+ case 16000:
+ case 11025:
+ case 22050:
+ case 24000:
+ case 32000:
+ case 44100:
+ case 48000:
+ rfs = 32;
+ break;
+ default:
+ {
+ ret =-EINVAL;
+ print_audio("Alsa: rate=%d not support,ret=%d!\n", params_rate(params),ret);
+ return ret;
+ }
+ }
+
+ //frq_out = params_rate(params) * rfs * 2;
+
+ /* Set the Codec DAI configuration */
+ ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_DSP_A
+ | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBS_CFS);
+ if (ret < 0){
+
+ print_audio("Alsa: codec dai snd_soc_dai_set_fmt fail,ret=%d!\n",ret);
+ return ret;
+ }
+ /* Set the AP DAI configuration */
+ ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_DSP_A
+ | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBS_CFS);
+ if (ret < 0){
+
+ print_audio("Alsa: ap dai snd_soc_dai_set_fmt fail,ret=%d!\n",ret);
+ return ret;
+ }
+ /* Codec sysclk = rate * 256, sourced from the external MCLK pin. */
+ ret = snd_soc_dai_set_sysclk(codec_dai, ES8311_MCLK_PIN,params_rate(params)*256, SND_SOC_CLOCK_IN);
+ if (ret < 0){
+ print_audio("Alsa: codec dai snd_soc_dai_set_sysclk fail,ret=%d!\n",ret);
+ return ret;
+ }
+
+ /* Set the AP DAI clk */
+ //ret = snd_soc_dai_set_sysclk(cpu_dai, ZX29_I2S_WCLK_SEL,ZX29_I2S_WCLK_FREQ_26M, SND_SOC_CLOCK_IN);
+ ret = snd_soc_dai_set_sysclk(cpu_dai, ZX29_I2S_WCLK_SEL,ZX29_I2S_WCLK_FREQ_104M, SND_SOC_CLOCK_IN);
+ //ret = snd_soc_dai_set_sysclk(cpu_dai, ZX29_I2S_WCLK_SEL,ZX29_I2S_WCLK_FREQ_122M88, SND_SOC_CLOCK_IN);
+
+ if (ret < 0){
+ print_audio("Alsa: cpu dai snd_soc_dai_set_sysclk fail,ret=%d!\n",ret);
+ return ret;
+ }
+ print_audio("Alsa: Entered func %s end\n", __func__);
+
+ return 0;
+ }
+
+ /*
+ * zx29_hw_params_lp_tdm - .hw_params for the low-power link in TDM builds.
+ * Same as zx29_hw_params_tdm() but configures only the cpu DAI: all codec
+ * DAI calls are intentionally commented out (the codec side is untouched
+ * on the LP path).  Returns 0 or a negative errno.
+ */
+ static int zx29_hw_params_lp_tdm(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+ {
+ print_audio("Alsa: Entered func %s\n", __func__);
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ int ret;
+ int rfs = 0, frq_out = 0;
+ /* Whitelist of supported rates; everything else is rejected. */
+ switch (params_rate(params)) {
+ case 8000:
+ case 16000:
+ case 11025:
+ case 22050:
+ case 24000:
+ case 32000:
+ case 44100:
+ case 48000:
+ rfs = 32;
+ break;
+ default:
+ {
+ ret =-EINVAL;
+ print_audio("Alsa: rate=%d not support,ret=%d!\n", params_rate(params),ret);
+ return ret;
+ }
+ }
+
+ //frq_out = params_rate(params) * rfs * 2;
+
+ /* Set the Codec DAI configuration */
+ /*
+
+ ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S
+ | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBS_CFS);
+ if (ret < 0){
+
+ print_audio("Alsa: codec dai snd_soc_dai_set_fmt fail,ret=%d!\n",ret);
+ return ret;
+ }
+ */
+
+ /* Set the AP DAI configuration */
+ ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_DSP_A
+ | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBS_CFS);
+ if (ret < 0){
+
+ print_audio("Alsa: ap dai snd_soc_dai_set_fmt fail,ret=%d!\n",ret);
+ return ret;
+ }
+ /* Set the Codec DAI clk */
+ /*ret =snd_soc_dai_set_pll(codec_dai, 0, RT5670_PLL1_S_BCLK1,
+ fs*datawidth*2, 256*fs);
+ if (ret < 0){
+
+ print_audio("Alsa: codec dai clk snd_soc_dai_set_pll fail,ret=%d!\n",ret);
+ return ret;
+ }
+ */
+ /*
+ ret = snd_soc_dai_set_sysclk(codec_dai, ES8312_CLKID_MCLK,ZXIC_MCLK, SND_SOC_CLOCK_IN);
+ if (ret < 0){
+ print_audio("Alsa: codec dai snd_soc_dai_set_sysclk fail,ret=%d!\n",ret);
+ return ret;
+ }
+ */
+ /* Set the AP DAI clk */
+ //ret = snd_soc_dai_set_sysclk(cpu_dai, ZX29_I2S_WCLK_SEL,ZX29_I2S_WCLK_FREQ_26M, SND_SOC_CLOCK_IN);
+ ret = snd_soc_dai_set_sysclk(cpu_dai, ZX29_I2S_WCLK_SEL,ZX29_I2S_WCLK_FREQ_104M, SND_SOC_CLOCK_IN);
+
+ //ret = snd_soc_dai_set_sysclk(cpu_dai, ZX29_I2S_WCLK_SEL,ZX29_I2S_WCLK_FREQ_122M88, SND_SOC_CLOCK_IN);
+ if (ret < 0){
+ print_audio("Alsa: cpu dai snd_soc_dai_set_sysclk fail,ret=%d!\n",ret);
+ return ret;
+ }
+ print_audio("Alsa: Entered func %s end\n", __func__);
+
+ return 0;
+ }
+
+
+ /*
+ * zx29_hw_params_voice_tdm - .hw_params for the voice link in TDM builds.
+ * Configures only the codec DAI (DSP-A slave format, sysclk = rate*256
+ * from the MCLK pin); the cpu DAI is left as-is for the voice path.
+ * Returns 0 or a negative errno.
+ */
+ static int zx29_hw_params_voice_tdm(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+ {
+ print_audio("Alsa: Entered func %s\n", __func__);
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ int ret;
+ int rfs = 0, frq_out = 0;
+ /* Whitelist of supported rates; everything else is rejected. */
+ switch (params_rate(params)) {
+ case 8000:
+ case 16000:
+ case 11025:
+ case 22050:
+ case 24000:
+ case 32000:
+ case 44100:
+ case 48000:
+ rfs = 32;
+ break;
+ default:
+ {
+ ret =-EINVAL;
+ print_audio("Alsa: rate=%d not support,ret=%d!\n", params_rate(params),ret);
+ return ret;
+ }
+ }
+
+ frq_out = params_rate(params) * rfs * 2;
+
+ /* Set the Codec DAI configuration */
+ ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_DSP_A
+ | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBS_CFS);
+ if (ret < 0){
+
+ print_audio("Alsa: codec dai snd_soc_dai_set_fmt fail,ret=%d!\n",ret);
+ return ret;
+ }
+
+
+ //ret = snd_soc_dai_set_sysclk(codec_dai, NAU8810_SCLK_PLL,ZXIC_MCLK, SND_SOC_CLOCK_IN);
+
+ /* Codec sysclk = rate * 256, sourced from the external MCLK pin. */
+ ret = snd_soc_dai_set_sysclk(codec_dai, ES8311_MCLK_PIN,params_rate(params)*256, SND_SOC_CLOCK_IN);
+ if (ret < 0){
+ print_audio("Alsa: codec dai snd_soc_dai_set_sysclk fail,ret=%d!\n",ret);
+ return ret;
+ }
+
+
+
+ print_audio("Alsa: Entered func %s end\n", __func__);
+
+ return 0;
+ }
+/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes end */
+
+ /*
+ * zx29_hw_params - .hw_params for the main link in I2S (non-TDM) builds.
+ * Validates the sample rate, puts both DAIs in I2S slave format, feeds the
+ * codec the fixed 26 MHz MCLK and sets the cpu DAI work clock to 26 MHz.
+ * Returns 0 or a negative errno.
+ */
+ static int zx29_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+ {
+ print_audio("Alsa: Entered func %s\n", __func__);
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+
+ int ret;
+ int rfs = 0, frq_out = 0;
+ /* Whitelist of supported rates; everything else is rejected. */
+ switch (params_rate(params)) {
+ case 8000:
+ case 16000:
+ case 11025:
+ case 22050:
+ case 24000:
+ case 32000:
+ case 44100:
+ case 48000:
+ rfs = 32;
+ break;
+ default:
+ {
+ ret = -EINVAL;
+ print_audio("Alsa: rate=%d not support,ret=%d!\n", params_rate(params),ret);
+ return ret;
+ }
+ }
+
+ frq_out = params_rate(params) * rfs * 2;
+
+ /* Set the Codec DAI configuration */
+ ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S
+ | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBS_CFS);
+ if (ret < 0){
+
+ print_audio("Alsa: codec dai snd_soc_dai_set_fmt fail,ret=%d!\n",ret);
+ return ret;
+ }
+
+
+ /* Set the AP DAI configuration */
+ ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S
+ | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBS_CFS);
+ if (ret < 0){
+
+ print_audio("Alsa: ap dai snd_soc_dai_set_fmt fail,ret=%d!\n",ret);
+ return ret;
+ }
+
+ /* Codec runs directly off the 26 MHz reference clock. */
+ ret = snd_soc_dai_set_sysclk(codec_dai, CODEC_SCLK_MCLK, ZXIC_MCLK, SND_SOC_CLOCK_IN);
+ if (ret < 0){
+ print_audio("Alsa: codec dai snd_soc_dai_set_sysclk fail,ret=%d!\n",ret);
+ return ret;
+ }
+
+
+#if 0
+ /* Set the Codec DAI clk */
+ ret =snd_soc_dai_set_pll(codec_dai, 0, NAU8810_SCLK_PLL,
+ ZXIC_MCLK, params_rate(params)*256);
+ if (ret < 0){
+
+ print_audio("Alsa: codec dai clk snd_soc_dai_set_pll fail,ret=%d!\n",ret);
+ return ret;
+ }
+#endif
+
+
+
+ /* Set the AP DAI clk */
+ ret = snd_soc_dai_set_sysclk(cpu_dai, ZX29_I2S_WCLK_SEL,ZX29_I2S_WCLK_FREQ_26M, SND_SOC_CLOCK_IN);
+ //ret = snd_soc_dai_set_sysclk(cpu_dai, ZX29_I2S_WCLK_SEL,ZX29_I2S_WCLK_FREQ_26M, SND_SOC_CLOCK_IN);
+
+ if (ret < 0){
+ print_audio("Alsa: cpu dai snd_soc_dai_set_sysclk fail,ret=%d!\n",ret);
+ return ret;
+ }
+ print_audio("Alsa: Entered func %s end\n", __func__);
+
+ return 0;
+ }
+
+/*
+ * zx29_hw_params_lp - .hw_params for the low-power link in I2S builds.
+ * Configures only the cpu DAI (I2S slave format, 26 MHz work clock); all
+ * codec-side calls are intentionally commented out on the LP path.
+ * Returns 0 or a negative errno.
+ */
+static int zx29_hw_params_lp(struct snd_pcm_substream *substream,
+	struct snd_pcm_hw_params *params)
+{
+	print_audio("Alsa: Entered func %s\n", __func__);
+	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+	struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+
+	int ret;
+	int rfs = 0, frq_out = 0;
+	/* Whitelist of supported rates; everything else is rejected. */
+	switch (params_rate(params)) {
+	case 8000:
+	case 16000:
+	case 11025:
+	case 22050:
+	case 24000:
+	case 32000:
+	case 44100:
+	case 48000:
+		rfs = 32;
+		break;
+	default:
+	{
+		ret = -EINVAL;
+		print_audio("Alsa: rate=%d not support,ret=%d!\n", params_rate(params),ret);
+		return ret;
+	}
+	}
+
+	frq_out = params_rate(params) * rfs * 2;
+
+	/* Set the Codec DAI configuration */
+	/*
+
+	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S
+		| SND_SOC_DAIFMT_NB_NF
+		| SND_SOC_DAIFMT_CBS_CFS);
+	if (ret < 0){

+		print_audio("Alsa: codec dai snd_soc_dai_set_fmt fail,ret=%d!\n",ret);
+		return ret;
+	}
+	*/
+
+
+	/* Set the AP DAI configuration */
+	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S
+		| SND_SOC_DAIFMT_NB_NF
+		| SND_SOC_DAIFMT_CBS_CFS);
+	if (ret < 0){
+
+		print_audio("Alsa: ap dai snd_soc_dai_set_fmt fail,ret=%d!\n",ret);
+		return ret;
+	}
+
+	/* Set the Codec DAI clk */
+	/*ret =snd_soc_dai_set_pll(codec_dai, 0, RT5670_PLL1_S_BCLK1,
+		fs*datawidth*2, 256*fs);
+	if (ret < 0){
+
+		print_audio("Alsa: codec dai clk snd_soc_dai_set_pll fail,ret=%d!\n",ret);
+		return ret;
+	}
+	*/
+	/*
+	ret = snd_soc_dai_set_sysclk(codec_dai, ES8312_CLKID_MCLK,ZXIC_MCLK, SND_SOC_CLOCK_IN);
+	if (ret < 0){
+		print_audio("Alsa: codec dai snd_soc_dai_set_sysclk fail,ret=%d!\n",ret);
+		return ret;
+	}
+	*/
+	/* Set the AP DAI clk */
+	ret = snd_soc_dai_set_sysclk(cpu_dai, ZX29_I2S_WCLK_SEL,ZX29_I2S_WCLK_FREQ_26M, SND_SOC_CLOCK_IN);
+
+	if (ret < 0){
+		print_audio("Alsa: cpu dai snd_soc_dai_set_sysclk fail,ret=%d!\n",ret);
+		return ret;
+	}
+	print_audio("Alsa: Entered func %s end\n", __func__);
+
+	return 0;
+}
+
+
+
+
+
+
+ /*
+ * zx29_hw_params_voice - .hw_params for the voice link in I2S builds.
+ * Configures only the codec DAI (I2S slave format, 26 MHz MCLK); the cpu
+ * DAI is left as-is for the voice path.  Returns 0 or a negative errno.
+ */
+ static int zx29_hw_params_voice(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+ {
+ print_audio("Alsa: Entered func %s\n", __func__);
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+
+ int ret;
+ int rfs = 0, frq_out = 0;
+ /* Whitelist of supported rates; everything else is rejected. */
+ switch (params_rate(params)) {
+ case 8000:
+ case 16000:
+ case 11025:
+ case 22050:
+ case 24000:
+ case 32000:
+ case 44100:
+ case 48000:
+ rfs = 32;
+ break;
+ default:
+ {
+ ret = -EINVAL;
+ print_audio("Alsa: rate=%d not support,ret=%d!\n", params_rate(params),ret);
+ return ret;
+ }
+ }
+
+ frq_out = params_rate(params) * rfs * 2;
+
+ /* Set the Codec DAI configuration */
+ ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S
+ | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBS_CFS);
+ if (ret < 0){
+
+ print_audio("Alsa: codec dai snd_soc_dai_set_fmt fail,ret=%d!\n",ret);
+ return ret;
+ }
+
+ /* Codec runs directly off the 26 MHz reference clock. */
+ ret = snd_soc_dai_set_sysclk(codec_dai, CODEC_SCLK_MCLK, ZXIC_MCLK, SND_SOC_CLOCK_IN);
+ if (ret < 0){
+ print_audio("Alsa: codec dai snd_soc_dai_set_sysclk fail,ret=%d!\n",ret);
+ return ret;
+ }
+
+
+#if 0
+
+ /* Set the Codec DAI clk */
+ ret =snd_soc_dai_set_pll(codec_dai, 0, NAU8810_SCLK_PLL,
+ ZXIC_MCLK, params_rate(params)*256);
+ if (ret < 0){
+
+ print_audio("Alsa: codec dai clk snd_soc_dai_set_pll fail,ret=%d!\n",ret);
+ return ret;
+ }
+#endif
+
+ print_audio("Alsa: Entered func %s end\n", __func__);
+
+ return 0;
+ }
+
+
+ /*
+ * zx29_prepare2 - .prepare for the voice link.  The voice-path enable call
+ * (zDrvVp_Loop(VP_PATH_SPEAKER)) is currently commented out, so this is a
+ * placeholder that always succeeds.
+ *
+ * Fix vs. original: `ret` was declared but never assigned (the assigning
+ * call is commented out), yet `if (ret < 0)` read it — undefined behavior
+ * that could randomly fail playback prepare.  Initialize it to 0; the
+ * unused `path` local is dropped.
+ */
+ int zx29_prepare2(struct snd_pcm_substream *substream)
+ {
+ int ret = 0;
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ //ret = CPPS_FUNC(cpps_callbacks, zDrvVp_Loop)(VP_PATH_SPEAKER);
+ if (ret < 0)
+ return -1;
+ }
+
+ return 0;
+ }
+ /*
+  * zx29_late_probe - card-level late init; optionally registers a
+  * headset jack when CONFIG_SND_SOC_JACK_DECTEC is enabled.
+  * NOTE(review): inside the ifdef, 'codec' is used but its declaration
+  * above is commented out -- this arm would not compile if the config
+  * option were enabled; confirm before turning it on.
+  */
+ static int zx29_late_probe(struct snd_soc_card *card)
+ {
+ //struct snd_soc_codec *codec = card->rtd[0].codec;
+ //struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai;
+ int ret;
+ // print_audio("Alsa zx29_late_probe entry!\n");
+
+#ifdef CONFIG_SND_SOC_JACK_DECTEC
+
+ /* Create a headset jack with three button events. */
+ ret = snd_soc_jack_new(codec, "Headset",
+ SND_JACK_HEADSET |SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2,
+ &codec_headset);
+ if (ret)
+ return ret;
+
+ ret = snd_soc_jack_add_pins(&codec_headset,
+ ARRAY_SIZE(codec_headset_pins),
+ codec_headset_pins);
+ if (ret)
+ return ret;
+ #ifdef CONFIG_SND_SOC_codec
+ //rt5670_hs_detect(codec, &codec_headset);
+ #endif
+#endif
+
+ return 0;
+ }
+
+/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes start */
+ /* PCM ops for the "media" link; hw_params picks I2S vs. TDM at build time. */
+ static struct snd_soc_ops zx29_ops = {
+ //.startup = zx29_startup,
+ .shutdown = zx29_shutdown,
+#ifdef CONFIG_USE_TOP_TDM
+ .hw_params = zx29_hw_params_tdm,
+#else
+ .hw_params = zx29_hw_params,
+#endif
+ };
+ /* PCM ops for the low-power dummy-codec link. */
+ static struct snd_soc_ops zx29_ops_lp = {
+ //.startup = zx29_startup,
+ .shutdown = zx29_shutdown,
+#ifdef CONFIG_USE_TOP_TDM
+ .hw_params = zx29_hw_params_lp_tdm,
+#else
+ .hw_params = zx29_hw_params_lp,
+#endif
+ };
+ /* Minimal ops: shutdown only (hw_params intentionally disabled). */
+ static struct snd_soc_ops zx29_ops1 = {
+ //.startup = zx29_startup,
+ .shutdown = zx29_shutdown,
+ //.hw_params = zx29_hw_params1,
+ };
+
+ /* Ops variant with the voice-loop prepare hook. */
+ static struct snd_soc_ops zx29_ops2 = {
+ //.startup = zx29_startup,
+ .shutdown = zx29_shutdown2,
+ //.hw_params = zx29_hw_params1,
+ .prepare = zx29_prepare2,
+ };
+ /* Ops shared by all voice links.
+  * NOTE(review): .startup is 'zx29startup' (no underscore) while the
+  * commented-out entries above use 'zx29_startup' -- confirm the symbol
+  * name is intentional and defined elsewhere. */
+ static struct snd_soc_ops voice_ops = {
+ .startup = zx29startup,
+ .shutdown = zx29_shutdown2,
+#ifdef CONFIG_USE_TOP_TDM
+ .hw_params = zx29_hw_params_voice_tdm,
+#else
+ .hw_params = zx29_hw_params_voice,
+#endif
+ //.prepare = zx29_prepare2,
+ };
+/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes end */
+
+ /* DPCM front-end indices (audio / deep-buffer / compressed).
+  * NOTE(review): not referenced elsewhere in this chunk. */
+ enum {
+ MERR_DPCM_AUDIO = 0,
+ MERR_DPCM_DEEP_BUFFER,
+ MERR_DPCM_COMPR,
+ };
+
+
+ //static struct zx298501_nau8810_pdata *zx29_platform_data;
+
+ /*
+  * zx29_setup_pins - reserve and mux the codec reference-clock pin.
+  * Currently a stub: the gpio_request/zx29_gpio_config calls are
+  * commented out. Fix: 'ret' was read uninitialised (undefined
+  * behaviour) because the call that assigned it is disabled; it is now
+  * initialised to 0 so the function safely reports success until the
+  * calls are restored.
+  */
+ static int zx29_setup_pins(struct zx29_board_data *codec_pins, char *fun)
+ {
+ int ret = 0;
+
+ //ret = gpio_request(codec_pins->codec_refclk, "codec_refclk");
+ if (ret < 0) {
+ printk(KERN_ERR "zx297520xx SoC Audio: %s pin already in use\n", fun);
+ return ret;
+ }
+ //zx29_gpio_config(codec_pins->codec_refclk, GPIO17_CLK_OUT2);
+
+ return 0;
+ }
+#endif
+
+
+ /* Release the codec refclk GPIO and unregister the sound device.
+  * NOTE(review): not wired into zx29_platform_driver (.remove is never
+  * set below) and it references zx29_platform_data/zx29_snd_device
+  * declared outside this chunk -- confirm it is still reachable. */
+ static int zx29_remove(struct platform_device *pdev)
+ {
+ gpio_free(zx29_platform_data.codec_refclk);
+ platform_device_unregister(zx29_snd_device);
+ return 0;
+ }
+
+
+
+
+
+
+
+/* Downlink (DL) DAI-link indices; order matches zx29_dai_link[] roles. */
+enum {
+ AUDIO_DL_MEDIA = 0,
+ AUDIO_DL_VOICE,
+ AUDIO_DL_2G_AND_3G_VOICE,
+ AUDIO_DL_VP_LOOP,
+ AUDIO_DL_3G_VOICE,
+
+ AUDIO_DL_MAX,
+};
+/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes start */
+/* Dummy component used where a link needs no real platform. */
+SND_SOC_DAILINK_DEF(dummy, \
+ DAILINK_COMP_ARRAY(COMP_DUMMY()));
+
+//SND_SOC_DAILINK_DEF(cpu_i2s0, \
+// DAILINK_COMP_ARRAY(COMP_CPU("media-cpu-dai")));
+/* CPU DAI: ARM I2S controller at 0x1405000. */
+SND_SOC_DAILINK_DEF(cpu_i2s0, \
+ DAILINK_COMP_ARRAY(COMP_CPU("1405000.i2s")));
+
+/* CPU DAI: TDM controller at 0x1412000 (CONFIG_USE_TOP_TDM builds). */
+SND_SOC_DAILINK_DEF(cpu_tdm, \
+ DAILINK_COMP_ARRAY(COMP_CPU("1412000.tdm")));
+
+/* CPU DAI for voice-path links. */
+SND_SOC_DAILINK_DEF(voice_cpu, \
+ DAILINK_COMP_ARRAY(COMP_CPU("soc:voice_audio")));
+
+/* NOTE(review): voice_2g_3g / voice_3g are defined but no link below
+ * uses them (all voice links use voice_cpu) -- confirm intended. */
+SND_SOC_DAILINK_DEF(voice_2g_3g, \
+ DAILINK_COMP_ARRAY(COMP_CPU("voice_2g_3g-dai")));
+
+SND_SOC_DAILINK_DEF(voice_3g, \
+ DAILINK_COMP_ARRAY(COMP_CPU("voice_3g-dai")));
+
+
+
+SND_SOC_DAILINK_DEF(dummy_cpu, \
+ DAILINK_COMP_ARRAY(COMP_CPU("soc:zx29_snd_dummy")));
+//SND_SOC_DAILINK_DEF(dummy_platform, \
+// DAILINK_COMP_ARRAY(COMP_PLATFORM("soc:zx29_snd_dummy")));
+
+SND_SOC_DAILINK_DEF(dummy_codec, \
+ DAILINK_COMP_ARRAY(COMP_CODEC("soc:zx29_snd_dummy", "zx29_snd_dummy_dai")));
+
+/* Codec component: device name and DAI name depend on which ES83xx
+ * codec driver is built in. */
+#if defined(CONFIG_SND_SOC_ZX29_ES8311)
+SND_SOC_DAILINK_DEF(codec, \
+ DAILINK_COMP_ARRAY(COMP_CODEC("es8311.1-0018", "ES8311 HiFi")));
+
+#elif defined(CONFIG_SND_SOC_ZX29_ES8374)
+
+SND_SOC_DAILINK_DEF(codec, \
+ DAILINK_COMP_ARRAY(COMP_CODEC("es8374.1-001a", "es8374-hifi")));
+#else
+SND_SOC_DAILINK_DEF(codec, \
+ DAILINK_COMP_ARRAY(COMP_CODEC("es8311.1-0018", "es8311-hifi")));
+
+#endif
+
+//SND_SOC_DAILINK_DEF(media_platform, \
+// DAILINK_COMP_ARRAY(COMP_PLATFORM("zx29-pcm-audio")));
+/* Platform (DMA) components matching the CPU DAIs above. */
+SND_SOC_DAILINK_DEF(media_platform, \
+ DAILINK_COMP_ARRAY(COMP_PLATFORM("1405000.i2s")));
+
+SND_SOC_DAILINK_DEF(media_platform_tdm, \
+ DAILINK_COMP_ARRAY(COMP_PLATFORM("1412000.tdm")));
+
+//SND_SOC_DAILINK_DEF(voice_cpu, \
+// DAILINK_COMP_ARRAY(COMP_CPU("E1D02000.i2s")));
+
+SND_SOC_DAILINK_DEF(voice_platform, \
+ DAILINK_COMP_ARRAY(COMP_PLATFORM("soc:voice_audio")));
+
+
+//static struct snd_soc_dai_link zx29_dai_link[] = {
+/* Card DAI links. Order must match the AUDIO_DL_* enum above (media,
+ * voice, 2g/3g, loop, 3g). All links share zx29_init_paiftx for init. */
+struct snd_soc_dai_link zx29_dai_link[] = {
+ {
+ /* Low-power dummy-codec path (no real codec attached). */
+ .name = "zx29_snd_dummy",//codec name
+ .stream_name = "zx29_snd_dumy",
+ //.nonatomic = true,
+ //.dynamic = 1,
+ //.dpcm_playback = 1,
+ .ops = &zx29_ops_lp,
+ .init = zx29_init_paiftx,
+#ifdef CONFIG_USE_TOP_TDM
+ SND_SOC_DAILINK_REG(cpu_tdm, dummy_codec, media_platform_tdm),
+#else
+ SND_SOC_DAILINK_REG(cpu_i2s0, dummy_codec, media_platform),
+#endif
+},
+{
+ /* Main multimedia playback/capture through the ES83xx codec. */
+ .name = "media",//codec name
+ .stream_name = "MultiMedia",
+ //.nonatomic = true,
+ //.dynamic = 1,
+ //.dpcm_playback = 1,
+ .ops = &zx29_ops,
+
+ .init = zx29_init_paiftx,
+
+#ifdef CONFIG_USE_TOP_TDM
+ SND_SOC_DAILINK_REG(cpu_tdm, codec, media_platform_tdm),
+#else
+ SND_SOC_DAILINK_REG(cpu_i2s0, codec, media_platform),
+#endif
+},
+/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes end */
+{
+ /* Voice call path; all voice links below reuse voice_cpu/codec. */
+ .name = "voice",//codec name
+ .stream_name = "voice",
+ //.nonatomic = true,
+ //.dynamic = 1,
+ //.dpcm_playback = 1,
+ .ops = &voice_ops,
+
+ .init = zx29_init_paiftx,
+
+
+
+ SND_SOC_DAILINK_REG(voice_cpu, codec, voice_platform),
+
+},
+{
+ .name = "voice_2g3g_teak",//codec name
+ .stream_name = "voice_2g3g_teak",
+ //.nonatomic = true,
+ //.dynamic = 1,
+ //.dpcm_playback = 1,
+ .ops = &voice_ops,
+
+ .init = zx29_init_paiftx,
+
+
+ SND_SOC_DAILINK_REG(voice_cpu, codec, voice_platform),
+
+},
+
+{
+ .name = "voice_3g",//codec name
+ .stream_name = "voice_3g",
+ //.nonatomic = true,
+ //.dynamic = 1,
+ //.dpcm_playback = 1,
+ .ops = &voice_ops,
+
+ .init = zx29_init_paiftx,
+
+
+ SND_SOC_DAILINK_REG(voice_cpu, codec, voice_platform),
+
+},
+
+{
+ /* Loopback test link: dummy platform, real codec. */
+ .name = "loop_test",//codec name
+ .stream_name = "loop_test",
+ //.nonatomic = true,
+ //.dynamic = 1,
+ //.dpcm_playback = 1,
+ //.ops = &zx29_ops,
+ .ops = &voice_ops,
+
+ .init = zx29_init_paiftx,
+
+
+ SND_SOC_DAILINK_REG(voice_cpu, codec, dummy),
+
+},
+
+};
+
+
+
+
+/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes start */
+/* The sound card itself; voice mixer controls are only added on
+ * non-TDM builds that enable USE_ALSA_VOICE_FUNC. */
+static struct snd_soc_card zx29_soc_card = {
+ .name = "zx29-sound-card",
+ .owner = THIS_MODULE,
+ .dai_link = zx29_dai_link,
+ .num_links = ARRAY_SIZE(zx29_dai_link),
+#ifdef CONFIG_USE_TOP_TDM
+#else
+ #ifdef USE_ALSA_VOICE_FUNC
+ .controls = vp_snd_controls,
+ .num_controls = ARRAY_SIZE(vp_snd_controls),
+ #endif
+#endif
+};
+/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes end */
+
+/* DT match table; .data carries the board defaults copied into the
+ * per-device zx29_board_data at probe time. */
+static const struct of_device_id zx29_codec_of_match[] = {
+#if defined(CONFIG_SND_SOC_ZX29_ES8311)
+ { .compatible = "zxic,zx29_es8311", .data = &zx29_platform_data },
+#elif defined(CONFIG_SND_SOC_ZX29_ES8374)
+ { .compatible = "zxic,zx29_es8374", .data = &zx29_platform_data },
+#else
+ { .compatible = "zxic,zx29_es8311", .data = &zx29_platform_data },
+
+#endif
+ {},
+};
+MODULE_DEVICE_TABLE(of, zx29_codec_of_match);
+
+/*
+ * zx29_i2s_top_pin_cfg - route the ARM I2S/TDM interface to the chosen
+ * top-level interface via the SOC_SYS loop register, then look up and
+ * apply the matching pinctrl states. The pinctrl handles are cached in
+ * s_board for the sleep/default helpers and the PM callbacks.
+ *
+ * Fixes vs. original: a missing "soc_sys" resource or failed ioremap
+ * now returns early instead of dereferencing a NULL resource; pointer
+ * values are printed with %p instead of the mismatched %llx; all
+ * declarations precede the first statement.
+ */
+static void zx29_i2s_top_pin_cfg(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pinctrl *p;
+ struct pinctrl_state *s;
+ struct pinctrl_state *s_sleep;
+ struct resource *res;
+ void __iomem *reg_base;
+ unsigned int val;
+ struct zx29_board_data *info = s_board;
+ int ret = 0;
+
+ printk("%s start \n",__func__);
+ pr_info("%s: board name(%s)!\n", __func__, info ? info->name : "unknown");
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "soc_sys");
+ if (!res) {
+ dev_err(dev, "Reg region missing (%s)\n", "soc_sys");
+ return; /* original fell through and dereferenced res->start */
+ }
+
+ reg_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!reg_base) {
+ dev_err(dev, "Reg region ioremap (%s) failed\n", "soc_sys");
+ return;
+ }
+
+/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes start */
+#ifdef CONFIG_USE_TOP_TDM
+ #if defined(CONFIG_USE_TOP_I2S0)
+ dev_info(dev, "%s: arm i2s1 to top i2s0!!\n", __func__);
+ /* Loop-reg field [2:0] = 1: internal arm_i2s1 -> top i2s1. */
+ val = zx_read_reg(reg_base+ZX29_I2S_TOP_LOOP_REG);
+ val &= ~(0x7<<0);
+ val |= 0x1<<0;
+ zx_write_reg(reg_base+ZX29_I2S_TOP_LOOP_REG, val);
+ #elif defined(CONFIG_USE_PIN_I2S1)
+ /* NOTE(review): this arm tests CONFIG_USE_PIN_I2S1 while the non-TDM
+  * copy below tests CONFIG_USE_TOP_I2S1 -- confirm which is intended. */
+ dev_info(dev, "%s: arm i2s1 to top i2s1!\n", __func__);
+ /* Loop-reg field [18:16] = 1: internal arm_i2s1 -> top i2s2. */
+ val = zx_read_reg(reg_base+ZX29_I2S_TOP_LOOP_REG);
+ val &= ~(0x7<<16);
+ val |= 0x1<<16;
+ zx_write_reg(reg_base+ZX29_I2S_TOP_LOOP_REG, val);
+ #endif
+#else
+ #if defined(CONFIG_USE_TOP_I2S0)
+ dev_info(dev, "%s: arm i2s1 to top i2s0!!\n", __func__);
+ val = zx_read_reg(reg_base+ZX29_I2S_TOP_LOOP_REG);
+ val &= ~(0x7<<0);
+ val |= 0x1<<0;
+ zx_write_reg(reg_base+ZX29_I2S_TOP_LOOP_REG, val);
+ #elif defined(CONFIG_USE_TOP_I2S1)
+ dev_info(dev, "%s: arm i2s1 to top i2s1!\n", __func__);
+ val = zx_read_reg(reg_base+ZX29_I2S_TOP_LOOP_REG);
+ val &= ~(0x7<<16);
+ val |= 0x1<<16;
+ zx_write_reg(reg_base+ZX29_I2S_TOP_LOOP_REG, val);
+ #endif
+#endif
+/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes end */
+
+ p = devm_pinctrl_get(dev);
+ if (IS_ERR(p)) {
+ dev_err(dev, "%s: pinctrl get failure ,p=%p,dev=%p!!\n", __func__, p, dev);
+ return;
+ }
+
+ dev_info(dev, "%s: get pinctrl ,p=%p,dev=%p!!\n", __func__, p, dev);
+
+ /* Look up the active and sleep pinctrl states for the selected
+  * interface (default: top i2s0). */
+#if defined(CONFIG_USE_TOP_I2S0)
+ dev_info(dev, "%s: top_i2s0 pinctrl sel!!\n", __func__);
+ s = pinctrl_lookup_state(p, "top_i2s0");
+ if (IS_ERR(s)) {
+ devm_pinctrl_put(p);
+ dev_err(dev, " get state failure!!\n");
+ return;
+ }
+ dev_info(dev, "%s: get top_i2s sleep pinctrl sel!!\n", __func__);
+ s_sleep = pinctrl_lookup_state(p, "topi2s0_sleep");
+ if (IS_ERR(s_sleep)) {
+ devm_pinctrl_put(p);
+ dev_err(dev, " get state failure!!\n");
+ return;
+ }
+#elif defined(CONFIG_USE_TOP_I2S1)
+ dev_info(dev, "%s: top_i2s1 pinctrl sel!!\n", __func__);
+ s = pinctrl_lookup_state(p, "top_i2s1");
+ if (IS_ERR(s)) {
+ devm_pinctrl_put(p);
+ dev_err(dev, " get state failure!!\n");
+ return;
+ }
+ dev_info(dev, "%s: get top_i2s sleep pinctrl sel!!\n", __func__);
+ s_sleep = pinctrl_lookup_state(p, "topi2s1_sleep");
+ if (IS_ERR(s_sleep)) {
+ devm_pinctrl_put(p);
+ dev_err(dev, " get state failure!!\n");
+ return;
+ }
+#elif defined(CONFIG_USE_TOP_TDM)
+ dev_info(dev, "%s: top_tdm pinctrl sel!!\n", __func__);
+ s = pinctrl_lookup_state(p, "top_tdm");
+ if (IS_ERR(s)) {
+ devm_pinctrl_put(p);
+ dev_err(dev, " get state failure!!\n");
+ return;
+ }
+ dev_info(dev, "%s: get top_i2s sleep pinctrl sel!!\n", __func__);
+ s_sleep = pinctrl_lookup_state(p, "toptdm_sleep");
+ if (IS_ERR(s_sleep)) {
+ devm_pinctrl_put(p);
+ dev_err(dev, " get state failure!!\n");
+ return;
+ }
+#else
+ dev_info(dev, "%s: default top_i2s pinctrl sel!!\n", __func__);
+ s = pinctrl_lookup_state(p, "top_i2s0");
+ if (IS_ERR(s)) {
+ devm_pinctrl_put(p);
+ dev_err(dev, " get state failure!!\n");
+ return;
+ }
+ dev_info(dev, "%s: get top_i2s sleep pinctrl sel!!\n", __func__);
+ s_sleep = pinctrl_lookup_state(p, "topi2s0_sleep");
+ if (IS_ERR(s_sleep)) {
+ devm_pinctrl_put(p);
+ dev_err(dev, " get state failure!!\n");
+ return;
+ }
+#endif
+
+ /* Cache handles for zx29_i2s_config_*_pin() and PM callbacks. */
+ if(info != NULL){
+ info->p = p;
+ info->s = s;
+ info->s_sleep = s_sleep;
+ }
+
+/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes start */
+#ifdef CONFIG_USE_TOP_TDM
+ ret = pinctrl_select_state(p, s);
+#else
+ /* Non-TDM boots in the sleep state; codec init re-selects default. */
+ ret = pinctrl_select_state(p, s_sleep);
+#endif
+/* yu.dong@20240508[ZXW-277]Modified Platform CODEC ES8311 Compatible with I2S and TDM Modes end */
+ if (ret < 0) {
+ devm_pinctrl_put(p);
+ dev_err(dev, " select state failure!!\n");
+ return;
+ }
+ dev_info(dev, "%s: set pinctrl end!\n", __func__);
+}
+
+/* Re-apply the cached "default" (active) pinctrl state for the top I2S
+ * pads. Returns -ENODEV if the handles were never cached by
+ * zx29_i2s_top_pin_cfg(), otherwise the pinctrl_select_state() result. */
+int zx29_i2s_config_default_pin(void)
+{
+ struct zx29_board_data *board = s_board;
+ int err;
+
+ if (board == NULL || board->p == NULL || board->s == NULL)
+ return -ENODEV;
+
+ err = pinctrl_select_state(board->p, board->s);
+ if (err < 0)
+ pr_err(" %s select state failure %d!!\n", __func__, err);
+
+ return err;
+}
+
+/* Re-apply the cached "sleep" pinctrl state for the top I2S pads.
+ * Returns -ENODEV if the handles were never cached, otherwise the
+ * pinctrl_select_state() result. */
+int zx29_i2s_config_sleep_pin(void)
+{
+ struct zx29_board_data *board = s_board;
+ int err;
+
+ if (board == NULL || board->p == NULL || board->s_sleep == NULL)
+ return -ENODEV;
+
+ err = pinctrl_select_state(board->p, board->s_sleep);
+ if (err < 0)
+ pr_err(" %s select state failure %d!!\n", __func__, err);
+
+ return err;
+}
+//yu.dong@20240416[ZXW-268]Added codec re-initialization for power down and I2S default configuration adjustment end
+
+/*
+ * zx29_audio_probe - bind the sound card: resolve the I2S controller
+ * from DT (non-TDM builds), copy board defaults from the OF match data,
+ * register the card and apply the top-level pin routing.
+ *
+ * Fixes vs. original: a missing 'zxic,i2s-controller' phandle now fails
+ * the probe (the original set ret = -EINVAL but continued and later
+ * overwrote it); removed the unused 'flags', 'idx' and 'dev' locals.
+ */
+static int zx29_audio_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct device_node *np = pdev->dev.of_node;
+ struct snd_soc_card *card = &zx29_soc_card;
+ struct zx29_board_data *board;
+ const struct of_device_id *id;
+
+ dev_info(&pdev->dev,"zx29_audio_probe start!\n");
+
+ card->dev = &pdev->dev;
+
+ board = devm_kzalloc(&pdev->dev, sizeof(*board), GFP_KERNEL);
+ if (!board)
+ return -ENOMEM;
+
+#ifdef CONFIG_USE_TOP_TDM
+#else
+ if (np) {
+ zx29_dai_link[0].cpus->dai_name = NULL;
+ zx29_dai_link[0].cpus->of_node = of_parse_phandle(np,
+ "zxic,i2s-controller", 0);
+ if (!zx29_dai_link[0].cpus->of_node) {
+ dev_err(&pdev->dev,
+ "Property 'zxic,i2s-controller' missing or invalid\n");
+ return -EINVAL;
+ }
+
+ zx29_dai_link[0].platforms->name = NULL;
+ zx29_dai_link[0].platforms->of_node = zx29_dai_link[0].cpus->of_node;
+ }
+#endif
+
+ /* Copy board defaults carried by the OF match entry, then override
+  * the name for the configured codec. */
+ id = of_match_device(of_match_ptr(zx29_codec_of_match), &pdev->dev);
+ if (id)
+ *board = *((struct zx29_board_data *)id->data);
+
+#if defined(CONFIG_SND_SOC_ZX29_ES8311)
+ board->name = "zx29_es8311";
+#elif defined(CONFIG_SND_SOC_ZX29_ES8374)
+ board->name = "zx29_es8374";
+#else
+ board->name = "zx29_es8311";
+#endif
+ board->dev = &pdev->dev;
+
+ /* Cached globally for the pinctrl helpers and PM callbacks. */
+ s_board = board;
+
+ ret = devm_snd_soc_register_card(&pdev->dev, card);
+ if (ret){
+ dev_err(&pdev->dev, "snd_soc_register_card() failed:%d\n", ret);
+ return ret;
+ }
+
+ zx29_i2s_top_pin_cfg(pdev);
+
+ dev_info(&pdev->dev,"zx29_audio_probe end!\n");
+
+ return ret;
+}
+
+#ifdef CONFIG_PM
+/* Trace-only platform suspend stub (real pin switching happens in
+ * zx29_snd_soc_suspend below).
+ * NOTE(review): not referenced by zx29_platform_driver in this file --
+ * confirm whether it should be wired to .suspend. */
+static int zx29_audio_suspend(struct platform_device * pdev, pm_message_t state)
+{
+ pr_info("%s: start!\n",__func__);
+
+ //pinctrl_pm_select_sleep_state(&pdev->dev);
+ return 0;
+}
+
+/* Trace-only platform resume stub; see note on zx29_audio_suspend. */
+static int zx29_audio_resume(struct platform_device *pdev)
+{
+ pr_info("%s: start!\n",__func__);
+
+ //pinctrl_pm_select_default_state(&pdev->dev);
+
+ return 0;
+}
+
+/*
+ * zx29_snd_soc_suspend - PM suspend: park the top I2S pads in their
+ * sleep pinctrl state, then run the standard ASoC suspend.
+ * Fix: guard s_board against NULL before dereferencing, matching the
+ * checks in zx29_i2s_config_sleep_pin(); a failed state switch is
+ * logged but does not block the ASoC suspend.
+ */
+int zx29_snd_soc_suspend(struct device *dev)
+{
+ int ret = 0;
+ struct zx29_board_data *info = s_board;
+
+ pr_info("%s: start![8311]\n",__func__);
+
+ if(info && (info->p != NULL)&&(info->s_sleep != NULL)){
+ ret = pinctrl_select_state(info->p, info->s_sleep);
+ if (ret < 0) {
+ dev_err(dev, " select state failure!!\n");
+ }
+ dev_info(dev, "%s: set pinctrl sleep end!\n", __func__);
+ }
+ return snd_soc_suspend(dev);
+
+}
+/*
+ * zx29_snd_soc_resume - PM resume: restore the active pinctrl state for
+ * the top I2S pads, then run the standard ASoC resume.
+ * Fix: guard s_board against NULL before dereferencing, matching the
+ * checks in zx29_i2s_config_default_pin(); a failed state switch is
+ * logged but does not block the ASoC resume.
+ */
+int zx29_snd_soc_resume(struct device *dev)
+{
+ int ret = 0;
+ struct zx29_board_data *info = s_board;
+
+ pr_info("%s: start!\n",__func__);
+
+ if(info && (info->p != NULL)&&(info->s != NULL)){
+ ret = pinctrl_select_state(info->p, info->s);
+ if (ret < 0) {
+ dev_err(dev, " select state failure!!\n");
+ }
+ dev_info(dev, "%s: set pinctrl active end!\n", __func__);
+ }
+
+ return snd_soc_resume(dev);
+
+}
+
+#else
+/* !CONFIG_PM variants: no pin switching, just the ASoC pass-throughs. */
+static int zx29_audio_suspend(struct platform_device * pdev, pm_message_t state)
+{
+
+ return 0;
+}
+
+static int zx29_audio_resume(struct platform_device *pdev)
+{
+
+
+ return 0;
+}
+
+/* Delegate directly to the ASoC core when power management is off. */
+int zx29_snd_soc_suspend(struct device *dev)
+{
+
+
+ return snd_soc_suspend(dev);
+
+}
+int zx29_snd_soc_resume(struct device *dev)
+{
+
+
+ return snd_soc_resume(dev);
+
+}
+
+
+#endif
+
+/* PM ops installed on the platform driver below.
+ * NOTE(review): conventionally this would be 'static const'; left
+ * non-static in case another translation unit references it -- confirm. */
+struct dev_pm_ops zx29_snd_soc_pm_ops = {
+ .suspend = zx29_snd_soc_suspend,
+ .resume = zx29_snd_soc_resume,
+};
+/* Platform driver; the driver name tracks the configured codec so it
+ * matches the corresponding platform device / DT compatible.
+ * NOTE(review): .remove is not set although zx29_remove() is defined
+ * above -- confirm whether teardown should be wired in. */
+static struct platform_driver zx29_platform_driver = {
+ .driver = {
+#if defined(CONFIG_SND_SOC_ZX29_ES8311)
+ .name = "zx29_es8311",
+#elif defined(CONFIG_SND_SOC_ZX29_ES8374)
+ .name = "zx29_es8374",
+
+#else
+ .name = "zx29_es8311",
+#endif
+ .of_match_table = of_match_ptr(zx29_codec_of_match),
+ .pm = &zx29_snd_soc_pm_ops,
+ },
+ .probe = zx29_audio_probe,
+};
+
+
+
+
+
+/* Standard module boilerplate: registers zx29_platform_driver on load. */
+module_platform_driver(zx29_platform_driver);
+
+MODULE_DESCRIPTION("zx29 ALSA SoC audio driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:zx29-audio-es83xx");