#include <linux/jiffies.h>
//#include <linux/timekeeping.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include "aicwf_pcie.h"
#include "pcie_host.h"
#include "aicwf_txrxif.h"
#include "rwnx_defs.h"
#include "rwnx_platform.h"
#include "aic_bsp_export.h"
#include "lmac_msg.h"
#include "rwnx_msg_tx.h"

extern uint8_t scanning;
extern u8 dhcped;

#ifdef AICWF_PCIE_SUPPORT

static const struct pci_device_id aic8820_pci_ids[] = {
	{PCI_DEVICE(AIC8800D80_PCI_VENDOR_ID, AIC8800D80_PCI_DEVICE_ID)},
	{PCI_DEVICE(AIC8800D80X2_PCI_VENDOR_ID, AIC8800D80X2_PCI_DEVICE_ID)},
	{ /* sentinel: the id table must be zero-terminated */ },
};

#ifdef CONFIG_WS
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
static struct wakeup_source *pci_ws;
#endif

void rwnx_pm_stay_awake_pc(struct rwnx_hw *rwnx_hw)
{
	printk("%s\n", __func__);

	//pm_stay_awake(&(rwnx_hw->pcidev->pci_dev->dev));
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
	spin_lock_bh(&rwnx_hw->pcidev->ws_lock);
	if (pci_ws != NULL)
		__pm_stay_awake(pci_ws);
	spin_unlock_bh(&rwnx_hw->pcidev->ws_lock);
#endif
}

void rwnx_pm_relax_pc(struct rwnx_hw *rwnx_hw)
{
	printk("%s\n", __func__);

	//pm_relax(&(rwnx_hw->pcidev->pci_dev->dev));
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
	spin_lock_bh(&rwnx_hw->pcidev->ws_lock);
	if (pci_ws != NULL)
		__pm_relax(pci_ws);
	spin_unlock_bh(&rwnx_hw->pcidev->ws_lock);
#endif
}

static void register_ws(void)
{
	printk("%s\n", __func__);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
	/* wakeup_source_register() takes a struct device * since v5.4 */
	pci_ws = wakeup_source_register(NULL, "wifisleep");
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
	pci_ws = wakeup_source_register("wifisleep");
#endif
}

static void unregister_ws(void)
{
	printk("%s\n", __func__);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
	wakeup_source_unregister(pci_ws);
#endif
}
#endif

#ifdef CONFIG_TEMP_CONTROL
//int interval = 30;
//module_param(interval, int, 0660);
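/*
 * Map the measured die temperature to a throttle state (0/1/2) with
 * hysteresis: a state is only left once the temperature falls
 * BUFFERING_Vx degrees below the threshold that entered it, which
 * prevents rapid toggling around tp_thd_1/tp_thd_2.
 */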
static int update_state(s8_l value, u8_l current_state)
{
	s8_l thd_1 = g_rwnx_plat->pcidev->tp_thd_1;
	s8_l thd_2 = g_rwnx_plat->pcidev->tp_thd_2;

	if (value > thd_2)
		return 2;
	else if (value > (thd_2 - BUFFERING_V2) && (current_state == 2))
		return 2;
	else if (value > thd_1 && current_state != 2)
		return 1;
	else if (value > (thd_1 - BUFFERING_V1) && current_state == 1)
		return 1;
	else if (current_state == 0)
		return 0;
	else
		return 1;
}

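/*
 * Throttle transmit: stop all TX queues on every active vif and arm
 * netif_timer so aicwf_netif_worker() wakes them again after @val ms.
 */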
void aicwf_netif_ctrl(struct aic_pci_dev *pcidev, int val)
{
	unsigned long flags;
	struct rwnx_vif *rwnx_vif;

	if (pcidev->net_stop)
		return;

	spin_lock_irqsave(&pcidev->tx_flow_lock, flags);
	list_for_each_entry(rwnx_vif, &pcidev->rwnx_hw->vifs, list) {
		if (!rwnx_vif->ndev || !rwnx_vif->up)
			continue;
		netif_tx_stop_all_queues(rwnx_vif->ndev);
	}
	spin_unlock_irqrestore(&pcidev->tx_flow_lock, flags);
	pcidev->net_stop = true;
	mod_timer(&pcidev->netif_timer, jiffies + msecs_to_jiffies(val));
}

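/*
 * Apply the current throttle decision: a nonzero set_level forces a
 * fixed level; otherwise the level follows the measured temperature
 * through update_state().
 */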
void aicwf_temp_ctrl(struct aic_pci_dev *pcidev)
{
	if (pcidev->set_level) {
		if (pcidev->set_level == 1) {
			pcidev->get_level = 1;
			aicwf_netif_ctrl(pcidev, pcidev->interval_t1);
		} else if (pcidev->set_level == 2) {
			pcidev->get_level = 2;
			aicwf_netif_ctrl(pcidev, pcidev->interval_t2);
		}
		return;
	}

	if (pcidev->cur_temp > (pcidev->tp_thd_1 - 8)) {
		if (update_state(pcidev->cur_temp, pcidev->cur_stat) == 1) {
			pcidev->get_level = 1;
			pcidev->cur_stat = 1;
			aicwf_netif_ctrl(pcidev, pcidev->interval_t1);
		} else if (update_state(pcidev->cur_temp, pcidev->cur_stat) == 2) {
			pcidev->get_level = 2;
			pcidev->cur_stat = 2;
			aicwf_netif_ctrl(pcidev, pcidev->interval_t2);
		}
		return;
	}

	if (pcidev->cur_stat) {
		AICWFDBG(LOGINFO, "reset cur_stat");
		pcidev->cur_stat = 0;
		pcidev->get_level = 0;
	}
}

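/* Deferred wake-up of the TX queues once the throttle interval expires. */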
void aicwf_netif_worker(struct work_struct *work)
{
	struct aic_pci_dev *pcidev = container_of(work, struct aic_pci_dev, netif_work);
	unsigned long flags;
	struct rwnx_vif *rwnx_vif;

	spin_lock_irqsave(&pcidev->tx_flow_lock, flags);
	list_for_each_entry(rwnx_vif, &pcidev->rwnx_hw->vifs, list) {
		if (!rwnx_vif->ndev || !rwnx_vif->up)
			continue;
		netif_tx_wake_all_queues(rwnx_vif->ndev);
	}
	spin_unlock_irqrestore(&pcidev->tx_flow_lock, flags);
	pcidev->net_stop = false;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
static void aicwf_netif_timer(ulong data)
#else
static void aicwf_netif_timer(struct timer_list *t)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
	struct aic_pci_dev *pcidev = (struct aic_pci_dev *)data;
#else
	struct aic_pci_dev *pcidev = from_timer(pcidev, t, netif_timer);
#endif

	if (!work_pending(&pcidev->netif_work))
		schedule_work(&pcidev->netif_work);
}

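/*
 * Periodic temperature poll: read the die temperature from firmware,
 * store it for aicwf_temp_ctrl() and re-arm tp_ctrl_timer unless the
 * driver is shutting down (tm_start cleared under tm_lock).
 */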
void aicwf_temp_ctrl_worker(struct work_struct *work)
{
	struct rwnx_hw *rwnx_hw;
	struct mm_set_vendor_swconfig_cfm cfm;
	struct aic_pci_dev *pcidev = container_of(work, struct aic_pci_dev, tp_ctrl_work);

	rwnx_hw = pcidev->rwnx_hw;

	if (pcidev->bus_if->state == BUS_DOWN_ST) {
		AICWFDBG(LOGERROR, "%s bus down\n", __func__);
		return;
	}

	spin_lock_bh(&pcidev->tm_lock);
	if (!pcidev->tm_start) {
		spin_unlock_bh(&pcidev->tm_lock);
		AICWFDBG(LOGERROR, "tp_timer should stop_1\n");
		return;
	}
	spin_unlock_bh(&pcidev->tm_lock);

	rwnx_hw->started_jiffies = jiffies;

	rwnx_send_get_temp_req(rwnx_hw, &cfm);
	pcidev->cur_temp = cfm.temp_comp_get_cfm.degree;

	spin_lock_bh(&pcidev->tm_lock);
	if (pcidev->tm_start)
		mod_timer(&pcidev->tp_ctrl_timer, jiffies + msecs_to_jiffies(TEMP_GET_INTERVAL));
	else
		AICWFDBG(LOGERROR, "tp_timer should stop_2\n");
	spin_unlock_bh(&pcidev->tm_lock);
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
static void aicwf_temp_ctrl_timer(ulong data)
#else
static void aicwf_temp_ctrl_timer(struct timer_list *t)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
	struct aic_pci_dev *pcidev = (struct aic_pci_dev *)data;
#else
	struct aic_pci_dev *pcidev = from_timer(pcidev, t, tp_ctrl_timer);
#endif

	if (!work_pending(&pcidev->tp_ctrl_work))
		schedule_work(&pcidev->tp_ctrl_work);
}
#endif

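/*
 * Top-half MSI handler: mask the line and hand the event off to the
 * driver tasklet; is_irq_disable records that the bottom half has to
 * re-enable the interrupt when it is done.
 */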
irqreturn_t aicwf_pcie_irq_hdlr(int irq, void *dev_id)
{
	struct aic_pci_dev *pciedev = (struct aic_pci_dev *)dev_id;

	if (pciedev->rwnx_hw) {
		disable_irq_nosync(irq);
		pciedev->rwnx_hw->is_irq_disable = 1;
		tasklet_schedule(&pciedev->rwnx_hw->task);
	}

	return IRQ_HANDLED;
}

extern u8 data_cnt;
extern int rwnx_plat_bin_fw_upload_2(struct rwnx_hw *rwnx_hw, u32 fw_addr, char *filename);

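/*
 * Rewind all IPC ring state after a firmware reload on resume: reset
 * the E2A message buffers, the TX/TX-confirm indices and repost every
 * RX buffer so host and firmware start from index 0 again.
 */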
static int aicwf_sw_resume(void)
{
	struct rwnx_hw *rwnx_hw = g_rwnx_plat->pcidev->rwnx_hw;
	struct rwnx_ipc_buf *buf;
	struct rwnx_ipc_buf *ipc_buf;
	int i;
	struct ipc_e2a_msg *msg;

	rwnx_hw->ipc_env->msgbuf_idx = 0; /* reset msg rx index */
	for (i = 0; i < IPC_MSGE2A_BUF_CNT; i++) {
		buf = rwnx_hw->ipc_env->msgbuf[i];
		if (!buf) {
			printk("msg error!!!\n");
			break;
		}
		msg = buf->addr;
		msg->pattern = 0;
		ipc_host_msgbuf_push(rwnx_hw->ipc_env, buf);
	}

#if 0
	for (i = 0; i < IPC_TXDMA_DESC_CNT; i++) {
		struct rwnx_sw_txhdr *sw_txhdr = (struct rwnx_sw_txhdr *)rwnx_hw->ipc_env->txcfm[i];
		struct rwnx_ipc_buf *txcfm_buf;
		if (sw_txhdr != NULL) {
			struct sk_buff *skb_tmp = sw_txhdr->skb;

			txcfm_buf = &sw_txhdr->ipc_desc;
			rwnx_ipc_buf_a2e_release(rwnx_hw, txcfm_buf);
			dma_unmap_single(rwnx_hw->dev, sw_txhdr->ipc_hostdesc.dma_addr, sw_txhdr->ipc_hostdesc.size, DMA_TO_DEVICE);
			kmem_cache_free(rwnx_hw->sw_txhdr_cache, sw_txhdr);
			skb_pull(skb_tmp, RWNX_TX_HEADROOM);
			consume_skb(skb_tmp);
			rwnx_hw->ipc_env->txcfm[i] = NULL;
			atomic_dec(&rwnx_hw->txdata_cnt);
		}
	}
#endif
	rwnx_hw->ipc_env->txdmadesc_idx = 0; /* reset tx index */
	rwnx_hw->ipc_env->txcfm_idx = 0;     /* reset txcfm index */

	for (i = 0; i < IPC_RXBUF_CNT; i++) {
		ipc_buf = &rwnx_hw->rxbufs[i];
		rwnx_hw->ipc_env->shared->host_rxbuf[i].hostid = RWNX_RXBUFF_HOSTID_GET(ipc_buf);
		rwnx_hw->ipc_env->shared->host_rxbuf[i].dma_addr = ipc_buf->dma_addr;
		rwnx_hw->ipc_env->shared->host_rxbuf[i].pattern = 0x0;
	}

	data_cnt = 0; /* reset rx index */
	rwnx_hw->rxbuf_idx = 0;
	rwnx_hw->ipc_env->rxbuf_idx = 0;

	return 0;
}
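
/*
 * Full firmware recovery after a resume where the chip lost power:
 * reload the (optional) BT patch and the WiFi firmware, restart it,
 * rewind the IPC rings, flush any commands queued while suspended and
 * ask firmware to restore the state of the active vif.
 */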
static int aicwf_resume_access(void)
{
	int ret = 0;
	//struct mm_add_if_cfm add_if_cfm;
	//struct mm_set_stack_start_cfm set_start_cfm;
#ifndef USE_5G
	/* FIXME: feature should be filled in by the platform code; it is
	 * zero-initialised here so the restore call below is deterministic. */
	struct aicbsp_feature_t feature = {0};
#endif
	struct rwnx_hw *rwnx_hw = g_rwnx_plat->pcidev->rwnx_hw;
	struct rwnx_vif *rwnx_vif;
	struct rwnx_vif *rwnx_vif_param = NULL;
	struct rwnx_cmd *cur = NULL;
	struct rwnx_cmd *nxt = NULL;
#ifdef CONFIG_USB_BT
	struct aicbt_patch_table *head = NULL;
	struct aicbt_patch_info_t patch_info = {
		.info_len       = 0,
		.adid_addrinf   = 0,
		.addr_adid      = 0,
		.patch_addrinf  = 0,
		.addr_patch     = 0,
		.reset_addr     = 0,
		.reset_val      = 0,
		.adid_flag_addr = 0,
		.adid_flag      = 0,
	};

	head = aicbt_patch_table_alloc(rwnx_hw, FW_PATCH_TABLE_NAME_8800D80_U02);
	if (head == NULL) {
		printk("aicbt_patch_table_alloc fail\n");
		return -1;
	}

	patch_info.addr_adid = FW_RAM_ADID_BASE_ADDR_8800D80_U02;
	patch_info.addr_patch = FW_RAM_PATCH_BASE_ADDR_8800D80_U02;

	aicbt_patch_info_unpack(&patch_info, head);
	if (patch_info.info_len == 0) {
		printk("%s, aicbt_patch_info_unpack fail\n", __func__);
		return -1;
	}

	printk("addr_adid 0x%x, addr_patch 0x%x\n", patch_info.addr_adid, patch_info.addr_patch);

	if (rwnx_plat_bin_fw_upload_2(rwnx_hw, patch_info.addr_adid, FW_ADID_BASE_NAME_8800D80_U02)) {
		printk("%s load patch fail 1\n", __func__);
		return -1;
	}
	if (rwnx_plat_bin_fw_upload_2(rwnx_hw, patch_info.addr_patch, FW_PATCH_BASE_NAME_8800D80_U02)) {
		printk("%s load patch fail 2\n", __func__);
		return -1;
	}
	if (aicbt_patch_table_load(rwnx_hw, head)) {
		return -1;
	}
	mdelay(15);
#endif

	ret = rwnx_plat_bin_fw_upload_2(rwnx_hw, RAM_FMAC_FW_ADDR, RWNX_PCIE_FW_NAME);
	if (ret) {
		printk("resume fw load fail\n");
		return ret;
	}

	patch_config(rwnx_hw);
	pcie_reset_firmware(rwnx_hw, RAM_FMAC_FW_ADDR);

	aicwf_sw_resume();

	/* complete and drop every command still queued from before suspend */
	spin_lock_bh(&rwnx_hw->cmd_mgr->lock);
	list_for_each_entry_safe(cur, nxt, &rwnx_hw->cmd_mgr->cmds, list) {
		printk("resume_cmd_id: %d\n", cur->id);
		list_del(&cur->list);
		rwnx_hw->cmd_mgr->queue_sz--;
		if (!(cur->flags & RWNX_CMD_FLAG_NONBLOCK))
			complete(&cur->complete);
	}
	if (rwnx_hw->pcidev->cmd_mgr.state == RWNX_CMD_MGR_STATE_CRASHED) {
		rwnx_hw->pcidev->cmd_mgr.state = RWNX_CMD_MGR_STATE_INITED;
		printk("cmd state recovery\n");
	}
	spin_unlock_bh(&rwnx_hw->cmd_mgr->lock);

	list_for_each_entry(rwnx_vif, &rwnx_hw->vifs, list) {
		if (rwnx_vif->up) {
			printk("find vif_up\n");
			rwnx_vif_param = rwnx_vif;
			spin_lock_bh(&rwnx_hw->cb_lock);
			rwnx_vif->vif_index = 0;
			rwnx_hw->vif_table[0] = rwnx_vif;
			spin_unlock_bh(&rwnx_hw->cb_lock);
		}
	}

#ifdef USE_5G
	ret = rwnx_send_resume_restore(rwnx_hw, 1, 0, CO_BIT(5), 0, rwnx_vif_param);
#else
	ret = rwnx_send_resume_restore(rwnx_hw, 1, feature.hwinfo < 0, feature.hwinfo, 0, rwnx_vif_param);
#endif
	if (ret) {
		printk("%s restore fail\n", __func__);
		return ret;
	}

	mdelay(200);
	rwnx_hw->pci_suspending = 0;

	return ret;
}

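/*
 * Locally tear down a station connection when firmware cannot confirm
 * the disconnect in time (e.g. while entering suspend): notify
 * cfg80211, stop the netdev queues and release reorder/txq state.
 */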
static int aicwf_disconnect_inform(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif)
{
	struct net_device *dev;
#ifdef AICWF_RX_REORDER
	struct reord_ctrl_info *reord_info, *tmp;
	u8 *macaddr;
	struct aicwf_rx_priv *rx_priv;
#endif

	RWNX_DBG(RWNX_FN_ENTRY_STR);
	dhcped = 0;

	if (!rwnx_vif)
		return 0;
	dev = rwnx_vif->ndev;

#ifdef CONFIG_BR_SUPPORT
	/* clear bridge database */
	nat25_db_cleanup(rwnx_vif);
#endif /* CONFIG_BR_SUPPORT */

	if (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)
		rwnx_hw->is_p2p_connected = 0;
	/* if vif is not up, rwnx_close has already been called */
	if (rwnx_vif->up) {
		cfg80211_disconnected(dev, 1, NULL, 0, true, GFP_ATOMIC);
		netif_tx_stop_all_queues(dev);
		netif_carrier_off(dev);
	}

#ifdef CONFIG_RWNX_BFMER
	/* Disable Beamformer if supported */
	rwnx_bfmer_report_del(rwnx_hw, rwnx_vif->sta.ap);
#endif /* CONFIG_RWNX_BFMER */

#ifdef AICWF_RX_REORDER
	rx_priv = rwnx_hw->pcidev->rx_priv;

	if ((rwnx_vif->wdev.iftype == NL80211_IFTYPE_STATION) || (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)) {
		macaddr = rwnx_vif->ndev->dev_addr;
		printk("deinit:macaddr:%x,%x,%x,%x,%x,%x\r\n", macaddr[0], macaddr[1], macaddr[2],
		       macaddr[3], macaddr[4], macaddr[5]);

		spin_lock_bh(&rx_priv->stas_reord_lock);
		list_for_each_entry_safe(reord_info, tmp, &rx_priv->stas_reord_list, list) {
			printk("reord_mac:%x,%x,%x,%x,%x,%x\r\n", reord_info->mac_addr[0], reord_info->mac_addr[1], reord_info->mac_addr[2],
			       reord_info->mac_addr[3], reord_info->mac_addr[4], reord_info->mac_addr[5]);
			if (!memcmp(reord_info->mac_addr, macaddr, 6)) {
				reord_deinit_sta(rx_priv, reord_info);
				break;
			}
		}
		spin_unlock_bh(&rx_priv->stas_reord_lock);
	} else if ((rwnx_vif->wdev.iftype == NL80211_IFTYPE_AP) || (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_GO)) {
		BUG(); /* should not get here: AP/GO teardown goes through the del_sta path */
	}
#endif

	rwnx_txq_sta_deinit(rwnx_hw, rwnx_vif->sta.ap);
	rwnx_txq_tdls_vif_deinit(rwnx_vif);
	rwnx_vif->sta.ap->valid = false;
	rwnx_vif->sta.ap = NULL;
	rwnx_external_auth_disable(rwnx_vif);
	rwnx_chanctx_unlink(rwnx_vif);

	atomic_set(&rwnx_vif->drv_conn_state, (int)RWNX_DRV_STATUS_DISCONNECTED);
	return 0;
}

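/*
 * Bring up the PCI device: enable it, map the BARs (the D80 exposes
 * either one combined BAR or BAR0/1/2 depending on configuration, the
 * D80X2 always a single BAR), hook up the MSI interrupt and disable
 * ASPM L1 on the D80 so message doorbells do not stall on link sleep.
 */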
static int aicwf_pcie_init(struct aic_pci_dev *pciedev)
{
	struct pci_dev *pci_dev = pciedev->pci_dev;
	struct aic_pci_dev *adev = pciedev;
	u16 pci_cmd;
	int ret = -ENODEV;
	u8 linkctrl;
	int i = 0;

	printk("%s\n", __func__);
	/* Hotplug fixups */
	pci_read_config_word(pci_dev, PCI_COMMAND, &pci_cmd);
	pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
	pci_write_config_word(pci_dev, PCI_COMMAND, pci_cmd);
	pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, L1_CACHE_BYTES >> 2);

	if ((ret = pci_enable_device(pci_dev))) {
		dev_err(&(pci_dev->dev), "pci_enable_device failed\n");
		goto out;
	}

	pci_set_master(pci_dev);

	if ((ret = pci_request_regions(pci_dev, KBUILD_MODNAME))) {
		dev_err(&(pci_dev->dev), "pci_request_regions failed\n");
		goto out_request;
	}

	if (pci_enable_msi(pci_dev)) {
		dev_err(&(pci_dev->dev), "pci_enable_msi failed\n");
		goto out_msi;
	}

	pciedev->bar_count = 0;
	for (i = 0; i < 6; i++) {
		if (pci_resource_start(pci_dev, i))
			pciedev->bar_count++;
	}
	printk("bar_count:%d\n", adev->bar_count);

	switch (pciedev->chip_id) {
	case PRODUCT_ID_AIC8800D80:
		if (pciedev->bar_count == 1) {
			adev->pdev = pci_dev;
			pci_read_config_dword(adev->pdev, PCI_BASE_ADDRESS_0, &(adev->bar0));
			adev->len0 = pci_resource_len(adev->pdev, 0);
			if (!(adev->map0 = (u8 *)pci_ioremap_bar(adev->pdev, 0))) {
				dev_err(&(pci_dev->dev), "pci_ioremap_bar0 failed\n");
				ret = -ENODEV;
				goto out_bar0;
			}

			pciedev->pci_bar0_vaddr = adev->map0;
			LOG_INFO("bar0: %x, len = %x, map = %lx", adev->bar0, adev->len0, (unsigned long)adev->map0);
			printk("start %llx end %llx flags %lx len %llx\n",
			       (u64)pci_resource_start(adev->pdev, 0),
			       (u64)pci_resource_end(adev->pdev, 0),
			       pci_resource_flags(adev->pdev, 0),
			       (u64)pci_resource_len(adev->pdev, 0));
		} else {
			if (!(pciedev->pci_bar0_vaddr = (u8 *)pci_ioremap_bar(pci_dev, 0))) {
				dev_err(&(pci_dev->dev), "pci_ioremap_bar0 failed\n");
				ret = -ENODEV;
				goto out_bar0;
			}

			if (!(pciedev->pci_bar1_vaddr = (u8 *)pci_ioremap_bar(pci_dev, 1))) {
				dev_err(&(pci_dev->dev), "pci_ioremap_bar1 failed\n");
				ret = -ENODEV;
				goto out_bar1;
			}

			if (!(pciedev->pci_bar2_vaddr = (u8 *)pci_ioremap_bar(pci_dev, 2))) {
				dev_err(&(pci_dev->dev), "pci_ioremap_bar2 failed\n");
				ret = -ENODEV;
				goto out_bar2;
			}
		}
		break;
	case PRODUCT_ID_AIC8800D80X2:
		adev->pdev = pci_dev;
		pci_read_config_dword(adev->pdev, PCI_BASE_ADDRESS_0, &(adev->bar0));
		adev->len0 = pci_resource_len(adev->pdev, 0);
		if (!(adev->map0 = (u8 *)pci_ioremap_bar(adev->pdev, 0))) {
			dev_err(&(pci_dev->dev), "pci_ioremap_bar0 failed\n");
			ret = -ENODEV;
			goto out_bar0;
		}

		pciedev->pci_bar0_vaddr = adev->map0;
		LOG_INFO("bar0: %x, len = %x, map = %lx", adev->bar0, adev->len0, (unsigned long)adev->map0);
		printk("start %llx end %llx flags %lx len %llx\n",
		       (u64)pci_resource_start(adev->pdev, 0),
		       (u64)pci_resource_end(adev->pdev, 0),
		       pci_resource_flags(adev->pdev, 0),
		       (u64)pci_resource_len(adev->pdev, 0));
		break;
	default:
		printk("chip id not correct\n");
		break;
	}

	ret = request_irq(pci_dev->irq, aicwf_pcie_irq_hdlr, IRQF_SHARED, "aicwf_pci", pciedev);
	if (ret) {
		printk("request irq fail:%d\n", ret);
		goto out_irq;
	}

	if (pciedev->chip_id == PRODUCT_ID_AIC8800D80) {
		/* disable ASPM L1 entry: messages may otherwise wait on the link waking up */
		pci_read_config_byte(pci_dev, pci_dev->pcie_cap + PCI_EXP_LNKCTL, &linkctrl);
		if (linkctrl & PCI_EXP_LNKCTL_ASPM_L1) {
			linkctrl &= ~PCI_EXP_LNKCTL_ASPM_L1;
			pci_write_config_byte(pci_dev, pci_dev->pcie_cap + PCI_EXP_LNKCTL, linkctrl);
		}
	}

	printk("%s success\n", __func__);

	goto out;
out_irq:
	if (pciedev->pci_bar2_vaddr)
		iounmap(pciedev->pci_bar2_vaddr);
out_bar2:
	if (pciedev->pci_bar1_vaddr)
		iounmap(pciedev->pci_bar1_vaddr);
out_bar1:
	if (pciedev->pci_bar0_vaddr)
		iounmap(pciedev->pci_bar0_vaddr);
out_bar0:
	pci_disable_msi(pci_dev);
out_msi:
	pci_release_regions(pci_dev);
out_request:
	pci_disable_device(pci_dev);
out:
	return ret;
}

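/*
 * Doorbell writes: poke mailbox register 0x0ec of the embedded PCIe
 * block to tell firmware that a message (1) or TX data (2) descriptor
 * is ready. On multi-BAR D80 parts the mailbox is reached via BAR2.
 */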
static void aicwf_pcie_txmsg_db(struct aic_pci_dev *pciedev)
{
	if (pciedev->chip_id == PRODUCT_ID_AIC8800D80 && pciedev->bar_count != 1) {
		volatile unsigned int *dst_mail = (volatile unsigned int *)(pciedev->pci_bar2_vaddr + 0x800ec);
		dst_mail[0] = 0x1;
	} else {
		writel(1, pciedev->emb_tpci + 0x0ec);
	}
}

static void aicwf_pcie_txdata_db(struct aic_pci_dev *pciedev)
{
	if (pciedev->chip_id == PRODUCT_ID_AIC8800D80 && pciedev->bar_count != 1) {
		volatile unsigned int *dst_mail = (volatile unsigned int *)(pciedev->pci_bar2_vaddr + 0x800ec);
		dst_mail[0] = 0x2;
	} else {
		writel(2, pciedev->emb_tpci + 0x0ec);
	}
}

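/*
 * PCI probe: allocate the bus wrapper and device state, map the BARs,
 * run the link set/test sequence where required and bring up the
 * platform layer; aicwf_hostif_ready() fires only on full success.
 */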
static int aicwf_pcie_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_id)
{
	int ret = -ENODEV;
	struct aicwf_bus *bus_if = NULL;
	struct aic_pci_dev *pciedev = NULL;

	printk("%s\n", __func__);

	bus_if = kzalloc(sizeof(struct aicwf_bus), GFP_KERNEL);
	if (!bus_if) {
		printk("alloc bus fail\n");
		return -ENOMEM;
	}

	pciedev = kzalloc(sizeof(struct aic_pci_dev), GFP_KERNEL);
	if (!pciedev) {
		printk("alloc pciedev fail\n");
		kfree(bus_if);
		return -ENOMEM;
	}

	printk("pci_dev vendor:%04x device:%04x subvendor:%04x subdevice:%04x\n", pci_dev->vendor, pci_dev->device, pci_dev->subsystem_vendor, pci_dev->subsystem_device);

	if (pci_id->device == AIC8800D80_PCI_DEVICE_ID)
		pciedev->chip_id = PRODUCT_ID_AIC8800D80;
	else if (pci_id->device == AIC8800D80X2_PCI_DEVICE_ID)
		pciedev->chip_id = PRODUCT_ID_AIC8800D80X2;
	pciedev->bus_if = bus_if;
	bus_if->bus_priv.pci = pciedev;
	dev_set_drvdata(&pci_dev->dev, bus_if);
	pciedev->pci_dev = pci_dev;

#ifdef CONFIG_WS
	register_ws();
#endif

	ret = aicwf_pcie_init(pciedev);
	if (ret) {
		printk("%s: pci init fail\n", __func__);
		goto out_free;
	}

	if (pciedev->chip_id == PRODUCT_ID_AIC8800D80X2 || pciedev->bar_count == 1) {
		ret = aicwf_pcie_setst(pciedev);
		if (ret) {
			printk("%s: pci set&tst fail\n", __func__);
			goto out_free;
		}
	}

	aicwf_pcie_bus_init(pciedev);

	ret = aicwf_pcie_platform_init(pciedev);

	if (!ret)
		aicwf_hostif_ready();

	return ret;

out_free:
#ifdef CONFIG_WS
	unregister_ws();
#endif
	kfree(pciedev);
	kfree(bus_if);
	return ret;
}

static void aicwf_pcie_remove(struct pci_dev *pci_dev)
{
	struct aicwf_bus *bus_if = dev_get_drvdata(&pci_dev->dev);
	struct aic_pci_dev *pci = bus_if->bus_priv.pci;

	printk("%s\n", __func__);

	bus_if->state = BUS_DOWN_ST;
	rwnx_cmd_mgr_deinit(&bus_if->bus_priv.pci->cmd_mgr);

#ifdef CONFIG_WS
	unregister_ws();
#endif
	free_irq(pci_dev->irq, pci);
	pci_disable_device(pci_dev);
	if (pci->pci_bar0_vaddr)
		iounmap(pci->pci_bar0_vaddr);
	if (pci->chip_id == PRODUCT_ID_AIC8800D80) {
		if (pci->pci_bar1_vaddr)
			iounmap(pci->pci_bar1_vaddr);
		if (pci->pci_bar2_vaddr)
			iounmap(pci->pci_bar2_vaddr);
	}
	pci_release_regions(pci_dev);
	pci_clear_master(pci_dev);
	pci_disable_msi(pci_dev);

	if (pci->bus_if->busrx_thread) {
		complete_all(&pci->bus_if->busrx_trgg);
		kthread_stop(pci->bus_if->busrx_thread);
		pci->bus_if->busrx_thread = NULL;
	}

	kfree(bus_if);
	kfree(pci);
}

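/*
 * Suspend path: wait (up to ~2 s) for an in-flight disconnect to
 * finish, quiesce the thermal machinery, abort any pending scan and
 * put the device into the requested low-power state.
 */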
static int aicwf_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int ret = 0, i = 0;
	struct rwnx_hw *rwnx_hw = g_rwnx_plat->pcidev->rwnx_hw;
	struct rwnx_vif *rwnx_vif;

	printk("%s\n", __func__);
	list_for_each_entry(rwnx_vif, &rwnx_hw->vifs, list) {
		if (rwnx_vif->up) {
			while ((int)atomic_read(&rwnx_vif->drv_conn_state) == (int)RWNX_DRV_STATUS_DISCONNECTING) {
				printk("suspend waiting disc\n");
				msleep(100);
				i += 1;
				if (i >= 20) {
					aicwf_disconnect_inform(rwnx_hw, rwnx_vif);
					break;
				}
			}
		}
	}

#ifdef CONFIG_TEMP_CONTROL
	del_timer_sync(&rwnx_hw->pcidev->tp_ctrl_timer);
	cancel_work_sync(&rwnx_hw->pcidev->tp_ctrl_work);

	mod_timer(&rwnx_hw->pcidev->tp_ctrl_timer, jiffies + msecs_to_jiffies(TEMP_GET_INTERVAL));

	del_timer_sync(&rwnx_hw->pcidev->netif_timer);
	cancel_work_sync(&rwnx_hw->pcidev->netif_work);
#endif

	g_rwnx_plat->pcidev->rwnx_hw->pci_suspending = 1;

	spin_lock_bh(&rwnx_hw->cb_lock);
	if (rwnx_hw->scan_request) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
		struct cfg80211_scan_info info = {
			.aborted = true,
		};
		cfg80211_scan_done(rwnx_hw->scan_request, &info);
#else
		cfg80211_scan_done(rwnx_hw->scan_request, true);
#endif
		printk("suspend scan_done\n");
		rwnx_hw->scan_request = NULL;
		scanning = 0;
	}
	spin_unlock_bh(&rwnx_hw->cb_lock);

	ret = pci_save_state(pdev);
	if (ret) {
		printk("failed on pci_save_state %d\n", ret);
		return ret;
	}

	pci_disable_device(pdev);
	ret = pci_set_power_state(pdev, pci_choose_state(pdev, state));
	if (ret)
		printk("failed on pci_set_power_state %d\n", ret);

	return ret;
}

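/*
 * Resume path: restore PCI state, then check a firmware-owned marker
 * word in BAR0 to decide whether the chip kept power across suspend;
 * if it did not, run the full aicwf_resume_access() reload sequence.
 */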
static int aicwf_pcie_resume(struct pci_dev *pdev)
{
	struct rwnx_hw *rwnx_hw = g_rwnx_plat->pcidev->rwnx_hw;
	bool fw_started;
	int ret = 0;

	printk("%s enter: %d\n", __func__, atomic_read(&rwnx_hw->txdata_cnt));

	ret = pci_set_power_state(pdev, PCI_D0);
	if (ret) {
		printk("failed on pci_set_power_state %d\n", ret);
		return ret;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		printk("failed on pci_enable_device %d\n", ret);
		return ret;
	}

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 37))
	pci_restore_state(pdev);
#else
	ret = pci_restore_state(pdev);
	if (ret) {
		printk("failed on pci_restore_state %d\n", ret);
		return ret;
	}
#endif

	fw_started = *(volatile u32 *)(g_rwnx_plat->pcidev->pci_bar0_vaddr + 0x120000) == 0x1a2000;

	if (!fw_started) {
		ret = aicwf_resume_access();
		if (ret) {
			printk("resume access fail %d\n", ret);
			return ret;
		}
	} else {
		printk("resume skip reload\n");

		g_rwnx_plat->pcidev->rwnx_hw->pci_suspending = 0;

#ifdef CONFIG_TEMP_CONTROL
		mod_timer(&g_rwnx_plat->pcidev->tp_ctrl_timer, jiffies + msecs_to_jiffies(TEMP_GET_INTERVAL));
#endif

		return ret;
	}
	printk("%s end\n", __func__);

	return ret;
}

940
941static struct pci_driver aicwf_pci_driver = {
942 .name = KBUILD_MODNAME,
943 .id_table = aic8820_pci_ids,
944 .probe = aicwf_pcie_probe,
945 .remove = aicwf_pcie_remove,
946 .suspend = aicwf_pcie_suspend,
947 .resume = aicwf_pcie_resume,
948};
949
950int aicwf_pcie_register_drv(void)
951{
952 return pci_register_driver(&aicwf_pci_driver);
953}
954
955void aicwf_pcie_unregister_drv(void)
956{
957 if (g_rwnx_plat && g_rwnx_plat->enabled){
958#ifdef CONFIG_TEMP_CONTROL
959 spin_lock_bh(&g_rwnx_plat->pcidev->tm_lock);
960 g_rwnx_plat->pcidev->tm_start = 0;
961 if (timer_pending(&g_rwnx_plat->pcidev->tp_ctrl_timer)) {
962 AICWFDBG(LOGINFO, "%s del tp_ctrl_timer\n", __func__);
963 del_timer_sync(&g_rwnx_plat->pcidev->tp_ctrl_timer);
964 }
965 spin_unlock_bh(&g_rwnx_plat->pcidev->tm_lock);
966 cancel_work_sync(&g_rwnx_plat->pcidev->tp_ctrl_work);
967
968 if (timer_pending(&g_rwnx_plat->pcidev->netif_timer)) {
969 AICWFDBG(LOGINFO, "%s del netif_timer\n", __func__);
970 del_timer_sync(&g_rwnx_plat->pcidev->netif_timer);
971 }
972 cancel_work_sync(&g_rwnx_plat->pcidev->netif_work);
973#endif
974 rwnx_platform_deinit(g_rwnx_plat->pcidev->rwnx_hw);
975 }
976
977 pci_unregister_driver(&aicwf_pci_driver);
978}
979
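/* Hex-dump helper: prints @len bytes of @data, 16 per line. */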
void rwnx_data_dump(char *tag, void *data, unsigned long len)
{
	unsigned long i, j, n;
	uint8_t *data_ = (uint8_t *)data;
	char line[16 * 3 + 1];

	printk("%s %s len:(%lu)\r\n", __func__, tag, len);

	for (i = 0; i < len; i += 16) {
		/* clamp the last row so we never read past the buffer */
		n = (len - i < 16) ? (len - i) : 16;
		for (j = 0; j < n; j++)
			scnprintf(line + j * 3, 4, "%02X ", data_[i + j]);
		printk("%s\r\n", line);
	}
}

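/*
 * Replenish thread: whenever woken through busrx_trgg, refill the RX
 * ring until rxbuf_cnt reaches rxbuf_nb, retrying every 10 ms when a
 * DMA buffer allocation fails.
 */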
int pcie_rxbuf_rep_thread(void *data)
{
	struct aicwf_rx_priv *rx_priv = (struct aicwf_rx_priv *)data;
	struct aicwf_bus *bus_if = rx_priv->pciedev->bus_if;
	struct rwnx_hw *rwnx_hw = rx_priv->pciedev->rwnx_hw;

	while (1) {
		if (kthread_should_stop()) {
			AICWFDBG(LOGERROR, "pcie busrx thread stop\n");
			break;
		}
		if (!wait_for_completion_interruptible(&bus_if->busrx_trgg)) {
			if (bus_if->state == BUS_DOWN_ST)
				continue;

			printk("%s trigger\n", __func__);
			while (atomic_read(&rwnx_hw->rxbuf_cnt) < rwnx_hw->ipc_env->rxbuf_nb) {
				if (rwnx_ipc_rxbuf_alloc(rwnx_hw)) {
					printk("%s rxbuf alloc fail, now rxbuf_cnt = %d\n",
					       __func__, atomic_read(&rwnx_hw->rxbuf_cnt));
					msleep(10);
				}
			}
			printk("%s out\n", __func__);
		}
	}

	return 0;
}

static int aicwf_pcie_bus_start(struct device *dev)
{
	return 0;
}

static void aicwf_pcie_bus_stop(struct device *dev)
{
}

static int aicwf_pcie_bus_txdata(struct device *dev, struct sk_buff *skb)
{
	struct aicwf_bus *bus_if = dev_get_drvdata(dev);

	aicwf_pcie_txdata_db(bus_if->bus_priv.pci);

	return 0;
}

static int aicwf_pcie_bus_txmsg(struct device *dev, u8 *msg, uint msglen)
{
	struct aicwf_bus *bus_if = dev_get_drvdata(dev);
	struct rwnx_hw *rwnx_hw = bus_if->bus_priv.pci->rwnx_hw;

	pcie_host_msg_push(rwnx_hw->ipc_env, msg, msglen);
	aicwf_pcie_txmsg_db(bus_if->bus_priv.pci);

	return 0;
}

static struct aicwf_bus_ops aicwf_pcie_bus_ops = {
	.start = aicwf_pcie_bus_start,
	.stop = aicwf_pcie_bus_stop,
	.txdata = aicwf_pcie_bus_txdata,
	.txmsg = aicwf_pcie_bus_txmsg,
};

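/*
 * Wire up the bus ops, RX state and (optionally) the thermal-control
 * timers and workers; tp_ctrl_timer is armed last, after all of the
 * state it depends on has been initialised.
 */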
void aicwf_pcie_bus_init(struct aic_pci_dev *pciedev)
{
	struct aicwf_bus *bus_if = pciedev->bus_if;
	int ret;
	struct aicwf_rx_priv *rx_priv = NULL;

	bus_if->dev = &pciedev->pci_dev->dev;
	bus_if->ops = &aicwf_pcie_bus_ops;
	bus_if->state = BUS_UP_ST;

	rx_priv = aicwf_rx_init(pciedev);
	if (!rx_priv)
		txrx_err("rx init failed\n");
	pciedev->rx_priv = rx_priv;

#ifdef CONFIG_TEMP_CONTROL
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
	init_timer(&pciedev->tp_ctrl_timer);
	pciedev->tp_ctrl_timer.data = (ulong)pciedev;
	pciedev->tp_ctrl_timer.function = aicwf_temp_ctrl_timer;
	init_timer(&pciedev->netif_timer);
	pciedev->netif_timer.data = (ulong)pciedev;
	pciedev->netif_timer.function = aicwf_netif_timer;
#else
	timer_setup(&pciedev->tp_ctrl_timer, aicwf_temp_ctrl_timer, 0);
	timer_setup(&pciedev->netif_timer, aicwf_netif_timer, 0);
#endif
	INIT_WORK(&pciedev->tp_ctrl_work, aicwf_temp_ctrl_worker);
	INIT_WORK(&pciedev->netif_work, aicwf_netif_worker);
	spin_lock_init(&pciedev->tm_lock);
	pciedev->net_stop = false;
	pciedev->on_off = true;
	pciedev->cur_temp = 0;
	pciedev->get_level = 0;
	pciedev->set_level = 0;
	pciedev->interval_t1 = TMR_INTERVAL_1;
	pciedev->interval_t2 = TMR_INTERVAL_2;
	pciedev->cur_stat = 0;
	pciedev->tp_thd_1 = TEMP_THD_1;
	pciedev->tp_thd_2 = TEMP_THD_2;
	pciedev->tm_start = 1;
	/* arm the poll timer only after tm_lock and tm_start are set up */
	mod_timer(&pciedev->tp_ctrl_timer, jiffies + msecs_to_jiffies(TEMP_GET_INTERVAL));
#endif

	ret = aicwf_bus_init(0, &pciedev->pci_dev->dev);
	if (ret)
		printk("%s fail\n", __func__);
}

#endif