/******************************************************************************
 *
 * @brief sdio core function definitions
 *
 * Copyright (C) ESWIN 2015-2020
 *
 ******************************************************************************/
12 #include <linux/firmware.h>
\r
13 #include <linux/kthread.h>
\r
16 #include <uapi/linux/sched/types.h>
\r
17 //#include "debug.h"
\r
18 #include "ecrnx_platform.h"
\r
20 #include "ecrnx_rx.h"
\r
21 #include "sdio_host_interface.h"
\r
22 #include "eswin_utils.h"
\r
/*
 * Module parameters and file-scope globals.
 * NOTE(review): this extraction dropped lines — the declarations of
 * `loopback`, `power_save`, `dl_fw`, `amt_mode`, `set_gain` and `fw_name`
 * (referenced by module_param below) are not visible here; the matching
 * #endif for CONFIG_ECRNX_WIFO_CAIL is also missing. Text kept verbatim.
 */
25 module_param(loopback, bool, S_IRUSR | S_IWUSR);
\r
26 MODULE_PARM_DESC(loopback, "HIF loopback");
\r
29 module_param(power_save, int, S_IRUSR | S_IWUSR);
\r
30 MODULE_PARM_DESC(power_save, "Power Save(0: disable, 1:enable)");
\r
32 int disable_cqm = 0;
\r
33 module_param(disable_cqm, int, S_IRUSR | S_IWUSR);
\r
34 MODULE_PARM_DESC(disable_cqm, "Disable CQM (0: disable, 1:enable)");
\r
/* 0 means "use the firmware-reported value"; see eswin_fw_ready(). */
37 int listen_interval = 0;
\r
38 module_param(listen_interval, int, S_IRUSR | S_IWUSR);
\r
39 MODULE_PARM_DESC(listen_interval, "Listen Interval");
\r
41 int bss_max_idle = 0;
\r
42 module_param(bss_max_idle, int, S_IRUSR | S_IWUSR);
\r
43 MODULE_PARM_DESC(bss_max_idle, "BSS Max Idle");
\r
47 module_param(dl_fw, bool, S_IRUSR | S_IWUSR);
\r
48 MODULE_PARM_DESC(dl_fw, "download firmware");
\r
51 #ifdef CONFIG_ECRNX_WIFO_CAIL
\r
53 module_param(amt_mode, bool, S_IRUSR | S_IWUSR);
\r
54 MODULE_PARM_DESC(amt_mode, "calibrate mode");
\r
58 module_param(set_gain, bool, S_IRUSR | S_IWUSR);
\r
59 MODULE_PARM_DESC(set_gain, "set gain delta");
\r
/* Global handle to the single device instance; set elsewhere, read by
 * sdio_host_send() and sdio_tx_timer_handle(). */
62 struct eswin *pEswin;
\r
64 module_param(fw_name, charp, S_IRUGO);
\r
65 MODULE_PARM_DESC(fw_name, "Firmware file name");
\r
\r
/*
 * eswin_fw_ready() - consume the firmware "ready" WIM message.
 *
 * Parses the wim_ready payload that follows the wim header in @skb, logs the
 * reported firmware parameters, and copies them into tr->fwinfo / tr->cap.
 * The listen_interval / bss_max_idle module parameters, when non-zero,
 * override the firmware-reported capability values. Frees @skb when done.
 *
 * NOTE(review): extraction dropped braces/lines in this function; code text
 * below is preserved byte-for-byte.
 */
68 static void eswin_fw_ready(struct sk_buff *skb, struct eswin * tr)
\r
70 struct ieee80211_hw *hw = tr->hw;
\r
71 struct wim_ready *ready;
\r
72 struct wim *wim = (struct wim *)skb->data;
\r
74 ECRNX_PRINT(" %s entry!!", __func__);
\r
/* The ready payload immediately follows the wim header. */
75 ready = (struct wim_ready *) (wim + 1);
\r
77 ECRNX_PRINT(" %s -- version: 0x%x", __func__, ready->v.version);
\r
78 ECRNX_PRINT(" %s -- rx_head_size: %d", __func__, ready->v.rx_head_size);
\r
79 ECRNX_PRINT(" %s -- tx_head_size: %d", __func__, ready->v.tx_head_size);
\r
80 ECRNX_PRINT(" %s -- buffer_size: %d", __func__, ready->v.buffer_size);
\r
82 tr->fwinfo.ready = 2;
\r
83 tr->fwinfo.version = ready->v.version;
\r
84 tr->fwinfo.rx_head_size = ready->v.rx_head_size;
\r
85 tr->fwinfo.tx_head_size = ready->v.tx_head_size;
\r
86 tr->fwinfo.payload_align = ready->v.payload_align;
\r
87 tr->fwinfo.buffer_size = ready->v.buffer_size;
\r
89 ECRNX_PRINT(" %s -- cap_mask: 0x%llx", __func__, ready->v.cap.cap);
\r
90 ECRNX_PRINT(" %s -- cap_li: %d, %d", __func__, ready->v.cap.listen_interval, listen_interval);
\r
91 ECRNX_PRINT(" %s -- cap_idle: %d, %d", __func__, ready->v.cap.bss_max_idle, bss_max_idle);
\r
93 tr->cap.cap_mask = ready->v.cap.cap;
\r
94 tr->cap.listen_interval = ready->v.cap.listen_interval;
\r
95 tr->cap.bss_max_idle = ready->v.cap.bss_max_idle;
\r
/* Module-parameter overrides take precedence over firmware values. */
97 if (listen_interval) {
\r
98 hw->max_listen_interval = listen_interval;
\r
99 tr->cap.listen_interval = listen_interval;
\r
102 if (bss_max_idle) {
\r
103 tr->cap.bss_max_idle = bss_max_idle;
\r
/* Ownership of @skb ends here. */
106 dev_kfree_skb(skb);
\r
107 ECRNX_PRINT(" %s exit!!", __func__);
\r
/* Count of queued TX descriptors since the last flush (see sdio_host_send)
 * and the timer that flushes a partially-filled aggregation batch. */
110 static unsigned int sdio_tx_packets = 0;
111 static struct timer_list sdio_tx_timer = {0};
/* Batch flush timeout for the TX aggregation timer, in microseconds. */
113 #define SDIO_TX_TIMER_TIMEOUT_US (200)
/*
 * sdio_tx_queue_init() - reset a TX buffer queue to empty and init its lock.
 * NOTE(review): extraction dropped braces/lines; code text kept verbatim.
 */
115 void sdio_tx_queue_init(struct tx_buff_queue * queue)
\r
117 queue->head = NULL;
\r
118 queue->tail = NULL;
\r
120 spin_lock_init(&queue->lock);
\r
/*
 * sdio_tx_queue_push() - append @node to the tail of @queue (IRQ-safe).
 * NOTE(review): the empty/non-empty branch (and brace lines) were dropped by
 * the extraction — `queue->head = node` is presumably the empty-queue branch
 * and `queue->tail->next = node` the non-empty one; confirm against the
 * original source. Code text kept verbatim.
 */
123 void sdio_tx_queue_push(struct tx_buff_queue * queue, struct tx_buff_node *node)
\r
125 unsigned long flags;
\r
127 spin_lock_irqsave(&queue->lock, flags);
\r
129 queue->tail->next = node;
\r
131 queue->head = node;
\r
134 queue->tail = node;;
\r
135 queue->tail->next = NULL;
\r
138 spin_unlock_irqrestore(&queue->lock, flags);
\r
140 //ECRNX_PRINT(" queue push count: %d\n", queue->count);
\r
141 //ECRNX_PRINT(" queue push head: %#x\n", queue->head);
\r
/*
 * sdio_tx_queue_pop() - detach and return the head node of @queue, or NULL
 * when the queue is empty (IRQ-safe).
 * NOTE(review): the line assigning `res = queue->head;` (and likely a
 * count decrement and the final `return res;`) was dropped by the
 * extraction. Code text kept verbatim.
 */
146 unsigned long flags;
\r
147 struct tx_buff_node *res = NULL;
\r
149 //ECRNX_PRINT(" queue pop count: %d\n", queue->count);
\r
150 //ECRNX_PRINT(" queue pop head: %#x\n", queue->head);
\r
152 spin_lock_irqsave(&queue->lock, flags);
\r
154 if (queue->count) {
\r
156 queue->head = res->next;
\r
161 spin_unlock_irqrestore(&queue->lock, flags);
\r
/*
 * sdio_tx_queue_peek() - return the head node of @queue without removing it,
 * or NULL when empty (IRQ-safe).
 * NOTE(review): the `res = queue->head;` assignment and `return res;` were
 * dropped by the extraction. Code text kept verbatim.
 */
165 struct tx_buff_node *sdio_tx_queue_peek(struct tx_buff_queue *queue)
\r
167 unsigned long flags;
\r
168 struct tx_buff_node *res = NULL;
\r
170 spin_lock_irqsave(&queue->lock, flags);
\r
172 if (queue->count) {
\r
176 spin_unlock_irqrestore(&queue->lock, flags);
\r
/*
 * sdio_tx_node_alloc() - pop a free tx_buff_node from tr->tx_node_head
 * free-list (guarded by tr->tx_lock); presumably returns NULL when the
 * free-list is exhausted — the early-return path was dropped by the
 * extraction. Code text kept verbatim.
 */
180 struct tx_buff_node * sdio_tx_node_alloc(struct eswin *tr)
\r
182 struct tx_buff_node * res;
\r
183 unsigned long flags;
\r
185 spin_lock_irqsave(&tr->tx_lock,flags);
\r
186 res = tr->tx_node_head;
\r
/* NOTE(review): this unlock is likely inside a dropped `if (!res)` guard. */
189 spin_unlock_irqrestore(&tr->tx_lock, flags);
\r
192 tr->tx_node_head = tr->tx_node_head->next;
\r
195 spin_unlock_irqrestore(&tr->tx_lock, flags);
\r
/*
 * sdio_tx_node_free() - return @node to tr's tx_buff_node free-list
 * (guarded by tr->tx_lock). NOTE(review): the line freeing node->buff
 * appears to have been dropped by the extraction — verify against the
 * original source. Code text kept verbatim.
 */
200 void sdio_tx_node_free(struct eswin *tr, struct tx_buff_node * node)
\r
202 unsigned long flags;
\r
203 spin_lock_irqsave(&tr->tx_lock,flags);
\r
206 node->next = tr->tx_node_head;
\r
207 tr->tx_node_head = node;
\r
209 spin_unlock_irqrestore(&tr->tx_lock, flags);
\r
/*
 * sdio_tx_pkg_queue_init() - reset an aggregated-packet queue to empty and
 * init its lock. NOTE(review): braces dropped by extraction; text verbatim.
 */
212 void sdio_tx_pkg_queue_init(struct tx_buff_pkg_queue * queue)
\r
214 queue->head = NULL;
\r
215 queue->tail = NULL;
\r
217 spin_lock_init(&queue->lock);
\r
/*
 * sdio_tx_pkg_queue_push() - append @node to the tail of the pkg @queue
 * (IRQ-safe). Mirrors sdio_tx_queue_push() for tx_buff_pkg_node.
 * NOTE(review): the empty/non-empty branch structure was dropped by the
 * extraction. Code text kept verbatim.
 */
220 void sdio_tx_pkg_queue_push(struct tx_buff_pkg_queue * queue, struct tx_buff_pkg_node *node)
\r
222 unsigned long flags;
\r
224 spin_lock_irqsave(&queue->lock, flags);
\r
226 queue->tail->next = node;
\r
228 queue->head = node;
\r
231 queue->tail = node;;
\r
232 queue->tail->next = NULL;
\r
235 spin_unlock_irqrestore(&queue->lock, flags);
\r
/*
 * sdio_tx_pkg_queue_pop() - detach and return the head pkg node of @queue,
 * or NULL when empty (IRQ-safe). NOTE(review): the `res = queue->head;`
 * assignment and `return res;` were dropped by the extraction. Text verbatim.
 */
238 struct tx_buff_pkg_node *sdio_tx_pkg_queue_pop(struct tx_buff_pkg_queue *queue)
\r
240 unsigned long flags;
\r
241 struct tx_buff_pkg_node *res = NULL;
\r
243 spin_lock_irqsave(&queue->lock, flags);
\r
245 if (queue->count) {
\r
247 queue->head = res->next;
\r
252 spin_unlock_irqrestore(&queue->lock, flags);
\r
/*
 * sdio_tx_pkg_node_alloc() - pop a free tx_buff_pkg_node from
 * tr->tx_pkg_node_head (guarded by tr->tx_pkg_lock) and decrement
 * tr->tx_pkg_node_num; presumably returns NULL when exhausted — the
 * early-return guard was dropped by the extraction. Text verbatim.
 */
256 struct tx_buff_pkg_node * sdio_tx_pkg_node_alloc(struct eswin *tr)
\r
258 struct tx_buff_pkg_node * res;
\r
259 unsigned long flags;
\r
261 spin_lock_irqsave(&tr->tx_pkg_lock,flags);
\r
262 res = tr->tx_pkg_node_head;
\r
/* NOTE(review): this unlock is likely inside a dropped `if (!res)` guard. */
265 spin_unlock_irqrestore(&tr->tx_pkg_lock, flags);
\r
268 tr->tx_pkg_node_head = tr->tx_pkg_node_head->next;
\r
270 tr->tx_pkg_node_num--;
\r
271 spin_unlock_irqrestore(&tr->tx_pkg_lock, flags);
\r
/*
 * sdio_tx_pkg_node_free() - return @node to tr's pkg free-list (guarded by
 * tr->tx_pkg_lock). For TX_DESC packages the aggregate buffer is presumably
 * kfree'd in a branch dropped by the extraction, then each sub tx_node in
 * node->tx_node[] is released back through sdio_tx_node_free().
 * NOTE(review): sdio_tx_node_free() also takes tr->tx_lock while tx_pkg_lock
 * is held here — lock ordering must be consistent driver-wide; verify.
 * Code text kept verbatim.
 */
276 void sdio_tx_pkg_node_free(struct eswin *tr, struct tx_buff_pkg_node * node)
\r
278 unsigned long flags;
\r
280 spin_lock_irqsave(&tr->tx_pkg_lock,flags);
\r
281 if((node->flag & FLAG_MSG_TYPE_MASK) == TX_FLAG_TX_DESC)
\r
286 node->next = tr->tx_pkg_node_head;
\r
287 tr->tx_pkg_node_head = node;
\r
288 tr->tx_pkg_node_num++;
\r
290 if (node->node_cnt > 1)
\r
292 printk("%s,count :%d\n",__func__,node->node_cnt);
\r
296 for (i = 0; i < node->node_cnt; ++i)
\r
298 sdio_tx_node_free(tr, node->tx_node[i]);
\r
300 spin_unlock_irqrestore(&tr->tx_pkg_lock, flags);
\r
/*
 * eswin_sdio_register_rx_cb() - record the RX callback invoked when data
 * arrives from the device. NOTE(review): braces dropped by extraction.
 */
304 void eswin_sdio_register_rx_cb(struct eswin *tr, sdio_rx_cb_t cb)
\r
306 tr->rx_callback = cb;
\r
/* Confirm callbacks implemented in the umac layer. */
309 extern int ecrnx_data_cfm_callback(void *priv, void *host_id);
\r
310 extern int ecrnx_msg_cfm_callback(void *priv, void *host_id);
\r
/*
 * eswin_core_register_work() - delayed-work body that finishes bring-up:
 * optionally downloads firmware (dl_fw), installs the rx/data-cfm/msg-cfm
 * callbacks, calls ecrnx_platform_init() and marks the core registered.
 * The schedule_delayed_work(…, 1000ms) call is presumably a retry path whose
 * surrounding condition was dropped by the extraction. Text verbatim.
 */
311 static void eswin_core_register_work(struct work_struct *work)
\r
313 //struct sk_buff *skb_resp;
\r
315 struct eswin *tr = container_of(work, struct eswin, register_work.work);
\r
317 ECRNX_PRINT(" %s entry, dl_fw = %d!!", __func__, dl_fw);
\r
319 if (dl_fw && eswin_fw_file_chech(tr)) {
\r
320 eswin_fw_file_download(tr);
\r
321 release_firmware(tr->fw);
\r
323 schedule_delayed_work(&tr->register_work, msecs_to_jiffies(1000));
\r
327 #ifdef CONFIG_ECRNX_WIFO_CAIL
\r
328 ECRNX_PRINT(" %s entry, amt_mode = %d!!", __func__, amt_mode);
\r
331 tr->rx_callback = ecrnx_rx_callback;
\r
332 tr->data_cfm_callback = ecrnx_data_cfm_callback;
\r
333 tr->msg_cfm_callback = ecrnx_msg_cfm_callback;
\r
335 ret = ecrnx_platform_init(tr, &tr->umac_priv);
\r
336 set_bit(ESWIN_FLAG_CORE_REGISTERED, &tr->dev_flags);
\r
337 ECRNX_DBG("%s exit!!", __func__);
\r
/*
 * eswin_core_register() - start the transport (ops->start) and schedule the
 * deferred registration work (eswin_core_register_work) after 1 ms.
 * NOTE(review): return statement dropped by extraction. Text verbatim.
 */
342 int eswin_core_register(struct eswin *tr)
\r
344 ECRNX_PRINT("%s entry!!", __func__);
\r
345 tr->ops->start(tr);
\r
347 //schedule_delayed_work(&tr->register_work, msecs_to_jiffies(10));
\r
348 schedule_delayed_work(&tr->register_work, msecs_to_jiffies(1));
\r
349 ECRNX_PRINT("%s exit!!", __func__);
\r
/*
 * eswin_core_unregister() - tear down core registration: cancel the pending
 * register work, bail out if registration never completed, then deinit the
 * umac platform layer. NOTE(review): the early-return body under the
 * test_bit() check was dropped by the extraction. Text verbatim.
 */
353 void eswin_core_unregister(struct eswin *tr)
\r
355 struct eswin_sdio *tr_sdio = (struct eswin_sdio *)tr->drv_priv;
\r
356 ECRNX_PRINT("%s entry!!", __func__);
\r
358 cancel_delayed_work(&tr->register_work);
\r
360 if (!test_bit(ESWIN_FLAG_CORE_REGISTERED, &tr->dev_flags))
\r
363 ecrnx_platform_deinit(tr->umac_priv);
\r
/*
 * eswin_sdio_tx_thread() - SCHED_FIFO kthread that drains tr->tx_pkg_queue:
 * waits on tr->wait_tx, pops aggregated pkg nodes and hands them to
 * tr->ops->xmit(). Nodes that need a confirm (TX_DESC / MSG_E) are pushed to
 * tr->tx_c_queue and the callback thread is woken; others are freed here.
 * NOTE(review): braces and several lines (error-branch bodies, loop
 * structure) were dropped by the extraction. Text verbatim.
 */
366 static int eswin_sdio_tx_thread(void *data)
368 struct eswin *tr = (struct eswin *)data;
369 struct tx_buff_pkg_node *node;
\r
370 int i, ret = 0, cb_per = 0;
\r
/* Raise to RT priority; API differs before/after kernel 5.9. */
372 #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)
373 struct sched_param param = { .sched_priority = 1 };
374 param.sched_priority = 56;
375 sched_setscheduler(get_current(), SCHED_FIFO, &param);
377 sched_set_fifo(get_current());
379 ECRNX_PRINT("sdio pkg thread entry\n");
\r
381 while (!kthread_should_stop())
383 ret = wait_event_interruptible(tr->wait_tx, tr->tx_pkg_queue.count != 0 || kthread_should_stop());
386 ECRNX_ERR("sdio pkg thread error!\n");
\r
389 if(kthread_should_stop())
\r
393 while (tr->tx_pkg_queue.count != 0)
395 node = sdio_tx_pkg_queue_pop(&tr->tx_pkg_queue);
\r
398 wake_up_interruptible(&tr->wait_cb);
\r
402 if (tr->ops->xmit) {
\r
403 ret = tr->ops->xmit(tr, node);
\r
/* TX descriptors and MSG_E frames get a confirm pass on tx_c_queue. */
405 if((node->flag & FLAG_MSG_TYPE_MASK) == TX_FLAG_TX_DESC || (node->flag & FLAG_MSG_TYPE_MASK) == TX_FLAG_MSG_E)
\r
407 sdio_tx_pkg_queue_push(&tr->tx_c_queue,node);
\r
411 sdio_tx_pkg_node_free(tr, node);
\r
414 //if (cb_per % 4 == 0)
\r
417 wake_up_interruptible(&tr->wait_cb);
\r
420 ECRNX_ERR(" eswin_sdio_work, ops->xmit is null\n");
\r
424 ECRNX_PRINT("sdio tx thread exit\n");
\r
/*
 * eswin_sdio_callback_thread() - SCHED_FIFO kthread that drains
 * tr->tx_c_queue: for each transmitted TX_DESC package it walks the
 * aggregated sub-nodes and invokes tr->data_cfm_callback() with the host_id
 * recovered from the txdesc (management frames are skipped per the
 * TXU_CNTRL_MGMT check), then returns the pkg node to the free-list.
 * The MSG_E confirm path is commented out. NOTE(review): braces and some
 * lines dropped by the extraction. Text verbatim.
 */
428 static int eswin_sdio_callback_thread(void *data)
430 struct eswin *tr = (struct eswin *)data;
431 struct tx_buff_pkg_node *node;
\r
433 struct txdesc_api *tx_desc;
\r
/* Raise to RT priority; API differs before/after kernel 5.9. */
436 #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)
437 struct sched_param param = { .sched_priority = 1 };
438 param.sched_priority = 56;
439 sched_setscheduler(get_current(), SCHED_FIFO, &param);
441 sched_set_fifo(get_current());
443 ECRNX_PRINT("sdio callback thread entry\n");
\r
445 while (!kthread_should_stop())
447 ret = wait_event_interruptible(tr->wait_cb, tr->tx_c_queue.count != 0 || kthread_should_stop());
450 ECRNX_ERR("sdio callback thread error!\n");
\r
453 if(kthread_should_stop())
\r
457 while (tr->tx_c_queue.count != 0)
459 node = sdio_tx_pkg_queue_pop(&tr->tx_c_queue);
\r
460 if((node->flag & FLAG_MSG_TYPE_MASK) == TX_FLAG_TX_DESC && (tr->data_cfm_callback))
\r
462 for (i = 0; i < node->node_cnt; ++i)
\r
464 tx_desc = (struct txdesc_api *)node->tx_node[i]->buff;
\r
465 if (tx_desc->host.flags & TXU_CNTRL_MGMT)
\r
/* memcpy avoids a potentially misaligned pointer-sized load. */
469 memcpy(&host_id, tx_desc->host.packet_addr, sizeof(ptr_addr));
\r
470 tr->data_cfm_callback(tr->umac_priv, (void*)host_id);
\r
473 //else if((node->flag & FLAG_MSG_TYPE_MASK) == TX_FLAG_MSG_E && (tr->msg_cfm_callback))
\r
475 // for (i = 0; i < node->node_cnt; ++i)
\r
477 // struct ecrnx_cmd_a2emsg *msg = (struct ecrnx_cmd_a2emsg *)node->tx_node[i]->buff;
\r
478 // tr->msg_cfm_callback(tr->umac_priv, msg->hostid);
\r
481 sdio_tx_pkg_node_free(tr, node);
\r
484 ECRNX_PRINT("rx callback thread exit\n");
\r
/*
 * eswin_sdio_tx_pkg_thread() - SCHED_FIFO kthread that aggregates raw TX
 * nodes from tr->tx_queue into tx_buff_pkg_node packages:
 *  - non-TX_DESC nodes pass through one-to-one (buffer shared, not copied);
 *  - TX_DESC nodes are copied into a single 512-byte-aligned kzalloc'd
 *    buffer (up to SDIO_PKG_MAX_CNT frames, per-frame padded length encoded
 *    into pkg_node->flag, true lengths in tx_pkg_head.len[]).
 * Finished packages are pushed to tr->tx_pkg_queue and tr->wait_tx is woken.
 * NOTE(review): braces and several lines (loop bodies, pkg_cnt updates,
 * break statements) were dropped by the extraction. Text verbatim.
 */
488 static int eswin_sdio_tx_pkg_thread(void *data)
490 struct eswin *tr = (struct eswin *)data;
491 struct tx_buff_node *node;
\r
492 struct txdesc_api *tx_desc;
\r
493 struct tx_buff_pkg_node * pkg_node = NULL;
\r
494 struct tx_buff_pkg_head tx_pkg_head;
\r
495 unsigned int offset = 0;
\r
496 int i, ret, pkg_cnt = 0;
\r
/* Raise to RT priority; API differs before/after kernel 5.9. */
498 #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)
499 struct sched_param param = { .sched_priority = 1 };
500 param.sched_priority = 56;
501 sched_setscheduler(get_current(), SCHED_FIFO, &param);
503 sched_set_fifo(get_current());
505 ECRNX_PRINT("sdio tx pkg thread entry\n");
\r
507 while (!kthread_should_stop())
509 ret = wait_event_interruptible(tr->wait_pkg, tr->tx_queue.count != 0 || kthread_should_stop());
512 ECRNX_ERR("sdio tx pkg thread error!\n");
\r
515 if(kthread_should_stop())
\r
519 while (tr->tx_queue.count != 0)
523 memset(&tx_pkg_head,0,sizeof(tx_pkg_head));
\r
524 pkg_node = sdio_tx_pkg_node_alloc(tr);
\r
/* NOTE(review): pkg_node is dereferenced before the no-node log below —
 * the intervening NULL check was presumably dropped by the extraction. */
525 memset(pkg_node,0,sizeof(struct tx_buff_pkg_node));
\r
527 ECRNX_PRINT(" sdio pkg failed, no node!!\n");
\r
530 node = sdio_tx_queue_peek(&tr->tx_queue);
\r
531 pkg_node->flag = node->flag;
\r
/* Non-data frames pass through unaggregated, reusing the node's buffer. */
533 if((node->flag & FLAG_MSG_TYPE_MASK) != TX_FLAG_TX_DESC)
\r
535 node = sdio_tx_queue_pop(&tr->tx_queue);
\r
536 pkg_node->buff = node->buff;
\r
537 pkg_node->len = node->len;
\r
538 pkg_node->tx_node[pkg_cnt] = node;
\r
/* Data path: one aggregate buffer, 512-aligned, header + padded frames. */
543 pkg_node->buff = (void *)kzalloc(ALIGN(SDIO_PKG_MAX_DATA*SDIO_PKG_MAX_CNT + sizeof(tx_pkg_head), 512), GFP_ATOMIC);
\r
544 if(!pkg_node->buff){
\r
545 ECRNX_PRINT("pkg_node buff malloc error! \n");
\r
547 pkg_node->len = sizeof(tx_pkg_head);
\r
549 while (tr->tx_queue.count)
\r
551 offset = pkg_node->len;
\r
552 node = sdio_tx_queue_peek(&tr->tx_queue);
\r
/* Stop aggregating at a non-data frame or when the package is full. */
554 if (((node->flag & FLAG_MSG_TYPE_MASK) != TX_FLAG_TX_DESC) || (pkg_cnt > (SDIO_PKG_MAX_CNT-1)))
\r
558 //ECRNX_DBG("tx count 2 %d,node %x",tr->tx_queue.count,node);
\r
559 node = sdio_tx_queue_pop(&tr->tx_queue);
\r
560 if(ALIGN(node->len, SDIO_PKG_PAD_GRN) < (SDIO_PKG_DIV_MSZ+1))
\r
562 pkg_node->len += ALIGN(node->len, SDIO_PKG_PAD_GRN);
\r
/* Encode this frame's padded size (in PAD_GRN units) into the flag word. */
563 pkg_node->flag |= (ALIGN(node->len, SDIO_PKG_PAD_GRN)/SDIO_PKG_PAD_GRN) << (8+SDIO_PKG_BIT_SHIFT*pkg_cnt);
\r
567 pkg_node->len += SDIO_PKG_MAX_DATA;
\r
568 pkg_node->flag |= ((1 << SDIO_PKG_BIT_SHIFT) - 1) << (8+SDIO_PKG_BIT_SHIFT*pkg_cnt);
\r
570 memcpy(pkg_node->buff + offset, node->buff, node->len);
\r
571 pkg_node->tx_node[pkg_cnt] = node;
\r
572 tx_pkg_head.len[pkg_cnt] = node->len;
\r
575 pkg_node->len = ALIGN(pkg_node->len, 512);
\r
576 memcpy(pkg_node->buff, &tx_pkg_head, sizeof(tx_pkg_head));
\r
578 pkg_node->node_cnt = pkg_cnt;
\r
579 sdio_tx_pkg_queue_push(&tr->tx_pkg_queue, pkg_node);
\r
580 wake_up_interruptible(&tr->wait_tx);
\r
583 ECRNX_PRINT("tx pkg thread exit\n");
\r
/*
 * eswin_sdio_ops_init() - one-time init of the TX path data structures:
 * the three queues (raw tx, confirm, aggregated pkg) and the two free-lists
 * (tx_node[] and tx_pkg_node[], both ESWIN_TX_NODE_CNT long) with their
 * spinlocks. NOTE(review): assignment of @ops and braces were dropped by
 * the extraction. Text verbatim.
 */
587 void eswin_sdio_ops_init(struct eswin * tr, const struct sdio_ops * ops)
\r
593 sdio_tx_queue_init(&tr->tx_queue);
\r
594 sdio_tx_pkg_queue_init(&tr->tx_c_queue);
\r
595 sdio_tx_pkg_queue_init(&tr->tx_pkg_queue);
\r
/* Chain tx_node[0..N-1] into a singly-linked free-list. */
597 for (i=1; i<ESWIN_TX_NODE_CNT; i++) {
\r
598 tr->tx_node[i-1].next = &tr->tx_node[i];
\r
601 tr->tx_node[i-1].next = NULL;
\r
602 tr->tx_node_head = &tr->tx_node[0];
\r
603 tr->tx_node_num = ESWIN_TX_NODE_CNT;
\r
604 spin_lock_init(&tr->tx_lock);
\r
/* Same free-list construction for the aggregated pkg nodes. */
606 for (i=1; i<ESWIN_TX_NODE_CNT; i++) {
\r
607 tr->tx_pkg_node[i-1].next = &tr->tx_pkg_node[i];
\r
610 tr->tx_pkg_node[i-1].next = NULL;
\r
611 tr->tx_pkg_node_head = &tr->tx_pkg_node[0];
\r
612 tr->tx_pkg_node_num = ESWIN_TX_NODE_CNT;
\r
613 spin_lock_init(&tr->tx_pkg_lock);
\r
/* Running count of queued TX descriptors used to batch pkg-thread wakeups. */
616 int tx_desc_count = 0;
\r
/*
 * sdio_host_send() - entry point for upper layers to transmit @buff of @len
 * bytes with type @flag. Copies the payload into a freshly kzalloc'd node
 * buffer, encodes a partial-block length hint into flag bits 8+ for
 * non-TX_DESC frames longer than one 512-byte block, queues the node, and
 * wakes the pkg thread — immediately for control/message frames, every
 * SDIO_PKG_MAX_CNT frames for data (with sdio_tx_timer as the flush
 * fallback). NOTE(review): error-return paths and tx_desc_count update were
 * dropped by the extraction. Text verbatim.
 */
617 int sdio_host_send(void *buff, int len, int flag)
\r
619 struct eswin * tr= pEswin;
\r
620 struct tx_buff_node * node = sdio_tx_node_alloc(tr);
\r
622 ECRNX_DBG("%s enter, data len :%d ", __func__, len);
\r
625 ECRNX_PRINT(" sdio send failed, no node!!\n");
\r
629 node->buff = (struct lmac_msg *)kzalloc(len, GFP_ATOMIC);
\r
631 ECRNX_PRINT("buff malloc error! \n");
\r
634 memcpy(node->buff, buff, len);
\r
636 node->flag = flag & 0xFF;
\r
/* For non-data frames, stash len%512 in the upper flag bits so the device
 * knows the final partial block size. */
638 if ((len > 512) && (len%512) && ((node->flag & FLAG_MSG_TYPE_MASK) != TX_FLAG_TX_DESC)) {
\r
639 node->flag |= (len%512)<<8;
\r
645 sdio_tx_queue_push(&tr->tx_queue, node);
\r
647 if((node->flag & FLAG_MSG_TYPE_MASK) != TX_FLAG_TX_DESC)
\r
650 //queue_work(tr->workqueue_pkg,&tr->work_pkg);
\r
651 wake_up_interruptible(&tr->wait_pkg);
\r
656 if(tx_desc_count%SDIO_PKG_MAX_CNT == 0)
\r
658 //queue_work(tr->workqueue_pkg,&tr->work_pkg);
\r
659 wake_up_interruptible(&tr->wait_pkg);
\r
/* Flush fallback: fire the timer so a partial batch is not stranded. */
663 mod_timer(&sdio_tx_timer, jiffies + usecs_to_jiffies(SDIO_TX_TIMER_TIMEOUT_US));
\r
/*
 * sdio_tx_timer_handle() - timer callback that wakes the pkg thread so a
 * partially-filled aggregation batch is flushed. NOTE(review): braces
 * dropped by extraction. Text verbatim.
 */
669 void sdio_tx_timer_handle(struct timer_list *time)
671 struct eswin * tr = pEswin;
\r
675 wake_up_interruptible(&tr->wait_pkg);
\r
/* Registers sdio_host_send as the umac layer's send hook. */
679 extern void ecrnx_send_handle_register(void * fn);
\r
/*
 * eswin_core_create() - allocate and initialize the core eswin instance:
 * kzalloc of struct eswin plus @priv_size of driver-private space, queue and
 * free-list setup via eswin_sdio_ops_init(), waitqueues, the three TX
 * kthreads, the deferred register work and the batch-flush timer. Returns
 * the new instance; on failure falls through to eswin_core_destroy().
 * NOTE(review): declaration of tr, NULL checks, field assignments and the
 * return statement were dropped by the extraction. Text verbatim.
 */
681 struct eswin * eswin_core_create(size_t priv_size, struct device *dev,
\r
682 const struct sdio_ops * ops)
\r
686 tr = (struct eswin *)kzalloc(sizeof(struct eswin) + priv_size, GFP_KERNEL);
\r
694 tr->loopback = loopback;
\r
695 //tr->loopback = 1;
\r
696 eswin_sdio_ops_init(tr, ops);
\r
697 ecrnx_send_handle_register(sdio_host_send);
\r
699 //init_completion(&tr->wim_responded);
\r
700 init_waitqueue_head(&tr->wait_pkg);
\r
701 init_waitqueue_head(&tr->wait_tx);
702 init_waitqueue_head(&tr->wait_cb);
\r
704 tr->kthread_pkg = kthread_run(eswin_sdio_tx_pkg_thread, tr, "sdio-tx-pkg");
705 tr->kthread_tx = kthread_run(eswin_sdio_tx_thread, tr, "sdio-tx");
\r
706 tr->kthread_cb = kthread_run(eswin_sdio_callback_thread, tr, "sdio-tx-callback");
\r
708 INIT_DELAYED_WORK(&tr->register_work, eswin_core_register_work);
\r
709 timer_setup(&sdio_tx_timer, sdio_tx_timer_handle, 0);
\r
711 tr->state = ESWIN_STATE_INIT;
\r
713 //eswin_init_debugfs(tr);
\r
715 ECRNX_PRINT(" %s exit!!", __func__);
\r
/* NOTE(review): likely an error-label cleanup path; the label and the
 * preceding `return tr;` were dropped by the extraction. */
719 eswin_core_destroy(tr);
\r
/*
 * eswin_core_destroy() - tear down the core instance: mark state CLOSEED,
 * free any node buffers still attached to the tx_node[] / tx_pkg_node[]
 * pools (under their locks), then stop the three kthreads and wake their
 * waitqueues so blocked waiters exit.
 * NOTE(review): the hard-coded 64 in both loops should presumably match
 * ESWIN_TX_NODE_CNT (used in eswin_sdio_ops_init) — verify. Calling kfree()
 * while holding a spinlock and waking waitqueues only after kthread_stop()
 * are also worth review. Braces/lines dropped by extraction; text verbatim.
 */
723 void eswin_core_destroy(struct eswin *tr)
\r
725 unsigned long flags;
\r
728 ECRNX_PRINT("%s entry!!", __func__);
\r
729 tr->state = ESWIN_STATE_CLOSEED;
\r
732 //flush_workqueue(tr->workqueue);
\r
733 //destroy_workqueue(tr->workqueue);
\r
734 //tr->workqueue = NULL;
\r
736 ECRNX_PRINT("%s node_num %d\n", __func__, tr->tx_node_num);
\r
737 spin_lock_irqsave(&tr->tx_lock,flags);
\r
738 for (i=0; i<64; i++)
\r
740 if (tr->tx_node[i].buff)
\r
742 kfree(tr->tx_node[i].buff);
\r
745 spin_unlock_irqrestore(&tr->tx_lock, flags);
\r
747 spin_lock_irqsave(&tr->tx_pkg_lock,flags);
\r
748 for (i=0; i<64; i++)
\r
750 if (tr->tx_pkg_node[i].buff)
\r
752 kfree(tr->tx_pkg_node[i].buff);
\r
755 spin_unlock_irqrestore(&tr->tx_pkg_lock, flags);
\r
757 kthread_stop(tr->kthread_pkg);
\r
758 wake_up_interruptible(&tr->wait_pkg);
\r
759 kthread_stop(tr->kthread_cb);
\r
760 wake_up_interruptible(&tr->wait_cb);
\r
761 kthread_stop(tr->kthread_tx);
\r
762 wake_up_interruptible(&tr->wait_tx);
\r
767 //eswin_mac_destroy(tr);
\r
768 ECRNX_PRINT("%s exit!!", __func__);
\r
772 //MODULE_AUTHOR("Transa-Semi");
\r
773 //MODULE_LICENSE("Dual BSD/GPL");
\r
774 //MODULE_DESCRIPTION("Core module for Transa-Semi 802.11 WLAN SDIO driver");
\r