4 * IPC utility function definitions
6 * Copyright (C) ESWIN 2015-2020
8 #include "ecrnx_utils.h"
9 #include "ecrnx_defs.h"
12 #include "ecrnx_msg_rx.h"
13 #include "ecrnx_debugfs.h"
14 #include "ecrnx_prof.h"
17 #ifdef CONFIG_ECRNX_ESWIN_SDIO
18 #include "eswin_utils.h"
19 #include "ecrnx_sdio.h"
21 #elif defined(CONFIG_ECRNX_ESWIN_USB)
22 #include "eswin_utils.h"
23 #include "ecrnx_usb.h"
29 * ecrnx_ipc_elem_pool_allocs() - Allocate and push to fw a pool of buffer.
31 * @ecrnx_hw: Main driver structure
32 * @pool: Pool to allocate
33 * @nb: Size of the pool to allocate
34 * @elem_size: Size of one pool element
35 * @pool_name: Name of the pool
36 * @push: Function to push one pool element to fw
38 * This function will allocate an array to store the list of element addresses,
39 * a dma pool and @nb element in the dma pool.
40 * Each element is set with '0' and then pushed to fw using the @push function.
41 * It assumes that pointers inside the @pool parameter are set to NULL at start.
43 * Return: 0 on success and <0 upon error. If error is returned any allocated
44 * memory is NOT freed and ecrnx_ipc_elem_pool_deallocs() must be called.
46 #ifndef CONFIG_ECRNX_ESWIN
47 static int ecrnx_ipc_elem_pool_allocs(struct ecrnx_hw *ecrnx_hw,
48 struct ecrnx_ipc_elem_pool *pool,
49 int nb, size_t elem_size, char *pool_name,
50 int (*push)(struct ipc_host_env_tag *,
53 struct ecrnx_ipc_elem *buf;
58 /* allocate buf array */
59 pool->buf = kmalloc(nb * sizeof(struct ecrnx_ipc_elem), GFP_KERNEL);
61 dev_err(ecrnx_hw->dev, "Allocation of buffer array for %s failed\n",
66 /* allocate dma pool; elements are cache-line aligned */
67 pool->pool = dma_pool_create(pool_name, ecrnx_hw->dev, elem_size,
68 cache_line_size(), 0);
70 dev_err(ecrnx_hw->dev, "Allocation of dma pool %s failed\n",
75 for (i = 0, buf = pool->buf; i < nb; buf++, i++) {
77 /* allocate an elem */
78 buf->addr = dma_pool_alloc(pool->pool, GFP_KERNEL, &buf->dma_addr);
80 dev_err(ecrnx_hw->dev, "Allocation of block %d/%d in %s failed\n",
81 (i + 1), nb, pool_name);
86 /* reset the element */
87 memset(buf->addr, 0, elem_size);
/* hand the zeroed element over to the firmware (dma_addr truncated to 32 bit) */
90 push(ecrnx_hw->ipc_env, buf, (uint32_t)buf->dma_addr);
97 * ecrnx_ipc_elem_pool_deallocs() - Free all memory allocated for a pool
101 * Must be called once after ecrnx_ipc_elem_pool_allocs(), even if it returned
104 #ifndef CONFIG_ECRNX_ESWIN
105 static void ecrnx_ipc_elem_pool_deallocs(struct ecrnx_ipc_elem_pool *pool)
107 struct ecrnx_ipc_elem *buf;
/* return every element to the dma pool before destroying the pool itself */
110 for (i = 0, buf = pool->buf; i < pool->nb ; buf++, i++) {
111 dma_pool_free(pool->pool, buf->addr, buf->dma_addr);
116 dma_pool_destroy(pool->pool);
125 * ecrnx_ipc_elem_var_allocs - Alloc a single ipc buffer and push it to fw
127 * @ecrnx_hw: Main driver structure
128 * @elem: Element to allocate
129 * @elem_size: Size of the element to allocate
130 * @dir: DMA direction
131 * @buf: If not NULL, use this buffer instead of allocating a new one. It must
132 * be @elem_size long and be allocated by kmalloc as kfree will be called.
133 * @init: Pointer to initial data to write in buffer before DMA sync. Needed
134 * only if direction is DMA_TO_DEVICE. If set it is assumed that its size is
136 * @push: Function to push the element to fw. May be set to NULL.
138 * It allocates a buffer (or uses the one provided with @buf), initializes it if
139 * @init is set, maps the buffer for DMA transfer, initializes @elem and pushes
140 * the buffer to FW if @push is set.
142 * Return: 0 on success and <0 upon error. If error is returned any allocated
143 * memory has been freed (including @buf if set).
145 int ecrnx_ipc_elem_var_allocs(struct ecrnx_hw *ecrnx_hw,
146 struct ecrnx_ipc_elem_var *elem, size_t elem_size,
147 enum dma_data_direction dir,
148 void *buf, const void *init,
149 void (*push)(struct ipc_host_env_tag *, uint32_t))
154 elem->addr = kmalloc(elem_size, GFP_KERNEL);
156 dev_err(ecrnx_hw->dev, "Allocation of ipc buffer failed\n");
160 elem->size = elem_size;
/* pre-fill the buffer only when it will be read by the device */
162 if ((dir == DMA_TO_DEVICE) && init) {
163 memcpy(elem->addr, init, elem_size);
166 #ifdef CONFIG_ECRNX_ESWIN
/* ESWIN (SDIO/USB) build: no DMA mapping, the CPU address stands in for dma_addr */
167 elem->dma_addr = (ptr_addr)elem->addr;
169 elem->dma_addr = dma_map_single(ecrnx_hw->dev, elem->addr, elem_size, dir);
170 if (dma_mapping_error(ecrnx_hw->dev, elem->dma_addr)) {
171 dev_err(ecrnx_hw->dev, "DMA mapping failed\n");
178 push(ecrnx_hw->ipc_env, elem->dma_addr);
184 * ecrnx_ipc_elem_var_deallocs() - Free memory allocated for a single ipc buffer
186 * @ecrnx_hw: Main driver structure
187 * @elem: Element to free
189 void ecrnx_ipc_elem_var_deallocs(struct ecrnx_hw *ecrnx_hw,
190 struct ecrnx_ipc_elem_var *elem)
194 #ifndef CONFIG_ECRNX_ESWIN
/* NOTE(review): unmaps with a hard-coded DMA_TO_DEVICE even though
 * ecrnx_ipc_elem_var_allocs() maps with a caller-supplied direction
 * (e.g. dbgdump_elem.buf is mapped DMA_FROM_DEVICE) — confirm intended */
195 dma_unmap_single(ecrnx_hw->dev, elem->dma_addr, elem->size, DMA_TO_DEVICE);
202 * ecrnx_ipc_skb_elem_allocs() - Allocate and push a skb buffer for the FW
204 * @ecrnx_hw: Main driver data
205 * @elem: Pointer to the skb elem that will contain the address of the buffer
 * @skb_size: Size of the skb data buffer to allocate
 * @dir: DMA direction used to map the buffer
 * @push: Function to push the mapped buffer to the FW (callers may pass NULL)
207 int ecrnx_ipc_skb_elem_allocs(struct ecrnx_hw *ecrnx_hw,
208 struct ecrnx_ipc_skb_elem *elem, size_t skb_size,
209 enum dma_data_direction dir,
210 int (*push)(struct ipc_host_env_tag *,
213 elem->skb = dev_alloc_skb(skb_size);
214 if (unlikely(!elem->skb)) {
215 dev_err(ecrnx_hw->dev, "Allocation of ipc skb failed\n");
219 elem->dma_addr = dma_map_single(ecrnx_hw->dev, elem->skb->data, skb_size, dir);
220 if (unlikely(dma_mapping_error(ecrnx_hw->dev, elem->dma_addr))) {
221 dev_err(ecrnx_hw->dev, "DMA mapping failed\n");
/* mapping failed: release the freshly allocated skb before bailing out */
222 dev_kfree_skb(elem->skb);
228 push(ecrnx_hw->ipc_env, elem, elem->dma_addr);
234 * ecrnx_ipc_skb_elem_deallocs() - Free a skb buffer allocated for the FW
236 * @ecrnx_hw: Main driver data
237 * @elem: Pointer to the skb elem that contains the address of the buffer
238 * @skb_size: size of the skb buffer data
239 * @dir: DMA direction
241 #ifndef CONFIG_ECRNX_ESWIN
242 static void ecrnx_ipc_skb_elem_deallocs(struct ecrnx_hw *ecrnx_hw,
243 struct ecrnx_ipc_skb_elem *elem,
244 size_t skb_size, enum dma_data_direction dir)
/* unmap first so ownership returns to the CPU, then release the skb */
247 dma_unmap_single(ecrnx_hw->dev, elem->dma_addr, skb_size, dir);
248 dev_kfree_skb(elem->skb);
254 * ecrnx_ipc_unsup_rx_vec_elem_allocs() - Allocate and push an unsupported
255 * RX vector buffer for the FW
257 * @ecrnx_hw: Main driver data
258 * @elem: Pointer to the skb elem that will contain the address of the buffer
260 #ifndef CONFIG_ECRNX_ESWIN
261 int ecrnx_ipc_unsup_rx_vec_elem_allocs(struct ecrnx_hw *ecrnx_hw,
262 struct ecrnx_ipc_skb_elem *elem)
264 struct rx_vector_desc *rxdesc;
266 if (ecrnx_ipc_skb_elem_allocs(ecrnx_hw, elem,
267 ecrnx_hw->ipc_env->unsuprxvec_bufsz, DMA_FROM_DEVICE, NULL))
270 rxdesc = (struct rx_vector_desc *) elem->skb->data;
/* push the pattern field back to the device so FW sees the cleared state */
272 dma_sync_single_for_device(ecrnx_hw->dev,
273 elem->dma_addr + offsetof(struct rx_vector_desc, pattern),
274 sizeof(rxdesc->pattern), DMA_BIDIRECTIONAL);
276 ipc_host_unsup_rx_vec_buf_push(ecrnx_hw->ipc_env, elem, (u32) elem->dma_addr);
283 * ecrnx_ipc_unsup_rx_vec_elems_deallocs() - Free all unsupported rx vector buffer
284 * allocated for the FW
286 * @ecrnx_hw: Main driver data
288 #ifndef CONFIG_ECRNX_ESWIN
289 static void ecrnx_ipc_unsup_rx_vec_elems_deallocs(struct ecrnx_hw *ecrnx_hw)
291 struct ecrnx_ipc_skb_elem *elem;
292 int i, nb = ecrnx_hw->ipc_env->unsuprxvec_bufnb;
/* nothing to do if the element table was never allocated */
294 if (!ecrnx_hw->e2aunsuprxvec_elems)
297 for (i = 0, elem = ecrnx_hw->e2aunsuprxvec_elems; i < nb; i++, elem++) {
298 ecrnx_ipc_skb_elem_deallocs(ecrnx_hw, elem, ecrnx_hw->ipc_env->unsuprxvec_bufsz, DMA_FROM_DEVICE);
301 kfree(ecrnx_hw->e2aunsuprxvec_elems);
302 ecrnx_hw->e2aunsuprxvec_elems = NULL;
307 * ecrnx_ipc_unsup_rx_vec_elems_allocs() - Allocate and push all unsupported RX
308 * vector buffer for the FW
310 * @ecrnx_hw: Main driver data
312 #ifndef CONFIG_ECRNX_ESWIN
313 static int ecrnx_ipc_unsup_rx_vec_elems_allocs(struct ecrnx_hw *ecrnx_hw)
315 struct ecrnx_ipc_skb_elem *elem;
316 int i, nb = ecrnx_hw->ipc_env->unsuprxvec_bufnb;
318 ecrnx_hw->e2aunsuprxvec_elems = kzalloc(nb * sizeof(struct ecrnx_ipc_skb_elem),
320 if (!ecrnx_hw->e2aunsuprxvec_elems) {
321 dev_err(ecrnx_hw->dev, "Failed to allocate unsuprxvec_elems\n");
325 for (i = 0, elem = ecrnx_hw->e2aunsuprxvec_elems; i < nb; i++, elem++)
327 if (ecrnx_ipc_unsup_rx_vec_elem_allocs(ecrnx_hw, elem)) {
328 dev_err(ecrnx_hw->dev, "Failed to allocate unsuprxvec buf %d/%d\n",
337 #ifdef CONFIG_ECRNX_SOFTMAC
339 * ecrnx_ipc_rxbuf_elem_allocs() - Allocate and push a rx buffer for the FW
341 * @ecrnx_hw: Main driver data
342 * @elem: Pointer to the skb elem that will contain the address of the buffer
344 #ifndef CONFIG_ECRNX_ESWIN
345 int ecrnx_ipc_rxbuf_elem_allocs(struct ecrnx_hw *ecrnx_hw,
346 struct ecrnx_ipc_skb_elem *elem)
348 struct hw_rxhdr *hw_rxhdr;
350 if (ecrnx_ipc_skb_elem_allocs(ecrnx_hw, elem,
351 ecrnx_hw->ipc_env->rx_bufsz, DMA_FROM_DEVICE, NULL))
354 hw_rxhdr = (struct hw_rxhdr *) elem->skb->data;
/* clear the pattern; FW will rewrite it once the buffer holds valid data */
355 hw_rxhdr->pattern = 0;
356 dma_sync_single_for_device(ecrnx_hw->dev,
357 elem->dma_addr + offsetof(struct hw_rxhdr, pattern),
358 sizeof(hw_rxhdr->pattern), DMA_BIDIRECTIONAL);
360 ipc_host_rxbuf_push(ecrnx_hw->ipc_env, elem, (u32) elem->dma_addr);
367 * ecrnx_ipc_rxbuf_elem_repush() - Reset and repush an already allocated RX buffer
369 * @ecrnx_hw: Main driver data
370 * @elem: Pointer to the skb elem that contains the address of the buffer
372 #ifndef CONFIG_ECRNX_ESWIN
373 void ecrnx_ipc_rxbuf_elem_repush(struct ecrnx_hw *ecrnx_hw,
374 struct ecrnx_ipc_skb_elem *elem)
376 struct sk_buff *skb = elem->skb;
377 int pattern_offset = sizeof(struct hw_rxhdr);
/* reset the whole rx header area before handing the buffer back to FW */
379 ((struct hw_rxhdr *)skb->data)->pattern = 0;
380 dma_sync_single_for_device(ecrnx_hw->dev, elem->dma_addr,
381 pattern_offset, DMA_BIDIRECTIONAL);
382 ipc_host_rxbuf_push(ecrnx_hw->ipc_env, elem, (u32)elem->dma_addr);
387 * ecrnx_ipc_rxbuf_elems_allocs() - Allocate and push all RX buffer for the FW
389 * @ecrnx_hw: Main driver data
391 #ifndef CONFIG_ECRNX_ESWIN
392 static int ecrnx_ipc_rxbuf_elems_allocs(struct ecrnx_hw *ecrnx_hw)
394 struct ecrnx_ipc_skb_elem *elem;
395 int i, nb = ecrnx_hw->ipc_env->rx_bufnb;
397 ecrnx_hw->rxbuf_elems = kzalloc(nb * sizeof(struct ecrnx_ipc_skb_elem),
399 if (!ecrnx_hw->rxbuf_elems) {
400 dev_err(ecrnx_hw->dev, "Failed to allocate rx_elems\n");
404 for (i = 0, elem = ecrnx_hw->rxbuf_elems; i < nb; i++, elem++) {
405 if (ecrnx_ipc_rxbuf_elem_allocs(ecrnx_hw, elem)) {
406 dev_err(ecrnx_hw->dev, "Failed to allocate rx buf %d/%d\n",
417 * ecrnx_ipc_rxbuf_elems_deallocs() - Free all RX buffer allocated for the FW
419 * @ecrnx_hw: Main driver data
421 #ifndef CONFIG_ECRNX_ESWIN
422 static void ecrnx_ipc_rxbuf_elems_deallocs(struct ecrnx_hw *ecrnx_hw)
424 struct ecrnx_ipc_skb_elem *elem;
425 int i, nb = ecrnx_hw->ipc_env->rx_bufnb;
427 if (!ecrnx_hw->rxbuf_elems)
430 for (i = 0, elem = ecrnx_hw->rxbuf_elems; i < nb; i++, elem++) {
431 ecrnx_ipc_skb_elem_deallocs(ecrnx_hw, elem, ecrnx_hw->ipc_env->rx_bufsz, DMA_FROM_DEVICE);
434 kfree(ecrnx_hw->rxbuf_elems);
435 ecrnx_hw->rxbuf_elems = NULL;
439 #else /* ! CONFIG_ECRNX_SOFTMAC */
442 * ecrnx_ipc_rxdesc_elem_repush() - Repush a rxdesc to FW
444 * @ecrnx_hw: Main driver data
445 * @elem: Rx desc to repush
447 * Once rx buffer has been received, the rxdesc used by FW to upload this
448 * buffer can be re-used for another rx buffer.
450 #ifndef CONFIG_ECRNX_ESWIN
451 void ecrnx_ipc_rxdesc_elem_repush(struct ecrnx_hw *ecrnx_hw,
452 struct ecrnx_ipc_elem *elem)
454 struct rxdesc_tag *rxdesc = elem->addr;
/* give the (reset) descriptor back to the device before re-queueing it */
456 dma_sync_single_for_device(ecrnx_hw->dev, elem->dma_addr,
457 sizeof(struct rxdesc_tag), DMA_BIDIRECTIONAL);
458 ipc_host_rxdesc_push(ecrnx_hw->ipc_env, elem, (u32)elem->dma_addr);
462 * ecrnx_ipc_rxbuf_elem_allocs() - Allocate and push a RX buffer for the FW
464 * @ecrnx_hw: Main driver data
466 #ifndef CONFIG_ECRNX_ESWIN
467 int ecrnx_ipc_rxbuf_elem_allocs(struct ecrnx_hw *ecrnx_hw)
470 struct hw_rxhdr *hw_rxhdr;
472 int size = ecrnx_hw->ipc_env->rx_bufsz;
475 skb = dev_alloc_skb(size);
476 if (unlikely(!skb)) {
477 dev_err(ecrnx_hw->dev, "Failed to allocate rx buffer\n");
481 dma_addr = dma_map_single(ecrnx_hw->dev, skb->data, size, DMA_FROM_DEVICE);
483 if (unlikely(dma_mapping_error(ecrnx_hw->dev, dma_addr))) {
484 dev_err(ecrnx_hw->dev, "Failed to map rx buffer\n");
488 hw_rxhdr = (struct hw_rxhdr *)skb->data;
489 hw_rxhdr->pattern = 0;
490 dma_sync_single_for_device(ecrnx_hw->dev,
491 dma_addr + offsetof(struct hw_rxhdr, pattern),
492 sizeof(hw_rxhdr->pattern), DMA_BIDIRECTIONAL);
494 /* Find first free slot */
496 idx = ecrnx_hw->rxbuf_elems.idx;
/* linear probe from the last used index; wraps around the fixed-size table */
497 while (ecrnx_hw->rxbuf_elems.skb[idx] && nb < ECRNX_RXBUFF_MAX) {
498 idx = ( idx + 1 ) % ECRNX_RXBUFF_MAX;
502 if (WARN((nb == ECRNX_RXBUFF_MAX), "No more free space for rxbuff")) {
506 ecrnx_hw->rxbuf_elems.skb[idx] = skb;
508 /* Save info in skb control buffer */
509 ECRNX_RXBUFF_DMA_ADDR_SET(skb, dma_addr);
510 ECRNX_RXBUFF_PATTERN_SET(skb, ecrnx_rxbuff_pattern);
511 ECRNX_RXBUFF_IDX_SET(skb, idx);
513 /* Push buffer to FW */
514 ipc_host_rxbuf_push(ecrnx_hw->ipc_env, ECRNX_RXBUFF_IDX_TO_HOSTID(idx),
517 /* Save idx so that on next push the free slot will be found quicker */
518 ecrnx_hw->rxbuf_elems.idx = ( idx + 1 ) % ECRNX_RXBUFF_MAX;
/* error path: undo the DMA mapping before releasing the buffer */
523 dma_unmap_single(ecrnx_hw->dev, dma_addr, size, DMA_FROM_DEVICE);
533 * ecrnx_ipc_rxbuf_elem_repush() - Repush a rxbuf to FW
535 * @ecrnx_hw: Main driver data
536 * @skb: Skb to repush
538 * In case a skb is not forwarded to upper layer it can be re-used.
539 * It is assumed that @skb has been verified before calling this function and
540 * that it is a valid rx buffer
541 * (i.e. skb == ecrnx_hw->rxbuf_elems.skb[ECRNX_RXBUFF_IDX_GET(skb)])
543 #ifndef CONFIG_ECRNX_ESWIN
544 void ecrnx_ipc_rxbuf_elem_repush(struct ecrnx_hw *ecrnx_hw,
548 struct hw_rxhdr *hw_rxhdr = (struct hw_rxhdr *)skb->data;
/* clear the pattern and sync it back so FW can reuse this buffer */
552 hw_rxhdr->pattern = 0;
553 dma_addr = ECRNX_RXBUFF_DMA_ADDR_GET(skb);
554 dma_sync_single_for_device(ecrnx_hw->dev,
555 dma_addr + offsetof(struct hw_rxhdr, pattern),
556 sizeof(hw_rxhdr->pattern), DMA_BIDIRECTIONAL);
558 /* re-push buffer to FW */
559 idx = ECRNX_RXBUFF_IDX_GET(skb);
561 ipc_host_rxbuf_push(ecrnx_hw->ipc_env, ECRNX_RXBUFF_IDX_TO_HOSTID(idx),
567 * ecrnx_ipc_rxbuf_elems_allocs() - Allocate and push all RX buffer for the FW
569 * @ecrnx_hw: Main driver data
571 #ifndef CONFIG_ECRNX_ESWIN
572 static int ecrnx_ipc_rxbuf_elems_allocs(struct ecrnx_hw *ecrnx_hw)
574 //int i, nb = ecrnx_hw->ipc_env->rx_bufnb;
/* start with an empty local skb table */
577 for (i = 0; i < ECRNX_RXBUFF_MAX; i++) {
578 ecrnx_hw->rxbuf_elems.skb[i] = NULL;
580 ecrnx_hw->rxbuf_elems.idx = 0;
582 for (i = 0; i < nb; i++) {
583 if (ecrnx_ipc_rxbuf_elem_allocs(ecrnx_hw)) {
584 dev_err(ecrnx_hw->dev, "Failed to allocate rx buf %d/%d\n",
594 * ecrnx_ipc_rxbuf_elems_deallocs() - Free all RX buffer allocated for the FW
596 * @ecrnx_hw: Main driver data
598 #ifndef CONFIG_ECRNX_ESWIN
599 static void ecrnx_ipc_rxbuf_elems_deallocs(struct ecrnx_hw *ecrnx_hw)
/* walk the whole table: only populated slots are unmapped and cleared */
604 for (i = 0; i < ECRNX_RXBUFF_MAX; i++) {
605 if (ecrnx_hw->rxbuf_elems.skb[i]) {
606 skb = ecrnx_hw->rxbuf_elems.skb[i];
607 dma_unmap_single(ecrnx_hw->dev, ECRNX_RXBUFF_DMA_ADDR_GET(skb),
608 ecrnx_hw->ipc_env->rx_bufsz, DMA_FROM_DEVICE);
610 ecrnx_hw->rxbuf_elems.skb[i] = NULL;
617 * ecrnx_ipc_rxbuf_elem_pull() - Extract a skb from local table
619 * @ecrnx_hw: Main driver data
620 * @skb: Skb to extract from the table
622 * After checking that skb is actually a pointer of local table, extract it
624 * When buffer is removed, DMA mapping is removed which has the effect to
625 * synchronize the buffer for the cpu.
626 * To be called before passing skb to upper layer.
628 #ifndef CONFIG_ECRNX_ESWIN
629 void ecrnx_ipc_rxbuf_elem_pull(struct ecrnx_hw *ecrnx_hw, struct sk_buff *skb)
631 unsigned int idx = ECRNX_RXBUFF_IDX_GET(skb);
633 if (ECRNX_RXBUFF_VALID_IDX(idx) && (ecrnx_hw->rxbuf_elems.skb[idx] == skb)) {
634 dma_addr_t dma_addr = ECRNX_RXBUFF_DMA_ADDR_GET(skb);
635 ecrnx_hw->rxbuf_elems.skb[idx] = NULL;
636 dma_unmap_single(ecrnx_hw->dev, dma_addr,
637 ecrnx_hw->ipc_env->rx_bufsz, DMA_FROM_DEVICE);
639 WARN(1, "Incorrect rxbuff idx skb=%p table[%u]=%p", skb, idx,
640 idx < ECRNX_RXBUFF_MAX ? ecrnx_hw->rxbuf_elems.skb[idx] : NULL);
643 /* Reset the pattern and idx */
644 ECRNX_RXBUFF_PATTERN_SET(skb, 0);
645 ECRNX_RXBUFF_IDX_SET(skb, ECRNX_RXBUFF_MAX);
650 * ecrnx_ipc_rxbuf_elem_sync() - Sync part of a RX buffer
652 * @ecrnx_hw: Main driver data
 * @skb: Skb from the local rxbuf table whose data must be synced
 * @len: Number of bytes to sync for CPU access
656 * After checking that skb is actually a pointer of local table, sync @p len
657 * bytes of the buffer for CPU. Buffer is not removed from the table
659 #ifndef CONFIG_ECRNX_ESWIN
660 void ecrnx_ipc_rxbuf_elem_sync(struct ecrnx_hw *ecrnx_hw, struct sk_buff *skb,
663 unsigned int idx = ECRNX_RXBUFF_IDX_GET(skb);
665 if (ECRNX_RXBUFF_VALID_IDX(idx) && (ecrnx_hw->rxbuf_elems.skb[idx] == skb)) {
666 dma_addr_t dma_addr = ECRNX_RXBUFF_DMA_ADDR_GET(skb);
667 dma_sync_single_for_cpu(ecrnx_hw->dev, dma_addr, len, DMA_FROM_DEVICE);
669 WARN(1, "Incorrect rxbuff idx skb=%p table[%u]=%p", skb, idx,
670 idx < ECRNX_RXBUFF_MAX ? ecrnx_hw->rxbuf_elems.skb[idx] : NULL);
674 #endif /* ! CONFIG_ECRNX_SOFTMAC */
677 * ecrnx_elems_deallocs() - Deallocate IPC storage elements.
678 * @ecrnx_hw: Main driver data
680 * This function deallocates all the elements required for communications with
681 * LMAC, such as Rx Data elements, MSGs elements, ...
682 * This function should be called in correspondence with the allocation function.
684 #ifndef CONFIG_ECRNX_ESWIN
685 static void ecrnx_elems_deallocs(struct ecrnx_hw *ecrnx_hw)
687 ecrnx_ipc_rxbuf_elems_deallocs(ecrnx_hw);
688 ecrnx_ipc_unsup_rx_vec_elems_deallocs(ecrnx_hw);
689 #ifdef CONFIG_ECRNX_FULLMAC
/* rxdesc pool only exists on FULLMAC builds (see ecrnx_elems_allocs) */
690 ecrnx_ipc_elem_pool_deallocs(&ecrnx_hw->e2arxdesc_pool);
692 ecrnx_ipc_elem_pool_deallocs(&ecrnx_hw->e2amsgs_pool);
693 ecrnx_ipc_elem_pool_deallocs(&ecrnx_hw->dbgmsgs_pool);
694 ecrnx_ipc_elem_pool_deallocs(&ecrnx_hw->e2aradars_pool);
695 ecrnx_ipc_elem_var_deallocs(ecrnx_hw, &ecrnx_hw->pattern_elem);
696 ecrnx_ipc_elem_var_deallocs(ecrnx_hw, &ecrnx_hw->dbgdump_elem.buf);
701 * ecrnx_elems_allocs() - Allocate IPC storage elements.
702 * @ecrnx_hw: Main driver data
704 * This function allocates all the elements required for communications with
705 * LMAC, such as Rx Data elements, MSGs elements, ...
706 * This function should be called in correspondence with the deallocation function.
708 static int ecrnx_elems_allocs(struct ecrnx_hw *ecrnx_hw)
710 ECRNX_DBG(ECRNX_FN_ENTRY_STR);
712 #ifndef CONFIG_ECRNX_ESWIN
/* IPC buffers must live below 4GB: dma addresses are pushed to FW as 32 bit */
713 if (dma_set_coherent_mask(ecrnx_hw->dev, DMA_BIT_MASK(32)))
715 if (ecrnx_ipc_elem_pool_allocs(ecrnx_hw, &ecrnx_hw->e2amsgs_pool,
716 ecrnx_hw->ipc_env->ipc_e2amsg_bufnb,
717 ecrnx_hw->ipc_env->ipc_e2amsg_bufsz,
718 "ecrnx_ipc_e2amsgs_pool",
719 ipc_host_msgbuf_push))
722 if (ecrnx_ipc_elem_pool_allocs(ecrnx_hw, &ecrnx_hw->dbgmsgs_pool,
723 ecrnx_hw->ipc_env->ipc_dbg_bufnb,
724 ecrnx_hw->ipc_env->ipc_dbg_bufsz,
725 "ecrnx_ipc_dbgmsgs_pool",
726 ipc_host_dbgbuf_push))
729 if (ecrnx_ipc_elem_pool_allocs(ecrnx_hw, &ecrnx_hw->e2aradars_pool,
730 ecrnx_hw->ipc_env->radar_bufnb,
731 ecrnx_hw->ipc_env->radar_bufsz,
732 "ecrnx_ipc_e2aradars_pool",
733 ipc_host_radarbuf_push))
736 if (ecrnx_ipc_unsup_rx_vec_elems_allocs(ecrnx_hw))
740 if (ecrnx_ipc_elem_var_allocs(ecrnx_hw, &ecrnx_hw->pattern_elem,
741 sizeof(u32), DMA_TO_DEVICE,
742 NULL, &ecrnx_rxbuff_pattern,
743 ipc_host_patt_addr_push))
746 if (ecrnx_ipc_elem_var_allocs(ecrnx_hw, &ecrnx_hw->dbgdump_elem.buf,
747 sizeof(struct dbg_debug_dump_tag),
748 DMA_FROM_DEVICE, NULL, NULL,
749 ipc_host_dbginfobuf_push))
753 * Note that the RX buffers are no longer allocated here as their size depends on the
754 * FW configuration, which is not available at that time.
755 * They will be allocated when checking the parameter compatibility between the driver
756 * and the underlying components (i.e. during the ecrnx_handle_dynparams() execution)
759 #ifdef CONFIG_ECRNX_FULLMAC
760 if (ecrnx_ipc_elem_pool_allocs(ecrnx_hw, &ecrnx_hw->e2arxdesc_pool,
761 ecrnx_hw->ipc_env->rxdesc_nb,
762 sizeof(struct rxdesc_tag),
763 "ecrnx_ipc_e2arxdesc_pool",
764 ipc_host_rxdesc_push))
767 #endif /* CONFIG_ECRNX_FULLMAC */
/* error path: unwind everything that was allocated so far */
772 ecrnx_elems_deallocs(ecrnx_hw);
780 * ecrnx_ipc_msg_push() - Push a msg to IPC queue
782 * @ecrnx_hw: Main driver data
783 * @msg_buf: Pointer to message
784 * @len: Size, in bytes, of message
786 void ecrnx_ipc_msg_push(struct ecrnx_hw *ecrnx_hw, void *msg_buf, uint16_t len)
789 ipc_host_msg_push(ecrnx_hw->ipc_env, msg_buf, len);
793 * ecrnx_ipc_txdesc_push() - Push a txdesc to FW
795 * @ecrnx_hw: Main driver data
796 * @tx_desc: Pointer on &struct txdesc_api to push to FW
797 * @hostid: Pointer save in ipc env to retrieve tx buffer upon confirmation.
798 * @hw_queue: Hw queue to push txdesc to
799 * @user: User position to push the txdesc to. It must be set to 0 if MU-MIMO
802 void ecrnx_ipc_txdesc_push(struct ecrnx_hw *ecrnx_hw, void *tx_desc,
803 void *hostid, int hw_queue, int user)
806 #if !defined(CONFIG_ECRNX_ESWIN_SDIO) && !defined(CONFIG_ECRNX_ESWIN_USB)
807 volatile struct txdesc_host *txdesc_host;
811 txdesc_host = ipc_host_txdesc_get(ecrnx_hw->ipc_env, hw_queue, user);
812 BUG_ON(!txdesc_host);
/* word-by-word copy into the (volatile) shared-memory descriptor */
814 dst = (typeof(dst))&txdesc_host->api;
815 src = (typeof(src))tx_desc;
816 for (i = 0; i < sizeof(txdesc_host->api) / sizeof(*src); i++)
821 ipc_host_txdesc_push(ecrnx_hw->ipc_env, hw_queue, user, hostid);
/* SDIO/USB builds bypass shared memory and send the frame over the bus */
823 ecrnx_frame_send(ecrnx_hw, tx_desc, hostid, hw_queue, user);
828 * ecrnx_ipc_fw_trace_desc_get() - Return pointer to the start of trace
829 * description in IPC environment
831 * @ecrnx_hw: Main driver data
833 void *ecrnx_ipc_fw_trace_desc_get(struct ecrnx_hw *ecrnx_hw)
835 #ifndef CONFIG_ECRNX_ESWIN
836 return (void *)&(ecrnx_hw->ipc_env->shared->trace_pattern);
842 #ifndef CONFIG_ECRNX_ESWIN
844 * ecrnx_ipc_sta_buffer_init - Initialize counter of buffered data for a given sta
846 * @ecrnx_hw: Main driver data
847 * @sta_idx: Index of the station to initialize
849 void ecrnx_ipc_sta_buffer_init(struct ecrnx_hw *ecrnx_hw, int sta_idx)
852 volatile u32_l *buffered;
/* ignore out-of-range station indexes */
854 if (sta_idx >= NX_REMOTE_STA_MAX)
857 buffered = ecrnx_hw->ipc_env->shared->buffered[sta_idx];
859 for (i = 0; i < TID_MAX; i++) {
865 * ecrnx_ipc_sta_buffer - Update counter of buffered data for a given sta
867 * @ecrnx_hw: Main driver data
868 * @sta: Managed station
869 * @tid: TID on which data has been added or removed
870 * @size: Size of data to add (or remove if < 0) to STA buffer.
872 void ecrnx_ipc_sta_buffer(struct ecrnx_hw *ecrnx_hw, struct ecrnx_sta *sta, int tid, int size)
874 #ifndef CONFIG_ECRNX_ESWIN
880 if ((sta->sta_idx >= NX_REMOTE_STA_MAX) || (tid >= TID_MAX))
883 buffered = &ecrnx_hw->ipc_env->shared->buffered[sta->sta_idx][tid];
/* clamp decrement at zero instead of letting the counter underflow */
887 if (*buffered < size)
892 // no test on overflow
899 * ecrnx_msgind() - IRQ handler callback for %IPC_IRQ_E2A_MSG
901 * @pthis: Pointer to main driver data
902 * @hostid: Pointer to IPC elem from e2amsgs_pool
904 static u8 ecrnx_msgind(void *pthis, void *hostid)
906 struct ecrnx_hw *ecrnx_hw = pthis;
908 #ifndef CONFIG_ECRNX_ESWIN
909 struct ecrnx_ipc_elem *elem = hostid;
910 struct ipc_e2a_msg *msg = elem->addr;
912 REG_SW_SET_PROFILING(ecrnx_hw, SW_PROF_MSGIND);
914 /* Look for pattern which means that this hostbuf has been used for a MSG */
915 if (msg->pattern != IPC_MSGE2A_VALID_PATTERN) {
/* ESWIN (SDIO/USB) path: hostid carries the message directly */
920 struct ipc_e2a_msg *msg = NULL;
923 ECRNX_DBG("%s enter 0x%x, 0x%x!!\n", __func__, pthis, hostid);
924 if(!pthis || !hostid){
925 ECRNX_ERR(" %s input param error!! \n", __func__);
930 /* Relay further actions to the msg parser */
931 ecrnx_rx_handle_msg(ecrnx_hw, msg);
933 #ifndef CONFIG_ECRNX_ESWIN
934 /* Reset the msg element and re-use it */
938 /* Push back the buffer to the LMAC */
939 ipc_host_msgbuf_push(ecrnx_hw->ipc_env, elem, elem->dma_addr);
942 REG_SW_CLEAR_PROFILING(ecrnx_hw, SW_PROF_MSGIND);
944 ECRNX_DBG("%s exit!!", __func__);
949 * ecrnx_msgackind() - IRQ handler callback for %IPC_IRQ_E2A_MSG_ACK
951 * @pthis: Pointer to main driver data
952 * @hostid: Pointer to command acknowledged
954 static u8 ecrnx_msgackind(void *pthis, void *hostid)
956 struct ecrnx_hw *ecrnx_hw = (struct ecrnx_hw *)pthis;
/* wake the command manager waiting on this command's completion */
958 ecrnx_hw->msg_tx_done++;
959 ecrnx_hw->cmd_mgr.llind(&ecrnx_hw->cmd_mgr, (struct ecrnx_cmd *)hostid);
964 * ecrnx_radarind() - IRQ handler callback for %IPC_IRQ_E2A_RADAR
966 * @pthis: Pointer to main driver data
967 * @hostid: Pointer to IPC elem from e2aradars_pool
969 static u8 ecrnx_radarind(void *pthis, void *hostid)
971 #ifdef CONFIG_ECRNX_RADAR
972 struct ecrnx_hw *ecrnx_hw = pthis;
973 struct ecrnx_ipc_elem *elem = hostid;
974 struct radar_pulse_array_desc *pulses = elem->addr;
978 /* Look for pulse count meaning that this hostbuf contains RADAR pulses */
979 if (pulses->cnt == 0) {
984 if (ecrnx_radar_detection_is_enable(&ecrnx_hw->radar, pulses->idx)) {
985 /* Save the received pulses only if radar detection is enabled */
986 for (i = 0; i < pulses->cnt; i++) {
987 struct ecrnx_radar_pulses *p = &ecrnx_hw->radar.pulses[pulses->idx];
/* circular buffer of the last ECRNX_RADAR_PULSE_MAX pulses */
989 p->buffer[p->index] = pulses->pulse[i];
990 p->index = (p->index + 1) % ECRNX_RADAR_PULSE_MAX;
991 if (p->count < ECRNX_RADAR_PULSE_MAX)
995 /* Defer pulse processing in separate work */
996 if (! work_pending(&ecrnx_hw->radar.detection_work))
997 schedule_work(&ecrnx_hw->radar.detection_work);
1000 /* Reset the radar element and re-use it */
1002 #ifndef CONFIG_ECRNX_ESWIN
1005 /* Push back the buffer to the LMAC */
1006 ipc_host_radarbuf_push(ecrnx_hw->ipc_env, elem, (u32)elem->dma_addr);
1016 * ecrnx_prim_tbtt_ind() - IRQ handler callback for %IPC_IRQ_E2A_TBTT_PRIM
1018 * @pthis: Pointer to main driver data
1020 static void ecrnx_prim_tbtt_ind(void *pthis)
1022 #ifdef CONFIG_ECRNX_SOFTMAC
1023 struct ecrnx_hw *ecrnx_hw = (struct ecrnx_hw *)pthis;
1024 ecrnx_tx_bcns(ecrnx_hw);
1025 #endif /* CONFIG_ECRNX_SOFTMAC */
1029 * ecrnx_sec_tbtt_ind() - IRQ handler callback for %IPC_IRQ_E2A_TBTT_SEC
1031 * @pthis: Pointer to main driver data
1033 static void ecrnx_sec_tbtt_ind(void *pthis)
1038 * ecrnx_dbgind() - IRQ handler callback for %IPC_IRQ_E2A_DBG
1040 * @pthis: Pointer to main driver data
1041 * @hostid: Pointer to IPC elem from dbgmsgs_pool
1044 #ifdef CONFIG_ECRNX_ESWIN_USB
1045 extern void usb_dbg_printf(void * data, int len);
1047 static u8 ecrnx_dbgind(void *pthis, void *hostid)
1050 #ifndef CONFIG_ECRNX_ESWIN
1051 struct ecrnx_hw *ecrnx_hw = (struct ecrnx_hw *)pthis;
1052 struct ecrnx_ipc_elem *elem = hostid;
1053 struct ipc_dbg_msg *dbg_msg = elem->addr;
1055 REG_SW_SET_PROFILING(ecrnx_hw, SW_PROF_DBGIND);
1057 /* Look for pattern which means that this hostbuf has been used for a MSG */
1058 if (dbg_msg->pattern != IPC_DBG_VALID_PATTERN) {
1063 /* Display the string */
1064 //printk("%s %s", (char *)FW_STR, (char *)dbg_msg->string);
1066 /* Reset the msg element and re-use it */
1067 dbg_msg->pattern = 0;
1070 /* Push back the buffer to the LMAC */
1071 ipc_host_dbgbuf_push(ecrnx_hw->ipc_env, elem, (u32)elem->dma_addr);
1074 REG_SW_CLEAR_PROFILING(ecrnx_hw, SW_PROF_DBGIND);
/* ESWIN (SDIO/USB) path: hostid is the skb holding the FW debug string */
1077 struct sk_buff *skb = (struct sk_buff *)hostid;
1078 #ifdef CONFIG_ECRNX_ESWIN_USB
1079 usb_dbg_printf(skb->data, skb->len);
1081 uint8_t string[IPC_DBG_PARAM_SIZE] = {0};
1082 if(skb->len < IPC_DBG_PARAM_SIZE)
1084 memcpy(string, skb->data, skb->len);
/* message longer than the local buffer: copy a truncated, NUL-terminated part */
1088 printk("waring: string buff no enough \n");
1089 memcpy(string, skb->data, IPC_DBG_PARAM_SIZE-1);
1091 ECRNX_PRINT("%s %s", (char *)FW_STR, (char *)string);
1099 * ecrnx_ipc_rxbuf_init() - Allocate and initialize RX buffers.
1101 * @ecrnx_hw: Main driver data
1102 * @rx_bufsz: Size of the buffer to be allocated
1104 * This function updates the RX buffer size according to the parameter and allocates the
 *
 * Return: result of ecrnx_ipc_rxbuf_elems_allocs() (0 on success, <0 on error)
1107 #ifndef CONFIG_ECRNX_ESWIN
1108 int ecrnx_ipc_rxbuf_init(struct ecrnx_hw *ecrnx_hw, uint32_t rx_bufsz)
1110 ecrnx_hw->ipc_env->rx_bufsz = rx_bufsz;
1111 return(ecrnx_ipc_rxbuf_elems_allocs(ecrnx_hw));
1115 * ecrnx_ipc_init() - Initialize IPC interface.
1117 * @ecrnx_hw: Main driver data
1118 * @shared_ram: Pointer to shared memory that contains IPC shared struct
1120 * This function initializes IPC interface by registering callbacks, setting
1121 * shared memory area and calling IPC Init function.
1122 * It should be called only once during driver's lifetime.
1124 int ecrnx_ipc_init(struct ecrnx_hw *ecrnx_hw, u8 *shared_ram)
1126 struct ipc_host_cb_tag cb;
1128 ECRNX_DBG("%s entry!!", __func__);
1129 ECRNX_DBG(ECRNX_FN_ENTRY_STR);
1131 /* initialize the API interface */
1132 cb.recv_data_ind = ecrnx_rxdataind;
1133 cb.recv_radar_ind = ecrnx_radarind;
1134 cb.recv_msg_ind = ecrnx_msgind;
1135 cb.recv_msgack_ind = ecrnx_msgackind;
1136 cb.recv_dbg_ind = ecrnx_dbgind;
1137 cb.send_data_cfm = ecrnx_txdatacfm;
1138 cb.handle_data_cfm = ecrnx_handle_tx_datacfm;
1140 cb.prim_tbtt_ind = ecrnx_prim_tbtt_ind;
1141 cb.sec_tbtt_ind = ecrnx_sec_tbtt_ind;
1142 cb.recv_unsup_rx_vec_ind = ecrnx_unsup_rx_vec_ind;
1144 /* set the IPC environment */
1145 ecrnx_hw->ipc_env = (struct ipc_host_env_tag *)
1146 kzalloc(sizeof(struct ipc_host_env_tag), GFP_KERNEL);
1148 if (!ecrnx_hw->ipc_env)
1151 /* call the initialization of the IPC */
1152 ipc_host_init(ecrnx_hw->ipc_env, &cb,
1153 (struct ipc_shared_env_tag *)shared_ram, ecrnx_hw);
1155 ecrnx_cmd_mgr_init(&ecrnx_hw->cmd_mgr);
1157 ecrnx_rx_reord_init(ecrnx_hw);
1158 #ifdef CONFIG_ECRNX_ESWIN_SDIO
1159 ecrnx_sdio_init(ecrnx_hw);
1160 #elif defined(CONFIG_ECRNX_ESWIN_USB)
1161 ecrnx_usb_init(ecrnx_hw);
1164 res = ecrnx_elems_allocs(ecrnx_hw);
/* element allocation failed: tear down the IPC environment again */
1167 kfree(ecrnx_hw->ipc_env);
1168 ecrnx_hw->ipc_env = NULL;
1170 ECRNX_DBG("%s exit!!", __func__);
1175 * ecrnx_ipc_deinit() - Release IPC interface
1177 * @ecrnx_hw: Main driver data
1179 void ecrnx_ipc_deinit(struct ecrnx_hw *ecrnx_hw)
1181 ECRNX_DBG(ECRNX_FN_ENTRY_STR);
/* flush pending TX first so no buffer is lost when elements go away */
1183 ecrnx_ipc_tx_drain(ecrnx_hw);
1184 ecrnx_cmd_mgr_deinit(&ecrnx_hw->cmd_mgr);
1185 #ifndef CONFIG_ECRNX_ESWIN
1186 ecrnx_elems_deallocs(ecrnx_hw);
1189 ecrnx_rx_reord_deinit(ecrnx_hw);
1190 #ifdef CONFIG_ECRNX_ESWIN_SDIO
1191 if (ecrnx_hw->ipc_env->shared) {
1192 kfree(ecrnx_hw->ipc_env->shared);
1193 ecrnx_hw->ipc_env->shared = NULL;
1195 ecrnx_sdio_deinit(ecrnx_hw);
1196 #elif defined(CONFIG_ECRNX_ESWIN_USB)
1197 ecrnx_usb_deinit(ecrnx_hw);
1200 if (ecrnx_hw->ipc_env) {
1201 kfree(ecrnx_hw->ipc_env);
1202 ecrnx_hw->ipc_env = NULL;
1207 * ecrnx_ipc_start() - Start IPC interface
1209 * @ecrnx_hw: Main driver data
1211 void ecrnx_ipc_start(struct ecrnx_hw *ecrnx_hw)
1213 ipc_host_enable_irq(ecrnx_hw->ipc_env, IPC_IRQ_E2A_ALL);
1217 * ecrnx_ipc_stop() - Stop IPC interface
1219 * @ecrnx_hw: Main driver data
1221 void ecrnx_ipc_stop(struct ecrnx_hw *ecrnx_hw)
1223 ipc_host_disable_irq(ecrnx_hw->ipc_env, IPC_IRQ_E2A_ALL);
1227 * ecrnx_ipc_tx_drain() - Flush IPC TX buffers
1229 * @ecrnx_hw: Main driver data
1231 * This assumes LMAC is still (tx wise) and there's no TX race until LMAC is up
1233 * This also lets both IPC sides remain in sync before resetting the LMAC,
1234 * e.g with ecrnx_send_reset.
1236 void ecrnx_ipc_tx_drain(struct ecrnx_hw *ecrnx_hw)
1240 ECRNX_DBG(ECRNX_FN_ENTRY_STR);
1242 if (!ecrnx_hw->ipc_env) {
1243 printk(KERN_CRIT "%s: bypassing (restart must have failed)\n", __func__);
/* walk every HW queue / user position and free everything still queued */
1247 for (i = 0; i < ECRNX_HWQ_NB; i++) {
1248 for (j = 0; j < nx_txuser_cnt[i]; j++) {
1249 struct sk_buff *skb;
1250 while ((skb = (struct sk_buff *)ipc_host_tx_flush(ecrnx_hw->ipc_env, i, j))) {
1251 struct ecrnx_sw_txhdr *sw_txhdr =
1252 ((struct ecrnx_txhdr *)skb->data)->sw_hdr;
1253 #ifndef CONFIG_ECRNX_ESWIN
1254 #ifdef CONFIG_ECRNX_AMSDUS_TX
/* A-MSDU: release every aggregated sub-frame before the head skb */
1255 if (sw_txhdr->desc.host.packet_cnt > 1) {
1256 struct ecrnx_amsdu_txhdr *amsdu_txhdr;
1257 list_for_each_entry(amsdu_txhdr, &sw_txhdr->amsdu.hdrs, list) {
1258 dma_unmap_single(ecrnx_hw->dev, amsdu_txhdr->dma_addr,
1259 amsdu_txhdr->map_len, DMA_TO_DEVICE);
1260 dev_kfree_skb_any(amsdu_txhdr->skb);
1264 kmem_cache_free(ecrnx_hw->sw_txhdr_cache, sw_txhdr);
1265 dma_unmap_single(ecrnx_hw->dev, sw_txhdr->dma_addr,
1266 sw_txhdr->map_len, DMA_TO_DEVICE);
1268 skb_pull(skb, sw_txhdr->headroom);
1269 #ifdef CONFIG_ECRNX_SOFTMAC
1270 ieee80211_free_txskb(ecrnx_hw->hw, skb);
1272 dev_kfree_skb_any(skb);
1273 #endif /* CONFIG_ECRNX_SOFTMAC */
1280 * ecrnx_ipc_tx_pending() - Check if TX pframes are pending at FW level
1282 * @ecrnx_hw: Main driver data
1284 bool ecrnx_ipc_tx_pending(struct ecrnx_hw *ecrnx_hw)
1286 return ipc_host_tx_frames_pending(ecrnx_hw->ipc_env);
1290 * ecrnx_error_ind() - %DBG_ERROR_IND message callback
1292 * @ecrnx_hw: Main driver data
1294 * This function triggers the UMH script call that will indicate to the user
1295 * space the error that occurred and stored the debug dump. Once the UMH script
1296 * is executed, the ecrnx_umh_done() function has to be called.
1298 void ecrnx_error_ind(struct ecrnx_hw *ecrnx_hw)
1300 struct ecrnx_ipc_elem_var *elem = &ecrnx_hw->dbgdump_elem.buf;
1301 struct dbg_debug_dump_tag *dump = elem->addr;
/* NOTE(review): syncs for *device* right before the CPU reads
 * dump->dbg_info — verify a for_cpu sync is not what was intended */
1303 dma_sync_single_for_device(ecrnx_hw->dev, elem->dma_addr, elem->size,
1305 dev_err(ecrnx_hw->dev, "(type %d): dump received\n",
1306 dump->dbg_info.error_type);
1308 #ifdef CONFIG_ECRNX_DEBUGFS
1309 ecrnx_hw->debugfs.trace_prst = true;
1310 ecrnx_trigger_um_helper(&ecrnx_hw->debugfs);
1315 * ecrnx_umh_done() - Indicate User Mode helper finished
1317 * @ecrnx_hw: Main driver data
1320 void ecrnx_umh_done(struct ecrnx_hw *ecrnx_hw)
1322 if (!test_bit(ECRNX_DEV_STARTED, &ecrnx_hw->flags))
1325 /* this assumes error_ind won't trigger before ipc_host_dbginfobuf_push
1326 is called and so does not irq protect (TODO) against error_ind */
1327 #ifdef CONFIG_ECRNX_DEBUGFS
1328 ecrnx_hw->debugfs.trace_prst = false;
1329 #ifndef CONFIG_ECRNX_ESWIN
1330 ipc_host_dbginfobuf_push(ecrnx_hw->ipc_env, ecrnx_hw->dbgdump_elem.buf.dma_addr);