/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_iscsi.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

/***************************************************************************
* Structures & Definitions
***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)

#define SPQ_BLOCK_DELAY_MAX_ITER	(10)
#define SPQ_BLOCK_DELAY_US		(10)
#define SPQ_BLOCK_SLEEP_MAX_ITER	(1000)
#define SPQ_BLOCK_SLEEP_MS		(5)

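/* The SPQ_BLOCK_* values above bound the two polling phases used while
 * waiting for a ramrod posted in BLOCK/EBLOCK mode: a short busy-wait phase
 * (DELAY) and a longer phase that sleeps between iterations (SLEEP); see
 * __qed_spq_block() below.
 */
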
/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data, u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->fw_return_code = fw_return_code;

	/* Make sure completion done is visible on waiting thread */
	smp_store_release(&comp_done->done, 0x1);
}

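/* Poll the completion flag written by qed_spq_blocking_cb(), busy-waiting or
 * sleeping between iterations depending on the caller's context; returns 0
 * once the completion arrives, -EBUSY if the iteration budget is exhausted.
 */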
static int __qed_spq_block(struct qed_hwfn *p_hwfn,
			   struct qed_spq_entry *p_ent,
			   u8 *p_fw_ret, bool sleep_between_iter)
{
	struct qed_spq_comp_done *comp_done;
	u32 iter_cnt;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
				      : SPQ_BLOCK_DELAY_MAX_ITER;

	while (iter_cnt--) {
		/* Validate we receive completion update */
		if (smp_load_acquire(&comp_done->done) == 1) { /* ^^^ */
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}

		if (sleep_between_iter)
			msleep(SPQ_BLOCK_SLEEP_MS);
		else
			udelay(SPQ_BLOCK_DELAY_US);
	}

	return -EBUSY;
}

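/* Wait for a blocking ramrod to complete: a quick busy-poll first (unless
 * skipped), then a sleeping poll; if the ramrod is still stuck, request an
 * MCP drain and retry once before reporting the failure.
 */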
static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret, bool skip_quick_poll)
{
	struct qed_spq_comp_done *comp_done;
	struct qed_ptt *p_ptt;
	int rc;

	/* A relatively short polling period w/o sleeping, to allow the FW to
	 * complete the ramrod and thus possibly to avoid the following sleeps.
	 */
	if (!skip_quick_poll) {
		rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
		if (!rc)
			return 0;
	}

	/* Move to polling with a sleeping period between iterations */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_NOTICE(p_hwfn, "ptt, failed to acquire\n");
		return -EAGAIN;
	}

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "MCP drain failed\n");
		goto err;
	}

	/* Retry after drain */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		goto out;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		goto out;
	}

err:
	qed_ptt_release(p_hwfn, p_ptt);
	DP_NOTICE(p_hwfn,
		  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
		  le32_to_cpu(p_ent->elem.hdr.cid),
		  p_ent->elem.hdr.cmd_id,
		  p_ent->elem.hdr.protocol_id,
		  le16_to_cpu(p_ent->elem.hdr.echo));

	return -EBUSY;

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return 0;
}

/***************************************************************************
* SPQ entries inner API
***************************************************************************/
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
			      struct qed_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid,
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi,
		   p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}

/***************************************************************************
* HSI access
***************************************************************************/
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	struct e4_core_conn_context *p_cxt;
	struct qed_cxt_info cxt_info;
	u16 physical_q;
	int rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}

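/* Copy a prepared element into the SPQ chain and ring the XCM doorbell so
 * firmware consumes it; the echo field carries the producer index that is
 * later used to match the EQ completion back to this entry.
 */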
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	u16 echo = qed_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;
	struct core_db_data db;

	p_ent->elem.hdr.echo = cpu_to_le16(echo);
	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

	/* send a doorbell on the slow hwfn session */
	memset(&db, 0, sizeof(db));
	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* make sure the SPQE is updated before the doorbell */
	wmb();

	DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

	/* make sure doorbell is rung */
	wmb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
		   p_spq->cid, db.params, db.agg_flags,
		   qed_chain_get_prod_idx(p_chain));

	return 0;
}

/***************************************************************************
* Asynchronous events
***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	qed_spq_async_comp_cb cb;

	if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
		return -EINVAL;

	cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
	if (cb) {
		return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
			  &p_eqe->data, p_eqe->fw_return_code);
	} else {
		DP_NOTICE(p_hwfn,
			  "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return -EINVAL;
	}
}

int
qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
			  enum protocol_type protocol_id,
			  qed_spq_async_comp_cb cb)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
		return -EINVAL;

	p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
	return 0;
}

void
qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
			    enum protocol_type protocol_id)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
		return;

	p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL;
}

/***************************************************************************
* EQ API
***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	mmiowb();
}

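/* EQ processing callback, registered on the slowpath status block: walk the
 * event ring up to the firmware consumer snapshot, dispatch each entry either
 * to the per-protocol async callback or to qed_spq_completion(), and finally
 * update the firmware's EQ consumer index via qed_eq_prod_update().
 */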
int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

	/* Complete current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	return rc;
}

int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
	struct qed_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
	if (!p_eq)
		return -ENOMEM;

	/* Allocate and initialize EQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    num_elem,
			    sizeof(union event_ring_element),
			    &p_eq->chain, NULL))
		goto eq_allocate_fail;

	/* register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn, qed_eq_completion,
			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	p_hwfn->p_eq = p_eq;
	return 0;

eq_allocate_fail:
	kfree(p_eq);
	return -ENOMEM;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn)
{
	qed_chain_reset(&p_hwfn->p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_eq)
		return;

	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain);

	kfree(p_hwfn->p_eq);
	p_hwfn->p_eq = NULL;
}

/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
			      struct eth_slow_path_rx_cqe *cqe,
			      enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}

/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_virt = NULL;
	dma_addr_t p_phys = 0;
	u32 i, capacity;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);

	/* SPQ empty pool */
	p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	capacity = qed_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count = 0;
	p_spq->comp_count = 0;
	p_spq->comp_sent_count = 0;
	p_spq->unlimited_pending_count = 0;

	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	qed_chain_reset(&p_spq->chain);
}

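/* Allocate the SPQ: the ring chain itself plus a DMA-coherent array of
 * struct qed_spq_entry whose ramrod data the ring elements point at
 * (see qed_spq_setup() above).
 */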
int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_virt = NULL;
	struct qed_spq *p_spq = NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
	if (!p_spq)
		return -ENOMEM;

	/* SPQ ring */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_SINGLE,
			    QED_CHAIN_CNT_TYPE_U16,
			    0,	/* N/A when the mode is SINGLE */
			    sizeof(struct slow_path_element),
			    &p_spq->chain, NULL))
		goto spq_allocate_fail;

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = qed_chain_get_capacity(&p_spq->chain);
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    capacity * sizeof(struct qed_spq_entry),
				    &p_phys, GFP_KERNEL);
	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_allocate_fail:
	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	u32 capacity;

	if (!p_spq)
		return;

	if (p_spq->p_virt) {
		capacity = qed_chain_get_capacity(&p_spq->chain);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  capacity *
				  sizeof(struct qed_spq_entry),
				  p_spq->p_virt, p_spq->p_phys);
	}

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	p_hwfn->p_spq = NULL;
}

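/* Take an entry from the free pool; if the pool is exhausted, fall back to a
 * GFP_ATOMIC allocation that will be queued on the unlimited_pending list.
 */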
int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate an SPQ entry for a pending ramrod\n");
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry, list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
			     struct qed_spq_entry *p_ent,
			     enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return 0;
		} else {
			struct qed_spq_entry *p_en2;

			p_en2 = list_first_entry(&p_spq->free_pool,
						 struct qed_spq_entry, list);
			list_del(&p_en2->list);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK responsible to free the allocated p_ent */
			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
				kfree(p_ent);
			else
				p_ent->post_ent = p_en2;

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;	/* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
* Posting new Ramrods
***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head, u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);
		list_del(&p_ent->list);
		list_add_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}

static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry, list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

/* Avoid overriding of SPQ entries when getting out-of-order completions, by
 * marking the completions in a bitmap and increasing the chain consumer only
 * for the first successive completed entries.
 */
static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
{
	u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
	struct qed_spq *p_spq = p_hwfn->p_spq;

	__set_bit(pos, p_spq->p_comp_bitmap);
	while (test_bit(p_spq->comp_bitmap_idx,
			p_spq->p_comp_bitmap)) {
		__clear_bit(p_spq->comp_bitmap_idx,
			    p_spq->p_comp_bitmap);
		p_spq->comp_bitmap_idx++;
		qed_chain_return_produced(&p_spq->chain);
	}
}

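/* Post a single ramrod: fill in the completion callback, add the entry to the
 * pending queue, push as many pending entries as the chain allows and, for
 * EBLOCK mode, wait for the completion before returning.
 */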
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;
	bool eblock;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Check if entry is in block mode before qed_spq_add_entry,
	 * which might kfree p_ent.
	 */
	eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (eblock) {
		/* For entries in QED BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
				   p_ent->queue == &p_spq->unlimited_pending);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			struct qed_spq_entry *p_post_ent = p_ent->post_ent;

			kfree(p_ent);

			/* Return the entry which was actually posted */
			p_ent = p_post_ent;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}

	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

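/* Called from EQ processing: match the EQE echo against the entries awaiting
 * completion, invoke the entry's callback, return the entry to the free pool
 * (unless it is in EBLOCK mode, which frees its own entry) and then try to
 * post further pending ramrods.
 */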
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq *p_spq;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_spq_entry *tmp;
	struct qed_spq_entry *found = NULL;
	int rc;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
		if (p_ent->elem.hdr.echo == echo) {
			list_del(&p_ent->list);
			qed_spq_comp_bmap_update(p_hwfn, echo);
			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is relatively uncommon - depends on scenarios
		 * which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   le16_to_cpu(echo),
			   le16_to_cpu(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE [echo %04x] completes\n",
			  le16_to_cpu(echo));
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p)\n",
		   le16_to_cpu(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Got a completion without a callback function\n");

	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
		/* EBLOCK is responsible for returning its own entry into the
		 * free list.
		 */
		qed_spq_return_entry(p_hwfn, found);

	/* Attempt to post pending requests */
	spin_lock_bh(&p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

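/* The ConsQ (consolidation queue) chain is only allocated, reset and freed by
 * the driver; its base address is handed to firmware in
 * qed_spq_hw_initialize().
 */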
int qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
	if (!p_consq)
		return -ENOMEM;

	/* Allocate and initialize ConsQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    QED_CHAIN_PAGE_SIZE / 0x80,
			    0x80, &p_consq->chain, NULL))
		goto consq_allocate_fail;

	p_hwfn->p_consq = p_consq;
	return 0;

consq_allocate_fail:
	kfree(p_consq);
	return -ENOMEM;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn)
{
	qed_chain_reset(&p_hwfn->p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_consq)
		return;

	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain);

	kfree(p_hwfn->p_consq);
	p_hwfn->p_consq = NULL;
}