1 /* bnx2x_sp.c: Broadcom Everest network driver.
3 * Copyright (c) 2011-2013 Broadcom Corporation
5 * Unless you and Broadcom execute a separate written software license
6 * agreement governing use of this software, this software is licensed to you
7 * under the terms of the GNU General Public License version 2, available
8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
10 * Notwithstanding the above, under no circumstances may you combine this
11 * software in any way with any other Broadcom software provided under a
12 * license other than the GPL, without Broadcom's express prior written
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16 * Written by: Vladislav Zolotarov
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22 #include <linux/module.h>
23 #include <linux/crc32.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/crc32c.h>
28 #include "bnx2x_cmn.h"
31 #define BNX2X_MAX_EMUL_MULTI 16
33 /**** Exe Queue interfaces ****/
36 * bnx2x_exe_queue_init - init the Exe Queue object
38 * @o: pointer to the object
40 * @owner: pointer to the owner
41 * @validate: validate function pointer
42 * @optimize: optimize function pointer
43 * @exec: execute function pointer
44 * @get: get function pointer
46 static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
47 struct bnx2x_exe_queue_obj *o,
49 union bnx2x_qable_obj *owner,
50 exe_q_validate validate,
52 exe_q_optimize optimize,
56 memset(o, 0, sizeof(*o));
58 INIT_LIST_HEAD(&o->exe_queue);
59 INIT_LIST_HEAD(&o->pending_comp);
61 spin_lock_init(&o->lock);
63 o->exe_chunk_len = exe_len;
66 /* Owner specific callbacks */
67 o->validate = validate;
69 o->optimize = optimize;
73 DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
77 static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
78 struct bnx2x_exeq_elem *elem)
80 DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
84 static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
86 struct bnx2x_exeq_elem *elem;
89 spin_lock_bh(&o->lock);
91 list_for_each_entry(elem, &o->exe_queue, link)
94 spin_unlock_bh(&o->lock);
100 * bnx2x_exe_queue_add - add a new element to the execution queue
104 * @cmd: new command to add
105 * @restore: true - do not optimize the command
107 * If the element is optimized out or is illegal, it is freed.
109 static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
110 struct bnx2x_exe_queue_obj *o,
111 struct bnx2x_exeq_elem *elem,
116 spin_lock_bh(&o->lock);
119 /* Try to cancel this element against a queued opposite command */
120 rc = o->optimize(bp, o->owner, elem);
124 /* Check if this request is ok */
125 rc = o->validate(bp, o->owner, elem);
127 DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
132 /* If so, add it to the execution queue */
133 list_add_tail(&elem->link, &o->exe_queue);
135 spin_unlock_bh(&o->lock);
140 bnx2x_exe_queue_free_elem(bp, elem);
142 spin_unlock_bh(&o->lock);
148 static inline void __bnx2x_exe_queue_reset_pending(
150 struct bnx2x_exe_queue_obj *o)
152 struct bnx2x_exeq_elem *elem;
154 while (!list_empty(&o->pending_comp)) {
155 elem = list_first_entry(&o->pending_comp,
156 struct bnx2x_exeq_elem, link);
158 list_del(&elem->link);
159 bnx2x_exe_queue_free_elem(bp, elem);
163 static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
164 struct bnx2x_exe_queue_obj *o)
167 spin_lock_bh(&o->lock);
169 __bnx2x_exe_queue_reset_pending(bp, o);
171 spin_unlock_bh(&o->lock);
176 * bnx2x_exe_queue_step - execute one execution chunk atomically
180 * @ramrod_flags: flags
182 * (Atomicity is ensured using the exe_queue->lock).
184 static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
185 struct bnx2x_exe_queue_obj *o,
186 unsigned long *ramrod_flags)
188 struct bnx2x_exeq_elem *elem, spacer;
191 memset(&spacer, 0, sizeof(spacer));
193 spin_lock_bh(&o->lock);
196 * Next step should not be performed until the current is finished,
197 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
198 * properly clear object internals without sending any command to the FW
199 * which also implies there won't be any completion to clear the
202 if (!list_empty(&o->pending_comp)) {
203 if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
204 DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
205 __bnx2x_exe_queue_reset_pending(bp, o);
207 spin_unlock_bh(&o->lock);
213 * Run through the pending commands list and create a next
216 while (!list_empty(&o->exe_queue)) {
217 elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
219 WARN_ON(!elem->cmd_len);
221 if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
222 cur_len += elem->cmd_len;
224 * Prevent both lists from being empty when moving an
225 * element. This allows the call of
226 * bnx2x_exe_queue_empty() without locking.
228 list_add_tail(&spacer.link, &o->pending_comp);
230 list_move_tail(&elem->link, &o->pending_comp);
231 list_del(&spacer.link);
238 spin_unlock_bh(&o->lock);
242 rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
245 * In case of an error return the commands back to the queue
246 * and reset the pending_comp.
248 list_splice_init(&o->pending_comp, &o->exe_queue);
251 * If zero is returned, it means there are no outstanding pending
252 * completions and we may dismiss the pending list.
254 __bnx2x_exe_queue_reset_pending(bp, o);
256 spin_unlock_bh(&o->lock);
260 static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
262 bool empty = list_empty(&o->exe_queue);
264 /* Don't reorder!!! */
267 return empty && list_empty(&o->pending_comp);
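/* Illustrative sketch (not part of the driver): because the spacer element
 * above keeps one of the two lists non-empty while an entry moves between
 * them, callers may poll for emptiness without taking o->lock, e.g.:
 *
 *	while (!bnx2x_exe_queue_empty(exeq))
 *		usleep_range(1000, 2000);
 */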
270 static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
273 DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
274 return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
277 /************************ raw_obj functions ***********************************/
278 static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
280 return !!test_bit(o->state, o->pstate);
283 static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
285 smp_mb__before_clear_bit();
286 clear_bit(o->state, o->pstate);
287 smp_mb__after_clear_bit();
290 static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
292 smp_mb__before_clear_bit();
293 set_bit(o->state, o->pstate);
294 smp_mb__after_clear_bit();
298 * bnx2x_state_wait - wait until the given bit(state) is cleared
301 * @state: state which is to be cleared
302 * @state_p: state buffer
305 static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
306 unsigned long *pstate)
308 /* can take a while if any port is running */
312 if (CHIP_REV_IS_EMUL(bp))
315 DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
319 if (!test_bit(state, pstate)) {
320 #ifdef BNX2X_STOP_ON_ERROR
321 DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt);
326 usleep_range(1000, 2000);
333 BNX2X_ERR("timeout waiting for state %d\n", state);
334 #ifdef BNX2X_STOP_ON_ERROR
341 static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
343 return bnx2x_state_wait(bp, raw->state, raw->pstate);
346 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
347 /* credit handling callbacks */
348 static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
350 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
354 return mp->get_entry(mp, offset);
357 static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
359 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
363 return mp->get(mp, 1);
366 static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
368 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
372 return vp->get_entry(vp, offset);
375 static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
377 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
381 return vp->get(vp, 1);
384 static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
386 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
387 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
392 if (!vp->get(vp, 1)) {
400 static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
402 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
404 return mp->put_entry(mp, offset);
407 static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
409 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
411 return mp->put(mp, 1);
414 static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
416 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
418 return vp->put_entry(vp, offset);
421 static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
423 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
425 return vp->put(vp, 1);
428 static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
430 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
431 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
436 if (!vp->put(vp, 1)) {
444 static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
445 int n, u8 *base, u8 stride, u8 size)
447 struct bnx2x_vlan_mac_registry_elem *pos;
452 list_for_each_entry(pos, &o->head, link) {
454 memcpy(next, &pos->u, size);
456 DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
458 next += stride + size;
462 return counter * ETH_ALEN;
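/* Illustrative sketch (not part of the driver): copying up to 8 registered
 * MACs back to back (stride 0, ETH_ALEN bytes each) into a caller-provided
 * buffer; the return value is the number of copied bytes:
 *
 *	u8 macs[8 * ETH_ALEN];
 *	int bytes = o->get_n_elements(bp, o, 8, macs, 0, ETH_ALEN);
 */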
465 /* check_add() callbacks */
466 static int bnx2x_check_mac_add(struct bnx2x *bp,
467 struct bnx2x_vlan_mac_obj *o,
468 union bnx2x_classification_ramrod_data *data)
470 struct bnx2x_vlan_mac_registry_elem *pos;
472 DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);
474 if (!is_valid_ether_addr(data->mac.mac))
477 /* Check if a requested MAC already exists */
478 list_for_each_entry(pos, &o->head, link)
479 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
485 static int bnx2x_check_vlan_add(struct bnx2x *bp,
486 struct bnx2x_vlan_mac_obj *o,
487 union bnx2x_classification_ramrod_data *data)
489 struct bnx2x_vlan_mac_registry_elem *pos;
491 DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
493 list_for_each_entry(pos, &o->head, link)
494 if (data->vlan.vlan == pos->u.vlan.vlan)
500 static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
501 struct bnx2x_vlan_mac_obj *o,
502 union bnx2x_classification_ramrod_data *data)
504 struct bnx2x_vlan_mac_registry_elem *pos;
506 DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
507 data->vlan_mac.mac, data->vlan_mac.vlan);
509 list_for_each_entry(pos, &o->head, link)
510 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
511 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
519 /* check_del() callbacks */
520 static struct bnx2x_vlan_mac_registry_elem *
521 bnx2x_check_mac_del(struct bnx2x *bp,
522 struct bnx2x_vlan_mac_obj *o,
523 union bnx2x_classification_ramrod_data *data)
525 struct bnx2x_vlan_mac_registry_elem *pos;
527 DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
529 list_for_each_entry(pos, &o->head, link)
530 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
536 static struct bnx2x_vlan_mac_registry_elem *
537 bnx2x_check_vlan_del(struct bnx2x *bp,
538 struct bnx2x_vlan_mac_obj *o,
539 union bnx2x_classification_ramrod_data *data)
541 struct bnx2x_vlan_mac_registry_elem *pos;
543 DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
545 list_for_each_entry(pos, &o->head, link)
546 if (data->vlan.vlan == pos->u.vlan.vlan)
552 static struct bnx2x_vlan_mac_registry_elem *
553 bnx2x_check_vlan_mac_del(struct bnx2x *bp,
554 struct bnx2x_vlan_mac_obj *o,
555 union bnx2x_classification_ramrod_data *data)
557 struct bnx2x_vlan_mac_registry_elem *pos;
559 DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
560 data->vlan_mac.mac, data->vlan_mac.vlan);
562 list_for_each_entry(pos, &o->head, link)
563 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
564 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
571 /* check_move() callback */
572 static bool bnx2x_check_move(struct bnx2x *bp,
573 struct bnx2x_vlan_mac_obj *src_o,
574 struct bnx2x_vlan_mac_obj *dst_o,
575 union bnx2x_classification_ramrod_data *data)
577 struct bnx2x_vlan_mac_registry_elem *pos;
580 /* Check if we can delete the requested configuration from the first
583 pos = src_o->check_del(bp, src_o, data);
585 /* check if configuration can be added */
586 rc = dst_o->check_add(bp, dst_o, data);
588 /* If this classification cannot be added (is already set)
589 * or can't be deleted - return an error.
597 static bool bnx2x_check_move_always_err(
599 struct bnx2x_vlan_mac_obj *src_o,
600 struct bnx2x_vlan_mac_obj *dst_o,
601 union bnx2x_classification_ramrod_data *data)
607 static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
609 struct bnx2x_raw_obj *raw = &o->raw;
612 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
613 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
614 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
616 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
617 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
618 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
624 void bnx2x_set_mac_in_nig(struct bnx2x *bp,
625 bool add, unsigned char *dev_addr, int index)
628 u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
629 NIG_REG_LLH0_FUNC_MEM;
631 if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
634 if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
637 DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
638 (add ? "ADD" : "DELETE"), index);
641 /* LLH_FUNC_MEM is a u64 WB register */
642 reg_offset += 8*index;
644 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
645 (dev_addr[4] << 8) | dev_addr[5]);
646 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
648 REG_WR_DMAE(bp, reg_offset, wb_data, 2);
651 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
652 NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
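/* Illustrative sketch (not part of the driver): programming the primary ETH
 * MAC into its LLH CAM line; only effective in MF-SI/MF-AFEX modes:
 *
 *	bnx2x_set_mac_in_nig(bp, true, bp->dev->dev_addr,
 *			     BNX2X_LLH_CAM_ETH_LINE);
 */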
656 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
659 * @o: queue for which we want to configure this rule
660 * @add: if true the command is an ADD command, DEL otherwise
661 * @opcode: CLASSIFY_RULE_OPCODE_XXX
662 * @hdr: pointer to a header to setup
665 static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
666 struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
667 struct eth_classify_cmd_header *hdr)
669 struct bnx2x_raw_obj *raw = &o->raw;
671 hdr->client_id = raw->cl_id;
672 hdr->func_id = raw->func_id;
674 /* Rx and/or Tx (internal switching) configuration? */
675 hdr->cmd_general_data |=
676 bnx2x_vlan_mac_get_rx_tx_flag(o);
679 hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
681 hdr->cmd_general_data |=
682 (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
686 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
688 * @cid: connection id
689 * @type: BNX2X_FILTER_XXX_PENDING
690 * @hdr: pointer to the header to setup
693 * Currently we always configure one rule and set the echo field to contain a CID and an
696 static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
697 struct eth_classify_header *hdr, int rule_cnt)
699 hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
700 (type << BNX2X_SWCID_SHIFT));
701 hdr->rule_cnt = (u8)rule_cnt;
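/* Illustrative sketch (not part of the driver): the completion path can
 * recover both values packed into the echo field above:
 *
 *	u32 echo = le32_to_cpu(hdr->echo);
 *	u32 cid  = echo & BNX2X_SWCID_MASK;
 *	int type = echo >> BNX2X_SWCID_SHIFT;
 */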
705 /* hw_config() callbacks */
706 static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
707 struct bnx2x_vlan_mac_obj *o,
708 struct bnx2x_exeq_elem *elem, int rule_idx,
711 struct bnx2x_raw_obj *raw = &o->raw;
712 struct eth_classify_rules_ramrod_data *data =
713 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
714 int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
715 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
716 bool add = (cmd == BNX2X_VLAN_MAC_ADD);
717 unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
718 u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
721 * Set LLH CAM entry: currently only iSCSI and ETH macs are
722 * relevant. In addition, current implementation is tuned for a
725 * When multiple unicast ETH MACs PF configuration in switch
726 * independent mode is required (NetQ, multiple netdev MACs,
727 * etc.), consider better utilisation of 8 per function MAC
728 * entries in the LLH register. There is also
729 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
730 * total number of CAM entries to 16.
732 * Currently we won't configure NIG for MACs other than a primary ETH
733 * MAC and iSCSI L2 MAC.
735 * If this MAC is moving from one Queue to another, no need to change
738 if (cmd != BNX2X_VLAN_MAC_MOVE) {
739 if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
740 bnx2x_set_mac_in_nig(bp, add, mac,
741 BNX2X_LLH_CAM_ISCSI_ETH_LINE);
742 else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
743 bnx2x_set_mac_in_nig(bp, add, mac,
744 BNX2X_LLH_CAM_ETH_LINE);
747 /* Reset the ramrod data buffer for the first rule */
749 memset(data, 0, sizeof(*data));
751 /* Setup a command header */
752 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
753 &rule_entry->mac.header);
755 DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
756 (add ? "add" : "delete"), mac, raw->cl_id);
758 /* Set the MAC itself */
759 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
760 &rule_entry->mac.mac_mid,
761 &rule_entry->mac.mac_lsb, mac);
763 /* MOVE: Add a rule that will add this MAC to the target Queue */
764 if (cmd == BNX2X_VLAN_MAC_MOVE) {
768 /* Setup ramrod data */
769 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
770 elem->cmd_data.vlan_mac.target_obj,
771 true, CLASSIFY_RULE_OPCODE_MAC,
772 &rule_entry->mac.header);
774 /* Set the MAC itself */
775 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
776 &rule_entry->mac.mac_mid,
777 &rule_entry->mac.mac_lsb, mac);
780 /* Set the ramrod data header */
781 /* TODO: take this to the higher level in order to prevent multiple
783 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
788 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
793 * @cam_offset: offset in cam memory
794 * @hdr: pointer to a header to setup
798 static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
799 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
800 struct mac_configuration_hdr *hdr)
802 struct bnx2x_raw_obj *r = &o->raw;
805 hdr->offset = (u8)cam_offset;
806 hdr->client_id = cpu_to_le16(0xff);
807 hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
808 (type << BNX2X_SWCID_SHIFT));
811 static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
812 struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
813 u16 vlan_id, struct mac_configuration_entry *cfg_entry)
815 struct bnx2x_raw_obj *r = &o->raw;
816 u32 cl_bit_vec = (1 << r->cl_id);
818 cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
819 cfg_entry->pf_id = r->func_id;
820 cfg_entry->vlan_id = cpu_to_le16(vlan_id);
823 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
824 T_ETH_MAC_COMMAND_SET);
825 SET_FLAG(cfg_entry->flags,
826 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
828 /* Set the MAC in the ramrod data */
829 bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
830 &cfg_entry->middle_mac_addr,
831 &cfg_entry->lsb_mac_addr, mac);
833 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
834 T_ETH_MAC_COMMAND_INVALIDATE);
837 static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
838 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
839 u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
841 struct mac_configuration_entry *cfg_entry = &config->config_table[0];
842 struct bnx2x_raw_obj *raw = &o->raw;
844 bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
846 bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
849 DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
850 (add ? "setting" : "clearing"),
851 mac, raw->cl_id, cam_offset);
855 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
858 * @o: bnx2x_vlan_mac_obj
859 * @elem: bnx2x_exeq_elem
860 * @rule_idx: rule_idx
861 * @cam_offset: cam_offset
863 static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
864 struct bnx2x_vlan_mac_obj *o,
865 struct bnx2x_exeq_elem *elem, int rule_idx,
868 struct bnx2x_raw_obj *raw = &o->raw;
869 struct mac_configuration_cmd *config =
870 (struct mac_configuration_cmd *)(raw->rdata);
872 * 57710 and 57711 do not support the MOVE command,
873 * so it's either ADD or DEL
875 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD);
878 /* Reset the ramrod data buffer */
879 memset(config, 0, sizeof(*config));
881 bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
883 elem->cmd_data.vlan_mac.u.mac.mac, 0,
884 ETH_VLAN_FILTER_ANY_VLAN, config);
887 static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
888 struct bnx2x_vlan_mac_obj *o,
889 struct bnx2x_exeq_elem *elem, int rule_idx,
892 struct bnx2x_raw_obj *raw = &o->raw;
893 struct eth_classify_rules_ramrod_data *data =
894 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
895 int rule_cnt = rule_idx + 1;
896 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
897 enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
898 bool add = (cmd == BNX2X_VLAN_MAC_ADD);
899 u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
901 /* Reset the ramrod data buffer for the first rule */
903 memset(data, 0, sizeof(*data));
905 /* Set a rule header */
906 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
907 &rule_entry->vlan.header);
909 DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
912 /* Set the VLAN itself */
913 rule_entry->vlan.vlan = cpu_to_le16(vlan);
915 /* MOVE: Add a rule that will add this MAC to the target Queue */
916 if (cmd == BNX2X_VLAN_MAC_MOVE) {
920 /* Setup ramrod data */
921 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
922 elem->cmd_data.vlan_mac.target_obj,
923 true, CLASSIFY_RULE_OPCODE_VLAN,
924 &rule_entry->vlan.header);
926 /* Set the VLAN itself */
927 rule_entry->vlan.vlan = cpu_to_le16(vlan);
930 /* Set the ramrod data header */
931 /* TODO: take this to the higher level in order to prevent multiple
933 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
937 static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
938 struct bnx2x_vlan_mac_obj *o,
939 struct bnx2x_exeq_elem *elem,
940 int rule_idx, int cam_offset)
942 struct bnx2x_raw_obj *raw = &o->raw;
943 struct eth_classify_rules_ramrod_data *data =
944 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
945 int rule_cnt = rule_idx + 1;
946 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
947 enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
948 bool add = (cmd == BNX2X_VLAN_MAC_ADD);
949 u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
950 u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
953 /* Reset the ramrod data buffer for the first rule */
955 memset(data, 0, sizeof(*data));
957 /* Set a rule header */
958 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
959 &rule_entry->pair.header);
961 /* Set the VLAN and MAC themselves */
962 rule_entry->pair.vlan = cpu_to_le16(vlan);
963 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
964 &rule_entry->pair.mac_mid,
965 &rule_entry->pair.mac_lsb, mac);
967 /* MOVE: Add a rule that will add this MAC to the target Queue */
968 if (cmd == BNX2X_VLAN_MAC_MOVE) {
972 /* Setup ramrod data */
973 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
974 elem->cmd_data.vlan_mac.target_obj,
975 true, CLASSIFY_RULE_OPCODE_PAIR,
976 &rule_entry->pair.header);
978 /* Set the VLAN and MAC again, for the target queue */
979 rule_entry->pair.vlan = cpu_to_le16(vlan);
980 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
981 &rule_entry->pair.mac_mid,
982 &rule_entry->pair.mac_lsb, mac);
985 /* Set the ramrod data header */
986 /* TODO: take this to the higher level in order to prevent multiple
988 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
993 * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
996 * @o: bnx2x_vlan_mac_obj
997 * @elem: bnx2x_exeq_elem
998 * @rule_idx: rule_idx
999 * @cam_offset: cam_offset
1001 static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
1002 struct bnx2x_vlan_mac_obj *o,
1003 struct bnx2x_exeq_elem *elem,
1004 int rule_idx, int cam_offset)
1006 struct bnx2x_raw_obj *raw = &o->raw;
1007 struct mac_configuration_cmd *config =
1008 (struct mac_configuration_cmd *)(raw->rdata);
1010 * 57710 and 57711 do not support the MOVE command,
1011 * so it's either ADD or DEL
1013 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD);
1016 /* Reset the ramrod data buffer */
1017 memset(config, 0, sizeof(*config));
1019 bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
1021 elem->cmd_data.vlan_mac.u.vlan_mac.mac,
1022 elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
1023 ETH_VLAN_FILTER_CLASSIFY, config);
1026 #define list_next_entry(pos, member) \
1027 list_entry((pos)->member.next, typeof(*(pos)), member)
1030 * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
1032 * @bp: device handle
1033 * @p: command parameters
1034 * @ppos: pointer to the cookie
1036 * reconfigure next MAC/VLAN/VLAN-MAC element from the
1037 * previously configured elements list.
1039 * From the command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags is taken
1042 * pointer to the cookie - it should be given back in the next call to make
1043 * the function handle the next element. If *ppos is set to NULL it restarts the
1044 * iterator. If the returned *ppos == NULL, the last element has been
1048 static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
1049 struct bnx2x_vlan_mac_ramrod_params *p,
1050 struct bnx2x_vlan_mac_registry_elem **ppos)
1052 struct bnx2x_vlan_mac_registry_elem *pos;
1053 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1055 /* If list is empty - there is nothing to do here */
1056 if (list_empty(&o->head)) {
1061 /* make a step... */
1063 *ppos = list_first_entry(&o->head,
1064 struct bnx2x_vlan_mac_registry_elem,
1067 *ppos = list_next_entry(*ppos, link);
1071 /* If it's the last step - return NULL */
1072 if (list_is_last(&pos->link, &o->head))
1075 /* Prepare a 'user_req' */
1076 memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
1078 /* Set the command */
1079 p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
1081 /* Set vlan_mac_flags */
1082 p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1084 /* Set a restore bit */
1085 __set_bit(RAMROD_RESTORE, &p->ramrod_flags);
1087 return bnx2x_config_vlan_mac(bp, p);
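/* Illustrative sketch (not part of the driver): walking the whole registry
 * with the cookie; assumes 'p' was prepared by the caller with
 * RAMROD_COMP_WAIT set, so each call completes before the next one:
 *
 *	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
 *	int rc = 0;
 *
 *	do {
 *		rc = bnx2x_vlan_mac_restore(bp, &p, &pos);
 *	} while (rc >= 0 && pos);
 */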
1091 * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
1092 * pointer to an element matching specific criteria, or NULL if such an element
1093 * hasn't been found.
1095 static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
1096 struct bnx2x_exe_queue_obj *o,
1097 struct bnx2x_exeq_elem *elem)
1099 struct bnx2x_exeq_elem *pos;
1100 struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1102 /* Check commands pending for execution */
1103 list_for_each_entry(pos, &o->exe_queue, link)
1104 if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
1106 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1112 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
1113 struct bnx2x_exe_queue_obj *o,
1114 struct bnx2x_exeq_elem *elem)
1116 struct bnx2x_exeq_elem *pos;
1117 struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1119 /* Check commands pending for execution */
1120 list_for_each_entry(pos, &o->exe_queue, link)
1121 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
1123 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1129 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
1130 struct bnx2x_exe_queue_obj *o,
1131 struct bnx2x_exeq_elem *elem)
1133 struct bnx2x_exeq_elem *pos;
1134 struct bnx2x_vlan_mac_ramrod_data *data =
1135 &elem->cmd_data.vlan_mac.u.vlan_mac;
1137 /* Check commands pending for execution */
1138 list_for_each_entry(pos, &o->exe_queue, link)
1139 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1141 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1148 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
1150 * @bp: device handle
1151 * @qo: bnx2x_qable_obj
1152 * @elem: bnx2x_exeq_elem
1154 * Checks that the requested configuration can be added. If yes and if
1155 * requested, consume CAM credit.
1157 * The 'validate' is run after the 'optimize'.
1160 static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
1161 union bnx2x_qable_obj *qo,
1162 struct bnx2x_exeq_elem *elem)
1164 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1165 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1168 /* Check the registry */
1169 rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
1171 DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
1176 * Check if there is a pending ADD command for this
1177 * MAC/VLAN/VLAN-MAC. Return an error if there is.
1179 if (exeq->get(exeq, elem)) {
1180 DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
1185 * TODO: Check the pending MOVE from other objects where this
1186 * object is a destination object.
1189 /* Consume the credit unless asked not to */
1190 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1191 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1199 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
1201 * @bp: device handle
1202 * @qo: qable object to check
1203 * @elem: element that needs to be deleted
1205 * Checks that the requested configuration can be deleted. If yes and if
1206 * requested, returns a CAM credit.
1208 * The 'validate' is run after the 'optimize'.
1210 static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
1211 union bnx2x_qable_obj *qo,
1212 struct bnx2x_exeq_elem *elem)
1214 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1215 struct bnx2x_vlan_mac_registry_elem *pos;
1216 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1217 struct bnx2x_exeq_elem query_elem;
1219 /* If this classification cannot be deleted (doesn't exist)
1220 * - return a BNX2X_EXIST.
1222 pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1224 DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
1229 * Check if there are pending DEL or MOVE commands for this
1230 * MAC/VLAN/VLAN-MAC. Return an error if so.
1232 memcpy(&query_elem, elem, sizeof(query_elem));
1234 /* Check for MOVE commands */
1235 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
1236 if (exeq->get(exeq, &query_elem)) {
1237 BNX2X_ERR("There is a pending MOVE command already\n");
1241 /* Check for DEL commands */
1242 if (exeq->get(exeq, elem)) {
1243 DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
1247 /* Return the credit to the credit pool unless asked not to */
1248 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1249 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1250 o->put_credit(o))) {
1251 BNX2X_ERR("Failed to return a credit\n");
1259 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
1261 * @bp: device handle
1262 * @qo: qable object to check (source)
1263 * @elem: element that needs to be moved
1265 * Checks that the requested configuration can be moved. If yes and if
1266 * requested, returns a CAM credit.
1268 * The 'validate' is run after the 'optimize'.
1270 static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
1271 union bnx2x_qable_obj *qo,
1272 struct bnx2x_exeq_elem *elem)
1274 struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
1275 struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1276 struct bnx2x_exeq_elem query_elem;
1277 struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
1278 struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1281 * Check if we can perform this operation based on the current registry
1284 if (!src_o->check_move(bp, src_o, dest_o,
1285 &elem->cmd_data.vlan_mac.u)) {
1286 DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
1291 * Check if there is an already pending DEL or MOVE command for the
1292 * source object or ADD command for a destination object. Return an
1295 memcpy(&query_elem, elem, sizeof(query_elem));
1297 /* Check DEL on source */
1298 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1299 if (src_exeq->get(src_exeq, &query_elem)) {
1300 BNX2X_ERR("There is a pending DEL command on the source queue already\n");
1304 /* Check MOVE on source */
1305 if (src_exeq->get(src_exeq, elem)) {
1306 DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
1310 /* Check ADD on destination */
1311 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1312 if (dest_exeq->get(dest_exeq, &query_elem)) {
1313 BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
1317 /* Consume the credit unless asked not to */
1318 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
1319 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1320 dest_o->get_credit(dest_o)))
1323 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1324 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1325 src_o->put_credit(src_o))) {
1326 /* return the credit taken from dest... */
1327 dest_o->put_credit(dest_o);
1334 static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
1335 union bnx2x_qable_obj *qo,
1336 struct bnx2x_exeq_elem *elem)
1338 switch (elem->cmd_data.vlan_mac.cmd) {
1339 case BNX2X_VLAN_MAC_ADD:
1340 return bnx2x_validate_vlan_mac_add(bp, qo, elem);
1341 case BNX2X_VLAN_MAC_DEL:
1342 return bnx2x_validate_vlan_mac_del(bp, qo, elem);
1343 case BNX2X_VLAN_MAC_MOVE:
1344 return bnx2x_validate_vlan_mac_move(bp, qo, elem);
1350 static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
1351 union bnx2x_qable_obj *qo,
1352 struct bnx2x_exeq_elem *elem)
1356 /* If consumption wasn't required, nothing to do */
1357 if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1358 &elem->cmd_data.vlan_mac.vlan_mac_flags))
1361 switch (elem->cmd_data.vlan_mac.cmd) {
1362 case BNX2X_VLAN_MAC_ADD:
1363 case BNX2X_VLAN_MAC_MOVE:
1364 rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1366 case BNX2X_VLAN_MAC_DEL:
1367 rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1380 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1382 * @bp: device handle
1383 * @o: bnx2x_vlan_mac_obj
1386 static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
1387 struct bnx2x_vlan_mac_obj *o)
1390 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1391 struct bnx2x_raw_obj *raw = &o->raw;
1394 /* Wait for the current command to complete */
1395 rc = raw->wait_comp(bp, raw);
1399 /* Wait until there are no pending commands */
1400 if (!bnx2x_exe_queue_empty(exeq))
1401 usleep_range(1000, 2000);
1410 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
1412 * @bp: device handle
1413 * @o: bnx2x_vlan_mac_obj
1415 * @cont: if true schedule next execution chunk
1418 static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
1419 struct bnx2x_vlan_mac_obj *o,
1420 union event_ring_elem *cqe,
1421 unsigned long *ramrod_flags)
1423 struct bnx2x_raw_obj *r = &o->raw;
1426 /* Reset pending list */
1427 bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
1430 r->clear_pending(r);
1432 /* If ramrod failed this is most likely a SW bug */
1433 if (cqe->message.error)
1436 /* Run the next bulk of pending commands if requested */
1437 if (test_bit(RAMROD_CONT, ramrod_flags)) {
1438 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1443 /* If there is more work to do return PENDING */
1444 if (!bnx2x_exe_queue_empty(&o->exe_queue))
1451 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
1453 * @bp: device handle
1454 * @qo: bnx2x_qable_obj
1455 * @elem: bnx2x_exeq_elem
1457 static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
1458 union bnx2x_qable_obj *qo,
1459 struct bnx2x_exeq_elem *elem)
1461 struct bnx2x_exeq_elem query, *pos;
1462 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1463 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1465 memcpy(&query, elem, sizeof(query));
1467 switch (elem->cmd_data.vlan_mac.cmd) {
1468 case BNX2X_VLAN_MAC_ADD:
1469 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1471 case BNX2X_VLAN_MAC_DEL:
1472 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1475 /* Don't handle anything other than ADD or DEL */
1479 /* If we found the appropriate element - delete it */
1480 pos = exeq->get(exeq, &query);
1483 /* Return the credit of the optimized command */
1484 if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1485 &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1486 if ((query.cmd_data.vlan_mac.cmd ==
1487 BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
1488 BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
1490 } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1491 BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
1496 DP(BNX2X_MSG_SP, "Optimizing %s command\n",
1497 (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1500 list_del(&pos->link);
1501 bnx2x_exe_queue_free_elem(bp, pos);
1509 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
1511 * @bp: device handle
1517 * prepare a registry element according to the current command request.
1519 static inline int bnx2x_vlan_mac_get_registry_elem(
1521 struct bnx2x_vlan_mac_obj *o,
1522 struct bnx2x_exeq_elem *elem,
1524 struct bnx2x_vlan_mac_registry_elem **re)
1526 enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1527 struct bnx2x_vlan_mac_registry_elem *reg_elem;
1529 /* Allocate a new registry element if needed. */
1531 ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
1532 reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
1536 /* Get a new CAM offset */
1537 if (!o->get_cam_offset(o, ®_elem->cam_offset)) {
1539 * This should never happen, because we have checked the
1540 * CAM availability in 'validate'.
1547 DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
1549 /* Set a VLAN-MAC data */
1550 memcpy(®_elem->u, &elem->cmd_data.vlan_mac.u,
1551 sizeof(reg_elem->u));
1553 /* Copy the flags (needed for DEL and RESTORE flows) */
1554 reg_elem->vlan_mac_flags =
1555 elem->cmd_data.vlan_mac.vlan_mac_flags;
1556 } else /* DEL, RESTORE */
1557 reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1564 * bnx2x_execute_vlan_mac - execute vlan mac command
1566 * @bp: device handle
1571 * go and send a ramrod!
1573 static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1574 union bnx2x_qable_obj *qo,
1575 struct list_head *exe_chunk,
1576 unsigned long *ramrod_flags)
1578 struct bnx2x_exeq_elem *elem;
1579 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1580 struct bnx2x_raw_obj *r = &o->raw;
1582 bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
1583 bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1584 struct bnx2x_vlan_mac_registry_elem *reg_elem;
1585 enum bnx2x_vlan_mac_cmd cmd;
1588 * If DRIVER_ONLY execution is requested, clean up the registry
1589 * and exit. Otherwise send a ramrod to FW.
1592 WARN_ON(r->check_pending(r));
1597 /* Fill the ramrod data */
1598 list_for_each_entry(elem, exe_chunk, link) {
1599 cmd = elem->cmd_data.vlan_mac.cmd;
1601 * We will add to the target object in the MOVE command, so
1602 * change the object used for the CAM search.
1604 if (cmd == BNX2X_VLAN_MAC_MOVE)
1605 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1609 rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
1617 /* Push a new entry into the registry */
1619 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1620 (cmd == BNX2X_VLAN_MAC_MOVE)))
1621 list_add(®_elem->link, &cam_obj->head);
1623 /* Configure a single command in a ramrod data buffer */
1624 o->set_one_rule(bp, o, elem, idx,
1625 reg_elem->cam_offset);
1627 /* MOVE command consumes 2 entries in the ramrod data */
1628 if (cmd == BNX2X_VLAN_MAC_MOVE)
1635 * No need for an explicit memory barrier here: the ordering
1636 * between writing to the SPQ element and updating of the SPQ
1637 * producer (which involves a memory read) is enforced by the
1638 * full memory barrier we will have to put there
1639 * (inside bnx2x_sp_post()).
1642 rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1643 U64_HI(r->rdata_mapping),
1644 U64_LO(r->rdata_mapping),
1645 ETH_CONNECTION_TYPE);
1650 /* Now, when we are done with the ramrod - clean up the registry */
1651 list_for_each_entry(elem, exe_chunk, link) {
1652 cmd = elem->cmd_data.vlan_mac.cmd;
1653 if ((cmd == BNX2X_VLAN_MAC_DEL) ||
1654 (cmd == BNX2X_VLAN_MAC_MOVE)) {
1655 reg_elem = o->check_del(bp, o,
1656 &elem->cmd_data.vlan_mac.u);
1660 o->put_cam_offset(o, reg_elem->cam_offset);
1661 list_del(®_elem->link);
1672 r->clear_pending(r);
1674 /* Cleanup a registry in case of a failure */
1675 list_for_each_entry(elem, exe_chunk, link) {
1676 cmd = elem->cmd_data.vlan_mac.cmd;
1678 if (cmd == BNX2X_VLAN_MAC_MOVE)
1679 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1683 /* Delete all the entries newly added above */
1685 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1686 (cmd == BNX2X_VLAN_MAC_MOVE))) {
1687 reg_elem = o->check_del(bp, cam_obj,
1688 &elem->cmd_data.vlan_mac.u);
1690 list_del(®_elem->link);
1699 static inline int bnx2x_vlan_mac_push_new_cmd(
1701 struct bnx2x_vlan_mac_ramrod_params *p)
1703 struct bnx2x_exeq_elem *elem;
1704 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1705 bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1707 /* Allocate the execution queue element */
1708 elem = bnx2x_exe_queue_alloc_elem(bp);
1712 /* Set the command 'length' */
1713 switch (p->user_req.cmd) {
1714 case BNX2X_VLAN_MAC_MOVE:
1721 /* Fill the object specific info */
1722 memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1724 /* Try to add a new command to the pending list */
1725 return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1729 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1731 * @bp: device handle
1735 int bnx2x_config_vlan_mac(
1737 struct bnx2x_vlan_mac_ramrod_params *p)
1740 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1741 unsigned long *ramrod_flags = &p->ramrod_flags;
1742 bool cont = test_bit(RAMROD_CONT, ramrod_flags);
1743 struct bnx2x_raw_obj *raw = &o->raw;
1746 * Add new elements to the execution list for commands that require it.
1749 rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
1755 * If nothing more will be executed in this iteration, we want to
1756 * return PENDING if there are pending commands
1758 if (!bnx2x_exe_queue_empty(&o->exe_queue))
1761 if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
1762 DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
1763 raw->clear_pending(raw);
1766 /* Execute commands if required */
1767 if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
1768 test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
1769 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1775 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
1776 * then the user wants to wait until the last command is done.
1778 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1780 * Wait at most for the current exe_queue length iterations plus
1781 * one (for the current pending command).
1783 int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1785 while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
1788 /* Wait for the current command to complete */
1789 rc = raw->wait_comp(bp, raw);
1793 /* Make a next step */
1794 rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
1809 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1811 * @bp: device handle
1814 * @ramrod_flags: execution flags to be used for this deletion
1816 * if the last operation has completed successfully and there are no
1817 * more elements left, positive value if the last operation has completed
1818 * successfully and there are more previously configured elements, negative
1819 * value if the current operation has failed.
1821 static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1822 struct bnx2x_vlan_mac_obj *o,
1823 unsigned long *vlan_mac_flags,
1824 unsigned long *ramrod_flags)
1826 struct bnx2x_vlan_mac_registry_elem *pos = NULL;
1828 struct bnx2x_vlan_mac_ramrod_params p;
1829 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1830 struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
1832 /* Clear pending commands first */
1834 spin_lock_bh(&exeq->lock);
1836 list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
1837 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
1839 rc = exeq->remove(bp, exeq->owner, exeq_pos);
1841 BNX2X_ERR("Failed to remove command\n");
1842 spin_unlock_bh(&exeq->lock);
1845 list_del(&exeq_pos->link);
1846 bnx2x_exe_queue_free_elem(bp, exeq_pos);
1850 spin_unlock_bh(&exeq->lock);
1852 /* Prepare a command request */
1853 memset(&p, 0, sizeof(p));
1855 p.ramrod_flags = *ramrod_flags;
1856 p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
1859 * Add all but the last VLAN-MAC to the execution queue without actually
1860 * executing anything.
1862 __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
1863 __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
1864 __clear_bit(RAMROD_CONT, &p.ramrod_flags);
1866 list_for_each_entry(pos, &o->head, link) {
1867 if (pos->vlan_mac_flags == *vlan_mac_flags) {
1868 p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1869 memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
1870 rc = bnx2x_config_vlan_mac(bp, &p);
1872 BNX2X_ERR("Failed to add a new DEL command\n");
1878 p.ramrod_flags = *ramrod_flags;
1879 __set_bit(RAMROD_CONT, &p.ramrod_flags);
1881 return bnx2x_config_vlan_mac(bp, &p);
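/* Illustrative sketch (not part of the driver): dropping every ETH MAC
 * previously configured on an object and waiting for completion:
 *
 *	unsigned long vlan_mac_flags = 0, ramrod_flags = 0;
 *
 *	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
 *	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 *	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
 */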
1884 static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1885 u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1886 unsigned long *pstate, bnx2x_obj_type type)
1888 raw->func_id = func_id;
1892 raw->rdata_mapping = rdata_mapping;
1894 raw->pstate = pstate;
1895 raw->obj_type = type;
1896 raw->check_pending = bnx2x_raw_check_pending;
1897 raw->clear_pending = bnx2x_raw_clear_pending;
1898 raw->set_pending = bnx2x_raw_set_pending;
1899 raw->wait_comp = bnx2x_raw_wait;
1902 static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1903 u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1904 int state, unsigned long *pstate, bnx2x_obj_type type,
1905 struct bnx2x_credit_pool_obj *macs_pool,
1906 struct bnx2x_credit_pool_obj *vlans_pool)
1908 INIT_LIST_HEAD(&o->head);
1910 o->macs_pool = macs_pool;
1911 o->vlans_pool = vlans_pool;
1913 o->delete_all = bnx2x_vlan_mac_del_all;
1914 o->restore = bnx2x_vlan_mac_restore;
1915 o->complete = bnx2x_complete_vlan_mac;
1916 o->wait = bnx2x_wait_vlan_mac;
1918 bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1919 state, pstate, type);
1923 void bnx2x_init_mac_obj(struct bnx2x *bp,
1924 struct bnx2x_vlan_mac_obj *mac_obj,
1925 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1926 dma_addr_t rdata_mapping, int state,
1927 unsigned long *pstate, bnx2x_obj_type type,
1928 struct bnx2x_credit_pool_obj *macs_pool)
1930 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1932 bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1933 rdata_mapping, state, pstate, type,
1936 /* CAM credit pool handling */
1937 mac_obj->get_credit = bnx2x_get_credit_mac;
1938 mac_obj->put_credit = bnx2x_put_credit_mac;
1939 mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1940 mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1942 if (CHIP_IS_E1x(bp)) {
1943 mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
1944 mac_obj->check_del = bnx2x_check_mac_del;
1945 mac_obj->check_add = bnx2x_check_mac_add;
1946 mac_obj->check_move = bnx2x_check_move_always_err;
1947 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1950 bnx2x_exe_queue_init(bp,
1951 &mac_obj->exe_queue, 1, qable_obj,
1952 bnx2x_validate_vlan_mac,
1953 bnx2x_remove_vlan_mac,
1954 bnx2x_optimize_vlan_mac,
1955 bnx2x_execute_vlan_mac,
1956 bnx2x_exeq_get_mac);
1958 mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
1959 mac_obj->check_del = bnx2x_check_mac_del;
1960 mac_obj->check_add = bnx2x_check_mac_add;
1961 mac_obj->check_move = bnx2x_check_move;
1962 mac_obj->ramrod_cmd =
1963 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1964 mac_obj->get_n_elements = bnx2x_get_n_elements;
1967 bnx2x_exe_queue_init(bp,
1968 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1969 qable_obj, bnx2x_validate_vlan_mac,
1970 bnx2x_remove_vlan_mac,
1971 bnx2x_optimize_vlan_mac,
1972 bnx2x_execute_vlan_mac,
1973 bnx2x_exeq_get_mac);
1977 void bnx2x_init_vlan_obj(struct bnx2x *bp,
1978 struct bnx2x_vlan_mac_obj *vlan_obj,
1979 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1980 dma_addr_t rdata_mapping, int state,
1981 unsigned long *pstate, bnx2x_obj_type type,
1982 struct bnx2x_credit_pool_obj *vlans_pool)
1984 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
1986 bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
1987 rdata_mapping, state, pstate, type, NULL,
1990 vlan_obj->get_credit = bnx2x_get_credit_vlan;
1991 vlan_obj->put_credit = bnx2x_put_credit_vlan;
1992 vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
1993 vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
1995 if (CHIP_IS_E1x(bp)) {
1996 BNX2X_ERR("Do not support chips others than E2 and newer\n");
1999 vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
2000 vlan_obj->check_del = bnx2x_check_vlan_del;
2001 vlan_obj->check_add = bnx2x_check_vlan_add;
2002 vlan_obj->check_move = bnx2x_check_move;
2003 vlan_obj->ramrod_cmd =
2004 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2005 vlan_obj->get_n_elements = bnx2x_get_n_elements;
2008 bnx2x_exe_queue_init(bp,
2009 &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2010 qable_obj, bnx2x_validate_vlan_mac,
2011 bnx2x_remove_vlan_mac,
2012 bnx2x_optimize_vlan_mac,
2013 bnx2x_execute_vlan_mac,
2014 bnx2x_exeq_get_vlan);
2018 void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
2019 struct bnx2x_vlan_mac_obj *vlan_mac_obj,
2020 u8 cl_id, u32 cid, u8 func_id, void *rdata,
2021 dma_addr_t rdata_mapping, int state,
2022 unsigned long *pstate, bnx2x_obj_type type,
2023 struct bnx2x_credit_pool_obj *macs_pool,
2024 struct bnx2x_credit_pool_obj *vlans_pool)
2026 union bnx2x_qable_obj *qable_obj =
2027 (union bnx2x_qable_obj *)vlan_mac_obj;
2029 bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2030 rdata_mapping, state, pstate, type,
2031 macs_pool, vlans_pool);
2033 /* CAM pool handling */
2034 vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
2035 vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
2037 * CAM offset is relevant only for 57710 and 57711 chips, which have a
2038 * single CAM for both MACs and VLAN-MAC pairs. So the offset
2039 * will be taken from MACs' pool object only.
2041 vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2042 vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2044 if (CHIP_IS_E1(bp)) {
2045 BNX2X_ERR("Do not support chips others than E2\n");
2047 } else if (CHIP_IS_E1H(bp)) {
2048 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
2049 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2050 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2051 vlan_mac_obj->check_move = bnx2x_check_move_always_err;
2052 vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
2055 bnx2x_exe_queue_init(bp,
2056 &vlan_mac_obj->exe_queue, 1, qable_obj,
2057 bnx2x_validate_vlan_mac,
2058 bnx2x_remove_vlan_mac,
2059 bnx2x_optimize_vlan_mac,
2060 bnx2x_execute_vlan_mac,
2061 bnx2x_exeq_get_vlan_mac);
2063 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
2064 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2065 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2066 vlan_mac_obj->check_move = bnx2x_check_move;
2067 vlan_mac_obj->ramrod_cmd =
2068 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2071 bnx2x_exe_queue_init(bp,
2072 &vlan_mac_obj->exe_queue,
2073 CLASSIFY_RULES_COUNT,
2074 qable_obj, bnx2x_validate_vlan_mac,
2075 bnx2x_remove_vlan_mac,
2076 bnx2x_optimize_vlan_mac,
2077 bnx2x_execute_vlan_mac,
2078 bnx2x_exeq_get_vlan_mac);
2083 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2084 static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2085 struct tstorm_eth_mac_filter_config *mac_filters,
2088 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2090 u32 addr = BAR_TSTRORM_INTMEM +
2091 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2093 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2096 static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2097 struct bnx2x_rx_mode_ramrod_params *p)
2099 /* update the bp MAC filter structure */
2100 u32 mask = (1 << p->cl_id);
2102 struct tstorm_eth_mac_filter_config *mac_filters =
2103 (struct tstorm_eth_mac_filter_config *)p->rdata;
2105 /* initial setting is drop-all */
2106 u8 drop_all_ucast = 1, drop_all_mcast = 1;
2107 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2108 u8 unmatched_unicast = 0;
2110 /* In e1x we only take into account the rx accept flag since tx switching
2112 if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
2113 /* accept matched ucast */
2116 if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
2117 /* accept matched mcast */
2120 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2121 /* accept all ucast */
2125 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2126 /* accept all mcast */
2130 if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
2131 /* accept (all) bcast */
2133 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2134 /* accept unmatched unicasts */
2135 unmatched_unicast = 1;
2137 mac_filters->ucast_drop_all = drop_all_ucast ?
2138 mac_filters->ucast_drop_all | mask :
2139 mac_filters->ucast_drop_all & ~mask;
2141 mac_filters->mcast_drop_all = drop_all_mcast ?
2142 mac_filters->mcast_drop_all | mask :
2143 mac_filters->mcast_drop_all & ~mask;
2145 mac_filters->ucast_accept_all = accp_all_ucast ?
2146 mac_filters->ucast_accept_all | mask :
2147 mac_filters->ucast_accept_all & ~mask;
2149 mac_filters->mcast_accept_all = accp_all_mcast ?
2150 mac_filters->mcast_accept_all | mask :
2151 mac_filters->mcast_accept_all & ~mask;
2153 mac_filters->bcast_accept_all = accp_all_bcast ?
2154 mac_filters->bcast_accept_all | mask :
2155 mac_filters->bcast_accept_all & ~mask;
2157 mac_filters->unmatched_unicast = unmatched_unicast ?
2158 mac_filters->unmatched_unicast | mask :
2159 mac_filters->unmatched_unicast & ~mask;
2161 DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\naccp_ucast 0x%x\n"
2162 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2163 mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2164 mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2165 mac_filters->bcast_accept_all);
2167 /* write the MAC filter structure */
2168 __storm_memset_mac_filters(bp, mac_filters, p->func_id);
2170 /* The operation is completed */
2171 clear_bit(p->state, p->pstate);
2172 smp_mb__after_clear_bit();
2177 /* Setup ramrod data */
2178 static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2179 struct eth_classify_header *hdr,
2182 hdr->echo = cpu_to_le32(cid);
2183 hdr->rule_cnt = rule_cnt;
2186 static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2187 unsigned long *accept_flags,
2188 struct eth_filter_rules_cmd *cmd,
2189 bool clear_accept_all)
2193 /* start with 'drop-all' */
2194 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2195 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2197 if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
2198 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2200 if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
2201 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2203 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
2204 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2205 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2208 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
2209 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2210 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2213 if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
2214 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2216 if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
2217 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2218 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2221 if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
2222 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2224 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2225 if (clear_accept_all) {
2226 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2227 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2228 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2229 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2232 cmd->state = cpu_to_le16(state);
2236 static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2237 struct bnx2x_rx_mode_ramrod_params *p)
2239 struct eth_filter_rules_ramrod_data *data = p->rdata;
2243 /* Reset the ramrod data buffer */
2244 memset(data, 0, sizeof(*data));
2246 /* Setup ramrod data */
2248 /* Tx (internal switching) */
2249 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2250 data->rules[rule_idx].client_id = p->cl_id;
2251 data->rules[rule_idx].func_id = p->func_id;
2253 data->rules[rule_idx].cmd_general_data =
2254 ETH_FILTER_RULES_CMD_TX_CMD;
2256 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2257 &(data->rules[rule_idx++]),
2262 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2263 data->rules[rule_idx].client_id = p->cl_id;
2264 data->rules[rule_idx].func_id = p->func_id;
2266 data->rules[rule_idx].cmd_general_data =
2267 ETH_FILTER_RULES_CMD_RX_CMD;
2269 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2270 &(data->rules[rule_idx++]),
2276 * If an FCoE Queue configuration has been requested, configure the Rx and
2277 * internal switching modes for this queue in separate rules.
2279 * An FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2280 * MCAST_ALL, UCAST_ALL, BCAST_ALL or UNMATCHED.
2282 if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2283 /* Tx (internal switching) */
2284 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2285 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2286 data->rules[rule_idx].func_id = p->func_id;
2288 data->rules[rule_idx].cmd_general_data =
2289 ETH_FILTER_RULES_CMD_TX_CMD;
2291 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2292 &(data->rules[rule_idx]),
2298 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2299 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2300 data->rules[rule_idx].func_id = p->func_id;
2302 data->rules[rule_idx].cmd_general_data =
2303 ETH_FILTER_RULES_CMD_RX_CMD;
2305 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2306 &(data->rules[rule_idx]),
2313 * Set the ramrod header (most importantly - number of rules to
2316 bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2318 DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
2319 data->header.rule_cnt, p->rx_accept_flags,
2320 p->tx_accept_flags);
2323 * No need for an explicit memory barrier here as long as we
2324 * ensure the ordering of writing to the SPQ element
2325 * and updating of the SPQ producer which involves a memory
2326 * read. If the memory read is removed we will have to put a
2327 * full memory barrier there (inside bnx2x_sp_post()).
2331 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2332 U64_HI(p->rdata_mapping),
2333 U64_LO(p->rdata_mapping),
2334 ETH_CONNECTION_TYPE);
2338 /* Ramrod completion is pending */
2342 static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2343 struct bnx2x_rx_mode_ramrod_params *p)
2345 return bnx2x_state_wait(bp, p->state, p->pstate);
2348 static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2349 struct bnx2x_rx_mode_ramrod_params *p)
2355 int bnx2x_config_rx_mode(struct bnx2x *bp,
2356 struct bnx2x_rx_mode_ramrod_params *p)
2360 /* Configure the new classification in the chip */
2361 rc = p->rx_mode_obj->config_rx_mode(bp, p);
2365 /* Wait for a ramrod completion if was requested */
2366 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2367 rc = p->rx_mode_obj->wait_comp(bp, p);
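/* Usage sketch (compiled out): a minimal synchronous rx-mode request
 * through bnx2x_config_rx_mode(). Only the flag handling is shown;
 * real callers also fill cl_id, cid, func_id, rdata and the state bits.
 */
#if 0
static int example_set_rx_mode(struct bnx2x *bp,
			       struct bnx2x_rx_mode_ramrod_params *p)
{
	/* Configure both directions and wait for the ramrod completion */
	__set_bit(RAMROD_RX, &p->ramrod_flags);
	__set_bit(RAMROD_TX, &p->ramrod_flags);
	__set_bit(RAMROD_COMP_WAIT, &p->ramrod_flags);

	return bnx2x_config_rx_mode(bp, p);
}
#endif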
2375 void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2376 struct bnx2x_rx_mode_obj *o)
2378 if (CHIP_IS_E1x(bp)) {
2379 o->wait_comp = bnx2x_empty_rx_mode_wait;
2380 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2382 o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
2383 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2387 /********************* Multicast verbs: SET, CLEAR ****************************/
2388 static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2390 return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
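/* Example (compiled out): the bin is the most significant byte of the
 * little-endian CRC32c of the MAC, i.e. a value in [0, 255] selecting
 * one bit of the 256-bit approximate match vector. The MAC below (the
 * IPv6 all-nodes multicast address) is for illustration only.
 */
#if 0
static void example_mcast_bin(void)
{
	u8 mac[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 };
	u8 bin = bnx2x_mcast_bin_from_mac(mac);

	/* 'bin' indexes the registry's aprox_match bit vector */
	(void)bin;
}
#endif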
2393 struct bnx2x_mcast_mac_elem {
2394 struct list_head link;
2396 u8 pad[2]; /* For a natural alignment of the following buffer */
2399 struct bnx2x_pending_mcast_cmd {
2400 struct list_head link;
2401 int type; /* BNX2X_MCAST_CMD_X */
2403 struct list_head macs_head;
2404 u32 macs_num; /* Needed for DEL command */
2405 int next_bin; /* Needed for RESTORE flow with approx match */
2408 bool done; /* Set to true when the command has been handled.
2409 * Practically used in 57712 handling only, where one pending
2410 * command may be handled in a few operations. Since for
2411 * other chips every operation is completed in a
2412 * single ramrod, there is no need to utilize this field.
2416 static int bnx2x_mcast_wait(struct bnx2x *bp,
2417 struct bnx2x_mcast_obj *o)
2419 if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2420 o->raw.wait_comp(bp, &o->raw))
2426 static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2427 struct bnx2x_mcast_obj *o,
2428 struct bnx2x_mcast_ramrod_params *p,
2429 enum bnx2x_mcast_cmd cmd)
2432 struct bnx2x_pending_mcast_cmd *new_cmd;
2433 struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2434 struct bnx2x_mcast_list_elem *pos;
2435 int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2436 p->mcast_list_len : 0);
2438 /* If the command is empty ("handle pending commands only"), break */
2439 if (!p->mcast_list_len)
2442 total_sz = sizeof(*new_cmd) +
2443 macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2445 /* Add mcast is called under spin_lock, thus we allocate with GFP_ATOMIC */
2446 new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2451 DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
2452 cmd, macs_list_len);
2454 INIT_LIST_HEAD(&new_cmd->data.macs_head);
2456 new_cmd->type = cmd;
2457 new_cmd->done = false;
2460 case BNX2X_MCAST_CMD_ADD:
2461 cur_mac = (struct bnx2x_mcast_mac_elem *)
2462 ((u8 *)new_cmd + sizeof(*new_cmd));
2464 /* Push the MACs of the current command into the pending command
2467 list_for_each_entry(pos, &p->mcast_list, link) {
2468 memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2469 list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2475 case BNX2X_MCAST_CMD_DEL:
2476 new_cmd->data.macs_num = p->mcast_list_len;
2479 case BNX2X_MCAST_CMD_RESTORE:
2480 new_cmd->data.next_bin = 0;
2485 BNX2X_ERR("Unknown command: %d\n", cmd);
2489 /* Push the new pending command to the tail of the pending list: FIFO */
2490 list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2498 * bnx2x_mcast_get_next_bin - get the next set bin (index)
2501 * @last: index to start looking from (inclusive)
2503 * returns the next found (set) bin or a negative value if none is found.
2505 static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2507 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2509 for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2510 if (o->registry.aprox_match.vec[i])
2511 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2512 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2513 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2526 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2530 * returns the index of the found bin or -1 if none is found
2532 static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2534 int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2537 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2542 static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2544 struct bnx2x_raw_obj *raw = &o->raw;
2547 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2548 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2549 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2551 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2552 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2553 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2558 static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2559 struct bnx2x_mcast_obj *o, int idx,
2560 union bnx2x_mcast_config_data *cfg_data,
2561 enum bnx2x_mcast_cmd cmd)
2563 struct bnx2x_raw_obj *r = &o->raw;
2564 struct eth_multicast_rules_ramrod_data *data =
2565 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2566 u8 func_id = r->func_id;
2567 u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2570 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2571 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2573 data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2575 /* Get a bin and update the bins' vector */
2577 case BNX2X_MCAST_CMD_ADD:
2578 bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2579 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2582 case BNX2X_MCAST_CMD_DEL:
2583 /* If there were no more bins to clear
2584 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
2585 * clear any (0xff) bin.
2586 * See bnx2x_mcast_validate_e2() for explanation when it may
2589 bin = bnx2x_mcast_clear_first_bin(o);
2592 case BNX2X_MCAST_CMD_RESTORE:
2593 bin = cfg_data->bin;
2597 BNX2X_ERR("Unknown command: %d\n", cmd);
2601 DP(BNX2X_MSG_SP, "%s bin %d\n",
2602 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2603 "Setting" : "Clearing"), bin);
2605 data->rules[idx].bin_id = (u8)bin;
2606 data->rules[idx].func_id = func_id;
2607 data->rules[idx].engine_id = o->engine_id;
2611 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2613 * @bp: device handle
2615 * @start_bin: index in the registry to start from (inclusive)
2616 * @rdata_idx: index in the ramrod data to start from
2618 * returns last handled bin index or -1 if all bins have been handled
2620 static inline int bnx2x_mcast_handle_restore_cmd_e2(
2621 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin,
2624 int cur_bin, cnt = *rdata_idx;
2625 union bnx2x_mcast_config_data cfg_data = {NULL};
2627 /* go through the registry and configure the bins from it */
2628 for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2629 cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2631 cfg_data.bin = (u8)cur_bin;
2632 o->set_one_rule(bp, o, cnt, &cfg_data,
2633 BNX2X_MCAST_CMD_RESTORE);
2637 DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2639 /* Break if we reached the maximum number
2642 if (cnt >= o->max_cmd_len)
2651 static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2652 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2655 struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2656 int cnt = *line_idx;
2657 union bnx2x_mcast_config_data cfg_data = {NULL};
2659 list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2662 cfg_data.mac = &pmac_pos->mac[0];
2663 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2667 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2670 list_del(&pmac_pos->link);
2672 /* Break if we reached the maximum number
2675 if (cnt >= o->max_cmd_len)
2681 /* if no more MACs to configure - we are done */
2682 if (list_empty(&cmd_pos->data.macs_head))
2683 cmd_pos->done = true;
2686 static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2687 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2690 int cnt = *line_idx;
2692 while (cmd_pos->data.macs_num) {
2693 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2697 cmd_pos->data.macs_num--;
2699 DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
2700 cmd_pos->data.macs_num, cnt);
2702 /* Break if we reached the maximum
2705 if (cnt >= o->max_cmd_len)
2711 /* If we cleared all bins - we are done */
2712 if (!cmd_pos->data.macs_num)
2713 cmd_pos->done = true;
2716 static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2717 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2720 cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2723 if (cmd_pos->data.next_bin < 0)
2724 /* If o->hdl_restore returned -1 we are done */
2725 cmd_pos->done = true;
2727 /* Start from the next bin next time */
2728 cmd_pos->data.next_bin++;
2731 static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2732 struct bnx2x_mcast_ramrod_params *p)
2734 struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2736 struct bnx2x_mcast_obj *o = p->mcast_obj;
2738 list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2740 switch (cmd_pos->type) {
2741 case BNX2X_MCAST_CMD_ADD:
2742 bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2745 case BNX2X_MCAST_CMD_DEL:
2746 bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2749 case BNX2X_MCAST_CMD_RESTORE:
2750 bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2755 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2759 /* If the command has been completed - remove it from the list
2760 * and free the memory
2762 if (cmd_pos->done) {
2763 list_del(&cmd_pos->link);
2767 /* Break if we reached the maximum number of rules */
2768 if (cnt >= o->max_cmd_len)
2775 static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2776 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2779 struct bnx2x_mcast_list_elem *mlist_pos;
2780 union bnx2x_mcast_config_data cfg_data = {NULL};
2781 int cnt = *line_idx;
2783 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2784 cfg_data.mac = mlist_pos->mac;
2785 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2789 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2796 static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2797 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2800 int cnt = *line_idx, i;
2802 for (i = 0; i < p->mcast_list_len; i++) {
2803 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2807 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2808 p->mcast_list_len - i - 1);
2815 * bnx2x_mcast_handle_current_cmd - handle the current (non-pending) command
2817 * @bp: device handle
2820 * @start_cnt: first line in the ramrod data that may be used
2822 * This function is called iff there is enough room for the current command in
2824 * Returns number of lines filled in the ramrod data in total.
2826 static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
2827 struct bnx2x_mcast_ramrod_params *p,
2828 enum bnx2x_mcast_cmd cmd,
2831 struct bnx2x_mcast_obj *o = p->mcast_obj;
2832 int cnt = start_cnt;
2834 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2837 case BNX2X_MCAST_CMD_ADD:
2838 bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2841 case BNX2X_MCAST_CMD_DEL:
2842 bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2845 case BNX2X_MCAST_CMD_RESTORE:
2846 o->hdl_restore(bp, o, 0, &cnt);
2850 BNX2X_ERR("Unknown command: %d\n", cmd);
2854 /* The current command has been handled */
2855 p->mcast_list_len = 0;
2860 static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
2861 struct bnx2x_mcast_ramrod_params *p,
2862 enum bnx2x_mcast_cmd cmd)
2864 struct bnx2x_mcast_obj *o = p->mcast_obj;
2865 int reg_sz = o->get_registry_size(o);
2868 /* DEL command deletes all currently configured MACs */
2869 case BNX2X_MCAST_CMD_DEL:
2870 o->set_registry_size(o, 0);
2873 /* RESTORE command will restore the entire multicast configuration */
2874 case BNX2X_MCAST_CMD_RESTORE:
2875 /* Here we set the approximate amount of work to do, which in
2876 * fact may be less, as some MACs in postponed ADD
2877 * command(s) scheduled before this command may fall into
2878 * the same bin and the actual number of bins set in the
2879 * registry would be less than we estimated here. See
2880 * bnx2x_mcast_set_one_rule_e2() for further details.
2882 p->mcast_list_len = reg_sz;
2885 case BNX2X_MCAST_CMD_ADD:
2886 case BNX2X_MCAST_CMD_CONT:
2887 /* Here we assume that all new MACs will fall into new bins.
2888 * However we will correct the real registry size after we
2889 * handle all pending commands.
2891 o->set_registry_size(o, reg_sz + p->mcast_list_len);
2895 BNX2X_ERR("Unknown command: %d\n", cmd);
2900 /* Increase the total number of MACs pending to be configured */
2901 o->total_pending_num += p->mcast_list_len;
2906 static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2907 struct bnx2x_mcast_ramrod_params *p,
2910 struct bnx2x_mcast_obj *o = p->mcast_obj;
2912 o->set_registry_size(o, old_num_bins);
2913 o->total_pending_num -= p->mcast_list_len;
2917 * bnx2x_mcast_set_rdata_hdr_e2 - sets the header values
2919 * @bp: device handle
2921 * @len: number of rules to handle
2923 static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
2924 struct bnx2x_mcast_ramrod_params *p,
2927 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
2928 struct eth_multicast_rules_ramrod_data *data =
2929 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2931 data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
2932 (BNX2X_FILTER_MCAST_PENDING <<
2933 BNX2X_SWCID_SHIFT));
2934 data->header.rule_cnt = len;
2938 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2940 * @bp: device handle
2943 * Recalculate the actual number of set bins in the registry using Brian
2944 * Kernighan's algorithm: its execution complexity scales with the number of set bins.
2946 * returns 0 to comply with bnx2x_mcast_refresh_registry_e1().
2948 static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2949 struct bnx2x_mcast_obj *o)
2954 for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2955 elem = o->registry.aprox_match.vec[i];
2960 o->set_registry_size(o, cnt);
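/* Sketch (compiled out) of the Kernighan bit count used above: clearing
 * the lowest set bit on every iteration makes the loop run once per set
 * bit rather than once per bit position.
 */
#if 0
static int example_kernighan_popcount(u64 elem)
{
	int cnt = 0;

	while (elem) {
		elem &= elem - 1;	/* clear the lowest set bit */
		cnt++;
	}

	return cnt;
}
#endif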
2965 static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2966 struct bnx2x_mcast_ramrod_params *p,
2967 enum bnx2x_mcast_cmd cmd)
2969 struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
2970 struct bnx2x_mcast_obj *o = p->mcast_obj;
2971 struct eth_multicast_rules_ramrod_data *data =
2972 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2975 /* Reset the ramrod data buffer */
2976 memset(data, 0, sizeof(*data));
2978 cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
2980 /* If there are no more pending commands - clear SCHEDULED state */
2981 if (list_empty(&o->pending_cmds_head))
2984 /* The below may be true iff there was enough room in ramrod
2985 * data for all pending commands and for the current
2986 * command. Otherwise the current command would have been added
2987 * to the pending commands and p->mcast_list_len would have been
2990 if (p->mcast_list_len > 0)
2991 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
2993 /* We've pulled out some MACs - update the total number of
2996 o->total_pending_num -= cnt;
2999 WARN_ON(o->total_pending_num < 0);
3000 WARN_ON(cnt > o->max_cmd_len);
3002 bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
3004 /* Update the registry size if there are no more pending operations.
3006 * We don't want to change the value of the registry size if there are
3007 * pending operations because we want it to always be equal to the
3008 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
3009 * set bins after the last requested operation in order to properly
3010 * evaluate the size of the next DEL/RESTORE operation.
3012 * Note that we update the registry itself during command(s) handling
3013 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
3014 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
3015 * with a limited amount of update commands (per MAC/bin) and we don't
3016 * know in this scope what the actual state of bins configuration is
3017 * going to be after this ramrod.
3019 if (!o->total_pending_num)
3020 bnx2x_mcast_refresh_registry_e2(bp, o);
3023 * If CLEAR_ONLY was requested - don't send a ramrod and clear
3024 * RAMROD_PENDING status immediately.
3026 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3027 raw->clear_pending(raw);
3031 * No need for an explicit memory barrier here as long as we
3032 * ensure the ordering of writing to the SPQ element
3033 * and updating of the SPQ producer which involves a memory
3034 * read. If the memory read is removed we will have to put a
3035 * full memory barrier there (inside bnx2x_sp_post()).
3039 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
3040 raw->cid, U64_HI(raw->rdata_mapping),
3041 U64_LO(raw->rdata_mapping),
3042 ETH_CONNECTION_TYPE);
3046 /* Ramrod completion is pending */
3051 static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3052 struct bnx2x_mcast_ramrod_params *p,
3053 enum bnx2x_mcast_cmd cmd)
3055 /* Mark that there is work to do */
3056 if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3057 p->mcast_list_len = 1;
3062 static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
3063 struct bnx2x_mcast_ramrod_params *p,
3069 #define BNX2X_57711_SET_MC_FILTER(filter, bit) \
3071 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
3074 static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3075 struct bnx2x_mcast_obj *o,
3076 struct bnx2x_mcast_ramrod_params *p,
3079 struct bnx2x_mcast_list_elem *mlist_pos;
3082 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3083 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3084 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3086 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
3087 mlist_pos->mac, bit);
3089 /* bookkeeping... */
3090 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3095 static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3096 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3101 for (bit = bnx2x_mcast_get_next_bin(o, 0);
3103 bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3104 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3105 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3109 /* On 57711 we write the multicast MACs' approximate match
3110 * table directly into the TSTORM's internal RAM, so we don't
3111 * really need any tricks to make it work.
3113 static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3114 struct bnx2x_mcast_ramrod_params *p,
3115 enum bnx2x_mcast_cmd cmd)
3118 struct bnx2x_mcast_obj *o = p->mcast_obj;
3119 struct bnx2x_raw_obj *r = &o->raw;
3121 /* If CLEAR_ONLY has been requested - clear the registry
3122 * and clear a pending bit.
3124 if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3125 u32 mc_filter[MC_HASH_SIZE] = {0};
3127 /* Set the multicast filter bits before writing it into
3128 * the internal memory.
3131 case BNX2X_MCAST_CMD_ADD:
3132 bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3135 case BNX2X_MCAST_CMD_DEL:
3137 "Invalidating multicast MACs configuration\n");
3139 /* clear the registry */
3140 memset(o->registry.aprox_match.vec, 0,
3141 sizeof(o->registry.aprox_match.vec));
3144 case BNX2X_MCAST_CMD_RESTORE:
3145 bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3149 BNX2X_ERR("Unknown command: %d\n", cmd);
3153 /* Set the mcast filter in the internal memory */
3154 for (i = 0; i < MC_HASH_SIZE; i++)
3155 REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3157 /* clear the registry */
3158 memset(o->registry.aprox_match.vec, 0,
3159 sizeof(o->registry.aprox_match.vec));
3162 r->clear_pending(r);
3167 static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3168 struct bnx2x_mcast_ramrod_params *p,
3169 enum bnx2x_mcast_cmd cmd)
3171 struct bnx2x_mcast_obj *o = p->mcast_obj;
3172 int reg_sz = o->get_registry_size(o);
3175 /* DEL command deletes all currently configured MACs */
3176 case BNX2X_MCAST_CMD_DEL:
3177 o->set_registry_size(o, 0);
3180 /* RESTORE command will restore the entire multicast configuration */
3181 case BNX2X_MCAST_CMD_RESTORE:
3182 p->mcast_list_len = reg_sz;
3183 DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3184 cmd, p->mcast_list_len);
3187 case BNX2X_MCAST_CMD_ADD:
3188 case BNX2X_MCAST_CMD_CONT:
3189 /* Multicast MACs on 57710 are configured as unicast MACs and
3190 * there is only a limited number of CAM entries for that
3193 if (p->mcast_list_len > o->max_cmd_len) {
3194 BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
3198 /* Every configured MAC should be cleared if the DEL command is
3199 * called. Only the last ADD command is relevant, since
3200 * every ADD command overrides the previous configuration.
3202 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3203 if (p->mcast_list_len > 0)
3204 o->set_registry_size(o, p->mcast_list_len);
3209 BNX2X_ERR("Unknown command: %d\n", cmd);
3214 /* We want to ensure that commands are executed one by one for 57710.
3215 * Therefore each non-empty command will consume o->max_cmd_len.
3217 if (p->mcast_list_len)
3218 o->total_pending_num += o->max_cmd_len;
3223 static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3224 struct bnx2x_mcast_ramrod_params *p,
3227 struct bnx2x_mcast_obj *o = p->mcast_obj;
3229 o->set_registry_size(o, old_num_macs);
3231 /* If the current command hasn't been handled yet and we are
3232 * here, it means that it's meant to be dropped and we have to
3233 * update the number of outstanding MACs accordingly.
3235 if (p->mcast_list_len)
3236 o->total_pending_num -= o->max_cmd_len;
3239 static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3240 struct bnx2x_mcast_obj *o, int idx,
3241 union bnx2x_mcast_config_data *cfg_data,
3242 enum bnx2x_mcast_cmd cmd)
3244 struct bnx2x_raw_obj *r = &o->raw;
3245 struct mac_configuration_cmd *data =
3246 (struct mac_configuration_cmd *)(r->rdata);
3249 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3250 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3251 &data->config_table[idx].middle_mac_addr,
3252 &data->config_table[idx].lsb_mac_addr,
3255 data->config_table[idx].vlan_id = 0;
3256 data->config_table[idx].pf_id = r->func_id;
3257 data->config_table[idx].clients_bit_vector =
3258 cpu_to_le32(1 << r->cl_id);
3260 SET_FLAG(data->config_table[idx].flags,
3261 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3262 T_ETH_MAC_COMMAND_SET);
3267 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
3269 * @bp: device handle
3271 * @len: number of rules to handle
3273 static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3274 struct bnx2x_mcast_ramrod_params *p,
3277 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3278 struct mac_configuration_cmd *data =
3279 (struct mac_configuration_cmd *)(r->rdata);
3281 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3282 BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3283 BNX2X_MAX_MULTICAST*(1 + r->func_id));
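/* e.g. on emulation (BNX2X_MAX_EMUL_MULTI == 16) function 1 gets the
 * config table window starting at offset 16 * (1 + 1) = 32
 */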
3285 data->hdr.offset = offset;
3286 data->hdr.client_id = cpu_to_le16(0xff);
3287 data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
3288 (BNX2X_FILTER_MCAST_PENDING <<
3289 BNX2X_SWCID_SHIFT));
3290 data->hdr.length = len;
3294 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3296 * @bp: device handle
3298 * @start_idx: index in the registry to start from
3299 * @rdata_idx: index in the ramrod data to start from
3301 * restore command for 57710 is like all other commands - always a stand-alone
3302 * command - start_idx and rdata_idx will always be 0. This function will always
3304 * succeed; it returns -1 to comply with the 57712 variant.
3306 static inline int bnx2x_mcast_handle_restore_cmd_e1(
3307 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx,
3310 struct bnx2x_mcast_mac_elem *elem;
3312 union bnx2x_mcast_config_data cfg_data = {NULL};
3314 /* go through the registry and configure the MACs from it. */
3315 list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3316 cfg_data.mac = &elem->mac[0];
3317 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3321 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3331 static inline int bnx2x_mcast_handle_pending_cmds_e1(
3332 struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3334 struct bnx2x_pending_mcast_cmd *cmd_pos;
3335 struct bnx2x_mcast_mac_elem *pmac_pos;
3336 struct bnx2x_mcast_obj *o = p->mcast_obj;
3337 union bnx2x_mcast_config_data cfg_data = {NULL};
3341 /* If nothing to be done - return */
3342 if (list_empty(&o->pending_cmds_head))
3345 /* Handle the first command */
3346 cmd_pos = list_first_entry(&o->pending_cmds_head,
3347 struct bnx2x_pending_mcast_cmd, link);
3349 switch (cmd_pos->type) {
3350 case BNX2X_MCAST_CMD_ADD:
3351 list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3352 cfg_data.mac = &pmac_pos->mac[0];
3353 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3357 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3362 case BNX2X_MCAST_CMD_DEL:
3363 cnt = cmd_pos->data.macs_num;
3364 DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3367 case BNX2X_MCAST_CMD_RESTORE:
3368 o->hdl_restore(bp, o, 0, &cnt);
3372 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3376 list_del(&cmd_pos->link);
3383 * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
3390 static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3391 __le16 *fw_lo, u8 *mac)
3393 mac[1] = ((u8 *)fw_hi)[0];
3394 mac[0] = ((u8 *)fw_hi)[1];
3395 mac[3] = ((u8 *)fw_mid)[0];
3396 mac[2] = ((u8 *)fw_mid)[1];
3397 mac[5] = ((u8 *)fw_lo)[0];
3398 mac[4] = ((u8 *)fw_lo)[1];
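/* Example (compiled out): round-tripping a MAC through the firmware
 * format. For 00:11:22:33:44:55, bnx2x_set_fw_mac_addr() packs the
 * bytes pairwise into fw_hi/fw_mid/fw_lo and the helper above undoes
 * that swizzle, so 'out' ends up equal to 'mac'.
 */
#if 0
static void example_fw_mac_roundtrip(void)
{
	u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	u8 out[ETH_ALEN];
	__le16 hi, mid, lo;

	bnx2x_set_fw_mac_addr(&hi, &mid, &lo, mac);
	bnx2x_get_fw_mac_addr(&hi, &mid, &lo, out);
}
#endif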
3402 * bnx2x_mcast_refresh_registry_e1 - update the exact match registry
3404 * @bp: device handle
3407 * Check the ramrod data's first entry flag to see if it's a DELETE or ADD command
3408 * and update the registry correspondingly: if ADD - allocate memory and add
3409 * the entries to the registry (list), if DELETE - clear the registry and free
3412 static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3413 struct bnx2x_mcast_obj *o)
3415 struct bnx2x_raw_obj *raw = &o->raw;
3416 struct bnx2x_mcast_mac_elem *elem;
3417 struct mac_configuration_cmd *data =
3418 (struct mac_configuration_cmd *)(raw->rdata);
3420 /* If first entry contains a SET bit - the command was ADD,
3421 * otherwise - DEL_ALL
3423 if (GET_FLAG(data->config_table[0].flags,
3424 MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3425 int i, len = data->hdr.length;
3427 /* Break if it was a RESTORE command */
3428 if (!list_empty(&o->registry.exact_match.macs))
3431 elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
3433 BNX2X_ERR("Failed to allocate registry memory\n");
3437 for (i = 0; i < len; i++, elem++) {
3438 bnx2x_get_fw_mac_addr(
3439 &data->config_table[i].msb_mac_addr,
3440 &data->config_table[i].middle_mac_addr,
3441 &data->config_table[i].lsb_mac_addr,
3443 DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
3445 list_add_tail(&elem->link,
3446 &o->registry.exact_match.macs);
3449 elem = list_first_entry(&o->registry.exact_match.macs,
3450 struct bnx2x_mcast_mac_elem, link);
3451 DP(BNX2X_MSG_SP, "Deleting a registry\n");
3453 INIT_LIST_HEAD(&o->registry.exact_match.macs);
3459 static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3460 struct bnx2x_mcast_ramrod_params *p,
3461 enum bnx2x_mcast_cmd cmd)
3463 struct bnx2x_mcast_obj *o = p->mcast_obj;
3464 struct bnx2x_raw_obj *raw = &o->raw;
3465 struct mac_configuration_cmd *data =
3466 (struct mac_configuration_cmd *)(raw->rdata);
3469 /* Reset the ramrod data buffer */
3470 memset(data, 0, sizeof(*data));
3472 /* First set all entries as invalid */
3473 for (i = 0; i < o->max_cmd_len ; i++)
3474 SET_FLAG(data->config_table[i].flags,
3475 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3476 T_ETH_MAC_COMMAND_INVALIDATE);
3478 /* Handle pending commands first */
3479 cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3481 /* If there are no more pending commands - clear SCHEDULED state */
3482 if (list_empty(&o->pending_cmds_head))
3485 /* The below may be true iff there were no pending commands */
3487 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3489 /* For 57710 every command has o->max_cmd_len length to ensure that
3490 * commands are done one at a time.
3492 o->total_pending_num -= o->max_cmd_len;
3496 WARN_ON(cnt > o->max_cmd_len);
3498 /* Set ramrod header (in particular, a number of entries to update) */
3499 bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3501 /* Update the registry: we need the registry contents to always be up
3502 * to date in order to be able to execute a RESTORE opcode. Here
3503 * we use the fact that for 57710 we send one command at a time,
3504 * hence we may take the registry update out of the command handling
3505 * and do it in a simpler way here.
3507 rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3512 * If CLEAR_ONLY was requested - don't send a ramrod and clear
3513 * RAMROD_PENDING status immediately.
3515 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3516 raw->clear_pending(raw);
3520 * No need for an explicit memory barrier here as long as we
3521 * ensure the ordering of writing to the SPQ element
3522 * and updating of the SPQ producer which involves a memory
3523 * read. If the memory read is removed we will have to put a
3524 * full memory barrier there (inside bnx2x_sp_post()).
3528 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3529 U64_HI(raw->rdata_mapping),
3530 U64_LO(raw->rdata_mapping),
3531 ETH_CONNECTION_TYPE);
3535 /* Ramrod completion is pending */
3541 static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3543 return o->registry.exact_match.num_macs_set;
3546 static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3548 return o->registry.aprox_match.num_bins_set;
3551 static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3554 o->registry.exact_match.num_macs_set = n;
3557 static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3560 o->registry.aprox_match.num_bins_set = n;
3563 int bnx2x_config_mcast(struct bnx2x *bp,
3564 struct bnx2x_mcast_ramrod_params *p,
3565 enum bnx2x_mcast_cmd cmd)
3567 struct bnx2x_mcast_obj *o = p->mcast_obj;
3568 struct bnx2x_raw_obj *r = &o->raw;
3569 int rc = 0, old_reg_size;
3571 /* This is needed to recover the number of currently configured
3572 * mcast MACs in case of failure.
3574 old_reg_size = o->get_registry_size(o);
3576 /* Do some calculations and checks */
3577 rc = o->validate(bp, p, cmd);
3581 /* Return if there is no work to do */
3582 if ((!p->mcast_list_len) && (!o->check_sched(o)))
3585 DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
3586 o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3588 /* Enqueue the current command to the pending list if we can't complete
3589 * it in the current iteration
3591 if (r->check_pending(r) ||
3592 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3593 rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3597 /* As long as the current command is in a command list we
3598 * don't need to handle it separately.
3600 p->mcast_list_len = 0;
3603 if (!r->check_pending(r)) {
3605 /* Set 'pending' state */
3608 /* Configure the new classification in the chip */
3609 rc = o->config_mcast(bp, p, cmd);
3613 /* Wait for a ramrod completion if was requested */
3614 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3615 rc = o->wait_comp(bp, o);
3621 r->clear_pending(r);
3624 o->revert(bp, p, old_reg_size);
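/* Usage sketch (compiled out): the common "replace the multicast list"
 * sequence - DEL everything first, then ADD the new list. Parameter
 * setup (mcast_obj, mcast_list, mcast_list_len) is elided; a positive
 * return means the completion is still pending (no RAMROD_COMP_WAIT).
 */
#if 0
static int example_replace_mcast_list(struct bnx2x *bp,
				      struct bnx2x_mcast_ramrod_params *p)
{
	int rc;

	rc = bnx2x_config_mcast(bp, p, BNX2X_MCAST_CMD_DEL);
	if (rc < 0)
		return rc;

	/* p->mcast_list / p->mcast_list_len now describe the new MACs */
	return bnx2x_config_mcast(bp, p, BNX2X_MCAST_CMD_ADD);
}
#endif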
3629 static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3631 smp_mb__before_clear_bit();
3632 clear_bit(o->sched_state, o->raw.pstate);
3633 smp_mb__after_clear_bit();
3636 static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3638 smp_mb__before_clear_bit();
3639 set_bit(o->sched_state, o->raw.pstate);
3640 smp_mb__after_clear_bit();
3643 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3645 return !!test_bit(o->sched_state, o->raw.pstate);
3648 static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3650 return o->raw.check_pending(&o->raw) || o->check_sched(o);
3653 void bnx2x_init_mcast_obj(struct bnx2x *bp,
3654 struct bnx2x_mcast_obj *mcast_obj,
3655 u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3656 u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3657 int state, unsigned long *pstate, bnx2x_obj_type type)
3659 memset(mcast_obj, 0, sizeof(*mcast_obj));
3661 bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3662 rdata, rdata_mapping, state, pstate, type);
3664 mcast_obj->engine_id = engine_id;
3666 INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3668 mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3669 mcast_obj->check_sched = bnx2x_mcast_check_sched;
3670 mcast_obj->set_sched = bnx2x_mcast_set_sched;
3671 mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3673 if (CHIP_IS_E1(bp)) {
3674 mcast_obj->config_mcast = bnx2x_mcast_setup_e1;
3675 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3676 mcast_obj->hdl_restore =
3677 bnx2x_mcast_handle_restore_cmd_e1;
3678 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3680 if (CHIP_REV_IS_SLOW(bp))
3681 mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3683 mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3685 mcast_obj->wait_comp = bnx2x_mcast_wait;
3686 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1;
3687 mcast_obj->validate = bnx2x_mcast_validate_e1;
3688 mcast_obj->revert = bnx2x_mcast_revert_e1;
3689 mcast_obj->get_registry_size =
3690 bnx2x_mcast_get_registry_size_exact;
3691 mcast_obj->set_registry_size =
3692 bnx2x_mcast_set_registry_size_exact;
3694 /* 57710 is the only chip that uses the exact match for mcast
3697 INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3699 } else if (CHIP_IS_E1H(bp)) {
3700 mcast_obj->config_mcast = bnx2x_mcast_setup_e1h;
3701 mcast_obj->enqueue_cmd = NULL;
3702 mcast_obj->hdl_restore = NULL;
3703 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3705 /* 57711 doesn't send a ramrod, so it has unlimited credit
3708 mcast_obj->max_cmd_len = -1;
3709 mcast_obj->wait_comp = bnx2x_mcast_wait;
3710 mcast_obj->set_one_rule = NULL;
3711 mcast_obj->validate = bnx2x_mcast_validate_e1h;
3712 mcast_obj->revert = bnx2x_mcast_revert_e1h;
3713 mcast_obj->get_registry_size =
3714 bnx2x_mcast_get_registry_size_aprox;
3715 mcast_obj->set_registry_size =
3716 bnx2x_mcast_set_registry_size_aprox;
3718 mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
3719 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3720 mcast_obj->hdl_restore =
3721 bnx2x_mcast_handle_restore_cmd_e2;
3722 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3723 /* TODO: There should be a proper HSI define for this number!!!
3725 mcast_obj->max_cmd_len = 16;
3726 mcast_obj->wait_comp = bnx2x_mcast_wait;
3727 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
3728 mcast_obj->validate = bnx2x_mcast_validate_e2;
3729 mcast_obj->revert = bnx2x_mcast_revert_e2;
3730 mcast_obj->get_registry_size =
3731 bnx2x_mcast_get_registry_size_aprox;
3732 mcast_obj->set_registry_size =
3733 bnx2x_mcast_set_registry_size_aprox;
3737 /*************************** Credit handling **********************************/
3740 * atomic_add_ifless - add if the result is less than a given value.
3742 * @v: pointer of type atomic_t
3743 * @a: the amount to add to v...
3744 * @u: ...if (v + a) is less than u.
3746 * returns true if (v + a) was less than u, and false otherwise.
3749 static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3755 if (unlikely(c + a >= u))
3758 old = atomic_cmpxchg((v), c, c + a);
3759 if (likely(old == c))
3768 * atomic_dec_ifmoe - dec if the result is greater than or equal to a given value.
3770 * @v: pointer of type atomic_t
3771 * @a: the amount to dec from v...
3772 * @u: ...if (v - a) is greater than or equal to u.
3774 * returns true if (v - a) was greater than or equal to u, and false
3777 static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3783 if (unlikely(c - a < u))
3786 old = atomic_cmpxchg((v), c, c - a);
3787 if (likely(old == c))
3795 static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
3800 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3806 static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
3812 /* Don't allow a refill if credit + cnt > pool_sz */
3813 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
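/* Sketch (compiled out) of the lock-free pattern both __atomic_*
 * helpers above rely on: read the counter, compute a candidate value
 * and retry atomic_cmpxchg() until no other CPU raced in between. The
 * limit checks of the real helpers are omitted here.
 */
#if 0
static void example_cmpxchg_add(atomic_t *v, int delta)
{
	int c, old;

	for (c = atomic_read(v); ; c = old) {
		old = atomic_cmpxchg(v, c, c + delta);
		if (old == c)
			break;	/* nobody raced us - update committed */
		/* another CPU changed *v - retry with the fresh value */
	}
}
#endif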
3820 static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3825 cur_credit = atomic_read(&o->credit);
3830 static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
3837 static bool bnx2x_credit_pool_get_entry(
3838 struct bnx2x_credit_pool_obj *o,
3845 /* Find "internal cam-offset" then add to base for this object... */
3846 for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
3848 /* Skip the current vector if there are no free entries in it */
3849 if (!o->pool_mirror[vec])
3852 /* If we've got here we are going to find a free entry */
3853 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3854 i < BIT_VEC64_ELEM_SZ; idx++, i++)
3856 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3858 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3859 *offset = o->base_pool_offset + idx;
3867 static bool bnx2x_credit_pool_put_entry(
3868 struct bnx2x_credit_pool_obj *o,
3871 if (offset < o->base_pool_offset)
3874 offset -= o->base_pool_offset;
3876 if (offset >= o->pool_sz)
3879 /* Return the entry to the pool */
3880 BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3885 static bool bnx2x_credit_pool_put_entry_always_true(
3886 struct bnx2x_credit_pool_obj *o,
3892 static bool bnx2x_credit_pool_get_entry_always_true(
3893 struct bnx2x_credit_pool_obj *o,
3900 * bnx2x_init_credit_pool - initialize credit pool internals.
3903 * @base: Base entry in the CAM to use.
3904 * @credit: pool size.
3906 * If base is negative, no CAM entry handling will be performed.
3907 * If credit is negative, pool operations will always succeed (unlimited pool).
3910 static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
3911 int base, int credit)
3913 /* Zero the object first */
3914 memset(p, 0, sizeof(*p));
3916 /* Set the table to all 1s */
3917 memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3919 /* Init a pool as full */
3920 atomic_set(&p->credit, credit);
3922 /* The total pool size */
3923 p->pool_sz = credit;
3925 p->base_pool_offset = base;
3927 /* Commit the change */
3930 p->check = bnx2x_credit_pool_check;
3932 /* if pool credit is negative - disable the checks */
3934 p->put = bnx2x_credit_pool_put;
3935 p->get = bnx2x_credit_pool_get;
3936 p->put_entry = bnx2x_credit_pool_put_entry;
3937 p->get_entry = bnx2x_credit_pool_get_entry;
3939 p->put = bnx2x_credit_pool_always_true;
3940 p->get = bnx2x_credit_pool_always_true;
3941 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3942 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3945 /* If base is negative - disable entries handling */
3947 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3948 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
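/* Examples (compiled out) of the three pool flavours the logic above
 * supports; the sizes are illustrative only.
 */
#if 0
static void example_credit_pools(struct bnx2x_credit_pool_obj *p)
{
	bnx2x_init_credit_pool(p, 0, 64);	/* 64 CAM entries from base 0 */
	bnx2x_init_credit_pool(p, -1, 64);	/* credit only, no CAM offsets */
	bnx2x_init_credit_pool(p, -1, -1);	/* unlimited: get/put always succeed */
}
#endif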
3952 void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
3953 struct bnx2x_credit_pool_obj *p, u8 func_id,
3956 /* TODO: this will be defined in consts as well... */
3957 #define BNX2X_CAM_SIZE_EMUL 5
3961 if (CHIP_IS_E1(bp)) {
3962 /* In E1, multicast is saved in the CAM... */
3963 if (!CHIP_REV_IS_SLOW(bp))
3964 cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
3966 cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
3968 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3970 } else if (CHIP_IS_E1H(bp)) {
3971 /* CAM credit is equally divided between all active functions
3974 if ((func_num > 0)) {
3975 if (!CHIP_REV_IS_SLOW(bp))
3976 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
3978 cam_sz = BNX2X_CAM_SIZE_EMUL;
3979 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3981 /* this should never happen! Block MAC operations. */
3982 bnx2x_init_credit_pool(p, 0, 0);
3988 * CAM credit is equally divided between all active functions
3991 if ((func_num > 0)) {
3992 if (!CHIP_REV_IS_SLOW(bp))
3993 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3995 cam_sz = BNX2X_CAM_SIZE_EMUL;
3998 * No need for CAM entries handling for 57712 and
4001 bnx2x_init_credit_pool(p, -1, cam_sz);
4003 /* this should never happen! Block MAC operations. */
4004 bnx2x_init_credit_pool(p, 0, 0);
4010 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
4011 struct bnx2x_credit_pool_obj *p,
4015 if (CHIP_IS_E1x(bp)) {
4017 * There is no VLAN credit in HW on 57710 and 57711; only
4018 * MAC / MAC-VLAN can be set
4020 bnx2x_init_credit_pool(p, 0, -1);
4023 * CAM credit is equally divided between all active functions
4027 int credit = MAX_VLAN_CREDIT_E2 / func_num;
4028 bnx2x_init_credit_pool(p, func_id * credit, credit);
4030 /* this should never happen! Block VLAN operations. */
4031 bnx2x_init_credit_pool(p, 0, 0);
4035 /****************** RSS Configuration ******************/
4037 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
4039 * @bp: driver handle
4040 * @p: pointer to rss configuration
4042 * Prints it when NETIF_MSG_IFUP debug level is configured.
4044 static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
4045 struct bnx2x_config_rss_params *p)
4049 DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
4050 DP(BNX2X_MSG_SP, "0x0000: ");
4051 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
4052 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
4054 /* Print 4 bytes in a line */
4055 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
4056 (((i + 1) & 0x3) == 0)) {
4057 DP_CONT(BNX2X_MSG_SP, "\n");
4058 DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4062 DP_CONT(BNX2X_MSG_SP, "\n");
4066 * bnx2x_setup_rss - configure RSS
4068 * @bp: device handle
4069 * @p: rss configuration
4071 * Sends an RSS UPDATE ramrod.
4073 static int bnx2x_setup_rss(struct bnx2x *bp,
4074 struct bnx2x_config_rss_params *p)
4076 struct bnx2x_rss_config_obj *o = p->rss_obj;
4077 struct bnx2x_raw_obj *r = &o->raw;
4078 struct eth_rss_update_ramrod_data *data =
4079 (struct eth_rss_update_ramrod_data *)(r->rdata);
4083 memset(data, 0, sizeof(*data));
4085 DP(BNX2X_MSG_SP, "Configuring RSS\n");
4087 /* Set an echo field */
4088 data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
4089 (r->state << BNX2X_SWCID_SHIFT));
4092 if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
4093 rss_mode = ETH_RSS_MODE_DISABLED;
4094 else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4095 rss_mode = ETH_RSS_MODE_REGULAR;
4097 data->rss_mode = rss_mode;
4099 DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
4101 /* RSS capabilities */
4102 if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4103 data->capabilities |=
4104 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4106 if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4107 data->capabilities |=
4108 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4110 if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
4111 data->capabilities |=
4112 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4114 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4115 data->capabilities |=
4116 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4118 if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4119 data->capabilities |=
4120 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4122 if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
4123 data->capabilities |=
4124 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
4127 data->rss_result_mask = p->rss_result_mask;
4130 data->rss_engine_id = o->engine_id;
4132 DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4134 /* Indirection table */
4135 memcpy(data->indirection_table, p->ind_table,
4136 T_ETH_INDIRECTION_TABLE_SIZE);
4138 /* Remember the last configuration */
4139 memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4141 /* Print the indirection table */
4142 if (netif_msg_ifup(bp))
4143 bnx2x_debug_print_ind_table(bp, p);
4146 if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4147 memcpy(&data->rss_key[0], &p->rss_key[0],
4148 sizeof(data->rss_key));
4149 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4153 * No need for an explicit memory barrier here as long as we
4154 * ensure the ordering of writing to the SPQ element
4155 * and updating of the SPQ producer which involves a memory
4156 * read. If the memory read is removed we will have to put a
4157 * full memory barrier there (inside bnx2x_sp_post()).
4161 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4162 U64_HI(r->rdata_mapping),
4163 U64_LO(r->rdata_mapping),
4164 ETH_CONNECTION_TYPE);
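/* Usage sketch (compiled out): a minimal "regular" RSS configuration
 * hashing TCP over IPv4/IPv6. rss_obj, rss_result_mask, rss_key and
 * ind_table are assumed to have been filled by the caller.
 */
#if 0
static int example_config_rss(struct bnx2x *bp,
			      struct bnx2x_config_rss_params *p)
{
	__set_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags);
	__set_bit(BNX2X_RSS_IPV4, &p->rss_flags);
	__set_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags);
	__set_bit(BNX2X_RSS_IPV6, &p->rss_flags);
	__set_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags);

	return bnx2x_config_rss(bp, p);
}
#endif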
4172 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4175 memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4178 int bnx2x_config_rss(struct bnx2x *bp,
4179 struct bnx2x_config_rss_params *p)
4182 struct bnx2x_rss_config_obj *o = p->rss_obj;
4183 struct bnx2x_raw_obj *r = &o->raw;
4185 /* Do nothing if only driver cleanup was requested */
4186 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4191 rc = o->config_rss(bp, p);
4193 r->clear_pending(r);
4197 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4198 rc = r->wait_comp(bp, r);
4204 void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4205 struct bnx2x_rss_config_obj *rss_obj,
4206 u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4207 void *rdata, dma_addr_t rdata_mapping,
4208 int state, unsigned long *pstate,
4209 bnx2x_obj_type type)
4211 bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4212 rdata_mapping, state, pstate, type);
4214 rss_obj->engine_id = engine_id;
4215 rss_obj->config_rss = bnx2x_setup_rss;
4218 /********************** Queue state object ***********************************/
4221 * bnx2x_queue_state_change - perform Queue state change transition
4223 * @bp: device handle
4224 * @params: parameters to perform the transition
4226 * returns 0 in case of a successfully completed transition, a negative error
4227 * code in case of failure, or a positive (EBUSY) value if the completion
4228 * is still pending (possible only if RAMROD_COMP_WAIT is
4229 * not set in params->ramrod_flags for asynchronous commands).
4232 int bnx2x_queue_state_change(struct bnx2x *bp,
4233 struct bnx2x_queue_state_params *params)
4235 struct bnx2x_queue_sp_obj *o = params->q_obj;
4236 int rc, pending_bit;
4237 unsigned long *pending = &o->pending;
4239 /* Check that the requested transition is legal */
4240 rc = o->check_transition(bp, o, params);
4242 BNX2X_ERR("check transition returned an error. rc %d\n", rc);
4246 /* Set "pending" bit */
4247 DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
4248 pending_bit = o->set_pending(o, params);
4249 DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);
4251 /* Don't send a command if only driver cleanup was requested */
4252 if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags))
4253 o->complete_cmd(bp, o, pending_bit);
4256 rc = o->send_cmd(bp, params);
4258 o->next_state = BNX2X_Q_STATE_MAX;
4259 clear_bit(pending_bit, pending);
4260 smp_mb__after_clear_bit();
4264 if (test_bit(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) {
4265 rc = o->wait_comp(bp, o, pending_bit);
4273 return !!test_bit(pending_bit, pending);
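/* Usage sketch (compiled out): issuing a synchronous INIT command
 * through the state machine above. Real callers also fill
 * params->params.init; only the command/flag flow is shown.
 */
#if 0
static int example_queue_init(struct bnx2x *bp,
			      struct bnx2x_queue_state_params *params)
{
	params->cmd = BNX2X_Q_CMD_INIT;
	__set_bit(RAMROD_COMP_WAIT, &params->ramrod_flags);

	return bnx2x_queue_state_change(bp, params);
}
#endif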
4277 static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4278 struct bnx2x_queue_state_params *params)
4280 enum bnx2x_queue_cmd cmd = params->cmd, bit;
4282 /* ACTIVATE and DEACTIVATE commands are implemented on top of
4285 if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4286 (cmd == BNX2X_Q_CMD_DEACTIVATE))
4287 bit = BNX2X_Q_CMD_UPDATE;
4291 set_bit(bit, &obj->pending);
4295 static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4296 struct bnx2x_queue_sp_obj *o,
4297 enum bnx2x_queue_cmd cmd)
4299 return bnx2x_state_wait(bp, cmd, &o->pending);
4303 * bnx2x_queue_comp_cmd - complete the state change command.
4305 * @bp: device handle
4309 * Checks that the completion that arrived is the expected one.
4311 static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4312 struct bnx2x_queue_sp_obj *o,
4313 enum bnx2x_queue_cmd cmd)
4315 unsigned long cur_pending = o->pending;
4317 if (!test_and_clear_bit(cmd, &cur_pending)) {
4318 BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
4319 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
4320 o->state, cur_pending, o->next_state);
4324 if (o->next_tx_only >= o->max_cos)
4325 /* >= because tx only must always be smaller than cos since the
4326 * primary connection supports COS 0
4328 BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
4329 o->next_tx_only, o->max_cos);
4332 "Completing command %d for queue %d, setting state to %d\n",
4333 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
4335 if (o->next_tx_only) /* print num tx-only if any exist */
4336 DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
4337 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
4339 o->state = o->next_state;
4340 o->num_tx_only = o->next_tx_only;
4341 o->next_state = BNX2X_Q_STATE_MAX;
4343 /* It's important that o->state and o->next_state are
4344 * updated before o->pending.
4348 clear_bit(cmd, &o->pending);
4349 smp_mb__after_clear_bit();
4354 static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4355 struct bnx2x_queue_state_params *cmd_params,
4356 struct client_init_ramrod_data *data)
4358 struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4362 /* IPv6 TPA supported for E2 and above only */
4363 data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, ¶ms->flags) *
4364 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4367 static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4368 struct bnx2x_queue_sp_obj *o,
4369 struct bnx2x_general_setup_params *params,
4370 struct client_init_general_data *gen_data,
4371 unsigned long *flags)
4373 gen_data->client_id = o->cl_id;
4375 if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4376 gen_data->statistics_counter_id =
4378 gen_data->statistics_en_flg = 1;
4379 gen_data->statistics_zero_flg =
4380 test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4382 gen_data->statistics_counter_id =
4383 DISABLE_STATISTIC_COUNTER_ID_VALUE;
4385 gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4386 gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4387 gen_data->sp_client_id = params->spcl_id;
4388 gen_data->mtu = cpu_to_le16(params->mtu);
4389 gen_data->func_id = o->func_id;
4392 gen_data->cos = params->cos;
4394 gen_data->traffic_type =
4395 test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4396 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4398 DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
4399 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4402 static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4403 struct bnx2x_txq_setup_params *params,
4404 struct client_init_tx_data *tx_data,
4405 unsigned long *flags)
4407 tx_data->enforce_security_flg =
4408 test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4409 tx_data->default_vlan =
4410 cpu_to_le16(params->default_vlan);
4411 tx_data->default_vlan_flg =
4412 test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4413 tx_data->tx_switching_flg =
4414 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4415 tx_data->anti_spoofing_flg =
4416 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4417 tx_data->force_default_pri_flg =
4418 test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
4420 tx_data->tx_status_block_id = params->fw_sb_id;
4421 tx_data->tx_sb_index_number = params->sb_cq_index;
4422 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4424 tx_data->tx_bd_page_base.lo =
4425 cpu_to_le32(U64_LO(params->dscr_map));
4426 tx_data->tx_bd_page_base.hi =
4427 cpu_to_le32(U64_HI(params->dscr_map));
4429 /* Don't configure any Tx switching mode during queue SETUP */
4433 static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4434 struct rxq_pause_params *params,
4435 struct client_init_rx_data *rx_data)
4437 /* flow control data */
4438 rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4439 rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4440 rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4441 rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4442 rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4443 rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4444 rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4447 static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4448 struct bnx2x_rxq_setup_params *params,
4449 struct client_init_rx_data *rx_data,
4450 unsigned long *flags)
4452 rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4453 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4454 rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
4455 CLIENT_INIT_RX_DATA_TPA_MODE;
4456 rx_data->vmqueue_mode_en_flg = 0;
4458 rx_data->cache_line_alignment_log_size =
4459 params->cache_line_log;
4460 rx_data->enable_dynamic_hc =
4461 test_bit(BNX2X_Q_FLG_DHC, flags);
4462 rx_data->max_sges_for_packet = params->max_sges_pkt;
4463 rx_data->client_qzone_id = params->cl_qzone_id;
4464 rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4466 /* Always start in DROP_ALL mode */
4467 rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4468 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4470 /* We don't set drop flags */
4471 rx_data->drop_ip_cs_err_flg = 0;
4472 rx_data->drop_tcp_cs_err_flg = 0;
4473 rx_data->drop_ttl0_flg = 0;
4474 rx_data->drop_udp_cs_err_flg = 0;
4475 rx_data->inner_vlan_removal_enable_flg =
4476 test_bit(BNX2X_Q_FLG_VLAN, flags);
4477 rx_data->outer_vlan_removal_enable_flg =
4478 test_bit(BNX2X_Q_FLG_OV, flags);
4479 rx_data->status_block_id = params->fw_sb_id;
4480 rx_data->rx_sb_index_number = params->sb_cq_index;
4481 rx_data->max_tpa_queues = params->max_tpa_queues;
4482 rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4483 rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4484 rx_data->bd_page_base.lo =
4485 cpu_to_le32(U64_LO(params->dscr_map));
4486 rx_data->bd_page_base.hi =
4487 cpu_to_le32(U64_HI(params->dscr_map));
4488 rx_data->sge_page_base.lo =
4489 cpu_to_le32(U64_LO(params->sge_map));
4490 rx_data->sge_page_base.hi =
4491 cpu_to_le32(U64_HI(params->sge_map));
4492 rx_data->cqe_page_base.lo =
4493 cpu_to_le32(U64_LO(params->rcq_map));
4494 rx_data->cqe_page_base.hi =
4495 cpu_to_le32(U64_HI(params->rcq_map));
4496 rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4498 if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
4499 rx_data->approx_mcast_engine_id = params->mcast_engine_id;
4500 rx_data->is_approx_mcast = 1;
4503 rx_data->rss_engine_id = params->rss_engine_id;
4505 /* silent vlan removal */
4506 rx_data->silent_vlan_removal_flg =
4507 test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4508 rx_data->silent_vlan_value =
4509 cpu_to_le16(params->silent_removal_value);
4510 rx_data->silent_vlan_mask =
4511 cpu_to_le16(params->silent_removal_mask);
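/*
 * Note on the tpa_en encoding above: test_bit() returns 0 or 1, so
 * multiplying by the firmware constant either sets or clears the
 * corresponding bit. Illustrative equivalent (not driver code):
 *
 *	if (test_bit(BNX2X_Q_FLG_TPA, flags))
 *		rx_data->tpa_en |= CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
 *	if (test_bit(BNX2X_Q_FLG_TPA_GRO, flags))
 *		rx_data->tpa_en |= CLIENT_INIT_RX_DATA_TPA_MODE;
 */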
4515 /* initialize the general, tx and rx parts of a queue object */
4516 static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4517 struct bnx2x_queue_state_params *cmd_params,
4518 struct client_init_ramrod_data *data)
4520 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4521 &cmd_params->params.setup.gen_params,
4522 &data->general,
4523 &cmd_params->params.setup.flags);
4525 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4526 &cmd_params->params.setup.txq_params,
4527 &data->tx,
4528 &cmd_params->params.setup.flags);
4530 bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4531 &cmd_params->params.setup.rxq_params,
4532 &data->rx,
4533 &cmd_params->params.setup.flags);
4535 bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4536 &cmd_params->params.setup.pause_params,
4537 &data->rx);
4540 /* initialize the general and tx parts of a tx-only queue object */
4541 static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4542 struct bnx2x_queue_state_params *cmd_params,
4543 struct tx_queue_init_ramrod_data *data)
4545 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4546 &cmd_params->params.tx_only.gen_params,
4547 &data->general,
4548 &cmd_params->params.tx_only.flags);
4550 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4551 &cmd_params->params.tx_only.txq_params,
4552 &data->tx,
4553 &cmd_params->params.tx_only.flags);
4555 DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x\n",
4556 cmd_params->q_obj->cids[0],
4557 data->tx.tx_bd_page_base.lo,
4558 data->tx.tx_bd_page_base.hi);
4562 * bnx2x_q_init - init HW/FW queue
4564 * @bp: device handle
4567 * HW/FW initial Queue configuration:
4568 * - HC: Rx and Tx
4569 * - CDU context validation
4572 static inline int bnx2x_q_init(struct bnx2x *bp,
4573 struct bnx2x_queue_state_params *params)
4575 struct bnx2x_queue_sp_obj *o = params->q_obj;
4576 struct bnx2x_queue_init_params *init = &params->params.init;
4577 u16 hc_usec;
4578 u8 cos;
4580 /* Tx HC configuration */
4581 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4582 test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
4583 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4585 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4586 init->tx.sb_cq_index,
4587 !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
4588 hc_usec);
4591 /* Rx HC configuration */
4592 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4593 test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4594 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4596 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4597 init->rx.sb_cq_index,
4598 !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4599 hc_usec);
4602 /* Set CDU context validation values */
4603 for (cos = 0; cos < o->max_cos; cos++) {
4604 DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
4605 o->cids[cos], cos);
4606 DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
4607 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4610 /* As no ramrod is sent, complete the command immediately */
4611 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4613 return 0;
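/*
 * Illustrative usage sketch (assumes an initialized queue object; the
 * bnx2x_sp_obj()/fp names mirror the driver but are assumptions here).
 * INIT is a driver-only transition, so it completes without a ramrod:
 *
 *	struct bnx2x_queue_state_params q_params = {NULL};
 *
 *	q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
 *	q_params.cmd = BNX2X_Q_CMD_INIT;
 *	q_params.params.init.tx.hc_rate =
 *		bp->tx_ticks ? 1000000 / bp->tx_ticks : 0;
 *	q_params.params.init.tx.fw_sb_id = fp->fw_sb_id;
 *	__set_bit(BNX2X_Q_FLG_HC, &q_params.params.init.tx.flags);
 *	rc = bnx2x_queue_state_change(bp, &q_params);
 */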
4619 static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4620 struct bnx2x_queue_state_params *params)
4622 struct bnx2x_queue_sp_obj *o = params->q_obj;
4623 struct client_init_ramrod_data *rdata =
4624 (struct client_init_ramrod_data *)o->rdata;
4625 dma_addr_t data_mapping = o->rdata_mapping;
4626 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4628 /* Clear the ramrod data */
4629 memset(rdata, 0, sizeof(*rdata));
4631 /* Fill the ramrod data */
4632 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4635 * No need for an explicit memory barrier here: the ordering of the
4636 * SPQ element write and the SPQ producer update (which involves a
4637 * memory read) is guaranteed by the full memory barrier inside
4638 * bnx2x_sp_post().
4639 */
4642 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4643 U64_HI(data_mapping),
4644 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4647 static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4648 struct bnx2x_queue_state_params *params)
4650 struct bnx2x_queue_sp_obj *o = params->q_obj;
4651 struct client_init_ramrod_data *rdata =
4652 (struct client_init_ramrod_data *)o->rdata;
4653 dma_addr_t data_mapping = o->rdata_mapping;
4654 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4656 /* Clear the ramrod data */
4657 memset(rdata, 0, sizeof(*rdata));
4659 /* Fill the ramrod data */
4660 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4661 bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4664 * No need for an explicit memory barrier here: the ordering of the
4665 * SPQ element write and the SPQ producer update (which involves a
4666 * memory read) is guaranteed by the full memory barrier inside
4667 * bnx2x_sp_post().
4668 */
4671 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4672 U64_HI(data_mapping),
4673 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4676 static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4677 struct bnx2x_queue_state_params *params)
4679 struct bnx2x_queue_sp_obj *o = params->q_obj;
4680 struct tx_queue_init_ramrod_data *rdata =
4681 (struct tx_queue_init_ramrod_data *)o->rdata;
4682 dma_addr_t data_mapping = o->rdata_mapping;
4683 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4684 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4685 &params->params.tx_only;
4686 u8 cid_index = tx_only_params->cid_index;
4689 if (cid_index >= o->max_cos) {
4690 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4691 o->cl_id, cid_index);
4692 return -EINVAL;
4695 DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
4696 tx_only_params->gen_params.cos,
4697 tx_only_params->gen_params.spcl_id);
4699 /* Clear the ramrod data */
4700 memset(rdata, 0, sizeof(*rdata));
4702 /* Fill the ramrod data */
4703 bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4705 DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
4706 o->cids[cid_index], rdata->general.client_id,
4707 rdata->general.sp_client_id, rdata->general.cos);
4710 * No need for an explicit memory barrier here: the ordering of the
4711 * SPQ element write and the SPQ producer update (which involves a
4712 * memory read) is guaranteed by the full memory barrier inside
4713 * bnx2x_sp_post().
4714 */
4717 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4718 U64_HI(data_mapping),
4719 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
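/*
 * Illustrative sketch: adding a second CoS Tx-only connection once the
 * queue is ACTIVE (the q_params local is an assumption; cid_index must
 * stay below the object's max_cos, as validated above):
 *
 *	q_params.cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
 *	q_params.params.tx_only.cid_index = 1;
 *	q_params.params.tx_only.gen_params.cos = 1;
 *	rc = bnx2x_queue_state_change(bp, &q_params);
 */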
4722 static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4723 struct bnx2x_queue_sp_obj *obj,
4724 struct bnx2x_queue_update_params *params,
4725 struct client_update_ramrod_data *data)
4727 /* Client ID of the client to update */
4728 data->client_id = obj->cl_id;
4730 /* Function ID of the client to update */
4731 data->func_id = obj->func_id;
4733 /* Default VLAN value */
4734 data->default_vlan = cpu_to_le16(params->def_vlan);
4736 /* Inner VLAN stripping */
4737 data->inner_vlan_removal_enable_flg =
4738 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4739 data->inner_vlan_removal_change_flg =
4740 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4741 &params->update_flags);
4743 /* Outer VLAN stripping */
4744 data->outer_vlan_removal_enable_flg =
4745 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4746 data->outer_vlan_removal_change_flg =
4747 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4748 &params->update_flags);
4750 /* Drop packets that have a source MAC that doesn't belong to this
4751 * Queue.
4752 */
4753 data->anti_spoofing_enable_flg =
4754 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4755 data->anti_spoofing_change_flg =
4756 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4758 /* Activate/Deactivate */
4759 data->activate_flg =
4760 test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4761 data->activate_change_flg =
4762 test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4764 /* Enable default VLAN */
4765 data->default_vlan_enable_flg =
4766 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4767 data->default_vlan_change_flg =
4768 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4769 &params->update_flags);
4771 /* silent vlan removal */
4772 data->silent_vlan_change_flg =
4773 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4774 &params->update_flags);
4775 data->silent_vlan_removal_flg =
4776 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4777 data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4778 data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
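/*
 * Note on the protocol above: every value travels with a matching
 * *_change_flg, and the firmware only applies values whose change flag
 * is set. A hedged sketch (update_params names the
 * struct bnx2x_queue_update_params being prepared): deactivating a
 * queue while leaving all VLAN attributes untouched sets only the
 * "change" bit:
 *
 *	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
 *		  &update_params->update_flags);
 *
 * With BNX2X_Q_UPDATE_ACTIVATE left clear, the new state is inactive.
 */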
4781 static inline int bnx2x_q_send_update(struct bnx2x *bp,
4782 struct bnx2x_queue_state_params *params)
4784 struct bnx2x_queue_sp_obj *o = params->q_obj;
4785 struct client_update_ramrod_data *rdata =
4786 (struct client_update_ramrod_data *)o->rdata;
4787 dma_addr_t data_mapping = o->rdata_mapping;
4788 struct bnx2x_queue_update_params *update_params =
4789 &params->params.update;
4790 u8 cid_index = update_params->cid_index;
4792 if (cid_index >= o->max_cos) {
4793 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4794 o->cl_id, cid_index);
4795 return -EINVAL;
4799 /* Clear the ramrod data */
4800 memset(rdata, 0, sizeof(*rdata));
4802 /* Fill the ramrod data */
4803 bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4806 * No need for an explicit memory barrier here: the ordering of the
4807 * SPQ element write and the SPQ producer update (which involves a
4808 * memory read) is guaranteed by the full memory barrier inside
4809 * bnx2x_sp_post().
4810 */
4813 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4814 o->cids[cid_index], U64_HI(data_mapping),
4815 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4819 * bnx2x_q_send_deactivate - send DEACTIVATE command
4821 * @bp: device handle
4824 * Deactivation is implemented using the UPDATE command.
4826 static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4827 struct bnx2x_queue_state_params *params)
4829 struct bnx2x_queue_update_params *update = &params->params.update;
4831 memset(update, 0, sizeof(*update));
4833 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4835 return bnx2x_q_send_update(bp, params);
4839 * bnx2x_q_send_activate - send ACTIVATE command
4841 * @bp: device handle
4844 * Activation is implemented using the UPDATE command.
4846 static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4847 struct bnx2x_queue_state_params *params)
4849 struct bnx2x_queue_update_params *update = &params->params.update;
4851 memset(update, 0, sizeof(*update));
4853 __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4854 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4856 return bnx2x_q_send_update(bp, params);
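/*
 * Illustrative sketch: both helpers above are reached through the
 * generic entry point (the q_params local is an assumption):
 *
 *	q_params.cmd = BNX2X_Q_CMD_ACTIVATE;
 *	rc = bnx2x_queue_state_change(bp, &q_params);
 *
 * bnx2x_queue_state_change() first validates the request against
 * bnx2x_queue_chk_transition() below, then dispatches to these senders.
 */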
4859 static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
4860 struct bnx2x_queue_state_params *params)
4862 /* TODO: Not implemented yet. */
4863 return -1;
4866 static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4867 struct bnx2x_queue_state_params *params)
4869 struct bnx2x_queue_sp_obj *o = params->q_obj;
4871 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4872 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
4873 ETH_CONNECTION_TYPE);
4876 static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4877 struct bnx2x_queue_state_params *params)
4879 struct bnx2x_queue_sp_obj *o = params->q_obj;
4880 u8 cid_idx = params->params.cfc_del.cid_index;
4882 if (cid_idx >= o->max_cos) {
4883 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4884 o->cl_id, cid_idx);
4885 return -EINVAL;
4888 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4889 o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
4892 static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4893 struct bnx2x_queue_state_params *params)
4895 struct bnx2x_queue_sp_obj *o = params->q_obj;
4896 u8 cid_index = params->params.terminate.cid_index;
4898 if (cid_index >= o->max_cos) {
4899 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4900 o->cl_id, cid_index);
4901 return -EINVAL;
4904 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4905 o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
4908 static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4909 struct bnx2x_queue_state_params *params)
4911 struct bnx2x_queue_sp_obj *o = params->q_obj;
4913 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4914 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
4915 ETH_CONNECTION_TYPE);
4918 static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
4919 struct bnx2x_queue_state_params *params)
4921 switch (params->cmd) {
4922 case BNX2X_Q_CMD_INIT:
4923 return bnx2x_q_init(bp, params);
4924 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4925 return bnx2x_q_send_setup_tx_only(bp, params);
4926 case BNX2X_Q_CMD_DEACTIVATE:
4927 return bnx2x_q_send_deactivate(bp, params);
4928 case BNX2X_Q_CMD_ACTIVATE:
4929 return bnx2x_q_send_activate(bp, params);
4930 case BNX2X_Q_CMD_UPDATE:
4931 return bnx2x_q_send_update(bp, params);
4932 case BNX2X_Q_CMD_UPDATE_TPA:
4933 return bnx2x_q_send_update_tpa(bp, params);
4934 case BNX2X_Q_CMD_HALT:
4935 return bnx2x_q_send_halt(bp, params);
4936 case BNX2X_Q_CMD_CFC_DEL:
4937 return bnx2x_q_send_cfc_del(bp, params);
4938 case BNX2X_Q_CMD_TERMINATE:
4939 return bnx2x_q_send_terminate(bp, params);
4940 case BNX2X_Q_CMD_EMPTY:
4941 return bnx2x_q_send_empty(bp, params);
4942 default:
4943 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4944 return -EINVAL;
4948 static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
4949 struct bnx2x_queue_state_params *params)
4951 switch (params->cmd) {
4952 case BNX2X_Q_CMD_SETUP:
4953 return bnx2x_q_send_setup_e1x(bp, params);
4954 case BNX2X_Q_CMD_INIT:
4955 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4956 case BNX2X_Q_CMD_DEACTIVATE:
4957 case BNX2X_Q_CMD_ACTIVATE:
4958 case BNX2X_Q_CMD_UPDATE:
4959 case BNX2X_Q_CMD_UPDATE_TPA:
4960 case BNX2X_Q_CMD_HALT:
4961 case BNX2X_Q_CMD_CFC_DEL:
4962 case BNX2X_Q_CMD_TERMINATE:
4963 case BNX2X_Q_CMD_EMPTY:
4964 return bnx2x_queue_send_cmd_cmn(bp, params);
4965 default:
4966 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4967 return -EINVAL;
4971 static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
4972 struct bnx2x_queue_state_params *params)
4974 switch (params->cmd) {
4975 case BNX2X_Q_CMD_SETUP:
4976 return bnx2x_q_send_setup_e2(bp, params);
4977 case BNX2X_Q_CMD_INIT:
4978 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4979 case BNX2X_Q_CMD_DEACTIVATE:
4980 case BNX2X_Q_CMD_ACTIVATE:
4981 case BNX2X_Q_CMD_UPDATE:
4982 case BNX2X_Q_CMD_UPDATE_TPA:
4983 case BNX2X_Q_CMD_HALT:
4984 case BNX2X_Q_CMD_CFC_DEL:
4985 case BNX2X_Q_CMD_TERMINATE:
4986 case BNX2X_Q_CMD_EMPTY:
4987 return bnx2x_queue_send_cmd_cmn(bp, params);
4988 default:
4989 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4990 return -EINVAL;
4995 * bnx2x_queue_chk_transition - check state machine of a regular Queue
4997 * @bp: device handle
5002 * It both checks if the requested command is legal in the current
5003 * state and, if it is, sets a `next_state' in the object
5004 * that will be used in the completion flow to set the `state'
5005 * of the object.
5007 * returns 0 if a requested command is a legal transition,
5008 * -EINVAL otherwise.
5010 static int bnx2x_queue_chk_transition(struct bnx2x *bp,
5011 struct bnx2x_queue_sp_obj *o,
5012 struct bnx2x_queue_state_params *params)
5014 enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
5015 enum bnx2x_queue_cmd cmd = params->cmd;
5016 struct bnx2x_queue_update_params *update_params =
5017 &params->params.update;
5018 u8 next_tx_only = o->num_tx_only;
5021 * Forget all pending for completion commands if a driver only state
5022 * transition has been requested.
5024 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5025 o->pending = 0;
5026 o->next_state = BNX2X_Q_STATE_MAX;
5030 * Don't allow a next state transition if we are in the middle of
5031 * the previous one.
5032 */
5033 if (o->pending) {
5034 BNX2X_ERR("Blocking transition since pending was %lx\n",
5035 o->pending);
5036 return -EBUSY;
5037 }
5039 switch (state) {
5040 case BNX2X_Q_STATE_RESET:
5041 if (cmd == BNX2X_Q_CMD_INIT)
5042 next_state = BNX2X_Q_STATE_INITIALIZED;
5044 break;
5045 case BNX2X_Q_STATE_INITIALIZED:
5046 if (cmd == BNX2X_Q_CMD_SETUP) {
5047 if (test_bit(BNX2X_Q_FLG_ACTIVE,
5048 &params->params.setup.flags))
5049 next_state = BNX2X_Q_STATE_ACTIVE;
5050 else
5051 next_state = BNX2X_Q_STATE_INACTIVE;
5052 }
5054 break;
5055 case BNX2X_Q_STATE_ACTIVE:
5056 if (cmd == BNX2X_Q_CMD_DEACTIVATE)
5057 next_state = BNX2X_Q_STATE_INACTIVE;
5059 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5060 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5061 next_state = BNX2X_Q_STATE_ACTIVE;
5063 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5064 next_state = BNX2X_Q_STATE_MULTI_COS;
5065 next_tx_only = 1;
5066 }
5068 else if (cmd == BNX2X_Q_CMD_HALT)
5069 next_state = BNX2X_Q_STATE_STOPPED;
5071 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5072 /* If "active" state change is requested, update the
5073 * state accordingly.
5074 */
5075 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5076 &update_params->update_flags) &&
5077 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5078 &update_params->update_flags))
5079 next_state = BNX2X_Q_STATE_INACTIVE;
5080 else
5081 next_state = BNX2X_Q_STATE_ACTIVE;
5082 }
5084 break;
5085 case BNX2X_Q_STATE_MULTI_COS:
5086 if (cmd == BNX2X_Q_CMD_TERMINATE)
5087 next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
5089 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5090 next_state = BNX2X_Q_STATE_MULTI_COS;
5091 next_tx_only = o->num_tx_only + 1;
5092 }
5094 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5095 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5096 next_state = BNX2X_Q_STATE_MULTI_COS;
5098 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5099 /* If "active" state change is requested, update the
5100 * state accordingly.
5101 */
5102 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5103 &update_params->update_flags) &&
5104 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5105 &update_params->update_flags))
5106 next_state = BNX2X_Q_STATE_INACTIVE;
5107 else
5108 next_state = BNX2X_Q_STATE_MULTI_COS;
5109 }
5111 break;
5112 case BNX2X_Q_STATE_MCOS_TERMINATED:
5113 if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5114 next_tx_only = o->num_tx_only - 1;
5115 if (next_tx_only == 0)
5116 next_state = BNX2X_Q_STATE_ACTIVE;
5117 else
5118 next_state = BNX2X_Q_STATE_MULTI_COS;
5119 }
5121 break;
5122 case BNX2X_Q_STATE_INACTIVE:
5123 if (cmd == BNX2X_Q_CMD_ACTIVATE)
5124 next_state = BNX2X_Q_STATE_ACTIVE;
5126 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5127 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5128 next_state = BNX2X_Q_STATE_INACTIVE;
5130 else if (cmd == BNX2X_Q_CMD_HALT)
5131 next_state = BNX2X_Q_STATE_STOPPED;
5133 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5134 /* If "active" state change is requested, update the
5135 * state accordingly.
5136 */
5137 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5138 &update_params->update_flags) &&
5139 test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5140 &update_params->update_flags)) {
5141 if (o->num_tx_only == 0)
5142 next_state = BNX2X_Q_STATE_ACTIVE;
5143 else /* tx only queues exist for this queue */
5144 next_state = BNX2X_Q_STATE_MULTI_COS;
5145 } else
5146 next_state = BNX2X_Q_STATE_INACTIVE;
5147 }
5149 break;
5150 case BNX2X_Q_STATE_STOPPED:
5151 if (cmd == BNX2X_Q_CMD_TERMINATE)
5152 next_state = BNX2X_Q_STATE_TERMINATED;
5154 break;
5155 case BNX2X_Q_STATE_TERMINATED:
5156 if (cmd == BNX2X_Q_CMD_CFC_DEL)
5157 next_state = BNX2X_Q_STATE_RESET;
5159 break;
5160 default:
5161 BNX2X_ERR("Illegal state: %d\n", state);
5162 }
5164 /* Transition is assured */
5165 if (next_state != BNX2X_Q_STATE_MAX) {
5166 DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5167 state, cmd, next_state);
5168 o->next_state = next_state;
5169 o->next_tx_only = next_tx_only;
5170 return 0;
5171 }
5173 DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5175 return -EINVAL;
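/*
 * For reference, the life cycle encoded by the state machine above
 * (sketch derived from the switch cases):
 *
 *	RESET -INIT-> INITIALIZED -SETUP-> ACTIVE or INACTIVE
 *	ACTIVE/INACTIVE -HALT-> STOPPED -TERMINATE-> TERMINATED
 *	TERMINATED -CFC_DEL-> RESET
 *
 * Tx-only connections detour through MULTI_COS and MCOS_TERMINATED via
 * SETUP_TX_ONLY, TERMINATE and CFC_DEL, tracked by next_tx_only.
 */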
5178 void bnx2x_init_queue_obj(struct bnx2x *bp,
5179 struct bnx2x_queue_sp_obj *obj,
5180 u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5181 void *rdata,
5182 dma_addr_t rdata_mapping, unsigned long type)
5184 memset(obj, 0, sizeof(*obj));
5186 /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
5187 BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5189 memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5190 obj->max_cos = cid_cnt;
5191 obj->cl_id = cl_id;
5192 obj->func_id = func_id;
5193 obj->rdata = rdata;
5194 obj->rdata_mapping = rdata_mapping;
5195 obj->type = type;
5196 obj->next_state = BNX2X_Q_STATE_MAX;
5198 if (CHIP_IS_E1x(bp))
5199 obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5201 obj->send_cmd = bnx2x_queue_send_cmd_e2;
5203 obj->check_transition = bnx2x_queue_chk_transition;
5205 obj->complete_cmd = bnx2x_queue_comp_cmd;
5206 obj->wait_comp = bnx2x_queue_wait_comp;
5207 obj->set_pending = bnx2x_queue_set_pending;
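/*
 * Illustrative initialization sketch (the fp/cids locals and the
 * bnx2x_sp()/bnx2x_sp_mapping() ramrod buffer are assumptions about
 * the caller):
 *
 *	u32 cids[BNX2X_MULTI_TX_COS];
 *
 *	cids[0] = HW_CID(bp, fp->cid);
 *	bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
 *			     cids, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
 *			     bnx2x_sp_mapping(bp, q_rdata),
 *			     BNX2X_OBJ_TYPE_RX_TX);
 */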
5210 /* return a queue object's logical state */
5211 int bnx2x_get_q_logical_state(struct bnx2x *bp,
5212 struct bnx2x_queue_sp_obj *obj)
5214 switch (obj->state) {
5215 case BNX2X_Q_STATE_ACTIVE:
5216 case BNX2X_Q_STATE_MULTI_COS:
5217 return BNX2X_Q_LOGICAL_STATE_ACTIVE;
5218 case BNX2X_Q_STATE_RESET:
5219 case BNX2X_Q_STATE_INITIALIZED:
5220 case BNX2X_Q_STATE_MCOS_TERMINATED:
5221 case BNX2X_Q_STATE_INACTIVE:
5222 case BNX2X_Q_STATE_STOPPED:
5223 case BNX2X_Q_STATE_TERMINATED:
5224 case BNX2X_Q_STATE_FLRED:
5225 return BNX2X_Q_LOGICAL_STATE_STOPPED;
5226 default:
5227 return -EINVAL;
5231 /********************** Function state object *********************************/
5232 enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
5233 struct bnx2x_func_sp_obj *o)
5235 /* in the middle of a transaction - return INVALID state */
5236 if (o->pending)
5237 return BNX2X_F_STATE_MAX;
5239 /* ensure the order of reading of o->pending and o->state:
5240 * o->pending should be read first
5241 */
5243 rmb();
5244 return o->state;
5248 static int bnx2x_func_wait_comp(struct bnx2x *bp,
5249 struct bnx2x_func_sp_obj *o,
5250 enum bnx2x_func_cmd cmd)
5252 return bnx2x_state_wait(bp, cmd, &o->pending);
5256 * bnx2x_func_state_change_comp - complete the state machine transition
5258 * @bp: device handle
5262 * Called on state change transition. Completes the state
5263 * machine transition only - no HW interaction.
5265 static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
5266 struct bnx2x_func_sp_obj *o,
5267 enum bnx2x_func_cmd cmd)
5269 unsigned long cur_pending = o->pending;
5271 if (!test_and_clear_bit(cmd, &cur_pending)) {
5272 BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
5273 cmd, BP_FUNC(bp), o->state,
5274 cur_pending, o->next_state);
5275 return -EINVAL;
5278 DP(BNX2X_MSG_SP,
5279 "Completing command %d for func %d, setting state to %d\n",
5280 cmd, BP_FUNC(bp), o->next_state);
5282 o->state = o->next_state;
5283 o->next_state = BNX2X_F_STATE_MAX;
5285 /* It's important that o->state and o->next_state are
5286 * updated before o->pending.
5287 */
5288 wmb();
5290 clear_bit(cmd, &o->pending);
5291 smp_mb__after_clear_bit();
5293 return 0;
5297 * bnx2x_func_comp_cmd - complete the state change command
5299 * @bp: device handle
5303 * Checks that the arrived completion is expected.
5305 static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5306 struct bnx2x_func_sp_obj *o,
5307 enum bnx2x_func_cmd cmd)
5309 /* Complete the state machine part first, check if it's a
5310 * legal completion.
5311 */
5312 int rc = bnx2x_func_state_change_comp(bp, o, cmd);
5313 return rc;
5317 * bnx2x_func_chk_transition - perform function state machine transition
5319 * @bp: device handle
5323 * It both checks if the requested command is legal in the current
5324 * state and, if it is, sets a `next_state' in the object
5325 * that will be used in the completion flow to set the `state'
5326 * of the object.
5328 * returns 0 if a requested command is a legal transition,
5329 * -EINVAL otherwise.
5331 static int bnx2x_func_chk_transition(struct bnx2x *bp,
5332 struct bnx2x_func_sp_obj *o,
5333 struct bnx2x_func_state_params *params)
5335 enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5336 enum bnx2x_func_cmd cmd = params->cmd;
5339 * Forget all pending for completion commands if a driver only state
5340 * transition has been requested.
5342 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5343 o->pending = 0;
5344 o->next_state = BNX2X_F_STATE_MAX;
5348 * Don't allow a next state transition if we are in the middle of
5349 * the previous one.
5350 */
5351 if (o->pending)
5352 return -EBUSY;
5354 switch (state) {
5355 case BNX2X_F_STATE_RESET:
5356 if (cmd == BNX2X_F_CMD_HW_INIT)
5357 next_state = BNX2X_F_STATE_INITIALIZED;
5359 break;
5360 case BNX2X_F_STATE_INITIALIZED:
5361 if (cmd == BNX2X_F_CMD_START)
5362 next_state = BNX2X_F_STATE_STARTED;
5364 else if (cmd == BNX2X_F_CMD_HW_RESET)
5365 next_state = BNX2X_F_STATE_RESET;
5367 break;
5368 case BNX2X_F_STATE_STARTED:
5369 if (cmd == BNX2X_F_CMD_STOP)
5370 next_state = BNX2X_F_STATE_INITIALIZED;
5371 /* afex ramrods can be sent only in started mode, and only
5372 * if not pending for function_stop ramrod completion
5373 * for these events - next state remains STARTED.
5375 else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
5376 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5377 next_state = BNX2X_F_STATE_STARTED;
5379 else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
5380 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5381 next_state = BNX2X_F_STATE_STARTED;
5383 /* Switch_update ramrod can be sent in either started or
5384 * tx_stopped state, and it doesn't change the state.
5386 else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5387 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5388 next_state = BNX2X_F_STATE_STARTED;
5390 else if (cmd == BNX2X_F_CMD_TX_STOP)
5391 next_state = BNX2X_F_STATE_TX_STOPPED;
5393 break;
5394 case BNX2X_F_STATE_TX_STOPPED:
5395 if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5396 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5397 next_state = BNX2X_F_STATE_TX_STOPPED;
5399 else if (cmd == BNX2X_F_CMD_TX_START)
5400 next_state = BNX2X_F_STATE_STARTED;
5404 BNX2X_ERR("Unknown state: %d\n", state);
5407 /* Transition is assured */
5408 if (next_state != BNX2X_F_STATE_MAX) {
5409 DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5410 state, cmd, next_state);
5411 o->next_state = next_state;
5412 return 0;
5413 }
5415 DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
5416 state, cmd);
5418 return -EINVAL;
5422 * bnx2x_func_init_func - performs HW init at function stage
5424 * @bp: device handle
5427 * Init HW when the current phase is
5428 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
5429 * HW blocks.
5431 static inline int bnx2x_func_init_func(struct bnx2x *bp,
5432 const struct bnx2x_func_sp_drv_ops *drv)
5434 return drv->init_hw_func(bp);
5438 * bnx2x_func_init_port - performs HW init at port stage
5440 * @bp: device handle
5443 * Init HW when the current phase is
5444 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5445 * FUNCTION-only HW blocks.
5448 static inline int bnx2x_func_init_port(struct bnx2x *bp,
5449 const struct bnx2x_func_sp_drv_ops *drv)
5451 int rc = drv->init_hw_port(bp);
5452 if (rc)
5453 return rc;
5455 return bnx2x_func_init_func(bp, drv);
5459 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5461 * @bp: device handle
5464 * Init HW when the current phase is
5465 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5466 * PORT-only and FUNCTION-only HW blocks.
5468 static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5469 const struct bnx2x_func_sp_drv_ops *drv)
5471 int rc = drv->init_hw_cmn_chip(bp);
5472 if (rc)
5473 return rc;
5475 return bnx2x_func_init_port(bp, drv);
5479 * bnx2x_func_init_cmn - performs HW init at common stage
5481 * @bp: device handle
5484 * Init HW when the current phase is
5485 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
5486 * PORT-only and FUNCTION-only HW blocks.
5488 static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5489 const struct bnx2x_func_sp_drv_ops *drv)
5491 int rc = drv->init_hw_cmn(bp);
5492 if (rc)
5493 return rc;
5495 return bnx2x_func_init_port(bp, drv);
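/*
 * The init helpers above deliberately cascade, so whichever load phase
 * the MCP grants, every narrower-scope stage still runs exactly once:
 *
 *	bnx2x_func_init_cmn_chip() -> init_hw_cmn_chip()
 *	  -> bnx2x_func_init_port() -> init_hw_port()
 *	       -> bnx2x_func_init_func() -> init_hw_func()
 *
 * (bnx2x_func_init_cmn() enters the same chain at the port stage.)
 */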
5498 static int bnx2x_func_hw_init(struct bnx2x *bp,
5499 struct bnx2x_func_state_params *params)
5501 u32 load_code = params->params.hw_init.load_phase;
5502 struct bnx2x_func_sp_obj *o = params->f_obj;
5503 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5504 int rc = 0;
5506 DP(BNX2X_MSG_SP, "function %d load_code %x\n",
5507 BP_ABS_FUNC(bp), load_code);
5509 /* Prepare buffers for unzipping the FW */
5510 rc = drv->gunzip_init(bp);
5511 if (rc)
5512 return rc;
5514 /* Prepare FW */
5515 rc = drv->init_fw(bp);
5516 if (rc) {
5517 BNX2X_ERR("Error loading firmware\n");
5518 goto init_err;
5519 }
5521 /* Handle the beginning of the COMMON_XXX phases separately... */
5522 switch (load_code) {
5523 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5524 rc = bnx2x_func_init_cmn_chip(bp, drv);
5525 if (rc)
5526 goto init_err;
5528 break;
5529 case FW_MSG_CODE_DRV_LOAD_COMMON:
5530 rc = bnx2x_func_init_cmn(bp, drv);
5531 if (rc)
5532 goto init_err;
5534 break;
5535 case FW_MSG_CODE_DRV_LOAD_PORT:
5536 rc = bnx2x_func_init_port(bp, drv);
5537 if (rc)
5538 goto init_err;
5540 break;
5541 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5542 rc = bnx2x_func_init_func(bp, drv);
5543 if (rc)
5544 goto init_err;
5546 break;
5547 default:
5548 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5549 rc = -EINVAL;
5550 }
5552 init_err:
5553 drv->gunzip_end(bp);
5555 /* In case of success, complete the command immediately: no ramrods
5556 * have been sent.
5557 */
5558 if (!rc)
5559 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
5561 return rc;
5565 * bnx2x_func_reset_func - reset HW at function stage
5567 * @bp: device handle
5570 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5571 * FUNCTION-only HW blocks.
5573 static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5574 const struct bnx2x_func_sp_drv_ops *drv)
5576 drv->reset_hw_func(bp);
5580 * bnx2x_func_reset_port - reset HW at port stage
5582 * @bp: device handle
5585 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5586 * FUNCTION-only and PORT-only HW blocks.
5590 * It's important to call reset_port before reset_func() as the last thing
5591 * reset_func() does is pf_disable(), which disables PGLUE_B and thus
5592 * makes any further DMAE transactions impossible.
5594 static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5595 const struct bnx2x_func_sp_drv_ops *drv)
5597 drv->reset_hw_port(bp);
5598 bnx2x_func_reset_func(bp, drv);
5602 * bnx2x_func_reset_cmn - reset HW at common stage
5604 * @bp: device handle
5607 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5608 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5609 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5611 static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5612 const struct bnx2x_func_sp_drv_ops *drv)
5614 bnx2x_func_reset_port(bp, drv);
5615 drv->reset_hw_cmn(bp);
5619 static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5620 struct bnx2x_func_state_params *params)
5622 u32 reset_phase = params->params.hw_reset.reset_phase;
5623 struct bnx2x_func_sp_obj *o = params->f_obj;
5624 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5626 DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
5627 reset_phase);
5629 switch (reset_phase) {
5630 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5631 bnx2x_func_reset_cmn(bp, drv);
5632 break;
5633 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5634 bnx2x_func_reset_port(bp, drv);
5635 break;
5636 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5637 bnx2x_func_reset_func(bp, drv);
5638 break;
5639 default:
5640 BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5641 reset_phase);
5642 break;
5643 }
5645 /* Complete the command immediately: no ramrods have been sent. */
5646 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5648 return 0;
5651 static inline int bnx2x_func_send_start(struct bnx2x *bp,
5652 struct bnx2x_func_state_params *params)
5654 struct bnx2x_func_sp_obj *o = params->f_obj;
5655 struct function_start_data *rdata =
5656 (struct function_start_data *)o->rdata;
5657 dma_addr_t data_mapping = o->rdata_mapping;
5658 struct bnx2x_func_start_params *start_params = &params->params.start;
5660 memset(rdata, 0, sizeof(*rdata));
5662 /* Fill the ramrod data with provided parameters */
5663 rdata->function_mode = (u8)start_params->mf_mode;
5664 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
5665 rdata->path_id = BP_PATH(bp);
5666 rdata->network_cos_mode = start_params->network_cos_mode;
5669 * No need for an explicit memory barrier here: the ordering of the
5670 * SPQ element write and the SPQ producer update (which involves a
5671 * memory read) is guaranteed by the full memory barrier inside
5672 * bnx2x_sp_post().
5673 */
5676 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5677 U64_HI(data_mapping),
5678 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
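/*
 * Illustrative sketch of issuing START (field values are assumptions;
 * compare the function-start path in the driver proper):
 *
 *	struct bnx2x_func_state_params func_params = {NULL};
 *
 *	func_params.f_obj = &bp->func_obj;
 *	func_params.cmd = BNX2X_F_CMD_START;
 *	func_params.params.start.mf_mode = bp->mf_mode;
 *	func_params.params.start.sd_vlan_tag = bp->mf_ov;
 *	rc = bnx2x_func_state_change(bp, &func_params);
 */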
5681 static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
5682 struct bnx2x_func_state_params *params)
5684 struct bnx2x_func_sp_obj *o = params->f_obj;
5685 struct function_update_data *rdata =
5686 (struct function_update_data *)o->rdata;
5687 dma_addr_t data_mapping = o->rdata_mapping;
5688 struct bnx2x_func_switch_update_params *switch_update_params =
5689 &params->params.switch_update;
5691 memset(rdata, 0, sizeof(*rdata));
5693 /* Fill the ramrod data with provided parameters */
5694 rdata->tx_switch_suspend_change_flg = 1;
5695 rdata->tx_switch_suspend = switch_update_params->suspend;
5696 rdata->echo = SWITCH_UPDATE;
5698 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5699 U64_HI(data_mapping),
5700 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5703 static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5704 struct bnx2x_func_state_params *params)
5706 struct bnx2x_func_sp_obj *o = params->f_obj;
5707 struct function_update_data *rdata =
5708 (struct function_update_data *)o->afex_rdata;
5709 dma_addr_t data_mapping = o->afex_rdata_mapping;
5710 struct bnx2x_func_afex_update_params *afex_update_params =
5711 &params->params.afex_update;
5713 memset(rdata, 0, sizeof(*rdata));
5715 /* Fill the ramrod data with provided parameters */
5716 rdata->vif_id_change_flg = 1;
5717 rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
5718 rdata->afex_default_vlan_change_flg = 1;
5719 rdata->afex_default_vlan =
5720 cpu_to_le16(afex_update_params->afex_default_vlan);
5721 rdata->allowed_priorities_change_flg = 1;
5722 rdata->allowed_priorities = afex_update_params->allowed_priorities;
5723 rdata->echo = AFEX_UPDATE;
5725 /* No need for an explicit memory barrier here: the ordering of the
5726 * SPQ element write and the SPQ producer update (which involves a
5727 * memory read) is guaranteed by the full memory barrier inside
5728 * bnx2x_sp_post().
5729 */
5731 DP(BNX2X_MSG_SP,
5732 "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
5733 rdata->vif_id,
5734 rdata->afex_default_vlan, rdata->allowed_priorities);
5736 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5737 U64_HI(data_mapping),
5738 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5741 static
5742 inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
5743 struct bnx2x_func_state_params *params)
5745 struct bnx2x_func_sp_obj *o = params->f_obj;
5746 struct afex_vif_list_ramrod_data *rdata =
5747 (struct afex_vif_list_ramrod_data *)o->afex_rdata;
5748 struct bnx2x_func_afex_viflists_params *afex_vif_params =
5749 &params->params.afex_viflists;
5750 u64 *p_rdata = (u64 *)rdata;
5752 memset(rdata, 0, sizeof(*rdata));
5754 /* Fill the ramrod data with provided parameters */
5755 rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index);
5756 rdata->func_bit_map = afex_vif_params->func_bit_map;
5757 rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
5758 rdata->func_to_clear = afex_vif_params->func_to_clear;
5760 /* carry the sub-command type in the echo field */
5761 rdata->echo = afex_vif_params->afex_vif_list_command;
5763 /* No need for an explicit memory barrier here: the ordering of the
5764 * SPQ element write and the SPQ producer update (which involves a
5765 * memory read) is guaranteed by the full memory barrier inside
5766 * bnx2x_sp_post().
5767 */
5770 DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
5771 rdata->afex_vif_list_command, rdata->vif_list_index,
5772 rdata->func_bit_map, rdata->func_to_clear);
5774 /* this ramrod sends data directly and not through DMA mapping */
5775 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5776 U64_HI(*p_rdata), U64_LO(*p_rdata),
5777 NONE_CONNECTION_TYPE);
5780 static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5781 struct bnx2x_func_state_params *params)
5783 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
5784 NONE_CONNECTION_TYPE);
5787 static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
5788 struct bnx2x_func_state_params *params)
5790 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
5791 NONE_CONNECTION_TYPE);
5793 static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5794 struct bnx2x_func_state_params *params)
5796 struct bnx2x_func_sp_obj *o = params->f_obj;
5797 struct flow_control_configuration *rdata =
5798 (struct flow_control_configuration *)o->rdata;
5799 dma_addr_t data_mapping = o->rdata_mapping;
5800 struct bnx2x_func_tx_start_params *tx_start_params =
5801 &params->params.tx_start;
5802 int i;
5804 memset(rdata, 0, sizeof(*rdata));
5806 rdata->dcb_enabled = tx_start_params->dcb_enabled;
5807 rdata->dcb_version = tx_start_params->dcb_version;
5808 rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
5810 for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5811 rdata->traffic_type_to_priority_cos[i] =
5812 tx_start_params->traffic_type_to_priority_cos[i];
5814 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5815 U64_HI(data_mapping),
5816 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5819 static int bnx2x_func_send_cmd(struct bnx2x *bp,
5820 struct bnx2x_func_state_params *params)
5822 switch (params->cmd) {
5823 case BNX2X_F_CMD_HW_INIT:
5824 return bnx2x_func_hw_init(bp, params);
5825 case BNX2X_F_CMD_START:
5826 return bnx2x_func_send_start(bp, params);
5827 case BNX2X_F_CMD_STOP:
5828 return bnx2x_func_send_stop(bp, params);
5829 case BNX2X_F_CMD_HW_RESET:
5830 return bnx2x_func_hw_reset(bp, params);
5831 case BNX2X_F_CMD_AFEX_UPDATE:
5832 return bnx2x_func_send_afex_update(bp, params);
5833 case BNX2X_F_CMD_AFEX_VIFLISTS:
5834 return bnx2x_func_send_afex_viflists(bp, params);
5835 case BNX2X_F_CMD_TX_STOP:
5836 return bnx2x_func_send_tx_stop(bp, params);
5837 case BNX2X_F_CMD_TX_START:
5838 return bnx2x_func_send_tx_start(bp, params);
5839 case BNX2X_F_CMD_SWITCH_UPDATE:
5840 return bnx2x_func_send_switch_update(bp, params);
5841 default:
5842 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5843 return -EINVAL;
5847 void bnx2x_init_func_obj(struct bnx2x *bp,
5848 struct bnx2x_func_sp_obj *obj,
5849 void *rdata, dma_addr_t rdata_mapping,
5850 void *afex_rdata, dma_addr_t afex_rdata_mapping,
5851 struct bnx2x_func_sp_drv_ops *drv_iface)
5853 memset(obj, 0, sizeof(*obj));
5855 mutex_init(&obj->one_pending_mutex);
5857 obj->rdata = rdata;
5858 obj->rdata_mapping = rdata_mapping;
5859 obj->afex_rdata = afex_rdata;
5860 obj->afex_rdata_mapping = afex_rdata_mapping;
5861 obj->send_cmd = bnx2x_func_send_cmd;
5862 obj->check_transition = bnx2x_func_chk_transition;
5863 obj->complete_cmd = bnx2x_func_comp_cmd;
5864 obj->wait_comp = bnx2x_func_wait_comp;
5866 obj->drv = drv_iface;
5870 * bnx2x_func_state_change - perform Function state change transition
5872 * @bp: device handle
5873 * @params: parameters to perform the transaction
5875 * returns 0 in case of a successfully completed transition,
5876 * a negative error code in case of failure, or a positive
5877 * (EBUSY) value if a completion is still pending (possible
5878 * only if RAMROD_COMP_WAIT is not set in params->ramrod_flags
5879 * for asynchronous commands).
5882 int bnx2x_func_state_change(struct bnx2x *bp,
5883 struct bnx2x_func_state_params *params)
5885 struct bnx2x_func_sp_obj *o = params->f_obj;
5886 int rc, cnt = 300;
5887 enum bnx2x_func_cmd cmd = params->cmd;
5888 unsigned long *pending = &o->pending;
5890 mutex_lock(&o->one_pending_mutex);
5892 /* Check that the requested transition is legal */
5893 rc = o->check_transition(bp, o, params);
5894 if ((rc == -EBUSY) &&
5895 (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
5896 while ((rc == -EBUSY) && (--cnt > 0)) {
5897 mutex_unlock(&o->one_pending_mutex);
5898 msleep(10);
5899 mutex_lock(&o->one_pending_mutex);
5900 rc = o->check_transition(bp, o, params);
5901 }
5902 if (rc == -EBUSY) {
5903 mutex_unlock(&o->one_pending_mutex);
5904 BNX2X_ERR("timeout waiting for previous ramrod completion\n");
5905 return rc;
5906 }
5907 } else if (rc) {
5908 mutex_unlock(&o->one_pending_mutex);
5909 return rc;
5910 }
5912 /* Set "pending" bit */
5913 set_bit(cmd, pending);
5915 /* Don't send a command if only driver cleanup was requested */
5916 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5917 bnx2x_func_state_change_comp(bp, o, cmd);
5918 mutex_unlock(&o->one_pending_mutex);
5919 } else {
5920 /* Send a ramrod */
5921 rc = o->send_cmd(bp, params);
5923 mutex_unlock(&o->one_pending_mutex);
5925 if (rc) {
5926 o->next_state = BNX2X_F_STATE_MAX;
5927 clear_bit(cmd, pending);
5928 smp_mb__after_clear_bit();
5929 return rc;
5930 }
5932 if (test_bit(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) {
5933 rc = o->wait_comp(bp, o, cmd);
5934 if (rc)
5935 return rc;
5937 return 0;
5938 }
5939 }
5941 return !!test_bit(cmd, pending);
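/*
 * Illustrative end-to-end sketch (assumptions as elsewhere): running
 * HW_INIT synchronously by asking this function to wait for the
 * driver-simulated completion:
 *
 *	struct bnx2x_func_state_params func_params = {NULL};
 *
 *	func_params.f_obj = &bp->func_obj;
 *	func_params.cmd = BNX2X_F_CMD_HW_INIT;
 *	func_params.params.hw_init.load_phase = load_code;
 *	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 *	rc = bnx2x_func_state_change(bp, &func_params);
 */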