071aea79d218f01eb166d910a310ef0f49efa470
[platform/kernel/linux-rpi.git] / drivers / net / ethernet / emulex / benet / be_cmds.c
1 /*
2  * Copyright (C) 2005 - 2011 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include <linux/module.h>
19 #include "be.h"
20 #include "be_cmds.h"
21
/* Table of MCC opcodes that require extra function privileges.  A command
 * listed here is permitted only when the function owns at least one of the
 * listed privilege bits; opcodes absent from the table are always allowed
 * (see be_cmd_allowed()).
 */
static struct be_cmd_priv_map cmd_priv_map[] = {
        {
                OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
                CMD_SUBSYSTEM_ETH,
                BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
                BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
        },
        {
                OPCODE_COMMON_GET_FLOW_CONTROL,
                CMD_SUBSYSTEM_COMMON,
                BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
                BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
        },
        {
                OPCODE_COMMON_SET_FLOW_CONTROL,
                CMD_SUBSYSTEM_COMMON,
                BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
                BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
        },
        {
                OPCODE_ETH_GET_PPORT_STATS,
                CMD_SUBSYSTEM_ETH,
                BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
                BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
        },
        {
                OPCODE_COMMON_GET_PHY_DETAILS,
                CMD_SUBSYSTEM_COMMON,
                BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
                BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
        }
};
54
55 static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode,
56                            u8 subsystem)
57 {
58         int i;
59         int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
60         u32 cmd_privileges = adapter->cmd_privileges;
61
62         for (i = 0; i < num_entries; i++)
63                 if (opcode == cmd_priv_map[i].opcode &&
64                     subsystem == cmd_priv_map[i].subsystem)
65                         if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
66                                 return false;
67
68         return true;
69 }
70
/* Return the in-WRB payload area used by embedded (non-SGL) commands */
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
        return wrb->payload.embedded_payload;
}
75
/* Ring the MCC doorbell to post one WRB on the MCC queue.
 * Becomes a no-op once an adapter error has been detected.
 */
static void be_mcc_notify(struct be_adapter *adapter)
{
        struct be_queue_info *mccq = &adapter->mcc_obj.q;
        u32 val = 0;

        if (be_error(adapter))
                return;

        val |= mccq->id & DB_MCCQ_RING_ID_MASK;
        val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

        /* order the WRB memory writes before the doorbell write */
        wmb();
        iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}
90
91 /* To check if valid bit is set, check the entire word as we don't know
92  * the endianness of the data (old entry is host endian while a new entry is
93  * little endian) */
94 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
95 {
96         u32 flags;
97
98         if (compl->flags != 0) {
99                 flags = le32_to_cpu(compl->flags);
100                 if (flags & CQE_FLAGS_VALID_MASK) {
101                         compl->flags = flags;
102                         return true;
103                 }
104         }
105         return false;
106 }
107
/* Need to reset the entire word that houses the valid bit */
/* Marks the CQ entry consumed so be_mcc_compl_is_new() won't see it again */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
        compl->flags = 0;
}
113
/* Reassemble the response-header virtual address that was split across the
 * two 32-bit WRB tags by be_wrb_cmd_hdr_prepare().
 */
static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
{
        unsigned long addr;

        addr = tag1;
        /* the double 16-bit shift avoids an undefined shift-by-32 when
         * unsigned long is 32 bits wide (tag1 is 0 in that case anyway)
         */
        addr = ((addr << 16) << 16) | tag0;
        return (void *)addr;
}
122
/* Process one MCC completion: decode its status, recover the originating
 * request's response header from the completion tags, and run per-opcode
 * post-processing.  Returns the base completion status (MCC_STATUS_*).
 */
static int be_mcc_compl_process(struct be_adapter *adapter,
                                struct be_mcc_compl *compl)
{
        u16 compl_status, extd_status;
        struct be_cmd_resp_hdr *resp_hdr;
        u8 opcode = 0, subsystem = 0;

        /* Just swap the status to host endian; mcc tag is opaquely copied
         * from mcc_wrb */
        be_dws_le_to_cpu(compl, 4);

        compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
                                CQE_STATUS_COMPL_MASK;

        /* the tags carry the virtual address of the request header */
        resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);

        if (resp_hdr) {
                opcode = resp_hdr->opcode;
                subsystem = resp_hdr->subsystem;
        }

        /* flash commands have a sleeper waiting on flash_compl */
        if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
             (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
            (subsystem == CMD_SUBSYSTEM_COMMON)) {
                adapter->flash_status = compl_status;
                complete(&adapter->flash_compl);
        }

        if (compl_status == MCC_STATUS_SUCCESS) {
                if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
                     (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
                    (subsystem == CMD_SUBSYSTEM_ETH)) {
                        be_parse_stats(adapter);
                        adapter->stats_cmd_sent = false;
                }
                if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
                    subsystem == CMD_SUBSYSTEM_COMMON) {
                        struct be_cmd_resp_get_cntl_addnl_attribs *resp =
                                (void *)resp_hdr;
                        adapter->drv_stats.be_on_die_temperature =
                                resp->on_die_temperature;
                }
        } else {
                /* FW can't report temperature; stop issuing the query */
                if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
                        adapter->be_get_temp_freq = 0;

                /* not-supported/illegal-request failures are not logged */
                if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
                        compl_status == MCC_STATUS_ILLEGAL_REQUEST)
                        goto done;

                if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
                        dev_warn(&adapter->pdev->dev,
                                 "VF is not privileged to issue opcode %d-%d\n",
                                 opcode, subsystem);
                } else {
                        extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
                                        CQE_STATUS_EXTD_MASK;
                        dev_err(&adapter->pdev->dev,
                                "opcode %d-%d failed:status %d-%d\n",
                                opcode, subsystem, compl_status, extd_status);
                }
        }
done:
        return compl_status;
}
188
/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
                struct be_async_event_link_state *evt)
{
        /* When link status changes, link speed must be re-queried from FW */
        adapter->phy.link_speed = -1;

        /* Ignore physical link event */
        if (lancer_chip(adapter) &&
            !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
                return;

        /* For the initial link status do not rely on the ASYNC event as
         * it may not be received in some cases.
         */
        if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
                be_link_status_update(adapter, evt->port_link_status);
}
207
208 /* Grp5 CoS Priority evt */
209 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
210                 struct be_async_event_grp5_cos_priority *evt)
211 {
212         if (evt->valid) {
213                 adapter->vlan_prio_bmap = evt->available_priority_bmap;
214                 adapter->recommended_prio &= ~VLAN_PRIO_MASK;
215                 adapter->recommended_prio =
216                         evt->reco_default_priority << VLAN_PRIO_SHIFT;
217         }
218 }
219
/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps.
 * Only refreshes the cached speed when one was already known
 * (link_speed >= 0) and the event targets this adapter's port.
 */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
                struct be_async_event_grp5_qos_link_speed *evt)
{
        if (adapter->phy.link_speed >= 0 &&
            evt->physical_port == adapter->port_num)
                adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
}
228
229 /*Grp5 PVID evt*/
230 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
231                 struct be_async_event_grp5_pvid_state *evt)
232 {
233         if (evt->enabled)
234                 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
235         else
236                 adapter->pvid = 0;
237 }
238
239 static void be_async_grp5_evt_process(struct be_adapter *adapter,
240                 u32 trailer, struct be_mcc_compl *evt)
241 {
242         u8 event_type = 0;
243
244         event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
245                 ASYNC_TRAILER_EVENT_TYPE_MASK;
246
247         switch (event_type) {
248         case ASYNC_EVENT_COS_PRIORITY:
249                 be_async_grp5_cos_priority_process(adapter,
250                 (struct be_async_event_grp5_cos_priority *)evt);
251         break;
252         case ASYNC_EVENT_QOS_SPEED:
253                 be_async_grp5_qos_speed_process(adapter,
254                 (struct be_async_event_grp5_qos_link_speed *)evt);
255         break;
256         case ASYNC_EVENT_PVID_STATE:
257                 be_async_grp5_pvid_state_process(adapter,
258                 (struct be_async_event_grp5_pvid_state *)evt);
259         break;
260         default:
261                 dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
262                 break;
263         }
264 }
265
266 static inline bool is_link_state_evt(u32 trailer)
267 {
268         return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
269                 ASYNC_TRAILER_EVENT_CODE_MASK) ==
270                                 ASYNC_EVENT_CODE_LINK_STATE;
271 }
272
273 static inline bool is_grp5_evt(u32 trailer)
274 {
275         return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
276                 ASYNC_TRAILER_EVENT_CODE_MASK) ==
277                                 ASYNC_EVENT_CODE_GRP_5);
278 }
279
280 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
281 {
282         struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
283         struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
284
285         if (be_mcc_compl_is_new(compl)) {
286                 queue_tail_inc(mcc_cq);
287                 return compl;
288         }
289         return NULL;
290 }
291
/* Arm the MCC CQ for interrupts and remember to keep re-arming it when
 * completions are processed (see be_process_mcc()).
 */
void be_async_mcc_enable(struct be_adapter *adapter)
{
        spin_lock_bh(&adapter->mcc_cq_lock);

        be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
        adapter->mcc_obj.rearm_cq = true;

        spin_unlock_bh(&adapter->mcc_cq_lock);
}
301
/* Stop re-arming the MCC CQ; further processing happens only via polling */
void be_async_mcc_disable(struct be_adapter *adapter)
{
        spin_lock_bh(&adapter->mcc_cq_lock);

        adapter->mcc_obj.rearm_cq = false;
        be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);

        spin_unlock_bh(&adapter->mcc_cq_lock);
}
311
/* Drain the MCC CQ: dispatch async events, process command completions and
 * notify the CQ for the consumed entries.  Returns the status of the last
 * processed command completion.  Takes mcc_cq_lock without disabling BHs;
 * callers arrange a BH-safe context themselves (see be_mcc_wait_compl()).
 */
int be_process_mcc(struct be_adapter *adapter)
{
        struct be_mcc_compl *compl;
        int num = 0, status = 0;
        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

        spin_lock(&adapter->mcc_cq_lock);
        while ((compl = be_mcc_compl_get(adapter))) {
                if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
                        /* Interpret flags as an async trailer */
                        if (is_link_state_evt(compl->flags))
                                be_async_link_state_process(adapter,
                                (struct be_async_event_link_state *) compl);
                        else if (is_grp5_evt(compl->flags))
                                be_async_grp5_evt_process(adapter,
                                compl->flags, compl);
                } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
                                status = be_mcc_compl_process(adapter, compl);
                                /* a WRB slot is freed for each completion */
                                atomic_dec(&mcc_obj->q.used);
                }
                be_mcc_compl_use(compl);
                num++;
        }

        if (num)
                be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

        spin_unlock(&adapter->mcc_cq_lock);
        return status;
}
342
/* Wait till no more pending mcc requests are present.
 * Polls be_process_mcc() every 100us for up to 12s; returns the last
 * completion status, or -EIO on adapter error / FW timeout (in which
 * case fw_timeout is flagged).
 */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout             120000 /* 12s timeout */
        int i, status = 0;
        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

        for (i = 0; i < mcc_timeout; i++) {
                if (be_error(adapter))
                        return -EIO;

                /* be_process_mcc() expects BHs disabled */
                local_bh_disable();
                status = be_process_mcc(adapter);
                local_bh_enable();

                if (atomic_read(&mcc_obj->q.used) == 0)
                        break;
                udelay(100);
        }
        if (i == mcc_timeout) {
                dev_err(&adapter->pdev->dev, "FW not responding\n");
                adapter->fw_timeout = true;
                return -EIO;
        }
        return status;
}
369
/* Notify MCC requests and wait for completion.
 * Returns the FW status from the response header on completion, or -EIO
 * when the wait itself failed.
 */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
        int status;
        struct be_mcc_wrb *wrb;
        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
        u16 index = mcc_obj->q.head;
        struct be_cmd_resp_hdr *resp;

        /* the WRB just posted sits one slot behind the queue head; decode
         * its response-header address before ringing the doorbell
         */
        index_dec(&index, mcc_obj->q.len);
        wrb = queue_index_node(&mcc_obj->q, index);

        resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

        be_mcc_notify(adapter);

        status = be_mcc_wait_compl(adapter);
        if (status == -EIO)
                goto out;

        status = resp->status;
out:
        return status;
}
394
/* Poll the mailbox doorbell until its ready bit is set.
 * Returns 0 when ready, -EIO when an adapter error is already flagged,
 * and -1 on timeout (~4s) or when the register reads all-ones
 * (device surprise-removed).
 */
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
        int msecs = 0;
        u32 ready;

        do {
                if (be_error(adapter))
                        return -EIO;

                ready = ioread32(db);
                if (ready == 0xffffffff)
                        return -1;

                ready &= MPU_MAILBOX_DB_RDY_MASK;
                if (ready)
                        break;

                if (msecs > 4000) {
                        dev_err(&adapter->pdev->dev, "FW not responding\n");
                        adapter->fw_timeout = true;
                        be_detect_error(adapter);
                        return -1;
                }

                msleep(1);
                msecs++;
        } while (true);

        return 0;
}
425
/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 *
 * Step 1 writes the high bits of the mailbox DMA address (with the HI
 * flag set), step 2 writes the low bits; each write waits for the ready
 * bit first.  On success the completion embedded in the mailbox is
 * processed and consumed.
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
        int status;
        u32 val = 0;
        void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
        struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
        struct be_mcc_mailbox *mbox = mbox_mem->va;
        struct be_mcc_compl *compl = &mbox->compl;

        /* wait for ready to be set */
        status = be_mbox_db_ready_wait(adapter, db);
        if (status != 0)
                return status;

        val |= MPU_MAILBOX_DB_HI_MASK;
        /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
        iowrite32(val, db);

        /* wait for ready to be set */
        status = be_mbox_db_ready_wait(adapter, db);
        if (status != 0)
                return status;

        val = 0;
        /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
        val |= (u32)(mbox_mem->dma >> 4) << 2;
        iowrite32(val, db);

        status = be_mbox_db_ready_wait(adapter, db);
        if (status != 0)
                return status;

        /* A cq entry has been made now */
        if (be_mcc_compl_is_new(compl)) {
                status = be_mcc_compl_process(adapter, &mbox->compl);
                be_mcc_compl_use(compl);
                if (status)
                        return status;
        } else {
                dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
                return -1;
        }
        return 0;
}
475
476 static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
477 {
478         u32 sem;
479         u32 reg = skyhawk_chip(adapter) ? SLIPORT_SEMAPHORE_OFFSET_SH :
480                                           SLIPORT_SEMAPHORE_OFFSET_BE;
481
482         pci_read_config_dword(adapter->pdev, reg, &sem);
483         *stage = sem & POST_STAGE_MASK;
484
485         if ((sem >> POST_ERR_SHIFT) & POST_ERR_MASK)
486                 return -1;
487         else
488                 return 0;
489 }
490
491 int lancer_wait_ready(struct be_adapter *adapter)
492 {
493 #define SLIPORT_READY_TIMEOUT 30
494         u32 sliport_status;
495         int status = 0, i;
496
497         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
498                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
499                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
500                         break;
501
502                 msleep(1000);
503         }
504
505         if (i == SLIPORT_READY_TIMEOUT)
506                 status = -1;
507
508         return status;
509 }
510
511 static bool lancer_provisioning_error(struct be_adapter *adapter)
512 {
513         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
514         sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
515         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
516                 sliport_err1 = ioread32(adapter->db +
517                                         SLIPORT_ERROR1_OFFSET);
518                 sliport_err2 = ioread32(adapter->db +
519                                         SLIPORT_ERROR2_OFFSET);
520
521                 if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
522                     sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
523                         return true;
524         }
525         return false;
526 }
527
/* Bring a Lancer port to the ready state.  If the port reports an error
 * with the reset-needed bit set, trigger a physical-init reset and
 * re-check.  Marks eeh_error when the failure is not the transient
 * no-resource provisioning error.  Returns 0 when ready, -1 otherwise.
 */
int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
        int status;
        u32 sliport_status, err, reset_needed;
        bool resource_error;

        resource_error = lancer_provisioning_error(adapter);
        if (resource_error)
                return -1;

        status = lancer_wait_ready(adapter);
        if (!status) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                err = sliport_status & SLIPORT_STATUS_ERR_MASK;
                reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
                if (err && reset_needed) {
                        iowrite32(SLI_PORT_CONTROL_IP_MASK,
                                  adapter->db + SLIPORT_CONTROL_OFFSET);

                        /* check adapter has corrected the error */
                        status = lancer_wait_ready(adapter);
                        sliport_status = ioread32(adapter->db +
                                                  SLIPORT_STATUS_OFFSET);
                        sliport_status &= (SLIPORT_STATUS_ERR_MASK |
                                                SLIPORT_STATUS_RN_MASK);
                        if (status || sliport_status)
                                status = -1;
                } else if (err || reset_needed) {
                        /* error without reset-needed (or vice versa) is
                         * not recoverable here
                         */
                        status = -1;
                }
        }
        /* Stop error recovery if error is not recoverable.
         * No resource error is temporary errors and will go away
         * when PF provisions resources.
         */
        resource_error = lancer_provisioning_error(adapter);
        if (status == -1 && !resource_error)
                adapter->eeh_error = true;

        return status;
}
569
/* Wait for firmware POST (power-on self test) to reach the ARMFW_RDY
 * stage, polling every 2s for up to ~60s.  Lancer chips use the SLIPORT
 * ready bit instead.  Returns 0 when FW is ready, -EINTR if the sleep
 * was interrupted, -1 on POST error or timeout.
 */
int be_fw_wait_ready(struct be_adapter *adapter)
{
        u16 stage;
        int status, timeout = 0;
        struct device *dev = &adapter->pdev->dev;

        if (lancer_chip(adapter)) {
                status = lancer_wait_ready(adapter);
                return status;
        }

        do {
                status = be_POST_stage_get(adapter, &stage);
                if (status) {
                        dev_err(dev, "POST error; stage=0x%x\n", stage);
                        return -1;
                } else if (stage != POST_STAGE_ARMFW_RDY) {
                        if (msleep_interruptible(2000)) {
                                dev_err(dev, "Waiting for POST aborted\n");
                                return -EINTR;
                        }
                        timeout += 2;
                } else {
                        return 0;
                }
        } while (timeout < 60);

        dev_err(dev, "POST timeout; stage=0x%x\n", stage);
        return -1;
}
600
601
/* Return the first scatter-gather entry of a non-embedded WRB */
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
        return &wrb->payload.sgl[0];
}
606
607
/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
/* Fill in the common request header and the WRB that carries it.  The
 * header's virtual address is saved in the WRB tags so the response can
 * be located on completion (see be_decode_resp_hdr()).
 */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
                                u8 subsystem, u8 opcode, int cmd_len,
                                struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
{
        struct be_sge *sge;
        unsigned long addr = (unsigned long)req_hdr;
        u64 req_addr = addr;

        req_hdr->opcode = opcode;
        req_hdr->subsystem = subsystem;
        req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
        req_hdr->version = 0;

        /* tags are opaquely echoed back by FW in the completion */
        wrb->tag0 = req_addr & 0xFFFFFFFF;
        wrb->tag1 = upper_32_bits(req_addr);

        wrb->payload_length = cmd_len;
        if (mem) {
                /* non-embedded: one SGE pointing at the external buffer */
                wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
                        MCC_WRB_SGE_CNT_SHIFT;
                sge = nonembedded_sgl(wrb);
                sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
                sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
                sge->len = cpu_to_le32(mem->size);
        } else
                wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
        /* the WRB is consumed by HW in little endian */
        be_dws_cpu_to_le(wrb, 8);
}
638
639 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
640                         struct be_dma_mem *mem)
641 {
642         int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
643         u64 dma = (u64)mem->dma;
644
645         for (i = 0; i < buf_pages; i++) {
646                 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
647                 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
648                 dma += PAGE_SIZE_4K;
649         }
650 }
651
652 /* Converts interrupt delay in microseconds to multiplier value */
653 static u32 eq_delay_to_mult(u32 usec_delay)
654 {
655 #define MAX_INTR_RATE                   651042
656         const u32 round = 10;
657         u32 multiplier;
658
659         if (usec_delay == 0)
660                 multiplier = 0;
661         else {
662                 u32 interrupt_rate = 1000000 / usec_delay;
663                 /* Max delay, corresponding to the lowest interrupt rate */
664                 if (interrupt_rate == 0)
665                         multiplier = 1023;
666                 else {
667                         multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
668                         multiplier /= interrupt_rate;
669                         /* Round the multiplier to the closest value.*/
670                         multiplier = (multiplier + round/2) / round;
671                         multiplier = min(multiplier, (u32)1023);
672                 }
673         }
674         return multiplier;
675 }
676
/* Return the (zeroed) mailbox WRB; callers serialize via mbox_lock */
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
        struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
        struct be_mcc_wrb *wrb
                = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
        memset(wrb, 0, sizeof(*wrb));
        return wrb;
}
685
/* Allocate and zero the next WRB slot on the MCC queue.
 * Returns NULL when the queue hasn't been created yet or is full.
 * Callers serialize via mcc_lock.
 */
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
        struct be_queue_info *mccq = &adapter->mcc_obj.q;
        struct be_mcc_wrb *wrb;

        if (!mccq->created)
                return NULL;

        if (atomic_read(&mccq->used) >= mccq->len) {
                dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
                return NULL;
        }

        wrb = queue_head_node(mccq);
        queue_head_inc(mccq);
        atomic_inc(&mccq->used);
        memset(wrb, 0, sizeof(*wrb));
        return wrb;
}
705
706 /* Tell fw we're about to start firing cmds by writing a
707  * special pattern across the wrb hdr; uses mbox
708  */
709 int be_cmd_fw_init(struct be_adapter *adapter)
710 {
711         u8 *wrb;
712         int status;
713
714         if (lancer_chip(adapter))
715                 return 0;
716
717         if (mutex_lock_interruptible(&adapter->mbox_lock))
718                 return -1;
719
720         wrb = (u8 *)wrb_from_mbox(adapter);
721         *wrb++ = 0xFF;
722         *wrb++ = 0x12;
723         *wrb++ = 0x34;
724         *wrb++ = 0xFF;
725         *wrb++ = 0xFF;
726         *wrb++ = 0x56;
727         *wrb++ = 0x78;
728         *wrb = 0xFF;
729
730         status = be_mbox_notify_wait(adapter);
731
732         mutex_unlock(&adapter->mbox_lock);
733         return status;
734 }
735
736 /* Tell fw we're done with firing cmds by writing a
737  * special pattern across the wrb hdr; uses mbox
738  */
739 int be_cmd_fw_clean(struct be_adapter *adapter)
740 {
741         u8 *wrb;
742         int status;
743
744         if (lancer_chip(adapter))
745                 return 0;
746
747         if (mutex_lock_interruptible(&adapter->mbox_lock))
748                 return -1;
749
750         wrb = (u8 *)wrb_from_mbox(adapter);
751         *wrb++ = 0xFF;
752         *wrb++ = 0xAA;
753         *wrb++ = 0xBB;
754         *wrb++ = 0xFF;
755         *wrb++ = 0xFF;
756         *wrb++ = 0xCC;
757         *wrb++ = 0xDD;
758         *wrb = 0xFF;
759
760         status = be_mbox_notify_wait(adapter);
761
762         mutex_unlock(&adapter->mbox_lock);
763         return status;
764 }
765
/* Create an event queue in FW; uses the mailbox.
 * On success stores the FW-assigned id in eq->id and marks it created.
 */
int be_cmd_eq_create(struct be_adapter *adapter,
                struct be_queue_info *eq, int eq_delay)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_eq_create *req;
        struct be_dma_mem *q_mem = &eq->dma_mem;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);

        req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
        /* 4byte eqe*/
        AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
        /* ring size is encoded as log2(len/256) */
        AMAP_SET_BITS(struct amap_eq_context, count, req->context,
                        __ilog2_u32(eq->len/256));
        AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
                        eq_delay_to_mult(eq_delay));
        be_dws_cpu_to_le(req->context, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
                eq->id = le16_to_cpu(resp->eq_id);
                eq->created = true;
        }

        mutex_unlock(&adapter->mbox_lock);
        return status;
}
806
/* Use MCC */
/* Query a MAC address from FW into mac_addr.  When permanent is true the
 * factory MAC is fetched; otherwise the address identified by
 * if_handle/pmac_id is returned.
 */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
                          bool permanent, u32 if_handle, u32 pmac_id)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mac_query *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
        req->type = MAC_ADDRESS_TYPE_NETWORK;
        if (permanent) {
                req->permanent = 1;
        } else {
                req->if_id = cpu_to_le16((u16) if_handle);
                req->pmac_id = cpu_to_le32(pmac_id);
                req->permanent = 0;
        }

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
                memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
845
/* Uses synchronous MCCQ */
/* Add a MAC address to the interface if_id; the FW-assigned id is
 * returned through *pmac_id.  An unauthorized-request FW status is
 * mapped to -EPERM for the caller.
 */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
                u32 if_id, u32 *pmac_id, u32 domain)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_pmac_add *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);

        req->hdr.domain = domain;
        req->if_id = cpu_to_le32(if_id);
        memcpy(req->mac_address, mac_addr, ETH_ALEN);

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
                *pmac_id = le32_to_cpu(resp->pmac_id);
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);

        /* be_mcc_notify_wait() returns the raw FW status on completion */
         if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
                status = -EPERM;

        return status;
}
884
885 /* Uses synchronous MCCQ */
886 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
887 {
888         struct be_mcc_wrb *wrb;
889         struct be_cmd_req_pmac_del *req;
890         int status;
891
892         if (pmac_id == -1)
893                 return 0;
894
895         spin_lock_bh(&adapter->mcc_lock);
896
897         wrb = wrb_from_mccq(adapter);
898         if (!wrb) {
899                 status = -EBUSY;
900                 goto err;
901         }
902         req = embedded_payload(wrb);
903
904         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
905                 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);
906
907         req->hdr.domain = dom;
908         req->if_id = cpu_to_le32(if_id);
909         req->pmac_id = cpu_to_le32(pmac_id);
910
911         status = be_mcc_notify_wait(adapter);
912
913 err:
914         spin_unlock_bh(&adapter->mcc_lock);
915         return status;
916 }
917
/* Creates a completion queue (CQ) bound to event queue @eq.
 * Uses the mailbox (Mbox) channel, so it works before MCC queues exist.
 * On success cq->id and cq->created are updated.
 */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
		struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	/* mailbox access is serialized by a sleeping lock */
	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		/* Lancer uses a v2 request and its own context layout */
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */
		AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
								no_delay);
		/* ring size is encoded as log2(len / 256) */
		AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
								ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
								ctxt, eq->id);
	} else {
		/* BEx context layout */
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
								coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
								ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
	}

	/* context was built in CPU byte-order; convert before handing to h/w */
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}
978
979 static u32 be_encoded_q_len(int q_len)
980 {
981         u32 len_encoded = fls(q_len); /* log2(len) + 1 */
982         if (len_encoded == 16)
983                 len_encoded = 0;
984         return len_encoded;
985 }
986
/* Creates the MCC queue using the extended (EXT) variant of the cmd,
 * which allows subscribing to async events.  Uses the mailbox channel.
 * On success mccq->id and mccq->created are updated.
 */
int be_cmd_mccq_ext_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_ext_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		/* Lancer: v1 request with its own context layout; the
		 * completion queue id also goes in the request body.
		 */
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
								ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
								 ctxt, 1);

	} else {
		/* BEx context layout */
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	}

	/* Subscribe to Link State and Group 5 Events (bits 1 and 5 set) */
	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}
1043
/* Creates the MCC queue using the original (pre-EXT) cmd; fallback for
 * firmware that does not support MCC_CREATE_EXT.  Uses the mailbox channel.
 * On success mccq->id and mccq->created are updated.
 */
int be_cmd_mccq_org_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	/* BEx context layout only; no Lancer variant for this cmd here */
	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
			be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
1085
1086 int be_cmd_mccq_create(struct be_adapter *adapter,
1087                         struct be_queue_info *mccq,
1088                         struct be_queue_info *cq)
1089 {
1090         int status;
1091
1092         status = be_cmd_mccq_ext_create(adapter, mccq, cq);
1093         if (status && !lancer_chip(adapter)) {
1094                 dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
1095                         "or newer to avoid conflicting priorities between NIC "
1096                         "and FCoE traffic");
1097                 status = be_cmd_mccq_org_create(adapter, mccq, cq);
1098         }
1099         return status;
1100 }
1101
/* Creates an ethernet transmit queue bound to completion queue @cq.
 * Uses the MCC queue.  On success txq->id and txq->created are updated.
 */
int be_cmd_txq_create(struct be_adapter *adapter,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_tx_create *req;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);

	if (lancer_chip(adapter)) {
		/* Lancer requires a v1 request carrying the interface id */
		req->hdr.version = 1;
		AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
					adapter->if_handle);
	}

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
		be_encoded_q_len(txq->len));
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	/* context was built in CPU byte-order; convert for the h/w */
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	return status;
}
1157
/* Creates an ethernet receive queue on interface @if_id, feeding
 * completion queue @cq_id.  On success rxq->id, rxq->created and the
 * f/w-assigned *rss_id are updated.  Uses MCC.
 */
int be_cmd_rxq_create(struct be_adapter *adapter,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u32 if_id, u32 rss, u8 *rss_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
				OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);

	req->cq_id = cpu_to_le16(cq_id);
	/* frag_size is programmed as a log2 value; assumes frag_size is a
	 * power of two — TODO(review) confirm at the callers.
	 */
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
		*rss_id = resp->rss_id;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1200
/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	/* Pick the subsystem/opcode pair for this queue type; an unknown
	 * type is a driver bug.
	 */
	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}

	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
				NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);
	/* queue is marked not-created even if the f/w cmd failed */
	q->created = false;

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
1253
1254 /* Uses MCC */
1255 int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1256 {
1257         struct be_mcc_wrb *wrb;
1258         struct be_cmd_req_q_destroy *req;
1259         int status;
1260
1261         spin_lock_bh(&adapter->mcc_lock);
1262
1263         wrb = wrb_from_mccq(adapter);
1264         if (!wrb) {
1265                 status = -EBUSY;
1266                 goto err;
1267         }
1268         req = embedded_payload(wrb);
1269
1270         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1271                         OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
1272         req->id = cpu_to_le16(q->id);
1273
1274         status = be_mcc_notify_wait(adapter);
1275         q->created = false;
1276
1277 err:
1278         spin_unlock_bh(&adapter->mcc_lock);
1279         return status;
1280 }
1281
1282 /* Create an rx filtering policy configuration on an i/f
1283  * Uses MCCQ
1284  */
1285 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1286                      u32 *if_handle, u32 domain)
1287 {
1288         struct be_mcc_wrb *wrb;
1289         struct be_cmd_req_if_create *req;
1290         int status;
1291
1292         spin_lock_bh(&adapter->mcc_lock);
1293
1294         wrb = wrb_from_mccq(adapter);
1295         if (!wrb) {
1296                 status = -EBUSY;
1297                 goto err;
1298         }
1299         req = embedded_payload(wrb);
1300
1301         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1302                 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL);
1303         req->hdr.domain = domain;
1304         req->capability_flags = cpu_to_le32(cap_flags);
1305         req->enable_flags = cpu_to_le32(en_flags);
1306
1307         req->pmac_invalid = true;
1308
1309         status = be_mcc_notify_wait(adapter);
1310         if (!status) {
1311                 struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
1312                 *if_handle = le32_to_cpu(resp->interface_id);
1313         }
1314
1315 err:
1316         spin_unlock_bh(&adapter->mcc_lock);
1317         return status;
1318 }
1319
1320 /* Uses MCCQ */
1321 int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
1322 {
1323         struct be_mcc_wrb *wrb;
1324         struct be_cmd_req_if_destroy *req;
1325         int status;
1326
1327         if (interface_id == -1)
1328                 return 0;
1329
1330         spin_lock_bh(&adapter->mcc_lock);
1331
1332         wrb = wrb_from_mccq(adapter);
1333         if (!wrb) {
1334                 status = -EBUSY;
1335                 goto err;
1336         }
1337         req = embedded_payload(wrb);
1338
1339         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1340                 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
1341         req->hdr.domain = domain;
1342         req->interface_id = cpu_to_le32(interface_id);
1343
1344         status = be_mcc_notify_wait(adapter);
1345 err:
1346         spin_unlock_bh(&adapter->mcc_lock);
1347         return status;
1348 }
1349
/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC — fires the cmd and returns without waiting;
 * the response lands in @nonemb_cmd via the completion path.
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	hdr = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);

	/* version 1 of the cmd is supported by all chips except BE2 */
	if (!BE2_chip(adapter))
		hdr->version = 1;

	be_mcc_notify(adapter);
	/* presumably flags an in-flight stats cmd so it isn't re-issued —
	 * verify against the users of stats_cmd_sent
	 */
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1383
/* Lancer Stats: fetch per-physical-port statistics into @nonemb_cmd.
 * Non-embedded, asynchronous MCC cmd — fires and returns without waiting.
 * Privilege-gated; returns -EPERM when the function may not issue it.
 */
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
				struct be_dma_mem *nonemb_cmd)
{

	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_pport_stats *req;
	int status = 0;

	if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
			    CMD_SUBSYSTEM_ETH))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
			nonemb_cmd);

	/* query (not reset) the stats of this function's physical port */
	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
	req->cmd_params.params.reset_stats = 0;

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1420
1421 static int be_mac_to_link_speed(int mac_speed)
1422 {
1423         switch (mac_speed) {
1424         case PHY_LINK_SPEED_ZERO:
1425                 return 0;
1426         case PHY_LINK_SPEED_10MBPS:
1427                 return 10;
1428         case PHY_LINK_SPEED_100MBPS:
1429                 return 100;
1430         case PHY_LINK_SPEED_1GBPS:
1431                 return 1000;
1432         case PHY_LINK_SPEED_10GBPS:
1433                 return 10000;
1434         }
1435         return 0;
1436 }
1437
/* Uses synchronous mcc
 * Returns link_speed in Mbps through @link_speed (may be NULL) and the
 * logical link state through @link_status (may be NULL).
 */
int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
			     u8 *link_status, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	/* default to link-down; overwritten on a successful query */
	if (link_status)
		*link_status = LINK_DOWN;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);

	/* version 1 of the cmd is supported by all chips except BE2 */
	if (!BE2_chip(adapter))
		req->hdr.version = 1;

	req->hdr.domain = dom;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
		if (link_speed) {
			/* when f/w reports an explicit link_speed it is in
			 * 10-Mbps units; otherwise fall back to decoding the
			 * mac_speed enum
			 */
			*link_speed = resp->link_speed ?
				      le16_to_cpu(resp->link_speed) * 10 :
				      be_mac_to_link_speed(resp->mac_speed);

			if (!resp->logical_link_status)
				*link_speed = 0;
		}
		if (link_status)
			*link_status = resp->logical_link_status;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1488
1489 /* Uses synchronous mcc */
1490 int be_cmd_get_die_temperature(struct be_adapter *adapter)
1491 {
1492         struct be_mcc_wrb *wrb;
1493         struct be_cmd_req_get_cntl_addnl_attribs *req;
1494         int status;
1495
1496         spin_lock_bh(&adapter->mcc_lock);
1497
1498         wrb = wrb_from_mccq(adapter);
1499         if (!wrb) {
1500                 status = -EBUSY;
1501                 goto err;
1502         }
1503         req = embedded_payload(wrb);
1504
1505         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1506                 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
1507                 wrb, NULL);
1508
1509         be_mcc_notify(adapter);
1510
1511 err:
1512         spin_unlock_bh(&adapter->mcc_lock);
1513         return status;
1514 }
1515
1516 /* Uses synchronous mcc */
1517 int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1518 {
1519         struct be_mcc_wrb *wrb;
1520         struct be_cmd_req_get_fat *req;
1521         int status;
1522
1523         spin_lock_bh(&adapter->mcc_lock);
1524
1525         wrb = wrb_from_mccq(adapter);
1526         if (!wrb) {
1527                 status = -EBUSY;
1528                 goto err;
1529         }
1530         req = embedded_payload(wrb);
1531
1532         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1533                 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
1534         req->fat_operation = cpu_to_le32(QUERY_FAT);
1535         status = be_mcc_notify_wait(adapter);
1536         if (!status) {
1537                 struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1538                 if (log_size && resp->log_size)
1539                         *log_size = le32_to_cpu(resp->log_size) -
1540                                         sizeof(u32);
1541         }
1542 err:
1543         spin_unlock_bh(&adapter->mcc_lock);
1544         return status;
1545 }
1546
1547 void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1548 {
1549         struct be_dma_mem get_fat_cmd;
1550         struct be_mcc_wrb *wrb;
1551         struct be_cmd_req_get_fat *req;
1552         u32 offset = 0, total_size, buf_size,
1553                                 log_offset = sizeof(u32), payload_len;
1554         int status;
1555
1556         if (buf_len == 0)
1557                 return;
1558
1559         total_size = buf_len;
1560
1561         get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1562         get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1563                         get_fat_cmd.size,
1564                         &get_fat_cmd.dma);
1565         if (!get_fat_cmd.va) {
1566                 status = -ENOMEM;
1567                 dev_err(&adapter->pdev->dev,
1568                 "Memory allocation failure while retrieving FAT data\n");
1569                 return;
1570         }
1571
1572         spin_lock_bh(&adapter->mcc_lock);
1573
1574         while (total_size) {
1575                 buf_size = min(total_size, (u32)60*1024);
1576                 total_size -= buf_size;
1577
1578                 wrb = wrb_from_mccq(adapter);
1579                 if (!wrb) {
1580                         status = -EBUSY;
1581                         goto err;
1582                 }
1583                 req = get_fat_cmd.va;
1584
1585                 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1586                 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1587                                 OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
1588                                 &get_fat_cmd);
1589
1590                 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1591                 req->read_log_offset = cpu_to_le32(log_offset);
1592                 req->read_log_length = cpu_to_le32(buf_size);
1593                 req->data_buffer_size = cpu_to_le32(buf_size);
1594
1595                 status = be_mcc_notify_wait(adapter);
1596                 if (!status) {
1597                         struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1598                         memcpy(buf + offset,
1599                                 resp->data_buffer,
1600                                 le32_to_cpu(resp->read_log_length));
1601                 } else {
1602                         dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1603                         goto err;
1604                 }
1605                 offset += buf_size;
1606                 log_offset += buf_size;
1607         }
1608 err:
1609         pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1610                         get_fat_cmd.va,
1611                         get_fat_cmd.dma);
1612         spin_unlock_bh(&adapter->mcc_lock);
1613 }
1614
/* Uses synchronous mcc.
 * Copies the running f/w version into @fw_ver and, when @fw_on_flash is
 * non-NULL, the flashed version into it.
 * NOTE(review): strcpy is unbounded here — assumes both destination
 * buffers are at least as large as the f/w version strings; confirm
 * against the callers' buffer sizes.
 */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
			char *fw_on_flash)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
		strcpy(fw_ver, resp->firmware_version_string);
		if (fw_on_flash)
			strcpy(fw_on_flash, resp->fw_on_flash_version_string);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1646
1647 /* set the EQ delay interval of an EQ to specified value
1648  * Uses async mcc
1649  */
1650 int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
1651 {
1652         struct be_mcc_wrb *wrb;
1653         struct be_cmd_req_modify_eq_delay *req;
1654         int status = 0;
1655
1656         spin_lock_bh(&adapter->mcc_lock);
1657
1658         wrb = wrb_from_mccq(adapter);
1659         if (!wrb) {
1660                 status = -EBUSY;
1661                 goto err;
1662         }
1663         req = embedded_payload(wrb);
1664
1665         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1666                 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
1667
1668         req->num_eq = cpu_to_le32(1);
1669         req->delay[0].eq_id = cpu_to_le32(eq_id);
1670         req->delay[0].phase = 0;
1671         req->delay[0].delay_multiplier = cpu_to_le32(eqd);
1672
1673         be_mcc_notify(adapter);
1674
1675 err:
1676         spin_unlock_bh(&adapter->mcc_lock);
1677         return status;
1678 }
1679
/* Programs the VLAN filter table of interface @if_id with @num tags from
 * @vtag_array.  Uses synchronous mcc.
 * NOTE(review): interface_id/num_vlan are written without cpu_to_le
 * conversion, unlike other cmds in this file — confirm the f/w expects
 * them in CPU byte-order.
 */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
			u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		/* the tag list only matters when not vlan-promiscuous */
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1715
/* Programs the RX filter of the adapter's interface from netdev @flags:
 * promiscuous, all-multicast, or an explicit multicast list.
 * Uses the pre-allocated non-embedded buffer adapter->rx_filter.
 * Uses synchronous mcc.
 */
int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
	struct be_mcc_wrb *wrb;
	struct be_dma_mem *mem = &adapter->rx_filter;
	struct be_cmd_req_rx_filter *req = mem->va;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	/* the request lives in persistent DMA memory; clear stale contents */
	memset(req, 0, sizeof(*req));
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
				wrb, mem);

	req->if_id = cpu_to_le32(adapter->if_handle);
	if (flags & IFF_PROMISC) {
		/* mask selects which flags are modified; if_flags holds the
		 * new values — so mask-without-flag clears a mode
		 */
		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS);
		if (value == ON)
			req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
						BE_IF_FLAGS_VLAN_PROMISCUOUS);
	} else if (flags & IFF_ALLMULTI) {
		req->if_flags_mask = req->if_flags =
				cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
	} else {
		struct netdev_hw_addr *ha;
		int i = 0;

		req->if_flags_mask = req->if_flags =
				cpu_to_le32(BE_IF_FLAGS_MULTICAST);

		/* Reset mcast promisc mode if already set by setting mask
		 * and not setting flags field
		 */
		req->if_flags_mask |=
			cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
				    adapter->if_cap_flags);

		req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
		netdev_for_each_mc_addr(ha, adapter->netdev)
			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
	}

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1769
/* Uses synchronous mcc */
/* Programs TX/RX pause (flow-control) settings. Only the low 16 bits of
 * tx_fc/rx_fc are sent to the firmware.
 * Returns 0 on success, -EPERM if this function lacks the privilege,
 * -EBUSY if no MCC WRB is available, or the MCC completion status.
 */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_set_flow_control *req;
        int status;

        /* Privilege-gated: see cmd_priv_map at the top of this file */
        if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
                            CMD_SUBSYSTEM_COMMON))
                return -EPERM;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);

        req->tx_flow_control = cpu_to_le16((u16)tx_fc);
        req->rx_flow_control = cpu_to_le16((u16)rx_fc);

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1802
/* Uses sync mcc */
/* Reads current TX/RX pause (flow-control) settings into *tx_fc/*rx_fc.
 * The outputs are written only on success.
 * Returns 0 on success, -EPERM if this function lacks the privilege,
 * -EBUSY if no MCC WRB is available, or the MCC completion status.
 */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_flow_control *req;
        int status;

        /* Privilege-gated: see cmd_priv_map at the top of this file */
        if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
                            CMD_SUBSYSTEM_COMMON))
                return -EPERM;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                /* Response overlays the embedded payload of the same WRB */
                struct be_cmd_resp_get_flow_control *resp =
                                                embedded_payload(wrb);
                *tx_fc = le16_to_cpu(resp->tx_flow_control);
                *rx_fc = le16_to_cpu(resp->rx_flow_control);
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1838
/* Uses mbox */
/* Queries the firmware configuration: physical port number, function mode
 * and function capabilities. Outputs are written only on success.
 * Returns 0 on success, -1 if interrupted while waiting for the mbox lock
 * (file-wide convention), or the mbox completion status.
 */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
                u32 *mode, u32 *caps)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_query_fw_cfg *req;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
                *port_num = le32_to_cpu(resp->phys_port);
                *mode = le32_to_cpu(resp->function_mode);
                *caps = le32_to_cpu(resp->function_caps);
        }

        mutex_unlock(&adapter->mbox_lock);
        return status;
}
1867
/* Uses mbox */
/* Resets the PCI function. Lancer chips are reset via SLIPORT registers
 * instead of a mailbox command; other chips use FUNCTION_RESET over mbox.
 */
int be_cmd_reset_function(struct be_adapter *adapter)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_hdr *req;
        int status;

        if (lancer_chip(adapter)) {
                /* Wait for the port, request an "initialize port" reset
                 * through the SLIPORT control register, then wait for the
                 * ready state to be re-established.
                 */
                status = lancer_wait_ready(adapter);
                if (!status) {
                        iowrite32(SLI_PORT_CONTROL_IP_MASK,
                                  adapter->db + SLIPORT_CONTROL_OFFSET);
                        status = lancer_test_and_set_rdy_state(adapter);
                }
                if (status) {
                        dev_err(&adapter->pdev->dev,
                                "Adapter in non recoverable error\n");
                }
                return status;
        }

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        /* FUNCTION_RESET carries no payload beyond the common header */
        be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);

        status = be_mbox_notify_wait(adapter);

        mutex_unlock(&adapter->mbox_lock);
        return status;
}
1903
/* Programs RSS: enabled hash types, CPU indirection table and hash key.
 * Uses mbox. Returns 0 on success, -1 if interrupted on the mbox lock,
 * or the mbox completion status.
 */
int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_rss_config *req;
        /* Fixed 40-byte (10 dword) RSS hash key */
        u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
                        0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
                        0x3ea83c02, 0x4a110304};
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);

        req->if_id = cpu_to_le32(adapter->if_handle);
        req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
                                      RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6);

        /* UDP RSS needs v1 of the command; enabled here only for
         * Lancer/Skyhawk chips.
         */
        if (lancer_chip(adapter) || skyhawk_chip(adapter)) {
                req->hdr.version = 1;
                req->enable_rss |= cpu_to_le16(RSS_ENABLE_UDP_IPV4 |
                                               RSS_ENABLE_UDP_IPV6);
        }

        /* log2 of the table size via fls(); NOTE(review): assumes
         * table_size is a power of two and fits within req->cpu_table --
         * TODO confirm against callers.
         */
        req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
        memcpy(req->cpu_table, rsstable, table_size);
        memcpy(req->hash, myhash, sizeof(myhash));
        be_dws_cpu_to_le(req->hash, sizeof(req->hash));

        status = be_mbox_notify_wait(adapter);

        mutex_unlock(&adapter->mbox_lock);
        return status;
}
1942
1943 /* Uses sync mcc */
1944 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
1945                         u8 bcn, u8 sts, u8 state)
1946 {
1947         struct be_mcc_wrb *wrb;
1948         struct be_cmd_req_enable_disable_beacon *req;
1949         int status;
1950
1951         spin_lock_bh(&adapter->mcc_lock);
1952
1953         wrb = wrb_from_mccq(adapter);
1954         if (!wrb) {
1955                 status = -EBUSY;
1956                 goto err;
1957         }
1958         req = embedded_payload(wrb);
1959
1960         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1961                 OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);
1962
1963         req->port_num = port_num;
1964         req->beacon_state = state;
1965         req->beacon_duration = bcn;
1966         req->status_duration = sts;
1967
1968         status = be_mcc_notify_wait(adapter);
1969
1970 err:
1971         spin_unlock_bh(&adapter->mcc_lock);
1972         return status;
1973 }
1974
/* Uses sync mcc */
/* Reads the beacon LED state of the given port into *state (written only
 * on success). Returns 0 on success, -EBUSY if no MCC WRB is available,
 * or the MCC completion status.
 */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_beacon_state *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);

        req->port_num = port_num;

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_beacon_state *resp =
                                                embedded_payload(wrb);
                /* NOTE(review): no endian conversion here; presumably
                 * beacon_state is a single-byte field -- confirm in
                 * be_cmds.h.
                 */
                *state = resp->beacon_state;
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
2007
/* Writes one chunk of a flash object on Lancer chips. The data to write
 * is expected directly after the request header inside the caller's DMA
 * buffer (cmd). data_size == 0 marks end-of-file. The MCC completion is
 * delivered asynchronously via adapter->flash_compl; this waits up to 30s
 * for it. On success *data_written and *change_status are filled in;
 * on failure *addn_status is.
 */
int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
                            u32 data_size, u32 data_offset,
                            const char *obj_name, u32 *data_written,
                            u8 *change_status, u8 *addn_status)
{
        struct be_mcc_wrb *wrb;
        struct lancer_cmd_req_write_object *req;
        struct lancer_cmd_resp_write_object *resp;
        void *ctxt = NULL;
        int status;

        spin_lock_bh(&adapter->mcc_lock);
        adapter->flash_status = 0;

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err_unlock;
        }

        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                                OPCODE_COMMON_WRITE_OBJECT,
                                sizeof(struct lancer_cmd_req_write_object), wrb,
                                NULL);

        ctxt = &req->context;
        AMAP_SET_BITS(struct amap_lancer_write_obj_context,
                        write_length, ctxt, data_size);

        /* A zero-length write signals EOF to the firmware */
        if (data_size == 0)
                AMAP_SET_BITS(struct amap_lancer_write_obj_context,
                                eof, ctxt, 1);
        else
                AMAP_SET_BITS(struct amap_lancer_write_obj_context,
                                eof, ctxt, 0);

        be_dws_cpu_to_le(ctxt, sizeof(req->context));
        req->write_offset = cpu_to_le32(data_offset);
        /* NOTE(review): unbounded strcpy into req->object_name; assumes
         * obj_name is a short driver-internal constant -- TODO confirm.
         */
        strcpy(req->object_name, obj_name);
        req->descriptor_count = cpu_to_le32(1);
        req->buf_len = cpu_to_le32(data_size);
        /* The data follows the request structure in the DMA buffer */
        req->addr_low = cpu_to_le32((cmd->dma +
                                sizeof(struct lancer_cmd_req_write_object))
                                & 0xFFFFFFFF);
        req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
                                sizeof(struct lancer_cmd_req_write_object)));

        /* Post without waiting; completion arrives via flash_compl */
        be_mcc_notify(adapter);
        spin_unlock_bh(&adapter->mcc_lock);

        if (!wait_for_completion_timeout(&adapter->flash_compl,
                                         msecs_to_jiffies(30000)))
                status = -1;
        else
                status = adapter->flash_status;

        /* NOTE(review): on timeout (status == -1) the WRB payload read
         * below may not have been written by the firmware yet -- the
         * *addn_status value is then stale; confirm intended behavior.
         */
        resp = embedded_payload(wrb);
        if (!status) {
                *data_written = le32_to_cpu(resp->actual_write_len);
                *change_status = resp->change_status;
        } else {
                *addn_status = resp->additional_status;
        }

        return status;

err_unlock:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
2080
/* Reads one chunk of a flash object on Lancer chips into the caller's DMA
 * buffer (cmd). Uses synchronous MCC. On success *data_read and *eof are
 * filled in; on failure *addn_status is.
 */
int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
                u32 data_size, u32 data_offset, const char *obj_name,
                u32 *data_read, u32 *eof, u8 *addn_status)
{
        struct be_mcc_wrb *wrb;
        struct lancer_cmd_req_read_object *req;
        struct lancer_cmd_resp_read_object *resp;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err_unlock;
        }

        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                        OPCODE_COMMON_READ_OBJECT,
                        sizeof(struct lancer_cmd_req_read_object), wrb,
                        NULL);

        req->desired_read_len = cpu_to_le32(data_size);
        req->read_offset = cpu_to_le32(data_offset);
        /* NOTE(review): unbounded strcpy into req->object_name; assumes
         * obj_name is a short driver-internal constant -- TODO confirm.
         */
        strcpy(req->object_name, obj_name);
        req->descriptor_count = cpu_to_le32(1);
        req->buf_len = cpu_to_le32(data_size);
        /* Data is DMA'd straight into the caller's buffer */
        req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
        req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));

        status = be_mcc_notify_wait(adapter);

        /* Response overlays the embedded payload of the same WRB */
        resp = embedded_payload(wrb);
        if (!status) {
                *data_read = le32_to_cpu(resp->actual_read_len);
                *eof = le32_to_cpu(resp->eof);
        } else {
                *addn_status = resp->additional_status;
        }

err_unlock:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
2127
/* Issues a WRITE_FLASHROM command whose payload lives in the caller's
 * non-embedded DMA buffer (cmd). The MCC completion is delivered
 * asynchronously via adapter->flash_compl; this waits up to 40s for it.
 * Returns -EBUSY if no WRB is available, -1 on completion timeout, or the
 * asynchronous flash status otherwise.
 */
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
                        u32 flash_type, u32 flash_opcode, u32 buf_size)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_write_flashrom *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);
        adapter->flash_status = 0;

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err_unlock;
        }
        req = cmd->va;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);

        req->params.op_type = cpu_to_le32(flash_type);
        req->params.op_code = cpu_to_le32(flash_opcode);
        req->params.data_buf_size = cpu_to_le32(buf_size);

        /* Post without waiting; completion arrives via flash_compl */
        be_mcc_notify(adapter);
        spin_unlock_bh(&adapter->mcc_lock);

        if (!wait_for_completion_timeout(&adapter->flash_compl,
                        msecs_to_jiffies(40000)))
                status = -1;
        else
                status = adapter->flash_status;

        return status;

err_unlock:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
2167
/* Reads the 4-byte CRC stored at 'offset' in the redboot flash region into
 * flashed_crc. Uses synchronous MCC.
 */
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
                         int offset)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_read_flash_crc *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
                               wrb, NULL);

        req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
        req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
        req->params.offset = cpu_to_le32(offset);
        req->params.data_buf_size = cpu_to_le32(0x4);

        status = be_mcc_notify_wait(adapter);
        if (!status)
                /* The response overlays the embedded request payload, so the
                 * returned CRC is read back through req->crc.
                 */
                memcpy(flashed_crc, req->crc, 4);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
2201
/* Enables magic-packet Wake-on-LAN for the given MAC address. The request
 * payload lives in the caller's non-embedded DMA buffer. Uses synchronous
 * MCC.
 */
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
                                struct be_dma_mem *nonemb_cmd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_acpi_wol_magic_config *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = nonemb_cmd->va;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
                nonemb_cmd);
        memcpy(req->magic_mac, mac, ETH_ALEN);

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
2229
2230 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2231                         u8 loopback_type, u8 enable)
2232 {
2233         struct be_mcc_wrb *wrb;
2234         struct be_cmd_req_set_lmode *req;
2235         int status;
2236
2237         spin_lock_bh(&adapter->mcc_lock);
2238
2239         wrb = wrb_from_mccq(adapter);
2240         if (!wrb) {
2241                 status = -EBUSY;
2242                 goto err;
2243         }
2244
2245         req = embedded_payload(wrb);
2246
2247         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2248                         OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
2249                         NULL);
2250
2251         req->src_port = port_num;
2252         req->dest_port = port_num;
2253         req->loopback_type = loopback_type;
2254         req->loopback_state = enable;
2255
2256         status = be_mcc_notify_wait(adapter);
2257 err:
2258         spin_unlock_bh(&adapter->mcc_lock);
2259         return status;
2260 }
2261
/* Runs a firmware loopback packet test: num_pkts packets of pkt_size bytes
 * filled with 'pattern'. Uses synchronous MCC. On MCC success the return
 * value is the firmware-reported test status from the response.
 */
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
                u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_loopback_test *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
                        OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
        /* Per-command firmware timeout; units per FW spec -- presumably
         * seconds, TODO confirm.
         */
        req->hdr.timeout = cpu_to_le32(4);

        req->pattern = cpu_to_le64(pattern);
        req->src_port = cpu_to_le32(port_num);
        req->dest_port = cpu_to_le32(port_num);
        req->pkt_size = cpu_to_le32(pkt_size);
        req->num_pkts = cpu_to_le32(num_pkts);
        req->loopback_type = cpu_to_le32(loopback_type);

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
                status = le32_to_cpu(resp->status);
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
2300
/* DDR DMA self-test: fills the send buffer with 'pattern' repeated
 * byte-by-byte, asks the firmware to DMA it, then verifies the received
 * buffer matches. Uses synchronous MCC; the request/response share the
 * caller's DMA buffer (cmd). Returns -1 if the echoed data mismatches or
 * the firmware reports a send error.
 */
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
                                u32 byte_cnt, struct be_dma_mem *cmd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_ddrdma_test *req;
        int status;
        int i, j = 0;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = cmd->va;
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
                        OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);

        req->pattern = cpu_to_le64(pattern);
        req->byte_count = cpu_to_le32(byte_cnt);
        /* Expand the 64-bit pattern byte-by-byte, cycling every 8 bytes */
        for (i = 0; i < byte_cnt; i++) {
                req->snd_buff[i] = (u8)(pattern >> (j*8));
                j++;
                if (j > 7)
                        j = 0;
        }

        status = be_mcc_notify_wait(adapter);

        if (!status) {
                struct be_cmd_resp_ddrdma_test *resp;
                resp = cmd->va;
                if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
                                resp->snd_err) {
                        status = -1;
                }
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
2344
2345 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2346                                 struct be_dma_mem *nonemb_cmd)
2347 {
2348         struct be_mcc_wrb *wrb;
2349         struct be_cmd_req_seeprom_read *req;
2350         struct be_sge *sge;
2351         int status;
2352
2353         spin_lock_bh(&adapter->mcc_lock);
2354
2355         wrb = wrb_from_mccq(adapter);
2356         if (!wrb) {
2357                 status = -EBUSY;
2358                 goto err;
2359         }
2360         req = nonemb_cmd->va;
2361         sge = nonembedded_sgl(wrb);
2362
2363         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2364                         OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2365                         nonemb_cmd);
2366
2367         status = be_mcc_notify_wait(adapter);
2368
2369 err:
2370         spin_unlock_bh(&adapter->mcc_lock);
2371         return status;
2372 }
2373
2374 int be_cmd_get_phy_info(struct be_adapter *adapter)
2375 {
2376         struct be_mcc_wrb *wrb;
2377         struct be_cmd_req_get_phy_info *req;
2378         struct be_dma_mem cmd;
2379         int status;
2380
2381         if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
2382                             CMD_SUBSYSTEM_COMMON))
2383                 return -EPERM;
2384
2385         spin_lock_bh(&adapter->mcc_lock);
2386
2387         wrb = wrb_from_mccq(adapter);
2388         if (!wrb) {
2389                 status = -EBUSY;
2390                 goto err;
2391         }
2392         cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2393         cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2394                                         &cmd.dma);
2395         if (!cmd.va) {
2396                 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2397                 status = -ENOMEM;
2398                 goto err;
2399         }
2400
2401         req = cmd.va;
2402
2403         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2404                         OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2405                         wrb, &cmd);
2406
2407         status = be_mcc_notify_wait(adapter);
2408         if (!status) {
2409                 struct be_phy_info *resp_phy_info =
2410                                 cmd.va + sizeof(struct be_cmd_req_hdr);
2411                 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2412                 adapter->phy.interface_type =
2413                         le16_to_cpu(resp_phy_info->interface_type);
2414                 adapter->phy.auto_speeds_supported =
2415                         le16_to_cpu(resp_phy_info->auto_speeds_supported);
2416                 adapter->phy.fixed_speeds_supported =
2417                         le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2418                 adapter->phy.misc_params =
2419                         le32_to_cpu(resp_phy_info->misc_params);
2420         }
2421         pci_free_consistent(adapter->pdev, cmd.size,
2422                                 cmd.va, cmd.dma);
2423 err:
2424         spin_unlock_bh(&adapter->mcc_lock);
2425         return status;
2426 }
2427
2428 int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2429 {
2430         struct be_mcc_wrb *wrb;
2431         struct be_cmd_req_set_qos *req;
2432         int status;
2433
2434         spin_lock_bh(&adapter->mcc_lock);
2435
2436         wrb = wrb_from_mccq(adapter);
2437         if (!wrb) {
2438                 status = -EBUSY;
2439                 goto err;
2440         }
2441
2442         req = embedded_payload(wrb);
2443
2444         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2445                         OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
2446
2447         req->hdr.domain = domain;
2448         req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2449         req->max_bps_nic = cpu_to_le32(bps);
2450
2451         status = be_mcc_notify_wait(adapter);
2452
2453 err:
2454         spin_unlock_bh(&adapter->mcc_lock);
2455         return status;
2456 }
2457
2458 int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2459 {
2460         struct be_mcc_wrb *wrb;
2461         struct be_cmd_req_cntl_attribs *req;
2462         struct be_cmd_resp_cntl_attribs *resp;
2463         int status;
2464         int payload_len = max(sizeof(*req), sizeof(*resp));
2465         struct mgmt_controller_attrib *attribs;
2466         struct be_dma_mem attribs_cmd;
2467
2468         memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2469         attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2470         attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2471                                                 &attribs_cmd.dma);
2472         if (!attribs_cmd.va) {
2473                 dev_err(&adapter->pdev->dev,
2474                                 "Memory allocation failure\n");
2475                 return -ENOMEM;
2476         }
2477
2478         if (mutex_lock_interruptible(&adapter->mbox_lock))
2479                 return -1;
2480
2481         wrb = wrb_from_mbox(adapter);
2482         if (!wrb) {
2483                 status = -EBUSY;
2484                 goto err;
2485         }
2486         req = attribs_cmd.va;
2487
2488         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2489                          OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
2490                         &attribs_cmd);
2491
2492         status = be_mbox_notify_wait(adapter);
2493         if (!status) {
2494                 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
2495                 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2496         }
2497
2498 err:
2499         mutex_unlock(&adapter->mbox_lock);
2500         pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
2501                                         attribs_cmd.dma);
2502         return status;
2503 }
2504
/* Uses mbox */
/* Requests the BE3 "native" (advanced) ERX receive mode from the firmware
 * and records whether it was granted in adapter->be3_native.
 */
int be_cmd_req_native_mode(struct be_adapter *adapter)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_set_func_cap *req;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);

        /* Advertise which capability bits are meaningful, then request
         * only the native ERX API.
         */
        req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
                                CAPABILITY_BE3_NATIVE_ERX_API);
        req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
                /* Firmware echoes back the capabilities it granted */
                adapter->be3_native = le32_to_cpu(resp->cap_flags) &
                                        CAPABILITY_BE3_NATIVE_ERX_API;
                if (!adapter->be3_native)
                        dev_warn(&adapter->pdev->dev,
                                 "adapter not in advanced mode\n");
        }
err:
        mutex_unlock(&adapter->mbox_lock);
        return status;
}
2543
/* Get privilege(s) for a function */
/* Reads the privilege mask for the PCI function in 'domain' into
 * *privilege (written only on success). Uses synchronous MCC.
 */
int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
                             u32 domain)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_fn_privileges *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
                               wrb, NULL);

        req->hdr.domain = domain;

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_fn_privileges *resp =
                                                embedded_payload(wrb);
                *privilege = le32_to_cpu(resp->privilege_mask);
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
2579
/* Uses synchronous MCCQ */
/* Fetches the MAC list for 'domain' and returns either the first active
 * mac_id (via *pmac_id, *pmac_id_active = true) or, if none is active, the
 * first permanent MAC address (via mac[], *pmac_id_active = false).
 * Allocates a temporary DMA buffer for the response; it is always freed
 * before returning.
 */
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
                             bool *pmac_id_active, u32 *pmac_id, u8 domain)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_mac_list *req;
        int status;
        int mac_count;
        struct be_dma_mem get_mac_list_cmd;
        int i;

        memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
        get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
        get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
                        get_mac_list_cmd.size,
                        &get_mac_list_cmd.dma);

        if (!get_mac_list_cmd.va) {
                dev_err(&adapter->pdev->dev,
                                "Memory allocation failure during GET_MAC_LIST\n");
                return -ENOMEM;
        }

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto out;
        }

        req = get_mac_list_cmd.va;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                                OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
                                wrb, &get_mac_list_cmd);

        req->hdr.domain = domain;
        req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
        /* Include the permanent MAC in the returned list */
        req->perm_override = 1;

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_mac_list *resp =
                                                get_mac_list_cmd.va;
                mac_count = resp->true_mac_count + resp->pseudo_mac_count;
                /* Mac list returned could contain one or more active mac_ids
                 * or one or more true or pseudo permanant mac addresses.
                 * If an active mac_id is present, return first active mac_id
                 * found.
                 */
                for (i = 0; i < mac_count; i++) {
                        struct get_list_macaddr *mac_entry;
                        u16 mac_addr_size;
                        u32 mac_id;

                        mac_entry = &resp->macaddr_list[i];
                        mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
                        /* mac_id is a 32 bit value and mac_addr size
                         * is 6 bytes
                         */
                        if (mac_addr_size == sizeof(u32)) {
                                *pmac_id_active = true;
                                mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
                                *pmac_id = le32_to_cpu(mac_id);
                                goto out;
                        }
                }
                /* If no active mac_id found, return first mac addr */
                *pmac_id_active = false;
                memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
                                                                ETH_ALEN);
        }

out:
        /* out is reached from success, -EBUSY and command-failure paths;
         * the DMA buffer is freed in all cases.
         */
        spin_unlock_bh(&adapter->mcc_lock);
        pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
                        get_mac_list_cmd.va, get_mac_list_cmd.dma);
        return status;
}
2660
2661 /* Uses synchronous MCCQ */
2662 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
2663                         u8 mac_count, u32 domain)
2664 {
2665         struct be_mcc_wrb *wrb;
2666         struct be_cmd_req_set_mac_list *req;
2667         int status;
2668         struct be_dma_mem cmd;
2669
2670         memset(&cmd, 0, sizeof(struct be_dma_mem));
2671         cmd.size = sizeof(struct be_cmd_req_set_mac_list);
2672         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
2673                         &cmd.dma, GFP_KERNEL);
2674         if (!cmd.va) {
2675                 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2676                 return -ENOMEM;
2677         }
2678
2679         spin_lock_bh(&adapter->mcc_lock);
2680
2681         wrb = wrb_from_mccq(adapter);
2682         if (!wrb) {
2683                 status = -EBUSY;
2684                 goto err;
2685         }
2686
2687         req = cmd.va;
2688         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2689                                 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
2690                                 wrb, &cmd);
2691
2692         req->hdr.domain = domain;
2693         req->mac_count = mac_count;
2694         if (mac_count)
2695                 memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
2696
2697         status = be_mcc_notify_wait(adapter);
2698
2699 err:
2700         dma_free_coherent(&adapter->pdev->dev, cmd.size,
2701                                 cmd.va, cmd.dma);
2702         spin_unlock_bh(&adapter->mcc_lock);
2703         return status;
2704 }
2705
2706 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
2707                         u32 domain, u16 intf_id)
2708 {
2709         struct be_mcc_wrb *wrb;
2710         struct be_cmd_req_set_hsw_config *req;
2711         void *ctxt;
2712         int status;
2713
2714         spin_lock_bh(&adapter->mcc_lock);
2715
2716         wrb = wrb_from_mccq(adapter);
2717         if (!wrb) {
2718                 status = -EBUSY;
2719                 goto err;
2720         }
2721
2722         req = embedded_payload(wrb);
2723         ctxt = &req->context;
2724
2725         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2726                         OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2727
2728         req->hdr.domain = domain;
2729         AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
2730         if (pvid) {
2731                 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
2732                 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
2733         }
2734
2735         be_dws_cpu_to_le(req->context, sizeof(req->context));
2736         status = be_mcc_notify_wait(adapter);
2737
2738 err:
2739         spin_unlock_bh(&adapter->mcc_lock);
2740         return status;
2741 }
2742
2743 /* Get Hyper switch config */
2744 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
2745                         u32 domain, u16 intf_id)
2746 {
2747         struct be_mcc_wrb *wrb;
2748         struct be_cmd_req_get_hsw_config *req;
2749         void *ctxt;
2750         int status;
2751         u16 vid;
2752
2753         spin_lock_bh(&adapter->mcc_lock);
2754
2755         wrb = wrb_from_mccq(adapter);
2756         if (!wrb) {
2757                 status = -EBUSY;
2758                 goto err;
2759         }
2760
2761         req = embedded_payload(wrb);
2762         ctxt = &req->context;
2763
2764         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2765                         OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2766
2767         req->hdr.domain = domain;
2768         AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt,
2769                                                                 intf_id);
2770         AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
2771         be_dws_cpu_to_le(req->context, sizeof(req->context));
2772
2773         status = be_mcc_notify_wait(adapter);
2774         if (!status) {
2775                 struct be_cmd_resp_get_hsw_config *resp =
2776                                                 embedded_payload(wrb);
2777                 be_dws_le_to_cpu(&resp->context,
2778                                                 sizeof(resp->context));
2779                 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
2780                                                         pvid, &resp->context);
2781                 *pvid = le16_to_cpu(vid);
2782         }
2783
2784 err:
2785         spin_unlock_bh(&adapter->mcc_lock);
2786         return status;
2787 }
2788
2789 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
2790 {
2791         struct be_mcc_wrb *wrb;
2792         struct be_cmd_req_acpi_wol_magic_config_v1 *req;
2793         int status;
2794         int payload_len = sizeof(*req);
2795         struct be_dma_mem cmd;
2796
2797         if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
2798                             CMD_SUBSYSTEM_ETH))
2799                 return -EPERM;
2800
2801         memset(&cmd, 0, sizeof(struct be_dma_mem));
2802         cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
2803         cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2804                                                &cmd.dma);
2805         if (!cmd.va) {
2806                 dev_err(&adapter->pdev->dev,
2807                                 "Memory allocation failure\n");
2808                 return -ENOMEM;
2809         }
2810
2811         if (mutex_lock_interruptible(&adapter->mbox_lock))
2812                 return -1;
2813
2814         wrb = wrb_from_mbox(adapter);
2815         if (!wrb) {
2816                 status = -EBUSY;
2817                 goto err;
2818         }
2819
2820         req = cmd.va;
2821
2822         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2823                                OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
2824                                payload_len, wrb, &cmd);
2825
2826         req->hdr.version = 1;
2827         req->query_options = BE_GET_WOL_CAP;
2828
2829         status = be_mbox_notify_wait(adapter);
2830         if (!status) {
2831                 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
2832                 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;
2833
2834                 /* the command could succeed misleadingly on old f/w
2835                  * which is not aware of the V1 version. fake an error. */
2836                 if (resp->hdr.response_length < payload_len) {
2837                         status = -1;
2838                         goto err;
2839                 }
2840                 adapter->wol_cap = resp->wol_settings;
2841         }
2842 err:
2843         mutex_unlock(&adapter->mbox_lock);
2844         pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2845         return status;
2846
2847 }
2848 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
2849                                    struct be_dma_mem *cmd)
2850 {
2851         struct be_mcc_wrb *wrb;
2852         struct be_cmd_req_get_ext_fat_caps *req;
2853         int status;
2854
2855         if (mutex_lock_interruptible(&adapter->mbox_lock))
2856                 return -1;
2857
2858         wrb = wrb_from_mbox(adapter);
2859         if (!wrb) {
2860                 status = -EBUSY;
2861                 goto err;
2862         }
2863
2864         req = cmd->va;
2865         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2866                                OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
2867                                cmd->size, wrb, cmd);
2868         req->parameter_type = cpu_to_le32(1);
2869
2870         status = be_mbox_notify_wait(adapter);
2871 err:
2872         mutex_unlock(&adapter->mbox_lock);
2873         return status;
2874 }
2875
2876 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
2877                                    struct be_dma_mem *cmd,
2878                                    struct be_fat_conf_params *configs)
2879 {
2880         struct be_mcc_wrb *wrb;
2881         struct be_cmd_req_set_ext_fat_caps *req;
2882         int status;
2883
2884         spin_lock_bh(&adapter->mcc_lock);
2885
2886         wrb = wrb_from_mccq(adapter);
2887         if (!wrb) {
2888                 status = -EBUSY;
2889                 goto err;
2890         }
2891
2892         req = cmd->va;
2893         memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
2894         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2895                                OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
2896                                cmd->size, wrb, cmd);
2897
2898         status = be_mcc_notify_wait(adapter);
2899 err:
2900         spin_unlock_bh(&adapter->mcc_lock);
2901         return status;
2902 }
2903
2904 int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
2905 {
2906         struct be_mcc_wrb *wrb;
2907         struct be_cmd_req_get_port_name *req;
2908         int status;
2909
2910         if (!lancer_chip(adapter)) {
2911                 *port_name = adapter->hba_port_num + '0';
2912                 return 0;
2913         }
2914
2915         spin_lock_bh(&adapter->mcc_lock);
2916
2917         wrb = wrb_from_mccq(adapter);
2918         if (!wrb) {
2919                 status = -EBUSY;
2920                 goto err;
2921         }
2922
2923         req = embedded_payload(wrb);
2924
2925         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2926                                OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
2927                                NULL);
2928         req->hdr.version = 1;
2929
2930         status = be_mcc_notify_wait(adapter);
2931         if (!status) {
2932                 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
2933                 *port_name = resp->port_name[adapter->hba_port_num];
2934         } else {
2935                 *port_name = adapter->hba_port_num + '0';
2936         }
2937 err:
2938         spin_unlock_bh(&adapter->mcc_lock);
2939         return status;
2940 }
2941
/* Walk the resource-descriptor list the fw returned in 'buf' and return
 * the first NIC resource descriptor, or NULL if none is found within
 * 'max_buf_size' bytes (or within MAX_RESOURCE_DESC entries).
 *
 * NOTE(review): desc->desc_len is *overwritten* with RESOURCE_DESC_SIZE
 * before it is ever read, i.e. the length reported by the fw is ignored
 * and a fixed stride is assumed -- confirm this matches the fw layout.
 */
static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
                                                    u32 max_buf_size)
{
        struct be_nic_resource_desc *desc = (struct be_nic_resource_desc *)buf;
        int i;

        for (i = 0; i < desc_count; i++) {
                desc->desc_len = RESOURCE_DESC_SIZE;
                /* Stop before reading a descriptor that would run past
                 * the end of the response buffer.
                 */
                if (((void *)desc + desc->desc_len) >
                    (void *)(buf + max_buf_size)) {
                        desc = NULL;
                        break;
                }

                if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_ID)
                        break;

                /* Advance by the fixed stride set above */
                desc = (void *)desc + desc->desc_len;
        }

        /* NOTE(review): if the loop exhausts desc_count entries without a
         * match (and desc_count != MAX_RESOURCE_DESC), 'desc' points one
         * past the last entry yet is still returned -- verify callers
         * tolerate this or that fw always includes a NIC descriptor.
         */
        if (!desc || i == MAX_RESOURCE_DESC)
                return NULL;

        return desc;
}
2967
2968 /* Uses Mbox */
2969 int be_cmd_get_func_config(struct be_adapter *adapter)
2970 {
2971         struct be_mcc_wrb *wrb;
2972         struct be_cmd_req_get_func_config *req;
2973         int status;
2974         struct be_dma_mem cmd;
2975
2976         memset(&cmd, 0, sizeof(struct be_dma_mem));
2977         cmd.size = sizeof(struct be_cmd_resp_get_func_config);
2978         cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2979                                       &cmd.dma);
2980         if (!cmd.va) {
2981                 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2982                 return -ENOMEM;
2983         }
2984         if (mutex_lock_interruptible(&adapter->mbox_lock))
2985                 return -1;
2986
2987         wrb = wrb_from_mbox(adapter);
2988         if (!wrb) {
2989                 status = -EBUSY;
2990                 goto err;
2991         }
2992
2993         req = cmd.va;
2994
2995         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2996                                OPCODE_COMMON_GET_FUNC_CONFIG,
2997                                cmd.size, wrb, &cmd);
2998
2999         status = be_mbox_notify_wait(adapter);
3000         if (!status) {
3001                 struct be_cmd_resp_get_func_config *resp = cmd.va;
3002                 u32 desc_count = le32_to_cpu(resp->desc_count);
3003                 struct be_nic_resource_desc *desc;
3004
3005                 desc = be_get_nic_desc(resp->func_param, desc_count,
3006                                        sizeof(resp->func_param));
3007                 if (!desc) {
3008                         status = -EINVAL;
3009                         goto err;
3010                 }
3011
3012                 adapter->pf_number = desc->pf_num;
3013                 adapter->max_pmac_cnt = le16_to_cpu(desc->unicast_mac_count);
3014                 adapter->max_vlans = le16_to_cpu(desc->vlan_count);
3015                 adapter->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
3016                 adapter->max_tx_queues = le16_to_cpu(desc->txq_count);
3017                 adapter->max_rss_queues = le16_to_cpu(desc->rssq_count);
3018                 adapter->max_rx_queues = le16_to_cpu(desc->rq_count);
3019
3020                 adapter->max_event_queues = le16_to_cpu(desc->eq_count);
3021                 adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
3022         }
3023 err:
3024         mutex_unlock(&adapter->mbox_lock);
3025         pci_free_consistent(adapter->pdev, cmd.size,
3026                             cmd.va, cmd.dma);
3027         return status;
3028 }
3029
3030  /* Uses sync mcc */
3031 int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
3032                               u8 domain)
3033 {
3034         struct be_mcc_wrb *wrb;
3035         struct be_cmd_req_get_profile_config *req;
3036         int status;
3037         struct be_dma_mem cmd;
3038
3039         memset(&cmd, 0, sizeof(struct be_dma_mem));
3040         cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3041         cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
3042                                       &cmd.dma);
3043         if (!cmd.va) {
3044                 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3045                 return -ENOMEM;
3046         }
3047
3048         spin_lock_bh(&adapter->mcc_lock);
3049
3050         wrb = wrb_from_mccq(adapter);
3051         if (!wrb) {
3052                 status = -EBUSY;
3053                 goto err;
3054         }
3055
3056         req = cmd.va;
3057
3058         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3059                                OPCODE_COMMON_GET_PROFILE_CONFIG,
3060                                cmd.size, wrb, &cmd);
3061
3062         req->type = ACTIVE_PROFILE_TYPE;
3063         req->hdr.domain = domain;
3064
3065         status = be_mcc_notify_wait(adapter);
3066         if (!status) {
3067                 struct be_cmd_resp_get_profile_config *resp = cmd.va;
3068                 u32 desc_count = le32_to_cpu(resp->desc_count);
3069                 struct be_nic_resource_desc *desc;
3070
3071                 desc = be_get_nic_desc(resp->func_param, desc_count,
3072                                        sizeof(resp->func_param));
3073
3074                 if (!desc) {
3075                         status = -EINVAL;
3076                         goto err;
3077                 }
3078                 *cap_flags = le32_to_cpu(desc->cap_flags);
3079         }
3080 err:
3081         spin_unlock_bh(&adapter->mcc_lock);
3082         pci_free_consistent(adapter->pdev, cmd.size,
3083                             cmd.va, cmd.dma);
3084         return status;
3085 }
3086
3087 /* Uses sync mcc */
3088 int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
3089                               u8 domain)
3090 {
3091         struct be_mcc_wrb *wrb;
3092         struct be_cmd_req_set_profile_config *req;
3093         int status;
3094
3095         spin_lock_bh(&adapter->mcc_lock);
3096
3097         wrb = wrb_from_mccq(adapter);
3098         if (!wrb) {
3099                 status = -EBUSY;
3100                 goto err;
3101         }
3102
3103         req = embedded_payload(wrb);
3104
3105         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3106                                OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
3107                                wrb, NULL);
3108
3109         req->hdr.domain = domain;
3110         req->desc_count = cpu_to_le32(1);
3111
3112         req->nic_desc.desc_type = NIC_RESOURCE_DESC_TYPE_ID;
3113         req->nic_desc.desc_len = RESOURCE_DESC_SIZE;
3114         req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
3115         req->nic_desc.pf_num = adapter->pf_number;
3116         req->nic_desc.vf_num = domain;
3117
3118         /* Mark fields invalid */
3119         req->nic_desc.unicast_mac_count = 0xFFFF;
3120         req->nic_desc.mcc_count = 0xFFFF;
3121         req->nic_desc.vlan_count = 0xFFFF;
3122         req->nic_desc.mcast_mac_count = 0xFFFF;
3123         req->nic_desc.txq_count = 0xFFFF;
3124         req->nic_desc.rq_count = 0xFFFF;
3125         req->nic_desc.rssq_count = 0xFFFF;
3126         req->nic_desc.lro_count = 0xFFFF;
3127         req->nic_desc.cq_count = 0xFFFF;
3128         req->nic_desc.toe_conn_count = 0xFFFF;
3129         req->nic_desc.eq_count = 0xFFFF;
3130         req->nic_desc.link_param = 0xFF;
3131         req->nic_desc.bw_min = 0xFFFFFFFF;
3132         req->nic_desc.acpi_params = 0xFF;
3133         req->nic_desc.wol_param = 0x0F;
3134
3135         /* Change BW */
3136         req->nic_desc.bw_min = cpu_to_le32(bps);
3137         req->nic_desc.bw_max = cpu_to_le32(bps);
3138         status = be_mcc_notify_wait(adapter);
3139 err:
3140         spin_unlock_bh(&adapter->mcc_lock);
3141         return status;
3142 }
3143
3144 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
3145                      int vf_num)
3146 {
3147         struct be_mcc_wrb *wrb;
3148         struct be_cmd_req_get_iface_list *req;
3149         struct be_cmd_resp_get_iface_list *resp;
3150         int status;
3151
3152         spin_lock_bh(&adapter->mcc_lock);
3153
3154         wrb = wrb_from_mccq(adapter);
3155         if (!wrb) {
3156                 status = -EBUSY;
3157                 goto err;
3158         }
3159         req = embedded_payload(wrb);
3160
3161         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3162                                OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
3163                                wrb, NULL);
3164         req->hdr.domain = vf_num + 1;
3165
3166         status = be_mcc_notify_wait(adapter);
3167         if (!status) {
3168                 resp = (struct be_cmd_resp_get_iface_list *)req;
3169                 vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
3170         }
3171
3172 err:
3173         spin_unlock_bh(&adapter->mcc_lock);
3174         return status;
3175 }
3176
3177 /* Uses sync mcc */
3178 int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
3179 {
3180         struct be_mcc_wrb *wrb;
3181         struct be_cmd_enable_disable_vf *req;
3182         int status;
3183
3184         if (!lancer_chip(adapter))
3185                 return 0;
3186
3187         spin_lock_bh(&adapter->mcc_lock);
3188
3189         wrb = wrb_from_mccq(adapter);
3190         if (!wrb) {
3191                 status = -EBUSY;
3192                 goto err;
3193         }
3194
3195         req = embedded_payload(wrb);
3196
3197         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3198                                OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
3199                                wrb, NULL);
3200
3201         req->hdr.domain = domain;
3202         req->enable = 1;
3203         status = be_mcc_notify_wait(adapter);
3204 err:
3205         spin_unlock_bh(&adapter->mcc_lock);
3206         return status;
3207 }
3208
/* Issue an arbitrary MCC cmd on behalf of the RoCE driver.
 * 'wrb_payload' is copied into the WRB's embedded payload (after dword
 * byte-swapping to LE) and the response is copied back (swapped to cpu
 * order) into the same buffer. The low 16 bits of the mcc status are
 * returned via *cmd_status; *ext_status is always cleared here.
 *
 * NOTE(review): req and resp alias the SAME embedded payload; the
 * request is consumed by the fw before the response overwrites it, so
 * the copy/swap ordering below must not be rearranged. The caller must
 * also ensure wrb_payload_size fits the embedded payload.
 */
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
			int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
	struct be_adapter *adapter = netdev_priv(netdev_handle);
	struct be_mcc_wrb *wrb;
	/* The caller's payload begins with a standard cmd header; reuse
	 * its subsystem/opcode when preparing the WRB.
	 */
	struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload;
	struct be_cmd_req_hdr *req;
	struct be_cmd_resp_hdr *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);
	resp = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
			       hdr->opcode, wrb_payload_size, wrb, NULL);
	memcpy(req, wrb_payload, wrb_payload_size);
	/* fw expects LE dwords; caller supplied cpu order */
	be_dws_cpu_to_le(req, wrb_payload_size);

	status = be_mcc_notify_wait(adapter);
	if (cmd_status)
		*cmd_status = (status & 0xffff);
	if (ext_status)
		*ext_status = 0;
	/* Copy the (LE) response back and swap it to cpu order in place.
	 * NOTE(review): this runs even when the cmd failed -- presumably
	 * the resp header's response_length is still valid then; confirm.
	 */
	memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
	be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
EXPORT_SYMBOL(be_roce_mcc_cmd);