/* drivers/net/wireless/iwlwifi/iwl-test.c */
/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#include <linux/export.h>
#include <net/netlink.h>

#include "iwl-io.h"
#include "iwl-fh.h"
#include "iwl-prph.h"
#include "iwl-trans.h"
#include "iwl-test.h"
#include "iwl-csr.h"
#include "iwl-testmode.h"

/*
 * Periphery registers absolute lower bound. This is used in order to
 * differentiate register access through HBUS_TARG_PRPH_* from access
 * through HBUS_TARG_MEM_*.
 */
#define IWL_ABS_PRPH_START (0xA00000)

/*
 * The TLV policy for the generic netlink messages exchanged between the
 * kernel module and the user space application. The attributes themselves
 * are carried through the NL80211_CMD_TESTMODE channel regulated by nl80211.
 * See iwl-testmode.h
 */
static
struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
        [IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, },

        [IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, },
        [IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, },

        [IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, },
        [IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, },
        [IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, },

        [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, },
        [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, },

        [IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, },

        [IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, },
        [IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, },
        [IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, },

        [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },

        [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },

        [IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, },
        [IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, },
        [IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, },

        [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
        [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
        [IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, },
        [IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, },
        [IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, },

        [IWL_TM_ATTR_ENABLE_NOTIFICATION] = { .type = NLA_FLAG, },
};
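/*
 * The policy above is applied by nla_parse() in iwl_test_parse() below;
 * attributes that fail validation cause the whole testmode message to be
 * rejected.
 */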

static inline void iwl_test_trace_clear(struct iwl_test *tst)
{
        memset(&tst->trace, 0, sizeof(struct iwl_test_trace));
}

static void iwl_test_trace_stop(struct iwl_test *tst)
{
        if (!tst->trace.enabled)
                return;

        if (tst->trace.cpu_addr && tst->trace.dma_addr)
                dma_free_coherent(tst->trans->dev,
                                  tst->trace.tsize,
                                  tst->trace.cpu_addr,
                                  tst->trace.dma_addr);

        iwl_test_trace_clear(tst);
}

static inline void iwl_test_mem_clear(struct iwl_test *tst)
{
        memset(&tst->mem, 0, sizeof(struct iwl_test_mem));
}

static inline void iwl_test_mem_stop(struct iwl_test *tst)
{
        if (!tst->mem.in_read)
                return;

        iwl_test_mem_clear(tst);
}

/*
 * Initializes the test object
 * During the lifetime of the test object it is assumed that the transport is
 * started. The test object should be stopped before the transport is stopped.
 */
void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans,
                   struct iwl_test_ops *ops)
{
        tst->trans = trans;
        tst->ops = ops;

        iwl_test_trace_clear(tst);
        iwl_test_mem_clear(tst);
}
EXPORT_SYMBOL_GPL(iwl_test_init);

/*
 * Stops the test object and releases its resources
 */
void iwl_test_free(struct iwl_test *tst)
{
        iwl_test_mem_stop(tst);
        iwl_test_trace_stop(tst);
}
EXPORT_SYMBOL_GPL(iwl_test_free);

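/*
 * Thin wrappers around the callbacks supplied by the op mode through
 * struct iwl_test_ops. They keep the shared test logic below independent
 * of the specific op mode implementation.
 */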
static inline int iwl_test_send_cmd(struct iwl_test *tst,
                                    struct iwl_host_cmd *cmd)
{
        return tst->ops->send_cmd(tst->trans->op_mode, cmd);
}

static inline bool iwl_test_valid_hw_addr(struct iwl_test *tst, u32 addr)
{
        return tst->ops->valid_hw_addr(addr);
}

static inline u32 iwl_test_fw_ver(struct iwl_test *tst)
{
        return tst->ops->get_fw_ver(tst->trans->op_mode);
}

static inline struct sk_buff*
iwl_test_alloc_reply(struct iwl_test *tst, int len)
{
        return tst->ops->alloc_reply(tst->trans->op_mode, len);
}

static inline int iwl_test_reply(struct iwl_test *tst, struct sk_buff *skb)
{
        return tst->ops->reply(tst->trans->op_mode, skb);
}

static inline struct sk_buff*
iwl_test_alloc_event(struct iwl_test *tst, int len)
{
        return tst->ops->alloc_event(tst->trans->op_mode, len);
}

static inline void
iwl_test_event(struct iwl_test *tst, struct sk_buff *skb)
{
        return tst->ops->event(tst->trans->op_mode, skb);
}

/*
 * This function handles the user application commands to the fw. The fw
 * commands are sent in a synchronous manner. If the user requested the
 * command's response, it is sent back to user space.
 */
static int iwl_test_fw_cmd(struct iwl_test *tst, struct nlattr **tb)
{
        struct iwl_host_cmd cmd;
        struct iwl_rx_packet *pkt;
        struct sk_buff *skb;
        void *reply_buf;
        u32 reply_len;
        int ret;
        bool cmd_want_skb;

        memset(&cmd, 0, sizeof(struct iwl_host_cmd));

        if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] ||
            !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) {
                IWL_ERR(tst->trans, "Missing fw command mandatory fields\n");
                return -ENOMSG;
        }

        cmd.flags = CMD_ON_DEMAND | CMD_SYNC;
        cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]);
        if (cmd_want_skb)
                cmd.flags |= CMD_WANT_SKB;

        cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
        cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
        cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
        cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
        IWL_DEBUG_INFO(tst->trans, "test fw cmd=0x%x, flags 0x%x, len %d\n",
                       cmd.id, cmd.flags, cmd.len[0]);

        ret = iwl_test_send_cmd(tst, &cmd);
        if (ret) {
                IWL_ERR(tst->trans, "Failed to send hcmd\n");
                return ret;
        }
        if (!cmd_want_skb)
                return ret;

        /* Handling return of SKB to the user */
        pkt = cmd.resp_pkt;
        if (!pkt) {
                IWL_ERR(tst->trans, "HCMD received a null response packet\n");
                return ret;
        }

        reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
        skb = iwl_test_alloc_reply(tst, reply_len + 20);
        reply_buf = kmalloc(reply_len, GFP_KERNEL);
        if (!skb || !reply_buf) {
                kfree_skb(skb);
                kfree(reply_buf);
                return -ENOMEM;
        }

        /* The reply is in a page that we cannot send to user space. */
        memcpy(reply_buf, &(pkt->hdr), reply_len);
        iwl_free_resp(&cmd);

        if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
                        IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
            nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf))
                goto nla_put_failure;
        return iwl_test_reply(tst, skb);

nla_put_failure:
        IWL_DEBUG_INFO(tst->trans, "Failed creating NL attributes\n");
        kfree(reply_buf);
        kfree_skb(skb);
        return -ENOMSG;
}

/*
 * Handles the user application commands for register access.
 */
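/*
 * For illustration: a direct 32-bit register read is requested by nesting
 * the following attributes in the nl80211 testmode data:
 *   IWL_TM_ATTR_COMMAND    = IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32
 *   IWL_TM_ATTR_REG_OFFSET = <offset within the FH/CSR/HBUS range>
 * The reply carries the value in IWL_TM_ATTR_REG_VALUE32. The write commands
 * take IWL_TM_ATTR_REG_VALUE32 or IWL_TM_ATTR_REG_VALUE8 instead and send no
 * reply.
 */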
static int iwl_test_reg(struct iwl_test *tst, struct nlattr **tb)
{
        u32 ofs, val32, cmd;
        u8 val8;
        struct sk_buff *skb;
        int status = 0;
        struct iwl_trans *trans = tst->trans;

        if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
                IWL_ERR(trans, "Missing reg offset\n");
                return -ENOMSG;
        }

        ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
        IWL_DEBUG_INFO(trans, "test reg access cmd offset=0x%x\n", ofs);

        cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);

        /*
         * Allow access only to FH/CSR/HBUS in direct mode.
         * Since we don't have the upper bounds for the CSR and HBUS segments,
         * we will use only the upper bound of FH for sanity check.
         */
        if (ofs >= FH_MEM_UPPER_BOUND) {
                IWL_ERR(trans, "offset out of segment (0x0 - 0x%x)\n",
                        FH_MEM_UPPER_BOUND);
                return -EINVAL;
        }

        switch (cmd) {
        case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
                val32 = iwl_read_direct32(tst->trans, ofs);
                IWL_DEBUG_INFO(trans, "32 value to read 0x%x\n", val32);

                skb = iwl_test_alloc_reply(tst, 20);
                if (!skb) {
                        IWL_ERR(trans, "Memory allocation fail\n");
                        return -ENOMEM;
                }
                if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
                        goto nla_put_failure;
                status = iwl_test_reply(tst, skb);
                if (status < 0)
                        IWL_ERR(trans, "Error sending msg : %d\n", status);
                break;

        case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
                if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
                        IWL_ERR(trans, "Missing value to write\n");
                        return -ENOMSG;
                } else {
                        val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
                        IWL_DEBUG_INFO(trans, "32b write val=0x%x\n", val32);
                        iwl_write_direct32(tst->trans, ofs, val32);
                }
                break;

        case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
                if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
                        IWL_ERR(trans, "Missing value to write\n");
                        return -ENOMSG;
                } else {
                        val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
                        IWL_DEBUG_INFO(trans, "8b write val=0x%x\n", val8);
                        iwl_write8(tst->trans, ofs, val8);
                }
                break;

        default:
                IWL_ERR(trans, "Unknown test register cmd ID\n");
                return -ENOMSG;
        }

        return status;

nla_put_failure:
        kfree_skb(skb);
        return -EMSGSIZE;
}

/*
 * Handles the request to start FW tracing. Allocates the trace buffer
 * and sends a reply to user space with the address of the allocated buffer.
 */
static int iwl_test_trace_begin(struct iwl_test *tst, struct nlattr **tb)
{
        struct sk_buff *skb;
        int status = 0;

        if (tst->trace.enabled)
                return -EBUSY;

        if (!tb[IWL_TM_ATTR_TRACE_SIZE])
                tst->trace.size = TRACE_BUFF_SIZE_DEF;
        else
                tst->trace.size =
                        nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]);

        if (!tst->trace.size)
                return -EINVAL;

        if (tst->trace.size < TRACE_BUFF_SIZE_MIN ||
            tst->trace.size > TRACE_BUFF_SIZE_MAX)
                return -EINVAL;

        tst->trace.tsize = tst->trace.size + TRACE_BUFF_PADD;
        tst->trace.cpu_addr = dma_alloc_coherent(tst->trans->dev,
                                                 tst->trace.tsize,
                                                 &tst->trace.dma_addr,
                                                 GFP_KERNEL);
        if (!tst->trace.cpu_addr)
                return -ENOMEM;

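        /*
         * The buffer is over-allocated by TRACE_BUFF_PADD so that the area
         * actually used for tracing can start on a 0x100-byte aligned
         * address; trace_addr (the aligned pointer) is what gets filled with
         * a fixed pattern below and later dumped to user space.
         */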
        tst->trace.enabled = true;
        tst->trace.trace_addr = (u8 *)PTR_ALIGN(tst->trace.cpu_addr, 0x100);

        memset(tst->trace.trace_addr, 0x03B, tst->trace.size);

        skb = iwl_test_alloc_reply(tst, sizeof(tst->trace.dma_addr) + 20);
        if (!skb) {
                IWL_ERR(tst->trans, "Memory allocation fail\n");
                iwl_test_trace_stop(tst);
                return -ENOMEM;
        }

        if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
                    sizeof(tst->trace.dma_addr),
                    (u64 *)&tst->trace.dma_addr))
                goto nla_put_failure;

        status = iwl_test_reply(tst, skb);
        if (status < 0)
                IWL_ERR(tst->trans, "Error sending msg : %d\n", status);

        tst->trace.nchunks = DIV_ROUND_UP(tst->trace.size,
                                          DUMP_CHUNK_SIZE);

        return status;

nla_put_failure:
        kfree_skb(skb);
        if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
            IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
                iwl_test_trace_stop(tst);
        return -EMSGSIZE;
}

/*
 * Handles indirect read from the periphery or the SRAM. The read is performed
 * to a temporary buffer. The user space application should later issue a dump
 * command to retrieve the data.
 */
static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size)
{
        struct iwl_trans *trans = tst->trans;
        unsigned long flags;
        int i;

        if (size & 0x3)
                return -EINVAL;

        tst->mem.size = size;
        tst->mem.addr = kmalloc(tst->mem.size, GFP_KERNEL);
        if (tst->mem.addr == NULL)
                return -ENOMEM;

        /* Hard-coded periphery absolute address */
        if (IWL_ABS_PRPH_START <= addr &&
            addr < IWL_ABS_PRPH_START + PRPH_END) {
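                /*
                 * The address register is written once; the loop below relies
                 * on the address auto-incrementing so that each read of
                 * HBUS_TARG_PRPH_RDAT returns the next dword. The (3 << 24)
                 * in the upper byte mirrors the (size - 1) encoding used for
                 * periphery writes below, i.e. a full 4-byte access.
                 */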
                spin_lock_irqsave(&trans->reg_lock, flags);
                iwl_grab_nic_access(trans);
                iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
                            addr | (3 << 24));
                for (i = 0; i < size; i += 4)
                        *(u32 *)(tst->mem.addr + i) =
                                iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
                iwl_release_nic_access(trans);
                spin_unlock_irqrestore(&trans->reg_lock, flags);
        } else { /* target memory (SRAM) */
                _iwl_read_targ_mem_dwords(trans, addr,
                                          tst->mem.addr,
                                          tst->mem.size / 4);
        }

        tst->mem.nchunks =
                DIV_ROUND_UP(tst->mem.size, DUMP_CHUNK_SIZE);
        tst->mem.in_read = true;
        return 0;
}

/*
 * Handles indirect write to the periphery or the SRAM. The data is taken
 * directly from the buffer supplied by the user space application.
 */
static int iwl_test_indirect_write(struct iwl_test *tst, u32 addr,
        u32 size, unsigned char *buf)
{
        struct iwl_trans *trans = tst->trans;
        u32 val, i;
        unsigned long flags;

        if (IWL_ABS_PRPH_START <= addr &&
            addr < IWL_ABS_PRPH_START + PRPH_END) {
                /* Periphery writes can be 1-3 bytes long, or DWORDs */
                if (size < 4) {
                        memcpy(&val, buf, size);
                        spin_lock_irqsave(&trans->reg_lock, flags);
                        iwl_grab_nic_access(trans);
                        iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
                                    (addr & 0x0000FFFF) |
                                    ((size - 1) << 24));
                        iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
                        iwl_release_nic_access(trans);
                        /* needed after consecutive writes w/o read */
                        mmiowb();
                        spin_unlock_irqrestore(&trans->reg_lock, flags);
                } else {
                        if (size % 4)
                                return -EINVAL;
                        for (i = 0; i < size; i += 4)
                                iwl_write_prph(trans, addr + i,
                                               *(u32 *)(buf + i));
                }
        } else if (iwl_test_valid_hw_addr(tst, addr)) {
                _iwl_write_targ_mem_dwords(trans, addr, buf, size / 4);
        } else {
                return -EINVAL;
        }
        return 0;
}

/*
 * Handles the user application commands for indirect read/write
 * to/from the periphery or the SRAM.
 */
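/*
 * Note that an indirect read only fills the temporary buffer in tst->mem;
 * the data itself is fetched by the user with the
 * IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP command, which is served chunk by
 * chunk by iwl_test_buffer_dump() below.
 */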
static int iwl_test_indirect_mem(struct iwl_test *tst, struct nlattr **tb)
{
        u32 addr, size, cmd;
        unsigned char *buf;

        /* Both read and write should be blocked, for atomicity */
        if (tst->mem.in_read)
                return -EBUSY;

        cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
        if (!tb[IWL_TM_ATTR_MEM_ADDR]) {
                IWL_ERR(tst->trans, "Error finding memory offset address\n");
                return -ENOMSG;
        }
        addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]);
        if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) {
                IWL_ERR(tst->trans, "Error finding size for memory reading\n");
                return -ENOMSG;
        }
        size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]);

        if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ) {
                return iwl_test_indirect_read(tst, addr, size);
        } else {
                if (!tb[IWL_TM_ATTR_BUFFER_DUMP])
                        return -EINVAL;
                buf = (unsigned char *)nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]);
                return iwl_test_indirect_write(tst, addr, size, buf);
        }
}

/*
 * Enable notifications to user space
 */
static int iwl_test_notifications(struct iwl_test *tst,
                                  struct nlattr **tb)
{
        tst->notify = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]);
        return 0;
}

/*
 * Handles the request to get the device id
 */
static int iwl_test_get_dev_id(struct iwl_test *tst, struct nlattr **tb)
{
        u32 devid = tst->trans->hw_id;
        struct sk_buff *skb;
        int status;

        IWL_DEBUG_INFO(tst->trans, "hw version: 0x%x\n", devid);

        skb = iwl_test_alloc_reply(tst, 20);
        if (!skb) {
                IWL_ERR(tst->trans, "Memory allocation fail\n");
                return -ENOMEM;
        }

        if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid))
                goto nla_put_failure;
        status = iwl_test_reply(tst, skb);
        if (status < 0)
                IWL_ERR(tst->trans, "Error sending msg : %d\n", status);

        return 0;

nla_put_failure:
        kfree_skb(skb);
        return -EMSGSIZE;
}

/*
 * Handles the request to get the FW version
 */
static int iwl_test_get_fw_ver(struct iwl_test *tst, struct nlattr **tb)
{
        struct sk_buff *skb;
        int status;
        u32 ver = iwl_test_fw_ver(tst);

        IWL_DEBUG_INFO(tst->trans, "uCode version raw: 0x%x\n", ver);

        skb = iwl_test_alloc_reply(tst, 20);
        if (!skb) {
                IWL_ERR(tst->trans, "Memory allocation fail\n");
                return -ENOMEM;
        }

        if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION, ver))
                goto nla_put_failure;

        status = iwl_test_reply(tst, skb);
        if (status < 0)
                IWL_ERR(tst->trans, "Error sending msg : %d\n", status);

        return 0;

nla_put_failure:
        kfree_skb(skb);
        return -EMSGSIZE;
}

/*
 * Parses the netlink message and validates that IWL_TM_ATTR_COMMAND exists
 */
int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
                   void *data, int len)
{
        int result;

        result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
                           iwl_testmode_gnl_msg_policy);
        if (result) {
                IWL_ERR(tst->trans, "Fail parse gnl msg: %d\n", result);
                return result;
        }

        /* IWL_TM_ATTR_COMMAND is absolutely mandatory */
        if (!tb[IWL_TM_ATTR_COMMAND]) {
                IWL_ERR(tst->trans, "Missing testmode command type\n");
                return -ENOMSG;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(iwl_test_parse);

/*
 * Handle test commands.
 * Returns 1 for unknown commands (not handled by the test object); negative
 * value in case of error.
 */
int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb)
{
        int result;

        switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
        case IWL_TM_CMD_APP2DEV_UCODE:
                IWL_DEBUG_INFO(tst->trans, "test cmd to uCode\n");
                result = iwl_test_fw_cmd(tst, tb);
                break;

        case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
        case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
        case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
                IWL_DEBUG_INFO(tst->trans, "test cmd to register\n");
                result = iwl_test_reg(tst, tb);
                break;

        case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
                IWL_DEBUG_INFO(tst->trans, "test uCode trace cmd to driver\n");
                result = iwl_test_trace_begin(tst, tb);
                break;

        case IWL_TM_CMD_APP2DEV_END_TRACE:
                iwl_test_trace_stop(tst);
                result = 0;
                break;

        case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
        case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
                IWL_DEBUG_INFO(tst->trans, "test indirect memory cmd\n");
                result = iwl_test_indirect_mem(tst, tb);
                break;

        case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
                IWL_DEBUG_INFO(tst->trans, "test notifications cmd\n");
                result = iwl_test_notifications(tst, tb);
                break;

        case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
                IWL_DEBUG_INFO(tst->trans, "test get FW ver cmd\n");
                result = iwl_test_get_fw_ver(tst, tb);
                break;

        case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
                IWL_DEBUG_INFO(tst->trans, "test Get device ID cmd\n");
                result = iwl_test_get_dev_id(tst, tb);
                break;

        default:
                IWL_DEBUG_INFO(tst->trans, "Unknown test command\n");
                result = 1;
                break;
        }
        return result;
}
EXPORT_SYMBOL_GPL(iwl_test_handle_cmd);

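/*
 * The dump handlers below feed the trace/memory buffer to user space through
 * the netlink dump mechanism: each invocation emits one DUMP_CHUNK_SIZE piece,
 * cb->args[4] keeps the index of the next chunk across invocations, and
 * -ENOENT signals that the whole buffer has been delivered.
 */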
static int iwl_test_trace_dump(struct iwl_test *tst, struct sk_buff *skb,
                               struct netlink_callback *cb)
{
        int idx, length;

        if (!tst->trace.enabled || !tst->trace.trace_addr)
                return -EFAULT;

        idx = cb->args[4];
        if (idx >= tst->trace.nchunks)
                return -ENOENT;

        length = DUMP_CHUNK_SIZE;
        if (((idx + 1) == tst->trace.nchunks) &&
            (tst->trace.size % DUMP_CHUNK_SIZE))
                length = tst->trace.size %
                        DUMP_CHUNK_SIZE;

        if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
                    tst->trace.trace_addr + (DUMP_CHUNK_SIZE * idx)))
                goto nla_put_failure;

        cb->args[4] = ++idx;
        return 0;

 nla_put_failure:
        return -ENOBUFS;
}

static int iwl_test_buffer_dump(struct iwl_test *tst, struct sk_buff *skb,
                                struct netlink_callback *cb)
{
        int idx, length;

        if (!tst->mem.in_read)
                return -EFAULT;

        idx = cb->args[4];
        if (idx >= tst->mem.nchunks) {
                iwl_test_mem_stop(tst);
                return -ENOENT;
        }

        length = DUMP_CHUNK_SIZE;
        if (((idx + 1) == tst->mem.nchunks) &&
            (tst->mem.size % DUMP_CHUNK_SIZE))
                length = tst->mem.size % DUMP_CHUNK_SIZE;

        if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
                    tst->mem.addr + (DUMP_CHUNK_SIZE * idx)))
                goto nla_put_failure;

        cb->args[4] = ++idx;
        return 0;

 nla_put_failure:
        return -ENOBUFS;
}

/*
 * Handle dump commands.
 * Returns 1 for unknown commands (not handled by the test object); negative
 * value in case of error.
 */
int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
                  struct netlink_callback *cb)
{
        int result;

        switch (cmd) {
        case IWL_TM_CMD_APP2DEV_READ_TRACE:
                IWL_DEBUG_INFO(tst->trans, "uCode trace cmd\n");
                result = iwl_test_trace_dump(tst, skb, cb);
                break;

        case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
                IWL_DEBUG_INFO(tst->trans, "testmode sram dump cmd\n");
                result = iwl_test_buffer_dump(tst, skb, cb);
                break;

        default:
                result = 1;
                break;
        }
        return result;
}
EXPORT_SYMBOL_GPL(iwl_test_dump);

/*
 * Multicasts a spontaneous message from the device to user space.
 */
static void iwl_test_send_rx(struct iwl_test *tst,
                             struct iwl_rx_cmd_buffer *rxb)
{
        struct sk_buff *skb;
        struct iwl_rx_packet *data;
        int length;

        data = rxb_addr(rxb);
        length = le32_to_cpu(data->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;

        /* the length doesn't include len_n_flags field, so add it manually */
        length += sizeof(__le32);

        skb = iwl_test_alloc_event(tst, length + 20);
        if (skb == NULL) {
                IWL_ERR(tst->trans, "Out of memory for message to user\n");
                return;
        }

        if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
                        IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
            nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length, data))
                goto nla_put_failure;

        iwl_test_event(tst, skb);
        return;

nla_put_failure:
        kfree_skb(skb);
        IWL_ERR(tst->trans, "Ouch, overran buffer, check allocation!\n");
}

/*
 * Called whenever an Rx frame is received from the device. If notifications
 * to user space were requested, the frame is forwarded to the user.
 */
void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb)
{
        if (tst->notify)
                iwl_test_send_rx(tst, rxb);
}
EXPORT_SYMBOL_GPL(iwl_test_rx);