drivers/net/wireless/intel/iwlwifi/queue/tx.c
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (C) 2020-2023 Intel Corporation
4  */
5 #include <net/tso.h>
6 #include <linux/tcp.h>
7
8 #include "iwl-debug.h"
9 #include "iwl-io.h"
10 #include "fw/api/commands.h"
11 #include "fw/api/tx.h"
12 #include "fw/api/datapath.h"
13 #include "queue/tx.h"
14 #include "iwl-fh.h"
15 #include "iwl-scd.h"
16 #include <linux/dmapool.h>
17
18 /*
19  * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
20  */
21 static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
22                                           struct iwl_txq *txq, u16 byte_cnt,
23                                           int num_tbs)
24 {
25         int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
26         u8 filled_tfd_size, num_fetch_chunks;
27         u16 len = byte_cnt;
28         __le16 bc_ent;
29
30         if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
31                 return;
32
33         filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
34                           num_tbs * sizeof(struct iwl_tfh_tb);
35         /*
36          * filled_tfd_size contains the number of filled bytes in the TFD.
37          * Dividing it by 64 will give the number of chunks to fetch
38          * to SRAM - 0 for one chunk, 1 for 2 and so on.
39          * If, for example, TFD contains only 3 TBs then 32 bytes
40          * of the TFD are used, and only one chunk of 64 bytes should
41          * be fetched
42          */
43         num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
44
45         if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
46                 struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
47
48                 /* Starting from AX210, the HW expects bytes */
49                 WARN_ON(trans->txqs.bc_table_dword);
50                 WARN_ON(len > 0x3FFF);
51                 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
52                 scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
53         } else {
54                 struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
55
56                 /* Before AX210, the HW expects DW */
57                 WARN_ON(!trans->txqs.bc_table_dword);
58                 len = DIV_ROUND_UP(len, 4);
59                 WARN_ON(len > 0xFFF);
60                 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
61                 scd_bc_tbl->tfd_offset[idx] = bc_ent;
62         }
63 }
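/*
 * Illustrative sketch (not part of the driver): the fetch-chunk field
 * written above encodes "number of 64-byte SRAM chunks minus one" for
 * the filled portion of the TFD. A standalone equivalent of the
 * arithmetic, with DIV_ROUND_UP open-coded:
 */
static unsigned int example_num_fetch_chunks(unsigned int filled_tfd_size)
{
	/* DIV_ROUND_UP(filled_tfd_size, 64) - 1 */
	return (filled_tfd_size + 63) / 64 - 1;
}

/*
 * example_num_fetch_chunks(32)  == 0  ->  one 64-byte chunk
 * example_num_fetch_chunks(64)  == 0  ->  still one chunk
 * example_num_fetch_chunks(65)  == 1  ->  two chunks
 */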
64
65 /*
66  * iwl_txq_inc_wr_ptr - Send new write index to hardware
67  */
68 void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
69 {
70         lockdep_assert_held(&txq->lock);
71
72         IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
73
74         /*
75          * if not in power-save mode, uCode will never sleep when we're
76          * trying to tx (during RFKILL, we're not trying to tx).
77          */
78         iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
79 }
80
81 static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans,
82                                    struct iwl_tfh_tfd *tfd)
83 {
84         return le16_to_cpu(tfd->num_tbs) & 0x1f;
85 }
86
87 void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
88                             struct iwl_tfh_tfd *tfd)
89 {
90         int i, num_tbs;
91
92         /* Sanity check on number of chunks */
93         num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd);
94
95         if (num_tbs > trans->txqs.tfd.max_tbs) {
96                 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
97                 return;
98         }
99
100         /* first TB is never freed - it's the bidirectional DMA data */
101         for (i = 1; i < num_tbs; i++) {
102                 if (meta->tbs & BIT(i))
103                         dma_unmap_page(trans->dev,
104                                        le64_to_cpu(tfd->tbs[i].addr),
105                                        le16_to_cpu(tfd->tbs[i].tb_len),
106                                        DMA_TO_DEVICE);
107                 else
108                         dma_unmap_single(trans->dev,
109                                          le64_to_cpu(tfd->tbs[i].addr),
110                                          le16_to_cpu(tfd->tbs[i].tb_len),
111                                          DMA_TO_DEVICE);
112         }
113
114         tfd->num_tbs = 0;
115 }
116
117 void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
118 {
119         /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
120          * idx is bounded by n_window
121          */
122         int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
123         struct sk_buff *skb;
124
125         lockdep_assert_held(&txq->lock);
126
127         if (!txq->entries)
128                 return;
129
130         iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
131                                iwl_txq_get_tfd(trans, txq, idx));
132
133         skb = txq->entries[idx].skb;
134
135         /* Can be called from irqs-disabled context
136          * If skb is not NULL, it means that the whole queue is being
137          * freed and that the queue is not empty - free the skb
138          */
139         if (skb) {
140                 iwl_op_mode_free_skb(trans->op_mode, skb);
141                 txq->entries[idx].skb = NULL;
142         }
143 }
144
145 int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
146                         dma_addr_t addr, u16 len)
147 {
148         int idx = iwl_txq_gen2_get_num_tbs(trans, tfd);
149         struct iwl_tfh_tb *tb;
150
151         /*
152          * Only WARN here so we know about the issue; returning an error
153          * here would mess up our unmap path, because not every caller
154          * currently checks for errors returned from this function - it can
155          * only return an error if there's no more space, and so when we
156          * know there is enough we don't always check ...
157          */
158         WARN(iwl_txq_crosses_4g_boundary(addr, len),
159              "possible DMA problem with iova:0x%llx, len:%d\n",
160              (unsigned long long)addr, len);
161
162         if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
163                 return -EINVAL;
164         tb = &tfd->tbs[idx];
165
166         /* Each TFD can point to a maximum max_tbs Tx buffers */
167         if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) {
168                 IWL_ERR(trans, "Error can not send more than %d chunks\n",
169                         trans->txqs.tfd.max_tbs);
170                 return -EINVAL;
171         }
172
173         put_unaligned_le64(addr, &tb->addr);
174         tb->tb_len = cpu_to_le16(len);
175
176         tfd->num_tbs = cpu_to_le16(idx + 1);
177
178         return idx;
179 }
180
181 static struct page *get_workaround_page(struct iwl_trans *trans,
182                                         struct sk_buff *skb)
183 {
184         struct page **page_ptr;
185         struct page *ret;
186
187         page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
188
189         ret = alloc_page(GFP_ATOMIC);
190         if (!ret)
191                 return NULL;
192
193         /* set the chaining pointer to the previous page if there */
194         *(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
195         *page_ptr = ret;
196
197         return ret;
198 }
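/*
 * Illustrative sketch (not part of the driver): get_workaround_page()
 * chains every page it allocates by storing the previous list head in
 * the last sizeof(void *) bytes of the page, with the head itself kept
 * in skb->cb; iwl_txq_free_tso_page() below walks that chain. A
 * userspace equivalent with made-up names:
 */
#include <stdlib.h>

#define EXAMPLE_PAGE_SIZE 4096

static void *example_push_page(void **head)
{
	unsigned char *page = malloc(EXAMPLE_PAGE_SIZE);

	if (!page)
		return NULL;
	/* last pointer-sized slot chains to the previous head */
	*(void **)(page + EXAMPLE_PAGE_SIZE - sizeof(void *)) = *head;
	*head = page;
	return page;
}

static void example_free_page_chain(void *head)
{
	while (head) {
		void *next = *(void **)((unsigned char *)head +
					EXAMPLE_PAGE_SIZE - sizeof(void *));

		free(head);
		head = next;
	}
}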
199
200 /*
201  * Add a TB and if needed apply the FH HW bug workaround;
202  * meta != NULL indicates that it's a page mapping and we
203  * need to dma_unmap_page() and set the meta->tbs bit in
204  * this case.
205  */
206 static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
207                                        struct sk_buff *skb,
208                                        struct iwl_tfh_tfd *tfd,
209                                        dma_addr_t phys, void *virt,
210                                        u16 len, struct iwl_cmd_meta *meta)
211 {
212         dma_addr_t oldphys = phys;
213         struct page *page;
214         int ret;
215
216         if (unlikely(dma_mapping_error(trans->dev, phys)))
217                 return -ENOMEM;
218
219         if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
220                 ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
221
222                 if (ret < 0)
223                         goto unmap;
224
225                 if (meta)
226                         meta->tbs |= BIT(ret);
227
228                 ret = 0;
229                 goto trace;
230         }
231
232         /*
233          * Work around a hardware bug. If (as checked by the
234          * condition above) the TB crosses a 2^32 address
235          * boundary, then the next TB may be accessed with the
236          * wrong address.
237          * To work around it, copy the data elsewhere and make
238          * a new mapping for it so the device will not fail.
239          */
240
241         if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
242                 ret = -ENOBUFS;
243                 goto unmap;
244         }
245
246         page = get_workaround_page(trans, skb);
247         if (!page) {
248                 ret = -ENOMEM;
249                 goto unmap;
250         }
251
252         memcpy(page_address(page), virt, len);
253
254         phys = dma_map_single(trans->dev, page_address(page), len,
255                               DMA_TO_DEVICE);
256         if (unlikely(dma_mapping_error(trans->dev, phys)))
257                 return -ENOMEM;
258         ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
259         if (ret < 0) {
260                 /* unmap the new allocation as single */
261                 oldphys = phys;
262                 meta = NULL;
263                 goto unmap;
264         }
265         IWL_WARN(trans,
266                  "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
267                  len, (unsigned long long)oldphys, (unsigned long long)phys);
268
269         ret = 0;
270 unmap:
271         if (meta)
272                 dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
273         else
274                 dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
275 trace:
276         trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
277
278         return ret;
279 }
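/*
 * Illustrative sketch (not part of the driver): the workaround above
 * triggers when a TB would cross a 2^32 address boundary. The driver's
 * own iwl_txq_crosses_4g_boundary() helper is declared elsewhere; a
 * standalone version of such a check can simply compare the upper
 * 32 bits of the start and end of the mapping:
 */
static int example_crosses_4g_boundary(unsigned long long addr, unsigned int len)
{
	return (addr >> 32) != ((addr + len) >> 32);
}

/*
 * example_crosses_4g_boundary(0xfffff000ULL, 0x2000) is true: the
 * buffer starts below 4 GiB and ends above it, so it gets bounced
 * through a workaround page.
 */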
280
281 #ifdef CONFIG_INET
282 struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
283                                       struct sk_buff *skb)
284 {
285         struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page);
286         struct page **page_ptr;
287
288         page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
289
290         if (WARN_ON(*page_ptr))
291                 return NULL;
292
293         if (!p->page)
294                 goto alloc;
295
296         /*
297          * Check if there's enough room on this page
298          *
299          * Note that we put a page chaining pointer *last* in the
300          * page - we need it somewhere, and if it's there then we
301          * avoid DMA mapping the last bits of the page which may
302          * trigger the 32-bit boundary hardware bug.
303          *
304          * (see also get_workaround_page() in tx-gen2.c)
305          */
306         if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
307                            sizeof(void *))
308                 goto out;
309
310         /* We don't have enough room on this page, get a new one. */
311         __free_page(p->page);
312
313 alloc:
314         p->page = alloc_page(GFP_ATOMIC);
315         if (!p->page)
316                 return NULL;
317         p->pos = page_address(p->page);
318         /* set the chaining pointer to NULL */
319         *(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
320 out:
321         *page_ptr = p->page;
322         get_page(p->page);
323         return p;
324 }
325 #endif
326
327 static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
328                                     struct sk_buff *skb,
329                                     struct iwl_tfh_tfd *tfd, int start_len,
330                                     u8 hdr_len,
331                                     struct iwl_device_tx_cmd *dev_cmd)
332 {
333 #ifdef CONFIG_INET
334         struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
335         struct ieee80211_hdr *hdr = (void *)skb->data;
336         unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
337         unsigned int mss = skb_shinfo(skb)->gso_size;
338         u16 length, amsdu_pad;
339         u8 *start_hdr;
340         struct iwl_tso_hdr_page *hdr_page;
341         struct tso_t tso;
342
343         trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
344                              &dev_cmd->hdr, start_len, 0);
345
346         ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
347         snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
348         total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
349         amsdu_pad = 0;
350
351         /* total amount of header we may need for this A-MSDU */
352         hdr_room = DIV_ROUND_UP(total_len, mss) *
353                 (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
354
355         /* Our device supports at most 9 segments, so it will fit in 1 page */
356         hdr_page = get_page_hdr(trans, hdr_room, skb);
357         if (!hdr_page)
358                 return -ENOMEM;
359
360         start_hdr = hdr_page->pos;
361
362         /*
363          * Pull the ieee80211 header to be able to use TSO core,
364          * we will restore it for the tx_status flow.
365          */
366         skb_pull(skb, hdr_len);
367
368         /*
369          * Remove the length of all the headers that we don't actually
370          * have in the MPDU by themselves, but that we duplicate into
371          * all the different MSDUs inside the A-MSDU.
372          */
373         le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
374
375         tso_start(skb, &tso);
376
377         while (total_len) {
378                 /* this is the data left for this subframe */
379                 unsigned int data_left = min_t(unsigned int, mss, total_len);
380                 unsigned int tb_len;
381                 dma_addr_t tb_phys;
382                 u8 *subf_hdrs_start = hdr_page->pos;
383
384                 total_len -= data_left;
385
386                 memset(hdr_page->pos, 0, amsdu_pad);
387                 hdr_page->pos += amsdu_pad;
388                 amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
389                                   data_left)) & 0x3;
390                 ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
391                 hdr_page->pos += ETH_ALEN;
392                 ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
393                 hdr_page->pos += ETH_ALEN;
394
395                 length = snap_ip_tcp_hdrlen + data_left;
396                 *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
397                 hdr_page->pos += sizeof(length);
398
399                 /*
400                  * This will copy the SNAP as well which will be considered
401                  * as MAC header.
402                  */
403                 tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
404
405                 hdr_page->pos += snap_ip_tcp_hdrlen;
406
407                 tb_len = hdr_page->pos - start_hdr;
408                 tb_phys = dma_map_single(trans->dev, start_hdr,
409                                          tb_len, DMA_TO_DEVICE);
410                 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
411                         goto out_err;
412                 /*
413                  * No need for _with_wa, this is from the TSO page and
414                  * we leave some space at the end of it so can't hit
415                  * the buggy scenario.
416                  */
417                 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
418                 trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
419                                         tb_phys, tb_len);
420                 /* add this subframe's headers' length to the tx_cmd */
421                 le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
422
423                 /* prepare the start_hdr for the next subframe */
424                 start_hdr = hdr_page->pos;
425
426                 /* put the payload */
427                 while (data_left) {
428                         int ret;
429
430                         tb_len = min_t(unsigned int, tso.size, data_left);
431                         tb_phys = dma_map_single(trans->dev, tso.data,
432                                                  tb_len, DMA_TO_DEVICE);
433                         ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
434                                                           tb_phys, tso.data,
435                                                           tb_len, NULL);
436                         if (ret)
437                                 goto out_err;
438
439                         data_left -= tb_len;
440                         tso_build_data(skb, &tso, tb_len);
441                 }
442         }
443
444         /* re-add the WiFi header */
445         skb_push(skb, hdr_len);
446
447         return 0;
448
449 out_err:
450 #endif
451         return -EINVAL;
452 }
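/*
 * Illustrative sketch (not part of the driver): the amsdu_pad value
 * computed in the loop above keeps every A-MSDU subframe 4-byte
 * aligned. Standalone equivalent, where subframe_len stands for
 * sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + data_left:
 */
static unsigned int example_amsdu_pad(unsigned int subframe_len)
{
	return (4 - subframe_len) & 0x3;
}

/*
 * example_amsdu_pad(61) == 3 and example_amsdu_pad(64) == 0, i.e. the
 * pad is exactly what is needed to reach the next 4-byte boundary.
 */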
453
454 static struct
455 iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
456                                          struct iwl_txq *txq,
457                                          struct iwl_device_tx_cmd *dev_cmd,
458                                          struct sk_buff *skb,
459                                          struct iwl_cmd_meta *out_meta,
460                                          int hdr_len,
461                                          int tx_cmd_len)
462 {
463         int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
464         struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
465         dma_addr_t tb_phys;
466         int len;
467         void *tb1_addr;
468
469         tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
470
471         /*
472          * No need for _with_wa, the first TB allocation is aligned up
473          * to a 64-byte boundary and thus can't be at the end or cross
474          * a page boundary (much less a 2^32 boundary).
475          */
476         iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
477
478         /*
479          * The second TB (tb1) points to the remainder of the TX command
480          * and the 802.11 header - dword aligned size
481          * (This calculation modifies the TX command, so do it before the
482          * setup of the first TB)
483          */
484         len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
485               IWL_FIRST_TB_SIZE;
486
487         /* do not align A-MSDU to dword as the subframe header aligns it */
488
489         /* map the data for TB1 */
490         tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
491         tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
492         if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
493                 goto out_err;
494         /*
495          * No need for _with_wa(), we ensure (via alignment) that the data
496          * here can never cross or end at a page boundary.
497          */
498         iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);
499
500         if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE,
501                                      hdr_len, dev_cmd))
502                 goto out_err;
503
504         /* building the A-MSDU might have changed this data, memcpy it now */
505         memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
506         return tfd;
507
508 out_err:
509         iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
510         return NULL;
511 }
512
513 static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
514                                      struct sk_buff *skb,
515                                      struct iwl_tfh_tfd *tfd,
516                                      struct iwl_cmd_meta *out_meta)
517 {
518         int i;
519
520         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
521                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
522                 dma_addr_t tb_phys;
523                 unsigned int fragsz = skb_frag_size(frag);
524                 int ret;
525
526                 if (!fragsz)
527                         continue;
528
529                 tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
530                                            fragsz, DMA_TO_DEVICE);
531                 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
532                                                   skb_frag_address(frag),
533                                                   fragsz, out_meta);
534                 if (ret)
535                         return ret;
536         }
537
538         return 0;
539 }
540
541 static struct
542 iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
543                                    struct iwl_txq *txq,
544                                    struct iwl_device_tx_cmd *dev_cmd,
545                                    struct sk_buff *skb,
546                                    struct iwl_cmd_meta *out_meta,
547                                    int hdr_len,
548                                    int tx_cmd_len,
549                                    bool pad)
550 {
551         int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
552         struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
553         dma_addr_t tb_phys;
554         int len, tb1_len, tb2_len;
555         void *tb1_addr;
556         struct sk_buff *frag;
557
558         tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
559
560         /* The first TB points to bi-directional DMA data */
561         memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
562
563         /*
564          * No need for _with_wa, the first TB allocation is aligned up
565          * to a 64-byte boundary and thus can't be at the end or cross
566          * a page boundary (much less a 2^32 boundary).
567          */
568         iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
569
570         /*
571          * The second TB (tb1) points to the remainder of the TX command
572          * and the 802.11 header - dword aligned size
573          * (This calculation modifies the TX command, so do it before the
574          * setup of the first TB)
575          */
576         len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
577               IWL_FIRST_TB_SIZE;
578
579         if (pad)
580                 tb1_len = ALIGN(len, 4);
581         else
582                 tb1_len = len;
583
584         /* map the data for TB1 */
585         tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
586         tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
587         if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
588                 goto out_err;
589         /*
590          * No need for _with_wa(), we ensure (via alignment) that the data
591          * here can never cross or end at a page boundary.
592          */
593         iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
594         trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
595                              IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
596
597         /* set up TFD's third entry to point to remainder of skb's head */
598         tb2_len = skb_headlen(skb) - hdr_len;
599
600         if (tb2_len > 0) {
601                 int ret;
602
603                 tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
604                                          tb2_len, DMA_TO_DEVICE);
605                 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
606                                                   skb->data + hdr_len, tb2_len,
607                                                   NULL);
608                 if (ret)
609                         goto out_err;
610         }
611
612         if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
613                 goto out_err;
614
615         skb_walk_frags(skb, frag) {
616                 int ret;
617
618                 tb_phys = dma_map_single(trans->dev, frag->data,
619                                          skb_headlen(frag), DMA_TO_DEVICE);
620                 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
621                                                   frag->data,
622                                                   skb_headlen(frag), NULL);
623                 if (ret)
624                         goto out_err;
625                 if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
626                         goto out_err;
627         }
628
629         return tfd;
630
631 out_err:
632         iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
633         return NULL;
634 }
635
636 static
637 struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
638                                            struct iwl_txq *txq,
639                                            struct iwl_device_tx_cmd *dev_cmd,
640                                            struct sk_buff *skb,
641                                            struct iwl_cmd_meta *out_meta)
642 {
643         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
644         int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
645         struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
646         int len, hdr_len;
647         bool amsdu;
648
649         /* There must be data left over for TB1 or this code must be changed */
650         BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
651         BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
652                      offsetofend(struct iwl_tx_cmd_gen2, dram_info) >
653                      IWL_FIRST_TB_SIZE);
654         BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) < IWL_FIRST_TB_SIZE);
655         BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
656                      offsetofend(struct iwl_tx_cmd_gen3, dram_info) >
657                      IWL_FIRST_TB_SIZE);
658
659         memset(tfd, 0, sizeof(*tfd));
660
661         if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
662                 len = sizeof(struct iwl_tx_cmd_gen2);
663         else
664                 len = sizeof(struct iwl_tx_cmd_gen3);
665
666         amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
667                         (*ieee80211_get_qos_ctl(hdr) &
668                          IEEE80211_QOS_CTL_A_MSDU_PRESENT);
669
670         hdr_len = ieee80211_hdrlen(hdr->frame_control);
671
672         /*
673          * Only build A-MSDUs here if doing so by GSO, otherwise it may be
674          * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
675          * built in the higher layers already.
676          */
677         if (amsdu && skb_shinfo(skb)->gso_size)
678                 return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
679                                                     out_meta, hdr_len, len);
680         return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
681                                       hdr_len, len, !amsdu);
682 }
683
684 int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
685 {
686         unsigned int max;
687         unsigned int used;
688
689         /*
690          * To avoid ambiguity between empty and completely full queues, there
691          * should always be less than max_tfd_queue_size elements in the queue.
692          * If q->n_window is smaller than max_tfd_queue_size, there is no need
693          * to reserve any queue entries for this purpose.
694          */
695         if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
696                 max = q->n_window;
697         else
698                 max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;
699
700         /*
701          * max_tfd_queue_size is a power of 2, so the following is equivalent to
702          * modulo by max_tfd_queue_size and is well defined.
703          */
704         used = (q->write_ptr - q->read_ptr) &
705                 (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
706
707         if (WARN_ON(used > max))
708                 return 0;
709
710         return max - used;
711 }
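/*
 * Illustrative sketch (not part of the driver): because
 * max_tfd_queue_size is a power of two, the "& (size - 1)" mask above
 * is an exact modulo and stays correct when the write pointer wraps.
 * Standalone equivalent:
 */
static unsigned int example_ring_used(unsigned int write_ptr,
				      unsigned int read_ptr,
				      unsigned int size_pow2)
{
	return (write_ptr - read_ptr) & (size_pow2 - 1);
}

/*
 * With a 256-entry ring, example_ring_used(10, 250, 256) == 16:
 * six entries from 250..255 plus ten from 0..9.
 */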
712
713 int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
714                     struct iwl_device_tx_cmd *dev_cmd, int txq_id)
715 {
716         struct iwl_cmd_meta *out_meta;
717         struct iwl_txq *txq = trans->txqs.txq[txq_id];
718         u16 cmd_len;
719         int idx;
720         void *tfd;
721
722         if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
723                       "queue %d out of range", txq_id))
724                 return -EINVAL;
725
726         if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
727                       "TX on unused queue %d\n", txq_id))
728                 return -EINVAL;
729
730         if (skb_is_nonlinear(skb) &&
731             skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
732             __skb_linearize(skb))
733                 return -ENOMEM;
734
735         spin_lock(&txq->lock);
736
737         if (iwl_txq_space(trans, txq) < txq->high_mark) {
738                 iwl_txq_stop(trans, txq);
739
740                 /* don't put the packet on the ring, if there is no room */
741                 if (unlikely(iwl_txq_space(trans, txq) < 3)) {
742                         struct iwl_device_tx_cmd **dev_cmd_ptr;
743
744                         dev_cmd_ptr = (void *)((u8 *)skb->cb +
745                                                trans->txqs.dev_cmd_offs);
746
747                         *dev_cmd_ptr = dev_cmd;
748                         __skb_queue_tail(&txq->overflow_q, skb);
749                         spin_unlock(&txq->lock);
750                         return 0;
751                 }
752         }
753
754         idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
755
756         /* Set up driver data for this TFD */
757         txq->entries[idx].skb = skb;
758         txq->entries[idx].cmd = dev_cmd;
759
760         dev_cmd->hdr.sequence =
761                 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
762                             INDEX_TO_SEQ(idx)));
763
764         /* Set up first empty entry in queue's array of Tx/cmd buffers */
765         out_meta = &txq->entries[idx].meta;
766         out_meta->flags = 0;
767
768         tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
769         if (!tfd) {
770                 spin_unlock(&txq->lock);
771                 return -1;
772         }
773
774         if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
775                 struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
776                         (void *)dev_cmd->payload;
777
778                 cmd_len = le16_to_cpu(tx_cmd_gen3->len);
779         } else {
780                 struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
781                         (void *)dev_cmd->payload;
782
783                 cmd_len = le16_to_cpu(tx_cmd_gen2->len);
784         }
785
786         /* Set up entry for this TFD in Tx byte-count array */
787         iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
788                                       iwl_txq_gen2_get_num_tbs(trans, tfd));
789
790         /* start timer if queue currently empty */
791         if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
792                 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
793
794         /* Tell device the write index *just past* this latest filled TFD */
795         txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
796         iwl_txq_inc_wr_ptr(trans, txq);
797         /*
798          * At this point the frame is "transmitted" successfully
799          * and we will get a TX status notification eventually.
800          */
801         spin_unlock(&txq->lock);
802         return 0;
803 }
804
805 /*************** HOST COMMAND QUEUE FUNCTIONS   *****/
806
807 /*
808  * iwl_txq_gen2_unmap -  Unmap any remaining DMA mappings and free skb's
809  */
810 void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
811 {
812         struct iwl_txq *txq = trans->txqs.txq[txq_id];
813
814         spin_lock_bh(&txq->lock);
815         while (txq->write_ptr != txq->read_ptr) {
816                 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
817                                    txq_id, txq->read_ptr);
818
819                 if (txq_id != trans->txqs.cmd.q_id) {
820                         int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
821                         struct sk_buff *skb = txq->entries[idx].skb;
822
823                         if (!WARN_ON_ONCE(!skb))
824                                 iwl_txq_free_tso_page(trans, skb);
825                 }
826                 iwl_txq_gen2_free_tfd(trans, txq);
827                 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
828         }
829
830         while (!skb_queue_empty(&txq->overflow_q)) {
831                 struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
832
833                 iwl_op_mode_free_skb(trans->op_mode, skb);
834         }
835
836         spin_unlock_bh(&txq->lock);
837
838         /* just in case - this queue may have been stopped */
839         iwl_wake_queue(trans, txq);
840 }
841
842 static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
843                                      struct iwl_txq *txq)
844 {
845         struct device *dev = trans->dev;
846
847         /* De-alloc circular buffer of TFDs */
848         if (txq->tfds) {
849                 dma_free_coherent(dev,
850                                   trans->txqs.tfd.size * txq->n_window,
851                                   txq->tfds, txq->dma_addr);
852                 dma_free_coherent(dev,
853                                   sizeof(*txq->first_tb_bufs) * txq->n_window,
854                                   txq->first_tb_bufs, txq->first_tb_dma);
855         }
856
857         kfree(txq->entries);
858         if (txq->bc_tbl.addr)
859                 dma_pool_free(trans->txqs.bc_pool,
860                               txq->bc_tbl.addr, txq->bc_tbl.dma);
861         kfree(txq);
862 }
863
864 /*
865  * iwl_txq_gen2_free - Deallocate DMA queue.
866  * @txq: Transmit queue to deallocate.
867  *
868  * Empty queue by removing and destroying all BD's.
869  * Free all buffers.
870  * 0-fill, but do not free "txq" descriptor structure.
871  */
872 static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
873 {
874         struct iwl_txq *txq;
875         int i;
876
877         if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
878                       "queue %d out of range", txq_id))
879                 return;
880
881         txq = trans->txqs.txq[txq_id];
882
883         if (WARN_ON(!txq))
884                 return;
885
886         iwl_txq_gen2_unmap(trans, txq_id);
887
888         /* De-alloc array of command/tx buffers */
889         if (txq_id == trans->txqs.cmd.q_id)
890                 for (i = 0; i < txq->n_window; i++) {
891                         kfree_sensitive(txq->entries[i].cmd);
892                         kfree_sensitive(txq->entries[i].free_buf);
893                 }
894         del_timer_sync(&txq->stuck_timer);
895
896         iwl_txq_gen2_free_memory(trans, txq);
897
898         trans->txqs.txq[txq_id] = NULL;
899
900         clear_bit(txq_id, trans->txqs.queue_used);
901 }
902
903 /*
904  * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
905  */
906 static int iwl_queue_init(struct iwl_txq *q, int slots_num)
907 {
908         q->n_window = slots_num;
909
910         /* slots_num must be power-of-two size, otherwise
911          * iwl_txq_get_cmd_index is broken. */
912         if (WARN_ON(!is_power_of_2(slots_num)))
913                 return -EINVAL;
914
915         q->low_mark = q->n_window / 4;
916         if (q->low_mark < 4)
917                 q->low_mark = 4;
918
919         q->high_mark = q->n_window / 8;
920         if (q->high_mark < 2)
921                 q->high_mark = 2;
922
923         q->write_ptr = 0;
924         q->read_ptr = 0;
925
926         return 0;
927 }
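/*
 * Illustrative sketch (not part of the driver): the watermarks set
 * above are simple fractions of the ring size with small lower
 * bounds. Standalone equivalent with a hypothetical helper name:
 */
static void example_queue_marks(unsigned int n_window,
				unsigned int *low_mark, unsigned int *high_mark)
{
	*low_mark = n_window / 4;
	if (*low_mark < 4)
		*low_mark = 4;

	*high_mark = n_window / 8;
	if (*high_mark < 2)
		*high_mark = 2;
}

/*
 * A 64-slot queue gets low_mark == 16 and high_mark == 8; for a small
 * 8-slot queue the lower bounds apply (4 and 2).
 */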
928
929 int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
930                  bool cmd_queue)
931 {
932         int ret;
933         u32 tfd_queue_max_size =
934                 trans->trans_cfg->base_params->max_tfd_queue_size;
935
936         txq->need_update = false;
937
938         /* max_tfd_queue_size must be power-of-two size, otherwise
939          * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. */
940         if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
941                       "Max tfd queue size must be a power of two, but is %d",
942                       tfd_queue_max_size))
943                 return -EINVAL;
944
945         /* Initialize queue's high/low-water marks, and head/tail indexes */
946         ret = iwl_queue_init(txq, slots_num);
947         if (ret)
948                 return ret;
949
950         spin_lock_init(&txq->lock);
951
952         if (cmd_queue) {
953                 static struct lock_class_key iwl_txq_cmd_queue_lock_class;
954
955                 lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
956         }
957
958         __skb_queue_head_init(&txq->overflow_q);
959
960         return 0;
961 }
962
963 void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
964 {
965         struct page **page_ptr;
966         struct page *next;
967
968         page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
969         next = *page_ptr;
970         *page_ptr = NULL;
971
972         while (next) {
973                 struct page *tmp = next;
974
975                 next = *(void **)((u8 *)page_address(next) + PAGE_SIZE -
976                                   sizeof(void *));
977                 __free_page(tmp);
978         }
979 }
980
981 void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
982 {
983         u32 txq_id = txq->id;
984         u32 status;
985         bool active;
986         u8 fifo;
987
988         if (trans->trans_cfg->gen2) {
989                 IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
990                         txq->read_ptr, txq->write_ptr);
991                 /* TODO: access new SCD registers and dump them */
992                 return;
993         }
994
995         status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
996         fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
997         active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
998
999         IWL_ERR(trans,
1000                 "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
1001                 txq_id, active ? "" : "in", fifo,
1002                 jiffies_to_msecs(txq->wd_timeout),
1003                 txq->read_ptr, txq->write_ptr,
1004                 iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
1005                         (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
1006                         iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
1007                         (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
1008                         iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
1009 }
1010
1011 static void iwl_txq_stuck_timer(struct timer_list *t)
1012 {
1013         struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
1014         struct iwl_trans *trans = txq->trans;
1015
1016         spin_lock(&txq->lock);
1017         /* check if triggered erroneously */
1018         if (txq->read_ptr == txq->write_ptr) {
1019                 spin_unlock(&txq->lock);
1020                 return;
1021         }
1022         spin_unlock(&txq->lock);
1023
1024         iwl_txq_log_scd_error(trans, txq);
1025
1026         iwl_force_nmi(trans);
1027 }
1028
1029 int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
1030                   bool cmd_queue)
1031 {
1032         size_t tfd_sz = trans->txqs.tfd.size *
1033                 trans->trans_cfg->base_params->max_tfd_queue_size;
1034         size_t tb0_buf_sz;
1035         int i;
1036
1037         if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num))
1038                 return -EINVAL;
1039
1040         if (WARN_ON(txq->entries || txq->tfds))
1041                 return -EINVAL;
1042
1043         if (trans->trans_cfg->gen2)
1044                 tfd_sz = trans->txqs.tfd.size * slots_num;
1045
1046         timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
1047         txq->trans = trans;
1048
1049         txq->n_window = slots_num;
1050
1051         txq->entries = kcalloc(slots_num,
1052                                sizeof(struct iwl_pcie_txq_entry),
1053                                GFP_KERNEL);
1054
1055         if (!txq->entries)
1056                 goto error;
1057
1058         if (cmd_queue)
1059                 for (i = 0; i < slots_num; i++) {
1060                         txq->entries[i].cmd =
1061                                 kmalloc(sizeof(struct iwl_device_cmd),
1062                                         GFP_KERNEL);
1063                         if (!txq->entries[i].cmd)
1064                                 goto error;
1065                 }
1066
1067         /* Circular buffer of transmit frame descriptors (TFDs),
1068          * shared with device */
1069         txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
1070                                        &txq->dma_addr, GFP_KERNEL);
1071         if (!txq->tfds)
1072                 goto error;
1073
1074         BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);
1075
1076         tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
1077
1078         txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
1079                                                 &txq->first_tb_dma,
1080                                                 GFP_KERNEL);
1081         if (!txq->first_tb_bufs)
1082                 goto err_free_tfds;
1083
1084         return 0;
1085 err_free_tfds:
1086         dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
1087         txq->tfds = NULL;
1088 error:
1089         if (txq->entries && cmd_queue)
1090                 for (i = 0; i < slots_num; i++)
1091                         kfree(txq->entries[i].cmd);
1092         kfree(txq->entries);
1093         txq->entries = NULL;
1094
1095         return -ENOMEM;
1096 }
1097
1098 static struct iwl_txq *
1099 iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
1100 {
1101         size_t bc_tbl_size, bc_tbl_entries;
1102         struct iwl_txq *txq;
1103         int ret;
1104
1105         WARN_ON(!trans->txqs.bc_tbl_size);
1106
1107         bc_tbl_size = trans->txqs.bc_tbl_size;
1108         bc_tbl_entries = bc_tbl_size / sizeof(u16);
1109
1110         if (WARN_ON(size > bc_tbl_entries))
1111                 return ERR_PTR(-EINVAL);
1112
1113         txq = kzalloc(sizeof(*txq), GFP_KERNEL);
1114         if (!txq)
1115                 return ERR_PTR(-ENOMEM);
1116
1117         txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
1118                                           &txq->bc_tbl.dma);
1119         if (!txq->bc_tbl.addr) {
1120                 IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
1121                 kfree(txq);
1122                 return ERR_PTR(-ENOMEM);
1123         }
1124
1125         ret = iwl_txq_alloc(trans, txq, size, false);
1126         if (ret) {
1127                 IWL_ERR(trans, "Tx queue alloc failed\n");
1128                 goto error;
1129         }
1130         ret = iwl_txq_init(trans, txq, size, false);
1131         if (ret) {
1132                 IWL_ERR(trans, "Tx queue init failed\n");
1133                 goto error;
1134         }
1135
1136         txq->wd_timeout = msecs_to_jiffies(timeout);
1137
1138         return txq;
1139
1140 error:
1141         iwl_txq_gen2_free_memory(trans, txq);
1142         return ERR_PTR(ret);
1143 }
1144
1145 static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
1146                                   struct iwl_host_cmd *hcmd)
1147 {
1148         struct iwl_tx_queue_cfg_rsp *rsp;
1149         int ret, qid;
1150         u32 wr_ptr;
1151
1152         if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
1153                     sizeof(*rsp))) {
1154                 ret = -EINVAL;
1155                 goto error_free_resp;
1156         }
1157
1158         rsp = (void *)hcmd->resp_pkt->data;
1159         qid = le16_to_cpu(rsp->queue_number);
1160         wr_ptr = le16_to_cpu(rsp->write_pointer);
1161
1162         if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
1163                 WARN_ONCE(1, "queue index %d unsupported", qid);
1164                 ret = -EIO;
1165                 goto error_free_resp;
1166         }
1167
1168         if (test_and_set_bit(qid, trans->txqs.queue_used)) {
1169                 WARN_ONCE(1, "queue %d already used", qid);
1170                 ret = -EIO;
1171                 goto error_free_resp;
1172         }
1173
1174         if (WARN_ONCE(trans->txqs.txq[qid],
1175                       "queue %d already allocated\n", qid)) {
1176                 ret = -EIO;
1177                 goto error_free_resp;
1178         }
1179
1180         txq->id = qid;
1181         trans->txqs.txq[qid] = txq;
1182         wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
1183
1184         /* Place first TFD at index corresponding to start sequence number */
1185         txq->read_ptr = wr_ptr;
1186         txq->write_ptr = wr_ptr;
1187
1188         IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
1189
1190         iwl_free_resp(hcmd);
1191         return qid;
1192
1193 error_free_resp:
1194         iwl_free_resp(hcmd);
1195         iwl_txq_gen2_free_memory(trans, txq);
1196         return ret;
1197 }
1198
1199 int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
1200                       u8 tid, int size, unsigned int timeout)
1201 {
1202         struct iwl_txq *txq;
1203         union {
1204                 struct iwl_tx_queue_cfg_cmd old;
1205                 struct iwl_scd_queue_cfg_cmd new;
1206         } cmd;
1207         struct iwl_host_cmd hcmd = {
1208                 .flags = CMD_WANT_SKB,
1209         };
1210         int ret;
1211
1212         if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
1213             trans->hw_rev_step == SILICON_A_STEP)
1214                 size = 4096;
1215
1216         txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
1217         if (IS_ERR(txq))
1218                 return PTR_ERR(txq);
1219
1220         if (trans->txqs.queue_alloc_cmd_ver == 0) {
1221                 memset(&cmd.old, 0, sizeof(cmd.old));
1222                 cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
1223                 cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
1224                 cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
1225                 cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE);
1226                 cmd.old.tid = tid;
1227
1228                 if (hweight32(sta_mask) != 1) {
1229                         ret = -EINVAL;
1230                         goto error;
1231                 }
1232                 cmd.old.sta_id = ffs(sta_mask) - 1;
1233
1234                 hcmd.id = SCD_QUEUE_CFG;
1235                 hcmd.len[0] = sizeof(cmd.old);
1236                 hcmd.data[0] = &cmd.old;
1237         } else if (trans->txqs.queue_alloc_cmd_ver == 3) {
1238                 memset(&cmd.new, 0, sizeof(cmd.new));
1239                 cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
1240                 cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
1241                 cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma);
1242                 cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
1243                 cmd.new.u.add.flags = cpu_to_le32(flags);
1244                 cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask);
1245                 cmd.new.u.add.tid = tid;
1246
1247                 hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD);
1248                 hcmd.len[0] = sizeof(cmd.new);
1249                 hcmd.data[0] = &cmd.new;
1250         } else {
1251                 ret = -EOPNOTSUPP;
1252                 goto error;
1253         }
1254
1255         ret = iwl_trans_send_cmd(trans, &hcmd);
1256         if (ret)
1257                 goto error;
1258
1259         return iwl_txq_alloc_response(trans, txq, &hcmd);
1260
1261 error:
1262         iwl_txq_gen2_free_memory(trans, txq);
1263         return ret;
1264 }
1265
1266 void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
1267 {
1268         if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
1269                  "queue %d out of range", queue))
1270                 return;
1271
1272         /*
1273          * Upon HW Rfkill - we stop the device, and then stop the queues
1274          * in the op_mode. Just for the sake of the simplicity of the op_mode,
1275          * allow the op_mode to call txq_disable after it already called
1276          * stop_device.
1277          */
1278         if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
1279                 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
1280                           "queue %d not used", queue);
1281                 return;
1282         }
1283
1284         iwl_txq_gen2_free(trans, queue);
1285
1286         IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
1287 }
1288
1289 void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
1290 {
1291         int i;
1292
1293         memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
1294
1295         /* Free all TX queues */
1296         for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
1297                 if (!trans->txqs.txq[i])
1298                         continue;
1299
1300                 iwl_txq_gen2_free(trans, i);
1301         }
1302 }
1303
1304 int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
1305 {
1306         struct iwl_txq *queue;
1307         int ret;
1308
1309         /* alloc and init the tx queue */
1310         if (!trans->txqs.txq[txq_id]) {
1311                 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1312                 if (!queue) {
1313                         IWL_ERR(trans, "Not enough memory for tx queue\n");
1314                         return -ENOMEM;
1315                 }
1316                 trans->txqs.txq[txq_id] = queue;
1317                 ret = iwl_txq_alloc(trans, queue, queue_size, true);
1318                 if (ret) {
1319                         IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
1320                         goto error;
1321                 }
1322         } else {
1323                 queue = trans->txqs.txq[txq_id];
1324         }
1325
1326         ret = iwl_txq_init(trans, queue, queue_size,
1327                            (txq_id == trans->txqs.cmd.q_id));
1328         if (ret) {
1329                 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
1330                 goto error;
1331         }
1332         trans->txqs.txq[txq_id]->id = txq_id;
1333         set_bit(txq_id, trans->txqs.queue_used);
1334
1335         return 0;
1336
1337 error:
1338         iwl_txq_gen2_tx_free(trans);
1339         return ret;
1340 }
1341
1342 static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
1343                                                       void *_tfd, u8 idx)
1344 {
1345         struct iwl_tfd *tfd;
1346         struct iwl_tfd_tb *tb;
1347         dma_addr_t addr;
1348         dma_addr_t hi_len;
1349
1350         if (trans->trans_cfg->gen2) {
1351                 struct iwl_tfh_tfd *tfh_tfd = _tfd;
1352                 struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
1353
1354                 return (dma_addr_t)(le64_to_cpu(tfh_tb->addr));
1355         }
1356
1357         tfd = _tfd;
1358         tb = &tfd->tbs[idx];
1359         addr = get_unaligned_le32(&tb->lo);
1360
1361         if (sizeof(dma_addr_t) <= sizeof(u32))
1362                 return addr;
1363
1364         hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
1365
1366         /*
1367          * shift by 16 twice to avoid warnings on 32-bit
1368          * (where this code never runs anyway due to the
1369          * if statement above)
1370          */
1371         return addr | ((hi_len << 16) << 16);
1372 }
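/*
 * Illustrative sketch (not part of the driver): legacy (pre-AX210)
 * TFDs split a 36-bit DMA address into a 32-bit "lo" word plus the
 * low nibble of hi_n_len. Standalone reassembly mirroring the code
 * above, without the 32-bit-build shift trick:
 */
static unsigned long long example_tfd_tb_addr(unsigned int lo,
					      unsigned int hi_n_len)
{
	unsigned long long hi = hi_n_len & 0xF;	/* address bits 32..35 */

	return (unsigned long long)lo | (hi << 32);
}

/*
 * example_tfd_tb_addr(0x12345678, 0x0005) == 0x512345678ULL - the
 * nibble supplies the address bits above 4 GiB.
 */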
1373
1374 void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
1375                             struct iwl_cmd_meta *meta,
1376                             struct iwl_txq *txq, int index)
1377 {
1378         int i, num_tbs;
1379         void *tfd = iwl_txq_get_tfd(trans, txq, index);
1380
1381         /* Sanity check on number of chunks */
1382         num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);
1383
1384         if (num_tbs > trans->txqs.tfd.max_tbs) {
1385                 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
1386                 /* @todo issue fatal error, it is quite a serious situation */
1387                 return;
1388         }
1389
1390         /* first TB is never freed - it's the bidirectional DMA data */
1391
1392         for (i = 1; i < num_tbs; i++) {
1393                 if (meta->tbs & BIT(i))
1394                         dma_unmap_page(trans->dev,
1395                                        iwl_txq_gen1_tfd_tb_get_addr(trans,
1396                                                                     tfd, i),
1397                                        iwl_txq_gen1_tfd_tb_get_len(trans,
1398                                                                    tfd, i),
1399                                        DMA_TO_DEVICE);
1400                 else
1401                         dma_unmap_single(trans->dev,
1402                                          iwl_txq_gen1_tfd_tb_get_addr(trans,
1403                                                                       tfd, i),
1404                                          iwl_txq_gen1_tfd_tb_get_len(trans,
1405                                                                      tfd, i),
1406                                          DMA_TO_DEVICE);
1407         }
1408
1409         meta->tbs = 0;
1410
1411         if (trans->trans_cfg->gen2) {
1412                 struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
1413
1414                 tfd_fh->num_tbs = 0;
1415         } else {
1416                 struct iwl_tfd *tfd_fh = (void *)tfd;
1417
1418                 tfd_fh->num_tbs = 0;
1419         }
1420 }
1421
1422 #define IWL_TX_CRC_SIZE 4
1423 #define IWL_TX_DELIMITER_SIZE 4
1424
1425 /*
1426  * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
1427  */
1428 void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
1429                                       struct iwl_txq *txq, u16 byte_cnt,
1430                                       int num_tbs)
1431 {
1432         struct iwlagn_scd_bc_tbl *scd_bc_tbl;
1433         int write_ptr = txq->write_ptr;
1434         int txq_id = txq->id;
1435         u8 sec_ctl = 0;
1436         u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
1437         __le16 bc_ent;
1438         struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
1439         struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
1440         u8 sta_id = tx_cmd->sta_id;
1441
1442         scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
1443
1444         sec_ctl = tx_cmd->sec_ctl;
1445
1446         switch (sec_ctl & TX_CMD_SEC_MSK) {
1447         case TX_CMD_SEC_CCM:
1448                 len += IEEE80211_CCMP_MIC_LEN;
1449                 break;
1450         case TX_CMD_SEC_TKIP:
1451                 len += IEEE80211_TKIP_ICV_LEN;
1452                 break;
1453         case TX_CMD_SEC_WEP:
1454                 len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
1455                 break;
1456         }
1457         if (trans->txqs.bc_table_dword)
1458                 len = DIV_ROUND_UP(len, 4);
1459
1460         if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
1461                 return;
1462
1463         bc_ent = cpu_to_le16(len | (sta_id << 12));
1464
1465         scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
1466
1467         if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
1468                 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
1469                         bc_ent;
1470 }
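/*
 * Editor's note - a worked example of the entry computed above (not from
 * the original source).  Assume a 1200-byte CCMP-protected frame queued
 * for sta_id 3 with bc_table_dword set:
 *
 *   len    = 1200 + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE = 1208
 *   len   += IEEE80211_CCMP_MIC_LEN (8)                     = 1216
 *   len    = DIV_ROUND_UP(1216, 4)                          = 304 dwords
 *   bc_ent = 304 | (3 << 12)                                = 0x3130
 *            (before the cpu_to_le16() byte-order conversion)
 *
 * The standalone helper below mirrors only the bit packing: a 12-bit
 * length in bits 0..11 and the station id in bits 12..15.
 */
#if 0	/* illustration only */
#include <stdint.h>

static uint16_t example_gen1_bc_entry(uint16_t len, uint8_t sta_id)
{
	return (uint16_t)((len & 0xFFF) | ((uint16_t)(sta_id & 0xF) << 12));
}
#endif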
1471
1472 void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
1473                                      struct iwl_txq *txq)
1474 {
1475         struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
1476         int txq_id = txq->id;
1477         int read_ptr = txq->read_ptr;
1478         u8 sta_id = 0;
1479         __le16 bc_ent;
1480         struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
1481         struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
1482
1483         WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
1484
1485         if (txq_id != trans->txqs.cmd.q_id)
1486                 sta_id = tx_cmd->sta_id;
1487
1488         bc_ent = cpu_to_le16(1 | (sta_id << 12));
1489
1490         scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
1491
1492         if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
1493                 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
1494                         bc_ent;
1495 }
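/*
 * Editor's note (not from the original source): both the update and the
 * invalidate paths above mirror entries whose index is below
 * TFD_QUEUE_SIZE_BC_DUP into a second slot at TFD_QUEUE_SIZE_MAX + index.
 * A plausible reading is that the byte-count table is larger than the TFD
 * ring so the hardware can read slightly past the wrap point without
 * special-casing it.  The sketch below shows only the mirrored write
 * pattern, with hypothetical names and sizes.
 */
#if 0	/* illustration only */
#include <stdint.h>

#define EX_QUEUE_SIZE_MAX	256
#define EX_QUEUE_SIZE_BC_DUP	64

/* tfd_offset must have EX_QUEUE_SIZE_MAX + EX_QUEUE_SIZE_BC_DUP entries */
static void example_write_bc_entry(uint16_t *tfd_offset, int ptr, uint16_t ent)
{
	tfd_offset[ptr] = ent;
	if (ptr < EX_QUEUE_SIZE_BC_DUP)
		tfd_offset[EX_QUEUE_SIZE_MAX + ptr] = ent;
}
#endif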
1496
1497 /*
1498  * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
1499  * @trans - transport private data
1500  * @txq - tx queue
1502  *
1503  * Does NOT advance any TFD circular buffer read/write indexes
1504  * Does NOT free the TFD itself (which is within the circular buffer)
1505  */
1506 void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
1507 {
1508         /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
1509          * idx is bounded by n_window
1510          */
1511         int rd_ptr = txq->read_ptr;
1512         int idx = iwl_txq_get_cmd_index(txq, rd_ptr);
1513         struct sk_buff *skb;
1514
1515         lockdep_assert_held(&txq->lock);
1516
1517         if (!txq->entries)
1518                 return;
1519
1520         /* We have only q->n_window txq->entries, but we use
1521          * TFD_QUEUE_SIZE_MAX tfds
1522          */
1523         iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
1524
1525         /* free SKB */
1526         skb = txq->entries[idx].skb;
1527
1528         /* Can be called from an irqs-disabled context.
1529          * If skb is not NULL, it means that the whole queue is being
1530          * freed and that the queue is not empty - free the skb.
1531          */
1532         if (skb) {
1533                 iwl_op_mode_free_skb(trans->op_mode, skb);
1534                 txq->entries[idx].skb = NULL;
1535         }
1536 }
1537
1538 void iwl_txq_progress(struct iwl_txq *txq)
1539 {
1540         lockdep_assert_held(&txq->lock);
1541
1542         if (!txq->wd_timeout)
1543                 return;
1544
1545         /*
1546          * The queue is frozen: the station is asleep and the data we
1547          * send must be uAPSD or PS-Poll. Don't rearm the timer.
1548          */
1549         if (txq->frozen)
1550                 return;
1551
1552         /*
1553          * if empty delete timer, otherwise move timer forward
1554          * since we're making progress on this queue
1555          */
1556         if (txq->read_ptr == txq->write_ptr)
1557                 del_timer(&txq->stuck_timer);
1558         else
1559                 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
1560 }
1561
1562 /* Frees buffers until index _not_ inclusive */
1563 void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
1564                      struct sk_buff_head *skbs)
1565 {
1566         struct iwl_txq *txq = trans->txqs.txq[txq_id];
1567         int tfd_num, read_ptr, last_to_free;
1568
1569         /* This function is not meant to release the cmd queue */
1570         if (WARN_ON(txq_id == trans->txqs.cmd.q_id))
1571                 return;
1572
1573         if (WARN_ON(!txq))
1574                 return;
1575
1576         tfd_num = iwl_txq_get_cmd_index(txq, ssn);
1577         read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
1578
1579         spin_lock_bh(&txq->lock);
1580
1581         if (!test_bit(txq_id, trans->txqs.queue_used)) {
1582                 IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
1583                                     txq_id, ssn);
1584                 goto out;
1585         }
1586
1587         if (read_ptr == tfd_num)
1588                 goto out;
1589
1590         IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
1591                            txq_id, txq->read_ptr, tfd_num, ssn);
1592
1593         /* Since we free until the index, _not_ inclusive, the entry just
1594          * before it is the last one we will free; it must be in use. */
1595         last_to_free = iwl_txq_dec_wrap(trans, tfd_num);
1596
1597         if (!iwl_txq_used(txq, last_to_free)) {
1598                 IWL_ERR(trans,
1599                         "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
1600                         __func__, txq_id, last_to_free,
1601                         trans->trans_cfg->base_params->max_tfd_queue_size,
1602                         txq->write_ptr, txq->read_ptr);
1603
1604                 iwl_op_mode_time_point(trans->op_mode,
1605                                        IWL_FW_INI_TIME_POINT_FAKE_TX,
1606                                        NULL);
1607                 goto out;
1608         }
1609
1610         if (WARN_ON(!skb_queue_empty(skbs)))
1611                 goto out;
1612
1613         for (;
1614              read_ptr != tfd_num;
1615              txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr),
1616              read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
1617                 struct sk_buff *skb = txq->entries[read_ptr].skb;
1618
1619                 if (WARN_ON_ONCE(!skb))
1620                         continue;
1621
1622                 iwl_txq_free_tso_page(trans, skb);
1623
1624                 __skb_queue_tail(skbs, skb);
1625
1626                 txq->entries[read_ptr].skb = NULL;
1627
1628                 if (!trans->trans_cfg->gen2)
1629                         iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq);
1630
1631                 iwl_txq_free_tfd(trans, txq);
1632         }
1633
1634         iwl_txq_progress(txq);
1635
1636         if (iwl_txq_space(trans, txq) > txq->low_mark &&
1637             test_bit(txq_id, trans->txqs.queue_stopped)) {
1638                 struct sk_buff_head overflow_skbs;
1639
1640                 __skb_queue_head_init(&overflow_skbs);
1641                 skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);
1642
1643                 /*
1644                  * We are going to transmit from the overflow queue.
1645                  * Remember this state so that wait_for_txq_empty will know we
1646                  * are adding more packets to the TFD queue. It cannot rely on
1647                  * the state of &txq->overflow_q, as we just emptied it, but
1648                  * haven't TXed the content yet.
1649                  */
1650                 txq->overflow_tx = true;
1651
1652                 /*
1653                  * This is tricky: we are in the reclaim path, which is not
1654                  * re-entrant, so no one else will try to access the txq
1655                  * data from this path. Tx is stopped as well, so nothing
1656                  * new can race with us here. Bottom line: we can unlock
1657                  * and re-lock later.
1658                  */
1659                 spin_unlock_bh(&txq->lock);
1660
1661                 while (!skb_queue_empty(&overflow_skbs)) {
1662                         struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
1663                         struct iwl_device_tx_cmd *dev_cmd_ptr;
1664
1665                         dev_cmd_ptr = *(void **)((u8 *)skb->cb +
1666                                                  trans->txqs.dev_cmd_offs);
1667
1668                         /*
1669                          * Note that we can very well be overflowing again.
1670                          * In that case, iwl_txq_space will be small again
1671                          * and we won't wake mac80211's queue.
1672                          */
1673                         iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
1674                 }
1675
1676                 if (iwl_txq_space(trans, txq) > txq->low_mark)
1677                         iwl_wake_queue(trans, txq);
1678
1679                 spin_lock_bh(&txq->lock);
1680                 txq->overflow_tx = false;
1681         }
1682
1683 out:
1684         spin_unlock_bh(&txq->lock);
1685 }
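/*
 * Editor's sketch (not from the original source): the reclaim loop above
 * walks the ring from the current read pointer up to - but not including -
 * the index derived from ssn, wrapping around the queue size.  The
 * standalone loop below mirrors that "free until index, not inclusive"
 * walk; the names are hypothetical and the modulo stands in for
 * iwl_txq_inc_wrap().
 */
#if 0	/* illustration only */
static void example_reclaim_walk(int read_ptr, int tfd_num, int q_size,
				 void (*free_one)(int idx))
{
	while (read_ptr != tfd_num) {
		free_one(read_ptr);
		read_ptr = (read_ptr + 1) % q_size;
	}
}
#endif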
1686
1687 /* Set the read and write pointers of a specific txq */
1688 void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
1689 {
1690         struct iwl_txq *txq = trans->txqs.txq[txq_id];
1691
1692         spin_lock_bh(&txq->lock);
1693
1694         txq->write_ptr = ptr;
1695         txq->read_ptr = txq->write_ptr;
1696
1697         spin_unlock_bh(&txq->lock);
1698 }
1699
1700 void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
1701                                 bool freeze)
1702 {
1703         int queue;
1704
1705         for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
1706                 struct iwl_txq *txq = trans->txqs.txq[queue];
1707                 unsigned long now;
1708
1709                 spin_lock_bh(&txq->lock);
1710
1711                 now = jiffies;
1712
1713                 if (txq->frozen == freeze)
1714                         goto next_queue;
1715
1716                 IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
1717                                     freeze ? "Freezing" : "Waking", queue);
1718
1719                 txq->frozen = freeze;
1720
1721                 if (txq->read_ptr == txq->write_ptr)
1722                         goto next_queue;
1723
1724                 if (freeze) {
1725                         if (unlikely(time_after(now,
1726                                                 txq->stuck_timer.expires))) {
1727                                 /*
1728                                  * The timer should have fired, maybe it is
1729                                  * spinning right now on the lock.
1730                                  */
1731                                 goto next_queue;
1732                         }
1733                         /* remember how long until the timer fires */
1734                         txq->frozen_expiry_remainder =
1735                                 txq->stuck_timer.expires - now;
1736                         del_timer(&txq->stuck_timer);
1737                         goto next_queue;
1738                 }
1739
1740                 /*
1741                  * Wake a non-empty queue -> arm timer with the
1742                  * remainder before it froze
1743                  */
1744                 mod_timer(&txq->stuck_timer,
1745                           now + txq->frozen_expiry_remainder);
1746
1747 next_queue:
1748                 spin_unlock_bh(&txq->lock);
1749         }
1750 }
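/*
 * Editor's sketch (not from the original source): the freeze path above
 * records how long the stuck-queue watchdog still had to run, and the
 * wake path re-arms it with exactly that remainder.  A minimal mirror of
 * the bookkeeping in plain jiffies arithmetic, with hypothetical names:
 */
#if 0	/* illustration only */
struct example_watchdog {
	unsigned long expires;	 /* absolute jiffies the timer fires at */
	unsigned long remainder; /* saved on freeze, consumed on wake */
};

static void example_freeze(struct example_watchdog *wd, unsigned long now)
{
	wd->remainder = wd->expires - now;	/* time left before firing */
}

static void example_wake(struct example_watchdog *wd, unsigned long now)
{
	wd->expires = now + wd->remainder;	/* re-arm with the remainder */
}
#endif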
1751
1752 #define HOST_COMPLETE_TIMEOUT   (2 * HZ)
1753
1754 static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans,
1755                                         struct iwl_host_cmd *cmd)
1756 {
1757         const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
1758         struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
1759         int cmd_idx;
1760         int ret;
1761
1762         IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
1763
1764         if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
1765                                   &trans->status),
1766                  "Command %s: a command is already active!\n", cmd_str))
1767                 return -EIO;
1768
1769         IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
1770
1771         cmd_idx = trans->ops->send_cmd(trans, cmd);
1772         if (cmd_idx < 0) {
1773                 ret = cmd_idx;
1774                 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1775                 IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
1776                         cmd_str, ret);
1777                 return ret;
1778         }
1779
1780         ret = wait_event_timeout(trans->wait_command_queue,
1781                                  !test_bit(STATUS_SYNC_HCMD_ACTIVE,
1782                                            &trans->status),
1783                                  HOST_COMPLETE_TIMEOUT);
1784         if (!ret) {
1785                 IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
1786                         cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
1787
1788                 IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
1789                         txq->read_ptr, txq->write_ptr);
1790
1791                 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1792                 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
1793                                cmd_str);
1794                 ret = -ETIMEDOUT;
1795
1796                 iwl_trans_sync_nmi(trans);
1797                 goto cancel;
1798         }
1799
1800         if (test_bit(STATUS_FW_ERROR, &trans->status)) {
1801                 if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
1802                                         &trans->status)) {
1803                         IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
1804                         dump_stack();
1805                 }
1806                 ret = -EIO;
1807                 goto cancel;
1808         }
1809
1810         if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
1811             test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
1812                 IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
1813                 ret = -ERFKILL;
1814                 goto cancel;
1815         }
1816
1817         if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
1818                 IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
1819                 ret = -EIO;
1820                 goto cancel;
1821         }
1822
1823         return 0;
1824
1825 cancel:
1826         if (cmd->flags & CMD_WANT_SKB) {
1827                 /*
1828                  * Cancel the CMD_WANT_SKB flag for the cmd in the
1829                  * TX cmd queue. Otherwise, if the response comes
1830                  * in later, it may write through an invalid address
1831                  * (cmd->meta.source).
1832                  */
1833                 txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
1834         }
1835
1836         if (cmd->resp_pkt) {
1837                 iwl_free_resp(cmd);
1838                 cmd->resp_pkt = NULL;
1839         }
1840
1841         return ret;
1842 }
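/*
 * Editor's sketch (not from the original source): the generic shape of the
 * synchronous handshake above - the submitter sets a status bit and sleeps
 * on a wait queue, and the response path (not shown in this file section)
 * clears the bit and wakes it up.  Names are hypothetical; the sketch
 * assumes kernel context (linux/wait.h, linux/bitops.h).
 */
#if 0	/* illustration only */
static DECLARE_WAIT_QUEUE_HEAD(example_waitq);
static unsigned long example_status;
#define EXAMPLE_CMD_ACTIVE	0

static int example_wait_for_completion(void)
{
	set_bit(EXAMPLE_CMD_ACTIVE, &example_status);
	/* ... enqueue the command here ... */
	if (!wait_event_timeout(example_waitq,
				!test_bit(EXAMPLE_CMD_ACTIVE, &example_status),
				HOST_COMPLETE_TIMEOUT))
		return -ETIMEDOUT;	/* wait_event_timeout() returns 0 on timeout */
	return 0;
}

static void example_on_response(void)
{
	clear_bit(EXAMPLE_CMD_ACTIVE, &example_status);
	wake_up(&example_waitq);
}
#endif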
1843
1844 int iwl_trans_txq_send_hcmd(struct iwl_trans *trans,
1845                             struct iwl_host_cmd *cmd)
1846 {
1847         /* Make sure the NIC is still alive in the bus */
1848         if (test_bit(STATUS_TRANS_DEAD, &trans->status))
1849                 return -ENODEV;
1850
1851         if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
1852             test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
1853                 IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
1854                                   cmd->id);
1855                 return -ERFKILL;
1856         }
1857
1858         if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
1859                      !(cmd->flags & CMD_SEND_IN_D3))) {
1860                 IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
1861                 return -EHOSTDOWN;
1862         }
1863
1864         if (cmd->flags & CMD_ASYNC) {
1865                 int ret;
1866
1867                 /* An asynchronous command cannot expect an SKB to be set. */
1868                 if (WARN_ON(cmd->flags & CMD_WANT_SKB))
1869                         return -EINVAL;
1870
1871                 ret = trans->ops->send_cmd(trans, cmd);
1872                 if (ret < 0) {
1873                         IWL_ERR(trans,
1874                                 "Error sending %s: enqueue_hcmd failed: %d\n",
1875                                 iwl_get_cmd_string(trans, cmd->id), ret);
1876                         return ret;
1877                 }
1878                 return 0;
1879         }
1880
1881         return iwl_trans_txq_send_hcmd_sync(trans, cmd);
1882 }
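/*
 * Editor's sketch (not from the original source): how a caller might drive
 * the dispatcher above.  An asynchronous command must not set CMD_WANT_SKB
 * (the WARN_ON above enforces this); a synchronous command may set it and
 * then owns the response packet until iwl_free_resp().  The command id is
 * a placeholder, and real op-mode code normally goes through the transport
 * ops rather than calling this helper directly.
 */
#if 0	/* illustration only */
static int example_send_cmd(struct iwl_trans *trans, bool async)
{
	struct iwl_host_cmd cmd = {
		.id = 0,	/* placeholder command id */
		.flags = async ? CMD_ASYNC : CMD_WANT_SKB,
	};
	int ret;

	ret = iwl_trans_txq_send_hcmd(trans, &cmd);
	if (!ret && !async && cmd.resp_pkt)
		iwl_free_resp(&cmd);	/* release the synchronous response */
	return ret;
}
#endif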
1883