3 * This file is part of wlcore
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/interrupt.h>
29 #include <linux/irq.h>
33 #include "wl12xx_80211.h"
40 #include "vendor_cmd.h"
45 #define WL1271_BOOT_RETRIES 3
/*
 * Optional module parameters. A value of -1 (or NULL for fwlog_param)
 * means "not set on the command line"; wlcore_adjust_conf() then keeps
 * the platform/conf defaults.
 */
47 static char *fwlog_param;
48 static int fwlog_mem_blocks = -1;
49 static int bug_on_recovery = -1;
50 static int no_recovery = -1;
/* Forward declarations for routines referenced before their definitions. */
52 static void __wl1271_op_remove_interface(struct wl1271 *wl,
53 struct ieee80211_vif *vif,
54 bool reset_tx_queues);
55 static void wlcore_op_stop_locked(struct wl1271 *wl);
56 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
/*
 * Move an associated STA link to the authorized peer state in firmware.
 * No-op (early return) if the vif is not a STA, not yet associated, or the
 * state command was already sent (WLVIF_FLAG_STA_STATE_SENT guards resend).
 * NOTE(review): body is incomplete in this extract — early-return/ret
 * handling lines are missing.
 */
58 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
62 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
65 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
68 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
71 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
75 wl1271_info("Association completed.");
/*
 * cfg80211 regulatory notifier: cache the current DFS region from the
 * request and push the updated regulatory domain to the firmware.
 */
79 static void wl1271_reg_notify(struct wiphy *wiphy,
80 struct regulatory_request *request)
82 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
83 struct wl1271 *wl = hw->priv;
85 /* copy the current dfs region */
87 wl->dfs_region = request->dfs_region;
89 wlcore_regdomain_config(wl);
/*
 * Enable or disable RX streaming via an ACX command and mirror the result
 * in WLVIF_FLAG_RX_STREAMING_STARTED. Caller must hold wl->mutex.
 */
92 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
97 /* we should hold wl->mutex */
98 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
103 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
105 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
111 * this function is being called when the rx_streaming interval
112 * has beed changed or rx_streaming should be disabled
/*
 * Re-evaluate whether RX streaming should stay on: it is kept enabled only
 * while associated and either always-on is configured or soft-gemini (BT
 * coex) is active with a non-zero interval; otherwise it is turned off and
 * the inactivity timer is stopped.
 */
114 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
117 int period = wl->conf.rx_streaming.interval;
119 /* don't reconfigure if rx_streaming is disabled */
120 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
123 /* reconfigure/disable according to new streaming_period */
125 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
126 (wl->conf.rx_streaming.always ||
127 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
128 ret = wl1271_set_rx_streaming(wl, wlvif, true);
130 ret = wl1271_set_rx_streaming(wl, wlvif, false);
131 /* don't cancel_work_sync since we might deadlock */
132 del_timer_sync(&wlvif->rx_streaming_timer);
/*
 * Deferred work: enable RX streaming for one vif if still warranted
 * (associated, not already started, always-on or soft-gemini active),
 * then arm the inactivity timer that will disable it again.
 * Runs under wl->mutex with an ELP wakeup around the FW command.
 */
138 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
141 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
142 rx_streaming_enable_work);
143 struct wl1271 *wl = wlvif->wl;
145 mutex_lock(&wl->mutex);
/* bail out if already started, no longer associated, or streaming
 * is not currently called for */
147 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
148 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
149 (!wl->conf.rx_streaming.always &&
150 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
153 if (!wl->conf.rx_streaming.interval)
156 ret = wl1271_ps_elp_wakeup(wl);
160 ret = wl1271_set_rx_streaming(wl, wlvif, true);
164 /* stop it after some time of inactivity */
165 mod_timer(&wlvif->rx_streaming_timer,
166 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
169 wl1271_ps_elp_sleep(wl);
171 mutex_unlock(&wl->mutex);
/*
 * Deferred work: disable RX streaming for one vif (queued from the
 * inactivity timer). Skips the FW command if streaming was never started.
 * Runs under wl->mutex with an ELP wakeup around the FW command.
 */
174 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
177 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
178 rx_streaming_disable_work);
179 struct wl1271 *wl = wlvif->wl;
181 mutex_lock(&wl->mutex);
183 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
186 ret = wl1271_ps_elp_wakeup(wl);
190 ret = wl1271_set_rx_streaming(wl, wlvif, false);
195 wl1271_ps_elp_sleep(wl);
197 mutex_unlock(&wl->mutex);
/*
 * Inactivity timer callback: can't talk to the FW from timer (atomic)
 * context, so queue the disable work to run in process context.
 */
200 static void wl1271_rx_streaming_timer(unsigned long data)
202 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
203 struct wl1271 *wl = wlvif->wl;
204 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
207 /* wl->mutex must be taken */
/*
 * Re-arm the Tx-stuck watchdog: cancel any pending instance and queue it
 * again with a fresh timeout. Only meaningful while blocks are allocated
 * in the FW (i.e. Tx is in flight).
 */
208 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
210 /* if the watchdog is not armed, don't do anything */
211 if (wl->tx_allocated_blocks == 0)
214 cancel_delayed_work(&wl->tx_watchdog_work);
215 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
216 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
/*
 * Deferred work: propagate a mac80211 station rate-control update to the
 * chip-specific handler. Skipped unless the core state is ON; wraps the
 * HW call in an ELP wakeup/sleep pair under wl->mutex.
 */
219 static void wlcore_rc_update_work(struct work_struct *work)
222 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
224 struct wl1271 *wl = wlvif->wl;
226 mutex_lock(&wl->mutex);
228 if (unlikely(wl->state != WLCORE_STATE_ON))
231 ret = wl1271_ps_elp_wakeup(wl);
235 wlcore_hw_sta_rc_update(wl, wlvif);
237 wl1271_ps_elp_sleep(wl);
239 mutex_unlock(&wl->mutex);
/*
 * Tx watchdog: fires when no Tx completion has been seen from the FW for
 * tx_watchdog_timeout ms while blocks are still allocated. Benign cases
 * (ROC in progress, scan in progress, AP buffering frames for sleeping
 * stations) just re-arm the watchdog; otherwise Tx is considered stuck
 * and a full FW recovery is triggered.
 */
242 static void wl12xx_tx_watchdog_work(struct work_struct *work)
244 struct delayed_work *dwork;
247 dwork = container_of(work, struct delayed_work, work);
248 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
250 mutex_lock(&wl->mutex);
252 if (unlikely(wl->state != WLCORE_STATE_ON))
255 /* Tx went out in the meantime - everything is ok */
256 if (unlikely(wl->tx_allocated_blocks == 0))
260 * if a ROC is in progress, we might not have any Tx for a long
261 * time (e.g. pending Tx on the non-ROC channels)
263 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
264 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
265 wl->conf.tx.tx_watchdog_timeout);
266 wl12xx_rearm_tx_watchdog_locked(wl);
271 * if a scan is in progress, we might not have any Tx for a long
274 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
275 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
276 wl->conf.tx.tx_watchdog_timeout);
277 wl12xx_rearm_tx_watchdog_locked(wl);
282 * AP might cache a frame for a long time for a sleeping station,
283 * so rearm the timer if there's an AP interface with stations. If
284 * Tx is genuinely stuck we will most hopefully discover it when all
285 * stations are removed due to inactivity.
287 if (wl->active_sta_count) {
288 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
290 wl->conf.tx.tx_watchdog_timeout,
291 wl->active_sta_count);
292 wl12xx_rearm_tx_watchdog_locked(wl);
/* no benign explanation found - assume the FW is stuck and recover */
296 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
297 wl->conf.tx.tx_watchdog_timeout);
298 wl12xx_queue_recovery_work(wl);
301 mutex_unlock(&wl->mutex);
/*
 * Override the default wl->conf settings with any module parameters the
 * user supplied: fwlog_mem_blocks (range-checked against CONF_FWLOG_*),
 * fwlog_param mode string, bug_on_recovery and no_recovery flags.
 */
304 static void wlcore_adjust_conf(struct wl1271 *wl)
306 /* Adjust settings according to optional module parameters */
308 /* Firmware Logger params */
309 if (fwlog_mem_blocks != -1) {
310 if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
311 fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
312 wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
315 "Illegal fwlog_mem_blocks=%d using default %d",
316 fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
/* map the fwlog_param string to a logger mode/output combination */
321 if (!strcmp(fwlog_param, "continuous")) {
322 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
323 } else if (!strcmp(fwlog_param, "ondemand")) {
324 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
325 } else if (!strcmp(fwlog_param, "dbgpins")) {
326 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
327 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
328 } else if (!strcmp(fwlog_param, "disable")) {
329 wl->conf.fwlog.mem_blocks = 0;
330 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
332 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
336 if (bug_on_recovery != -1)
337 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
339 if (no_recovery != -1)
340 wl->conf.recovery.no_recovery = (u8) no_recovery;
/*
 * Per-link PS regulation for AP mode: end host-side (high-level) PS when
 * the station is awake or has few queued packets in FW, and start it when
 * the station sleeps with many packets queued - unless it is the only
 * connected link, where FW memory congestion is not a concern.
 */
343 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
344 struct wl12xx_vif *wlvif,
349 fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
352 * Wake up from high level PS if the STA is asleep with too little
353 * packets in FW or if the STA is awake.
355 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
356 wl12xx_ps_link_end(wl, wlvif, hlid);
359 * Start high-level PS if the STA is asleep with enough blocks in FW.
360 * Make an exception if this is the only connected link. In this
361 * case FW-memory congestion is less of a problem.
362 * Note that a single connected STA means 2*ap_count + 1 active links,
363 * since we must account for the global and broadcast AP links
364 * for each AP. The "fw_ps" check assures us the other link is a STA
365 * connected to the AP. Otherwise the FW would not set the PSM bit.
367 else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
368 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
369 wl12xx_ps_link_start(wl, wlvif, hlid, true);
/*
 * Refresh the cached per-link FW power-save bitmap from the FW status and
 * run PS regulation for every station link of this AP vif, using the
 * per-link allocated-packet counts.
 */
372 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
373 struct wl12xx_vif *wlvif,
374 struct wl_fw_status *status)
376 unsigned long cur_fw_ps_map;
379 cur_fw_ps_map = status->link_ps_bitmap;
380 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
381 wl1271_debug(DEBUG_PSM,
382 "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
383 wl->ap_fw_ps_map, cur_fw_ps_map,
384 wl->ap_fw_ps_map ^ cur_fw_ps_map);
386 wl->ap_fw_ps_map = cur_fw_ps_map;
389 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
390 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
391 wl->links[hlid].allocated_pkts);
/*
 * Read and process the FW status block: fetch the raw status over the bus,
 * convert it to host layout, then update the host-side Tx accounting
 * (per-queue and per-link freed packets, total freed blocks, available
 * blocks), re-arm or cancel the Tx watchdog accordingly, refresh AP link
 * PS state, and recompute the host-chipset time offset.
 * All freed-packet counters are 8-bit in FW, hence the "& 0xff"
 * wrap-around handling.
 */
394 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
396 struct wl12xx_vif *wlvif;
398 u32 old_tx_blk_count = wl->tx_blocks_available;
399 int avail, freed_blocks;
402 struct wl1271_link *lnk;
404 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
406 wl->fw_status_len, false);
410 wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
412 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
413 "drv_rx_counter = %d, tx_results_counter = %d)",
415 status->fw_rx_counter,
416 status->drv_rx_counter,
417 status->tx_results_counter);
419 for (i = 0; i < NUM_TX_QUEUES; i++) {
420 /* prevent wrap-around in freed-packets counter */
421 wl->tx_allocated_pkts[i] -=
422 (status->counters.tx_released_pkts[i] -
423 wl->tx_pkts_freed[i]) & 0xff;
425 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
/* per-link freed-packet accounting */
429 for_each_set_bit(i, wl->links_map, wl->num_links) {
433 /* prevent wrap-around in freed-packets counter */
434 diff = (status->counters.tx_lnk_free_pkts[i] -
435 lnk->prev_freed_pkts) & 0xff;
440 lnk->allocated_pkts -= diff;
441 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
443 /* accumulate the prev_freed_pkts counter */
444 lnk->total_freed_pkts += diff;
447 /* prevent wrap-around in total blocks counter */
448 if (likely(wl->tx_blocks_freed <= status->total_released_blks))
449 freed_blocks = status->total_released_blks -
452 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
453 status->total_released_blks;
455 wl->tx_blocks_freed = status->total_released_blks;
457 wl->tx_allocated_blocks -= freed_blocks;
460 * If the FW freed some blocks:
461 * If we still have allocated blocks - re-arm the timer, Tx is
462 * not stuck. Otherwise, cancel the timer (no Tx currently).
465 if (wl->tx_allocated_blocks)
466 wl12xx_rearm_tx_watchdog_locked(wl);
468 cancel_delayed_work(&wl->tx_watchdog_work);
471 avail = status->tx_total - wl->tx_allocated_blocks;
474 * The FW might change the total number of TX memblocks before
475 * we get a notification about blocks being released. Thus, the
476 * available blocks calculation might yield a temporary result
477 * which is lower than the actual available blocks. Keeping in
478 * mind that only blocks that were allocated can be moved from
479 * TX to RX, tx_blocks_available should never decrease here.
481 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
484 /* if more blocks are available now, tx work can be scheduled */
485 if (wl->tx_blocks_available > old_tx_blk_count)
486 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
488 /* for AP update num of allocated TX blocks per link and ps status */
489 wl12xx_for_each_wlvif_ap(wl, wlvif) {
490 wl12xx_irq_update_links_status(wl, wlvif, status);
493 /* update the host-chipset time offset */
495 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
496 (s64)(status->fw_localtime);
498 wl->fw_fast_lnk_map = status->link_fast_bitmap;
/*
 * Drain both deferred skb queues into mac80211: received frames go up via
 * ieee80211_rx_ni(), completed Tx skbs are returned via
 * ieee80211_tx_status_ni(). Must run in process context (the _ni variants).
 */
503 static void wl1271_flush_deferred_work(struct wl1271 *wl)
507 /* Pass all received frames to the network stack */
508 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
509 ieee80211_rx_ni(wl->hw, skb);
511 /* Return sent skbs to the network stack */
512 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
513 ieee80211_tx_status_ni(wl->hw, skb);
/*
 * Work item that flushes the deferred queues, looping until the RX queue
 * stays empty (new frames may be deferred while we flush).
 */
516 static void wl1271_netstack_work(struct work_struct *work)
519 container_of(work, struct wl1271, netstack_work);
522 wl1271_flush_deferred_work(wl);
523 } while (skb_queue_len(&wl->deferred_rx_queue));
526 #define WL1271_IRQ_MAX_LOOPS 256
/*
 * Main interrupt processing loop, called with wl->mutex held. Repeatedly
 * reads the FW status and dispatches on the interrupt cause bits: watchdog
 * interrupts trigger recovery, DATA interrupts drive the RX path, direct
 * Tx (to avoid starvation), delayed Tx completion and deferred-queue
 * flushing, and EVENT_A/EVENT_B feed the event mailbox handlers.
 * Loops at most WL1271_IRQ_MAX_LOOPS times - or once for edge-triggered
 * IRQs, to avoid racing the hardirq.
 */
528 static int wlcore_irq_locked(struct wl1271 *wl)
532 int loopcount = WL1271_IRQ_MAX_LOOPS;
534 unsigned int defer_count;
538 * In case edge triggered interrupt must be used, we cannot iterate
539 * more than once without introducing race conditions with the hardirq.
541 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
544 wl1271_debug(DEBUG_IRQ, "IRQ work");
546 if (unlikely(wl->state != WLCORE_STATE_ON))
549 ret = wl1271_ps_elp_wakeup(wl);
553 while (!done && loopcount--) {
555 * In order to avoid a race with the hardirq, clear the flag
556 * before acknowledging the chip. Since the mutex is held,
557 * wl1271_ps_elp_wakeup cannot be called concurrently.
559 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
560 smp_mb__after_atomic();
562 ret = wlcore_fw_status(wl, wl->fw_status);
566 wlcore_hw_tx_immediate_compl(wl);
568 intr = wl->fw_status->intr;
569 intr &= WLCORE_ALL_INTR_MASK;
575 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
576 wl1271_error("HW watchdog interrupt received! starting recovery.");
577 wl->watchdog_recovery = true;
580 /* restarting the chip. ignore any other interrupt. */
584 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
585 wl1271_error("SW watchdog interrupt received! "
586 "starting recovery.");
587 wl->watchdog_recovery = true;
590 /* restarting the chip. ignore any other interrupt. */
594 if (likely(intr & WL1271_ACX_INTR_DATA)) {
595 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
597 ret = wlcore_rx(wl, wl->fw_status);
601 /* Check if any tx blocks were freed */
602 spin_lock_irqsave(&wl->wl_lock, flags);
603 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
604 wl1271_tx_total_queue_count(wl) > 0) {
605 spin_unlock_irqrestore(&wl->wl_lock, flags);
607 * In order to avoid starvation of the TX path,
608 * call the work function directly.
610 ret = wlcore_tx_work_locked(wl);
614 spin_unlock_irqrestore(&wl->wl_lock, flags);
617 /* check for tx results */
618 ret = wlcore_hw_tx_delayed_compl(wl);
622 /* Make sure the deferred queues don't get too long */
623 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
624 skb_queue_len(&wl->deferred_rx_queue);
625 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
626 wl1271_flush_deferred_work(wl);
629 if (intr & WL1271_ACX_INTR_EVENT_A) {
630 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
631 ret = wl1271_event_handle(wl, 0);
636 if (intr & WL1271_ACX_INTR_EVENT_B) {
637 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
638 ret = wl1271_event_handle(wl, 1);
643 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
644 wl1271_debug(DEBUG_IRQ,
645 "WL1271_ACX_INTR_INIT_COMPLETE");
647 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
648 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
651 wl1271_ps_elp_sleep(wl);
/*
 * Threaded IRQ handler. Completes any pending ELP wakeup, defers the work
 * while suspended (marks it pending, disables the IRQ and raises a PM
 * wakeup event), otherwise runs wlcore_irq_locked() under wl->mutex and
 * queues recovery on failure. The TX_PENDING flag suppresses redundant
 * tx_work while Tx may be handled inline here.
 */
657 static irqreturn_t wlcore_irq(int irq, void *cookie)
661 struct wl1271 *wl = cookie;
663 /* complete the ELP completion */
664 spin_lock_irqsave(&wl->wl_lock, flags);
665 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
667 complete(wl->elp_compl);
668 wl->elp_compl = NULL;
671 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
672 /* don't enqueue a work right now. mark it as pending */
673 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
674 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
675 disable_irq_nosync(wl->irq);
676 pm_wakeup_event(wl->dev, 0);
677 spin_unlock_irqrestore(&wl->wl_lock, flags);
680 spin_unlock_irqrestore(&wl->wl_lock, flags);
682 /* TX might be handled here, avoid redundant work */
683 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
684 cancel_work_sync(&wl->tx_work);
686 mutex_lock(&wl->mutex);
688 ret = wlcore_irq_locked(wl);
690 wl12xx_queue_recovery_work(wl);
692 spin_lock_irqsave(&wl->wl_lock, flags);
693 /* In case TX was not handled here, queue TX work */
694 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
695 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
696 wl1271_tx_total_queue_count(wl) > 0)
697 ieee80211_queue_work(wl->hw, &wl->tx_work);
698 spin_unlock_irqrestore(&wl->wl_lock, flags);
700 mutex_unlock(&wl->mutex);
/* Accumulator for counting active interfaces across an iteration. */
705 struct vif_counter_data {
708 struct ieee80211_vif *cur_vif;
709 bool cur_vif_running;
/* Per-vif iterator callback: note whether the vif of interest is active. */
712 static void wl12xx_vif_count_iter(void *data, u8 *mac,
713 struct ieee80211_vif *vif)
715 struct vif_counter_data *counter = data;
718 if (counter->cur_vif == vif)
719 counter->cur_vif_running = true;
722 /* caller must not hold wl->mutex, as it might deadlock */
/*
 * Count active interfaces (and whether cur_vif is among them) by iterating
 * mac80211's active interface list into *data.
 */
723 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
724 struct ieee80211_vif *cur_vif,
725 struct vif_counter_data *data)
727 memset(data, 0, sizeof(*data));
728 data->cur_vif = cur_vif;
730 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
731 wl12xx_vif_count_iter, data);
/*
 * Select and load the right firmware image: PLT firmware when plt is set,
 * otherwise multi-role vs single-role based on the cached vif count.
 * Skips the load if the requested type is already loaded; copies the image
 * into a vmalloc'd buffer (wl->fw) and releases the request_firmware blob.
 * Returns 0 on success or a negative error code.
 */
734 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
736 const struct firmware *fw;
738 enum wl12xx_fw_type fw_type;
742 fw_type = WL12XX_FW_TYPE_PLT;
743 fw_name = wl->plt_fw_name;
746 * we can't call wl12xx_get_vif_count() here because
747 * wl->mutex is taken, so use the cached last_vif_count value
749 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
750 fw_type = WL12XX_FW_TYPE_MULTI;
751 fw_name = wl->mr_fw_name;
753 fw_type = WL12XX_FW_TYPE_NORMAL;
754 fw_name = wl->sr_fw_name;
/* already have the right image loaded - nothing to do */
758 if (wl->fw_type == fw_type)
761 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
763 ret = request_firmware(&fw, fw_name, wl->dev);
766 wl1271_error("could not get firmware %s: %d", fw_name, ret);
771 wl1271_error("firmware size is not multiple of 32 bits: %zu",
/* invalidate the type until the copy below succeeds */
778 wl->fw_type = WL12XX_FW_TYPE_NONE;
779 wl->fw_len = fw->size;
780 wl->fw = vmalloc(wl->fw_len);
783 wl1271_error("could not allocate memory for the firmware");
788 memcpy(wl->fw, fw->data, wl->fw_len);
790 wl->fw_type = fw_type;
792 release_firmware(fw);
/*
 * Kick off FW recovery: mark the core as RESTARTING (which also prevents
 * recursive recovery), wake the chip, mask further interrupts and queue
 * the recovery work item. WARNs if recovery was not intentionally armed.
 */
797 void wl12xx_queue_recovery_work(struct wl1271 *wl)
799 /* Avoid a recursive recovery */
800 if (wl->state == WLCORE_STATE_ON) {
801 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
804 wl->state = WLCORE_STATE_RESTARTING;
805 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
806 wl1271_ps_elp_wakeup(wl);
807 wlcore_disable_interrupts_nosync(wl);
808 ieee80211_queue_work(wl->hw, &wl->recovery_work);
/*
 * Append up to maxlen bytes of a FW-log memory block to the host-side
 * fwlog buffer (capped at PAGE_SIZE total, exported via sysfs).
 * Returns the number of bytes actually copied.
 */
812 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
816 /* Make sure we have enough room */
817 len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
819 /* Fill the FW log file, consumed by the sysfs fwlog entry */
820 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
821 wl->fwlog_size += len;
/*
 * On FW panic, pull the firmware log out of chip memory: stop the logger
 * (unless the FW is hung or dbgpins output is in use), read the first
 * block address from FW status, then walk the linked list of memory
 * blocks - each block's first 4 bytes point to the next - copying each
 * block's payload into the host fwlog buffer and waking any sysfs reader.
 * Temporarily switches the bus partition to reach the log blocks and
 * restores the previous partition on exit.
 */
826 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
828 struct wlcore_partition_set part, old_part;
835 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
836 (wl->conf.fwlog.mem_blocks == 0))
839 wl1271_info("Reading FW panic log");
841 block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
846 * Make sure the chip is awake and the logger isn't active.
847 * Do not send a stop fwlog command if the fw is hanged or if
848 * dbgpins are used (due to some fw bug).
850 if (wl1271_ps_elp_wakeup(wl))
852 if (!wl->watchdog_recovery &&
853 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
854 wl12xx_cmd_stop_fwlog(wl);
856 /* Read the first memory block address */
857 ret = wlcore_fw_status(wl, wl->fw_status);
861 addr = wl->fw_status->log_start_addr;
/* in continuous mode each block also carries an RX descriptor we skip */
865 if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
866 offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
867 end_of_log = wl->fwlog_end;
869 offset = sizeof(addr);
873 old_part = wl->curr_part;
874 memset(&part, 0, sizeof(part));
876 /* Traverse the memory blocks linked list */
878 part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
879 part.mem.size = PAGE_SIZE;
881 ret = wlcore_set_partition(wl, &part);
883 wl1271_error("%s: set_partition start=0x%X size=%d",
884 __func__, part.mem.start, part.mem.size);
888 memset(block, 0, wl->fw_mem_block_size);
889 ret = wlcore_read_hwaddr(wl, addr, block,
890 wl->fw_mem_block_size, false);
896 * Memory blocks are linked to one another. The first 4 bytes
897 * of each memory block hold the hardware address of the next
898 * one. The last memory block points to the first one in
899 * on demand mode and is equal to 0x2000000 in continuous mode.
901 addr = le32_to_cpup((__le32 *)block);
903 if (!wl12xx_copy_fwlog(wl, block + offset,
904 wl->fw_mem_block_size - offset))
906 } while (addr && (addr != end_of_log));
908 wake_up_interruptible(&wl->fwlog_waitq);
912 wlcore_set_partition(wl, &old_part);
/*
 * Preserve a station's Tx sequence-number base across a FW restart: copy
 * the link's total_freed_pkts into the station's private data, padded on
 * recovery to cover packets transmitted but not yet reported in FW status
 * (larger padding for GEM encryption).
 */
915 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
916 u8 hlid, struct ieee80211_sta *sta)
918 struct wl1271_station *wl_sta;
919 u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
921 wl_sta = (void *)sta->drv_priv;
922 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
925 * increment the initial seq number on recovery to account for
926 * transmitted packets that we haven't yet got in the FW status
928 if (wlvif->encryption_type == KEY_GEM)
929 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
931 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
932 wl_sta->total_freed_pkts += sqn_recovery_padding;
/*
 * Address-based wrapper around wlcore_save_freed_pkts(): look up the
 * ieee80211_sta by MAC on this vif and save its freed-packet count.
 * WARNs on an invalid hlid or all-zero address.
 */
935 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
936 struct wl12xx_vif *wlvif,
937 u8 hlid, const u8 *addr)
939 struct ieee80211_sta *sta;
940 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
942 if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
943 is_zero_ether_addr(addr)))
947 sta = ieee80211_find_sta(vif, addr);
949 wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
/*
 * Log diagnostic state for a recovery: FW version, the FW program counter
 * and interrupt status registers (read via a temporary switch to the BOOT
 * partition), and the running recovery count. Restores the WORK partition
 * before returning.
 */
953 static void wlcore_print_recovery(struct wl1271 *wl)
959 wl1271_info("Hardware recovery in progress. FW ver: %s",
960 wl->chip.fw_ver_str);
962 /* change partitions momentarily so we can read the FW pc */
963 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
967 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
971 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
975 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
976 pc, hint_sts, ++wl->recovery_count);
978 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
/*
 * FW recovery work item. Unless the recovery was intended, dump the FW
 * panic log and diagnostics first. Honors the bug_on_recovery and
 * no_recovery configuration knobs. Then: stop Tx queues, save per-station
 * sequence numbers for associated STA vifs, tear down all interfaces,
 * stop the core, and ask mac80211 to restart the hardware. Queues remain
 * stopped until the restart request completes, after which Tx is re-enabled.
 */
982 static void wl1271_recovery_work(struct work_struct *work)
985 container_of(work, struct wl1271, recovery_work);
986 struct wl12xx_vif *wlvif;
987 struct ieee80211_vif *vif;
989 mutex_lock(&wl->mutex);
991 if (wl->state == WLCORE_STATE_OFF || wl->plt)
994 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
995 if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
996 wl12xx_read_fwlog_panic(wl);
997 wlcore_print_recovery(wl);
1000 BUG_ON(wl->conf.recovery.bug_on_recovery &&
1001 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
1003 if (wl->conf.recovery.no_recovery) {
1004 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
1008 /* Prevent spurious TX during FW restart */
1009 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1011 /* reboot the chipset */
1012 while (!list_empty(&wl->wlvif_list)) {
1013 wlvif = list_first_entry(&wl->wlvif_list,
1014 struct wl12xx_vif, list);
1015 vif = wl12xx_wlvif_to_vif(wlvif);
1017 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
1018 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
1019 wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
1020 vif->bss_conf.bssid);
1023 __wl1271_op_remove_interface(wl, vif, false);
1026 wlcore_op_stop_locked(wl);
1028 ieee80211_restart_hw(wl->hw);
1031 * Its safe to enable TX now - the queues are stopped after a request
1032 * to restart the HW.
1034 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1037 wl->watchdog_recovery = false;
1038 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
1039 mutex_unlock(&wl->mutex);
/* Wake the FW from ELP by writing WAKE_UP to the ELP control register. */
1042 static int wlcore_fw_wakeup(struct wl1271 *wl)
1044 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
/*
 * Allocate the per-device status and Tx-result buffers (raw_fw_status,
 * fw_status, tx_res_if). On failure the earlier allocations are freed.
 * NOTE(review): error-path labels/returns are missing from this extract.
 */
1047 static int wl1271_setup(struct wl1271 *wl)
1049 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1050 if (!wl->raw_fw_status)
1053 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1057 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1063 kfree(wl->fw_status);
1064 kfree(wl->raw_fw_status);
/*
 * Power the chip on: delay, assert power, delay again, reset the bus,
 * select the BOOT partition and wake the ELP module. Powers back off on
 * failure.
 */
1068 static int wl12xx_set_power_on(struct wl1271 *wl)
1072 msleep(WL1271_PRE_POWER_ON_SLEEP);
1073 ret = wl1271_power_on(wl);
1076 msleep(WL1271_POWER_ON_SLEEP);
1077 wl1271_io_reset(wl);
1080 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1084 /* ELP module wake up */
1085 ret = wlcore_fw_wakeup(wl);
1093 wl1271_power_off(wl);
/*
 * Full chip bring-up: power on, negotiate the SDIO block size (clearing
 * the blocksize-align quirk if unsupported), allocate driver buffers via
 * wl1271_setup() and fetch the appropriate firmware image.
 */
1097 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1101 ret = wl12xx_set_power_on(wl);
1106 * For wl127x based devices we could use the default block
1107 * size (512 bytes), but due to a bug in the sdio driver, we
1108 * need to set it explicitly after the chip is powered on. To
1109 * simplify the code and since the performance impact is
1110 * negligible, we use the same block size for all different
1113 * Check if the bus supports blocksize alignment and, if it
1114 * doesn't, make sure we don't have the quirk.
1116 if (!wl1271_set_block_size(wl))
1117 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1119 /* TODO: make sure the lower driver has set things up correctly */
1121 ret = wl1271_setup(wl);
1125 ret = wl12xx_fetch_firmware(wl, plt);
/*
 * Enter PLT (production line testing) mode. Only valid from the OFF
 * state. Wakes the chip (retrying up to WL1271_BOOT_RETRIES times),
 * runs the chip-specific plt_init unless only CHIP_AWAKE was requested,
 * then marks the core ON and publishes hw/fw version info to wiphy.
 * Powers off and clears plt_mode if all boot attempts fail.
 */
1133 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1135 int retries = WL1271_BOOT_RETRIES;
1136 struct wiphy *wiphy = wl->hw->wiphy;
1138 static const char* const PLT_MODE[] = {
1147 mutex_lock(&wl->mutex);
1149 wl1271_notice("power up");
1151 if (wl->state != WLCORE_STATE_OFF) {
1152 wl1271_error("cannot go into PLT state because not "
1153 "in off state: %d", wl->state);
1158 /* Indicate to lower levels that we are now in PLT mode */
1160 wl->plt_mode = plt_mode;
1164 ret = wl12xx_chip_wakeup(wl, true);
1168 if (plt_mode != PLT_CHIP_AWAKE) {
1169 ret = wl->ops->plt_init(wl);
1174 wl->state = WLCORE_STATE_ON;
1175 wl1271_notice("firmware booted in PLT mode %s (%s)",
1177 wl->chip.fw_ver_str);
1179 /* update hw/fw version info in wiphy struct */
1180 wiphy->hw_version = wl->chip.id;
1181 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1182 sizeof(wiphy->fw_version));
1187 wl1271_power_off(wl);
1191 wl->plt_mode = PLT_OFF;
1193 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1194 WL1271_BOOT_RETRIES);
1196 mutex_unlock(&wl->mutex);
/*
 * Leave PLT mode and power the chip down. Interrupts are disabled before
 * the state changes so the handler can't run with stale state; the
 * matching enable only rebalances the disable count. All outstanding
 * work items are flushed/cancelled before the final power-off, after
 * which the core returns to the OFF state with PS auth reset.
 */
1201 int wl1271_plt_stop(struct wl1271 *wl)
1205 wl1271_notice("power down");
1208 * Interrupts must be disabled before setting the state to OFF.
1209 * Otherwise, the interrupt handler might be called and exit without
1210 * reading the interrupt status.
1212 wlcore_disable_interrupts(wl);
1213 mutex_lock(&wl->mutex);
1215 mutex_unlock(&wl->mutex);
1218 * This will not necessarily enable interrupts as interrupts
1219 * may have been disabled when op_stop was called. It will,
1220 * however, balance the above call to disable_interrupts().
1222 wlcore_enable_interrupts(wl);
1224 wl1271_error("cannot power down because not in PLT "
1225 "state: %d", wl->state);
1230 mutex_unlock(&wl->mutex);
1232 wl1271_flush_deferred_work(wl);
1233 cancel_work_sync(&wl->netstack_work);
1234 cancel_work_sync(&wl->recovery_work);
1235 cancel_delayed_work_sync(&wl->elp_work);
1236 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1238 mutex_lock(&wl->mutex);
1239 wl1271_power_off(wl);
1241 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1242 wl->state = WLCORE_STATE_OFF;
1244 wl->plt_mode = PLT_OFF;
1246 mutex_unlock(&wl->mutex);
/*
 * mac80211 .tx callback. Maps the skb to a link (hlid) and AC queue,
 * then under wl_lock either drops it (no vif, invalid/stopped link) or
 * enqueues it on the per-link queue and bumps the counters. Applies the
 * high-watermark soft-stop to keep per-vif queues bounded and kicks
 * tx_work unless the FW is busy or Tx is already being handled inline
 * by the IRQ thread.
 */
1252 static void wl1271_op_tx(struct ieee80211_hw *hw,
1253 struct ieee80211_tx_control *control,
1254 struct sk_buff *skb)
1256 struct wl1271 *wl = hw->priv;
1257 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1258 struct ieee80211_vif *vif = info->control.vif;
1259 struct wl12xx_vif *wlvif = NULL;
1260 unsigned long flags;
1265 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1266 ieee80211_free_txskb(hw, skb);
1270 wlvif = wl12xx_vif_to_data(vif);
1271 mapping = skb_get_queue_mapping(skb);
1272 q = wl1271_tx_get_queue(mapping);
1274 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1276 spin_lock_irqsave(&wl->wl_lock, flags);
1279 * drop the packet if the link is invalid or the queue is stopped
1280 * for any reason but watermark. Watermark is a "soft"-stop so we
1281 * allow these packets through.
1283 if (hlid == WL12XX_INVALID_LINK_ID ||
1284 (!test_bit(hlid, wlvif->links_map)) ||
1285 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1286 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1287 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1288 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1289 ieee80211_free_txskb(hw, skb);
1293 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1295 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1297 wl->tx_queue_count[q]++;
1298 wlvif->tx_queue_count[q]++;
1301 * The workqueue is slow to process the tx_queue and we need stop
1302 * the queue here, otherwise the queue will get too long.
1304 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1305 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1306 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1307 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1308 wlcore_stop_queue_locked(wl, wlvif, q,
1309 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1313 * The chip specific setup must run before the first TX packet -
1314 * before that, the tx_work will not be initialized!
1317 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1318 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1319 ieee80211_queue_work(wl->hw, &wl->tx_work);
1322 spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
 * Queue the pre-allocated dummy packet the FW requests when it is low on
 * RX memory blocks. No-op if one is already pending. If the FW Tx path
 * is idle, transmit it immediately via wlcore_tx_work_locked(); otherwise
 * the threaded IRQ handler will schedule the Tx work.
 */
1325 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1327 unsigned long flags;
1330 /* no need to queue a new dummy packet if one is already pending */
1331 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1334 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1336 spin_lock_irqsave(&wl->wl_lock, flags);
1337 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1338 wl->tx_queue_count[q]++;
1339 spin_unlock_irqrestore(&wl->wl_lock, flags);
1341 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1342 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1343 return wlcore_tx_work_locked(wl);
1346 * If the FW TX is busy, TX work will be scheduled by the threaded
1347 * interrupt handler function
1353 * The size of the dummy packet should be at least 1400 bytes. However, in
1354 * order to minimize the number of bus transactions, aligning it to 512 bytes
1355 * boundaries could be beneficial, performance wise
1357 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
/*
 * Build the reusable dummy packet: a zeroed null-data (NULLFUNC, ToDS)
 * frame padded out to TOTAL_TX_DUMMY_PACKET_SIZE, with headroom reserved
 * for the Tx HW descriptor, management TID priority and a cleared CB.
 * Returns NULL if the skb allocation fails.
 */
1359 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1361 struct sk_buff *skb;
1362 struct ieee80211_hdr_3addr *hdr;
1363 unsigned int dummy_packet_size;
1365 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1366 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1368 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1370 wl1271_warning("Failed to allocate a dummy packet skb");
1374 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1376 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1377 memset(hdr, 0, sizeof(*hdr));
1378 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1379 IEEE80211_STYPE_NULLFUNC |
1380 IEEE80211_FCTL_TODS);
1382 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1384 /* Dummy packets require the TID to be management */
1385 skb->priority = WL1271_TID_MGMT;
1387 /* Initialize all fields that might be used */
1388 skb_set_queue_mapping(skb, 0);
1389 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
/*
 * wl1271_validate_wowlan_pattern - check a WoWLAN pattern against FW limits.
 *
 * Walks the pattern mask and counts the contiguous segments ("fields" in FW
 * terms) that the RX filter would need, verifying both the field count and
 * the total fields-buffer size fit the FW limits.  NOTE(review): the return
 * statements and parts of the loop are missing from this excerpt.
 */
1397 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1399 int num_fields = 0, in_field = 0, fields_size = 0;
1400 int i, pattern_len = 0;
1403 wl1271_warning("No mask in WoWLAN pattern");
1408 * The pattern is broken up into segments of bytes at different offsets
1409 * that need to be checked by the FW filter. Each segment is called
1410 * a field in the FW API. We verify that the total number of fields
1411 * required for this pattern won't exceed FW limits (8)
1412 * as well as the total fields buffer won't exceed the FW limit.
1413 * Note that if there's a pattern which crosses Ethernet/IP header
1414 * boundary a new field is required.
1416 for (i = 0; i < p->pattern_len; i++) {
1417 if (test_bit(i, (unsigned long *)p->mask)) {
/* a field crossing the Ethernet/IP boundary must be split in two */
1422 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1424 fields_size += pattern_len +
1425 RX_FILTER_FIELD_OVERHEAD;
1433 fields_size += pattern_len +
1434 RX_FILTER_FIELD_OVERHEAD;
1441 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1445 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1446 wl1271_warning("RX Filter too complex. Too many segments");
1450 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1451 wl1271_warning("RX filter pattern is too big");
1458 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1460 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
/*
 * wl1271_rx_filter_free - release an RX filter and every field pattern it
 * owns.  NOTE(review): the NULL check and the final kfree(filter) are missing
 * from this excerpt.
 */
1463 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1470 for (i = 0; i < filter->num_fields; i++)
1471 kfree(filter->fields[i].pattern);
/*
 * wl1271_rx_filter_alloc_field - append one match field to an RX filter.
 * @filter: filter to extend (at most WL1271_RX_FILTER_MAX_FIELDS fields)
 * @offset: byte offset of the match, relative to the header named by @flags
 * @flags: WL1271_RX_FILTER_FLAG_* header selector
 * @pattern: bytes to match (copied; @len bytes)
 *
 * The pattern buffer is duplicated and owned by the filter; it is released
 * by wl1271_rx_filter_free().  NOTE(review): the error returns and the
 * field->len assignment are missing from this excerpt.
 */
1476 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1477 u16 offset, u8 flags,
1478 const u8 *pattern, u8 len)
1480 struct wl12xx_rx_filter_field *field;
1482 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1483 wl1271_warning("Max fields per RX filter. can't alloc another");
1487 field = &filter->fields[filter->num_fields];
1489 field->pattern = kzalloc(len, GFP_KERNEL);
1490 if (!field->pattern) {
1491 wl1271_warning("Failed to allocate RX filter pattern");
1495 filter->num_fields++;
/* offset is stored little-endian, as consumed by the FW */
1497 field->offset = cpu_to_le16(offset);
1498 field->flags = flags;
1500 memcpy(field->pattern, pattern, len);
/*
 * wl1271_rx_filter_get_fields_size - total flattened size of all fields.
 *
 * Sums, per field, the struct size minus the pattern-pointer member plus the
 * actual pattern length — i.e. the size the flattened wire format occupies
 * (see wl1271_rx_filter_flatten_fields()).  NOTE(review): the trailing
 * "sizeof(u8 *);" term and the return are missing from this excerpt.
 */
1505 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1507 int i, fields_size = 0;
1509 for (i = 0; i < filter->num_fields; i++)
1510 fields_size += filter->fields[i].len +
1511 sizeof(struct wl12xx_rx_filter_field) -
/*
 * wl1271_rx_filter_flatten_fields - serialize filter fields into @buf.
 *
 * Writes each field header followed inline by its pattern bytes, replacing
 * the in-memory pattern pointer with the pattern data itself; @buf must be
 * at least wl1271_rx_filter_get_fields_size() bytes.
 */
1517 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1521 struct wl12xx_rx_filter_field *field;
1523 for (i = 0; i < filter->num_fields; i++) {
1524 field = (struct wl12xx_rx_filter_field *)buf;
1526 field->offset = filter->fields[i].offset;
1527 field->flags = filter->fields[i].flags;
1528 field->len = filter->fields[i].len;
/* copy the pattern in place of the pointer member, then advance past it */
1530 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1531 buf += sizeof(struct wl12xx_rx_filter_field) -
1532 sizeof(u8 *) + field->len;
1537 * Allocates an RX filter returned through f
1538 * which needs to be freed using rx_filter_free()
/*
 * Converts a cfg80211 WoWLAN pattern (bytes + bitmask) into a FW RX filter:
 * each contiguous masked run becomes one field, split at the Ethernet/IP
 * header boundary, with the offset rebased to the matching header.
 * NOTE(review): several original lines (locals, loop-advance, error return)
 * are missing from this excerpt.
 */
1541 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1542 struct wl12xx_rx_filter **f)
1545 struct wl12xx_rx_filter *filter;
1549 filter = wl1271_rx_filter_alloc();
1551 wl1271_warning("Failed to alloc rx filter");
1557 while (i < p->pattern_len) {
/* skip unmasked bytes; they are "don't care" for the filter */
1558 if (!test_bit(i, (unsigned long *)p->mask)) {
/* find the end of the current contiguous masked run */
1563 for (j = i; j < p->pattern_len; j++) {
1564 if (!test_bit(j, (unsigned long *)p->mask))
/* force a field split at the Ethernet/IP header boundary */
1567 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1568 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1572 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1574 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1576 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1577 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1582 ret = wl1271_rx_filter_alloc_field(filter,
1585 &p->pattern[i], len);
/* matching packets should wake the host */
1592 filter->action = FILTER_SIGNAL;
1598 wl1271_rx_filter_free(filter);
/*
 * wl1271_configure_wowlan - program WoWLAN wakeup filters into the FW.
 *
 * With no patterns (or wow->any), clears all RX filters and restores the
 * default signal behavior.  Otherwise validates every pattern first, clears
 * the current FW filter state, installs one RX filter per pattern, and sets
 * the default action to DROP so only matching packets wake the host.
 * NOTE(review): goto labels/returns are missing from this excerpt.
 */
1604 static int wl1271_configure_wowlan(struct wl1271 *wl,
1605 struct cfg80211_wowlan *wow)
1609 if (!wow || wow->any || !wow->n_patterns) {
1610 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1615 ret = wl1271_rx_filter_clear_all(wl);
1622 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1625 /* Validate all incoming patterns before clearing current FW state */
1626 for (i = 0; i < wow->n_patterns; i++) {
1627 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1629 wl1271_warning("Bad wowlan pattern %d", i);
1634 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1638 ret = wl1271_rx_filter_clear_all(wl);
1642 /* Translate WoWLAN patterns into filters */
1643 for (i = 0; i < wow->n_patterns; i++) {
1644 struct cfg80211_pkt_pattern *p;
1645 struct wl12xx_rx_filter *filter = NULL;
1647 p = &wow->patterns[i];
1649 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1651 wl1271_warning("Failed to create an RX filter from "
1652 "wowlan pattern %d", i);
1656 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
/* the FW keeps its own copy; free ours regardless of success */
1658 wl1271_rx_filter_free(filter);
/* non-matching traffic is dropped while filters are active */
1663 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
/*
 * wl1271_configure_suspend_sta - prepare a STA vif for system suspend.
 *
 * No-op unless the station is associated.  Programs the WoWLAN filters and,
 * when the suspend wake-up configuration differs from the active one,
 * switches the FW wake-up conditions to the suspend values.
 */
1669 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1670 struct wl12xx_vif *wlvif,
1671 struct cfg80211_wowlan *wow)
1675 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1678 ret = wl1271_configure_wowlan(wl, wow);
/* skip the ACX if the suspend settings already match the active ones */
1682 if ((wl->conf.conn.suspend_wake_up_event ==
1683 wl->conf.conn.wake_up_event) &&
1684 (wl->conf.conn.suspend_listen_interval ==
1685 wl->conf.conn.listen_interval))
1688 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1689 wl->conf.conn.suspend_wake_up_event,
1690 wl->conf.conn.suspend_listen_interval);
1693 wl1271_error("suspend: set wake up conditions failed: %d", ret);
/*
 * wl1271_configure_suspend_ap - prepare an AP vif for system suspend.
 *
 * No-op unless the AP is started.  Enables beacon filtering (reverted in
 * wl1271_configure_resume()) and programs the WoWLAN filters.
 */
1699 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1700 struct wl12xx_vif *wlvif,
1701 struct cfg80211_wowlan *wow)
1705 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1708 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1712 ret = wl1271_configure_wowlan(wl, wow);
/*
 * wl1271_configure_suspend - dispatch per-vif suspend setup by BSS type.
 * STA and AP vifs get their dedicated handlers; other types are left as-is.
 */
1721 static int wl1271_configure_suspend(struct wl1271 *wl,
1722 struct wl12xx_vif *wlvif,
1723 struct cfg80211_wowlan *wow)
1725 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1726 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1727 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1728 return wl1271_configure_suspend_ap(wl, wlvif, wow);
/*
 * wl1271_configure_resume - undo wl1271_configure_suspend() for one vif.
 *
 * Clears the WoWLAN filters, restores the normal wake-up conditions for an
 * associated STA, and re-disables beacon filtering for a started AP.
 */
1732 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1735 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1736 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1738 if ((!is_ap) && (!is_sta))
/* nothing was configured at suspend time for idle vifs */
1741 if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1742 (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1745 wl1271_configure_wowlan(wl, NULL);
/* only rewrite wake-up conditions if suspend changed them */
1748 if ((wl->conf.conn.suspend_wake_up_event ==
1749 wl->conf.conn.wake_up_event) &&
1750 (wl->conf.conn.suspend_listen_interval ==
1751 wl->conf.conn.listen_interval))
1754 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1755 wl->conf.conn.wake_up_event,
1756 wl->conf.conn.listen_interval);
1759 wl1271_error("resume: wake up conditions failed: %d",
1763 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
/*
 * wl1271_op_suspend - mac80211 suspend callback.
 *
 * Flushes TX, wakes the chip, configures WoWLAN/wake-up per vif, disables
 * fast-link notifications and (optionally) RX BA frames, then quiesces all
 * driver work so no host activity remains while suspended.  Postpones the
 * suspend (returns busy) if a recovery is in flight.
 * NOTE(review): several original lines (returns, error labels) are missing
 * from this excerpt.
 */
1767 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1768 struct cfg80211_wowlan *wow)
1770 struct wl1271 *wl = hw->priv;
1771 struct wl12xx_vif *wlvif;
1774 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1777 /* we want to perform the recovery before suspending */
1778 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1779 wl1271_warning("postponing suspend to perform recovery");
1783 wl1271_tx_flush(wl);
1785 mutex_lock(&wl->mutex);
1787 ret = wl1271_ps_elp_wakeup(wl);
1789 mutex_unlock(&wl->mutex);
1793 wl->wow_enabled = true;
1794 wl12xx_for_each_wlvif(wl, wlvif) {
1795 ret = wl1271_configure_suspend(wl, wlvif, wow);
1797 mutex_unlock(&wl->mutex);
1798 wl1271_warning("couldn't prepare device to suspend");
1803 /* disable fast link flow control notifications from FW */
1804 ret = wlcore_hw_interrupt_notify(wl, false);
1808 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1809 ret = wlcore_hw_rx_ba_filter(wl,
1810 !!wl->conf.conn.suspend_rx_ba_activity);
1815 wl1271_ps_elp_sleep(wl);
1816 mutex_unlock(&wl->mutex);
1819 wl1271_warning("couldn't prepare device to suspend");
1823 /* flush any remaining work */
1824 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1827 * disable and re-enable interrupts in order to flush
1830 wlcore_disable_interrupts(wl);
1833 * set suspended flag to avoid triggering a new threaded_irq
1834 * work. no need for spinlock as interrupts are disabled.
1836 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1838 wlcore_enable_interrupts(wl);
1839 flush_work(&wl->tx_work);
1840 flush_delayed_work(&wl->elp_work);
1843 * Cancel the watchdog even if above tx_flush failed. We will detect
1844 * it on resume anyway.
1846 cancel_delayed_work(&wl->tx_watchdog_work);
1851 static int wl1271_op_resume(struct ieee80211_hw *hw)
1853 struct wl1271 *wl = hw->priv;
1854 struct wl12xx_vif *wlvif;
1855 unsigned long flags;
1856 bool run_irq_work = false, pending_recovery;
1859 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1861 WARN_ON(!wl->wow_enabled);
1864 * re-enable irq_work enqueuing, and call irq_work directly if
1865 * there is a pending work.
1867 spin_lock_irqsave(&wl->wl_lock, flags);
1868 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1869 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1870 run_irq_work = true;
1871 spin_unlock_irqrestore(&wl->wl_lock, flags);
1873 mutex_lock(&wl->mutex);
1875 /* test the recovery flag before calling any SDIO functions */
1876 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1880 wl1271_debug(DEBUG_MAC80211,
1881 "run postponed irq_work directly");
1883 /* don't talk to the HW if recovery is pending */
1884 if (!pending_recovery) {
1885 ret = wlcore_irq_locked(wl);
1887 wl12xx_queue_recovery_work(wl);
1890 wlcore_enable_interrupts(wl);
1893 if (pending_recovery) {
1894 wl1271_warning("queuing forgotten recovery on resume");
1895 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1899 ret = wl1271_ps_elp_wakeup(wl);
1903 wl12xx_for_each_wlvif(wl, wlvif) {
1904 wl1271_configure_resume(wl, wlvif);
1907 ret = wlcore_hw_interrupt_notify(wl, true);
1911 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1912 ret = wlcore_hw_rx_ba_filter(wl, false);
1917 wl1271_ps_elp_sleep(wl);
1920 wl->wow_enabled = false;
1923 * Set a flag to re-init the watchdog on the first Tx after resume.
1924 * That way we avoid possible conditions where Tx-complete interrupts
1925 * fail to arrive and we perform a spurious recovery.
1927 set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1928 mutex_unlock(&wl->mutex);
/*
 * wl1271_op_start - mac80211 start callback.
 *
 * Intentionally does not boot the hardware; booting is deferred to
 * add_interface time because the FW needs the MAC address before download
 * (see the comment below).  NOTE(review): the "return 0;" tail is missing
 * from this excerpt.
 */
1934 static int wl1271_op_start(struct ieee80211_hw *hw)
1936 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1939 * We have to delay the booting of the hardware because
1940 * we need to know the local MAC address before downloading and
1941 * initializing the firmware. The MAC address cannot be changed
1942 * after boot, and without the proper MAC address, the firmware
1943 * will not function properly.
1945 * The MAC address is first known when the corresponding interface
1946 * is added. That is where we will initialize the hardware.
/*
 * wlcore_op_stop_locked - bring the device fully down (wl->mutex held).
 *
 * Sets WLCORE_STATE_OFF first so concurrent work bails out, disables and
 * synchronizes interrupts (dropping the mutex while flushing/cancelling all
 * work items), resets TX state, powers the chip off, and re-initializes the
 * per-device bookkeeping to boot defaults.  Re-enables interrupts afterwards
 * to balance the disable depth when a recovery had disabled them.
 */
1952 static void wlcore_op_stop_locked(struct wl1271 *wl)
1956 if (wl->state == WLCORE_STATE_OFF) {
1957 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1959 wlcore_enable_interrupts(wl);
1965 * this must be before the cancel_work calls below, so that the work
1966 * functions don't perform further work.
1968 wl->state = WLCORE_STATE_OFF;
1971 * Use the nosync variant to disable interrupts, so the mutex could be
1972 * held while doing so without deadlocking.
1974 wlcore_disable_interrupts_nosync(wl);
/* drop the mutex while synchronizing/cancelling work to avoid deadlock */
1976 mutex_unlock(&wl->mutex);
1978 wlcore_synchronize_interrupts(wl);
1979 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1980 cancel_work_sync(&wl->recovery_work);
1981 wl1271_flush_deferred_work(wl);
1982 cancel_delayed_work_sync(&wl->scan_complete_work);
1983 cancel_work_sync(&wl->netstack_work);
1984 cancel_work_sync(&wl->tx_work);
1985 cancel_delayed_work_sync(&wl->elp_work);
1986 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1988 /* let's notify MAC80211 about the remaining pending TX frames */
1989 mutex_lock(&wl->mutex);
1990 wl12xx_tx_reset(wl);
1992 wl1271_power_off(wl);
1994 * In case a recovery was scheduled, interrupts were disabled to avoid
1995 * an interrupt storm. Now that the power is down, it is safe to
1996 * re-enable interrupts to balance the disable depth
1998 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1999 wlcore_enable_interrupts(wl);
/* reset the per-device state to boot defaults */
2001 wl->band = IEEE80211_BAND_2GHZ;
2004 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
2005 wl->channel_type = NL80211_CHAN_NO_HT;
2006 wl->tx_blocks_available = 0;
2007 wl->tx_allocated_blocks = 0;
2008 wl->tx_results_count = 0;
2009 wl->tx_packets_count = 0;
2010 wl->time_offset = 0;
2011 wl->ap_fw_ps_map = 0;
2013 wl->sleep_auth = WL1271_PSM_ILLEGAL;
2014 memset(wl->roles_map, 0, sizeof(wl->roles_map));
2015 memset(wl->links_map, 0, sizeof(wl->links_map));
2016 memset(wl->roc_map, 0, sizeof(wl->roc_map));
2017 memset(wl->session_ids, 0, sizeof(wl->session_ids));
2018 memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
2019 wl->active_sta_count = 0;
2020 wl->active_link_count = 0;
2022 /* The system link is always allocated */
2023 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
2024 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
2025 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
2028 * this is performed after the cancel_work calls and the associated
2029 * mutex_lock, so that wl1271_op_add_interface does not accidentally
2030 * get executed before all these vars have been reset.
2034 wl->tx_blocks_freed = 0;
2036 for (i = 0; i < NUM_TX_QUEUES; i++) {
2037 wl->tx_pkts_freed[i] = 0;
2038 wl->tx_allocated_pkts[i] = 0;
2041 wl1271_debugfs_reset(wl);
/* free the FW status/result buffers; reallocated on next boot */
2043 kfree(wl->raw_fw_status);
2044 wl->raw_fw_status = NULL;
2045 kfree(wl->fw_status);
2046 wl->fw_status = NULL;
2047 kfree(wl->tx_res_if);
2048 wl->tx_res_if = NULL;
2049 kfree(wl->target_mem_map);
2050 wl->target_mem_map = NULL;
2053 * FW channels must be re-calibrated after recovery,
2054 * save current Reg-Domain channel configuration and clear it.
2056 memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2057 sizeof(wl->reg_ch_conf_pending));
2058 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
/*
 * wlcore_op_stop - mac80211 stop callback; takes wl->mutex and delegates
 * the actual teardown to wlcore_op_stop_locked().
 */
2061 static void wlcore_op_stop(struct ieee80211_hw *hw)
2063 struct wl1271 *wl = hw->priv;
2065 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2067 mutex_lock(&wl->mutex);
2069 wlcore_op_stop_locked(wl);
2071 mutex_unlock(&wl->mutex);
/*
 * wlcore_channel_switch_work - delayed-work timeout for a channel switch.
 *
 * Fires when a FW channel switch did not complete in time: reports failure
 * to mac80211 and asks the FW to stop the switch.
 * NOTE(review): some lines (wl lookup, out labels) are missing from this
 * excerpt.
 */
2074 static void wlcore_channel_switch_work(struct work_struct *work)
2076 struct delayed_work *dwork;
2078 struct ieee80211_vif *vif;
2079 struct wl12xx_vif *wlvif;
2082 dwork = container_of(work, struct delayed_work, work);
2083 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2086 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2088 mutex_lock(&wl->mutex);
2090 if (unlikely(wl->state != WLCORE_STATE_ON))
2093 /* check the channel switch is still ongoing */
2094 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2097 vif = wl12xx_wlvif_to_vif(wlvif);
/* report the failed switch to mac80211 */
2098 ieee80211_chswitch_done(vif, false);
2100 ret = wl1271_ps_elp_wakeup(wl);
2104 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2106 wl1271_ps_elp_sleep(wl);
2108 mutex_unlock(&wl->mutex);
/*
 * wlcore_connection_loss_work - delayed work that reports a lost connection
 * to mac80211 once the loss has persisted (still associated when it fires).
 */
2111 static void wlcore_connection_loss_work(struct work_struct *work)
2113 struct delayed_work *dwork;
2115 struct ieee80211_vif *vif;
2116 struct wl12xx_vif *wlvif;
2118 dwork = container_of(work, struct delayed_work, work);
2119 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2122 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2124 mutex_lock(&wl->mutex);
2126 if (unlikely(wl->state != WLCORE_STATE_ON))
2129 /* Call mac80211 connection loss */
2130 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2133 vif = wl12xx_wlvif_to_vif(wlvif);
2134 ieee80211_connection_loss(vif);
2136 mutex_unlock(&wl->mutex);
/*
 * wlcore_pending_auth_complete_work - AP-mode delayed work that cancels the
 * ROC kept alive for a peer whose auth exchange never completed, once the
 * pending-auth timeout has genuinely elapsed.
 */
2139 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2141 struct delayed_work *dwork;
2143 struct wl12xx_vif *wlvif;
2144 unsigned long time_spare;
2147 dwork = container_of(work, struct delayed_work, work);
2148 wlvif = container_of(dwork, struct wl12xx_vif,
2149 pending_auth_complete_work);
2152 mutex_lock(&wl->mutex);
2154 if (unlikely(wl->state != WLCORE_STATE_ON))
2158 * Make sure a second really passed since the last auth reply. Maybe
2159 * a second auth reply arrived while we were stuck on the mutex.
2160 * Check for a little less than the timeout to protect from scheduler
/* allow 50ms of scheduler slack below the nominal timeout */
2163 time_spare = jiffies +
2164 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2165 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2168 ret = wl1271_ps_elp_wakeup(wl);
2172 /* cancel the ROC if active */
2173 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2175 wl1271_ps_elp_sleep(wl);
2177 mutex_unlock(&wl->mutex);
/*
 * wl12xx_allocate_rate_policy - grab a free rate-policy slot from the bitmap
 * and store its index in *idx.  NOTE(review): the error/success returns are
 * missing from this excerpt.
 */
2180 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2182 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2183 WL12XX_MAX_RATE_POLICIES);
2184 if (policy >= WL12XX_MAX_RATE_POLICIES)
2187 __set_bit(policy, wl->rate_policies_map);
/*
 * wl12xx_free_rate_policy - return a rate-policy slot to the bitmap and mark
 * *idx invalid (WL12XX_MAX_RATE_POLICIES acts as the "unset" sentinel).
 */
2192 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2194 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2197 __clear_bit(*idx, wl->rate_policies_map);
2198 *idx = WL12XX_MAX_RATE_POLICIES;
/*
 * wlcore_allocate_klv_template - grab a free keep-alive (KLV) template slot
 * from the bitmap and store its index in *idx.  NOTE(review): the
 * error/success returns are missing from this excerpt.
 */
2201 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2203 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2204 WLCORE_MAX_KLV_TEMPLATES);
2205 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2208 __set_bit(policy, wl->klv_templates_map);
/*
 * wlcore_free_klv_template - return a KLV template slot to the bitmap and
 * mark *idx invalid (WLCORE_MAX_KLV_TEMPLATES acts as the "unset" sentinel).
 */
2213 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2215 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2218 __clear_bit(*idx, wl->klv_templates_map);
2219 *idx = WLCORE_MAX_KLV_TEMPLATES;
/*
 * wl12xx_get_role_type - map a vif's BSS type (and p2p flag) to a FW role.
 * Returns WL12XX_INVALID_ROLE_TYPE for unknown BSS types.
 * NOTE(review): the p2p-flag conditions inside each case are missing from
 * this excerpt.
 */
2222 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2224 switch (wlvif->bss_type) {
2225 case BSS_TYPE_AP_BSS:
2227 return WL1271_ROLE_P2P_GO;
2229 return WL1271_ROLE_AP;
2231 case BSS_TYPE_STA_BSS:
2233 return WL1271_ROLE_P2P_CL;
2235 return WL1271_ROLE_STA;
2238 return WL1271_ROLE_IBSS;
2241 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2243 return WL12XX_INVALID_ROLE_TYPE;
/*
 * wl12xx_init_vif_data - reset per-vif state for a (re)added interface.
 *
 * Clears everything up to the persistent section, derives the BSS type from
 * the mac80211 interface type, allocates rate policies (and a keep-alive
 * template for STA/IBSS), seeds per-vif copies of global settings, and wires
 * up all per-vif work items and the RX-streaming timer.
 */
2246 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2248 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2251 /* clear everything but the persistent data */
2252 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2254 switch (ieee80211_vif_type_p2p(vif)) {
2255 case NL80211_IFTYPE_P2P_CLIENT:
2258 case NL80211_IFTYPE_STATION:
2259 wlvif->bss_type = BSS_TYPE_STA_BSS;
2261 case NL80211_IFTYPE_ADHOC:
2262 wlvif->bss_type = BSS_TYPE_IBSS;
2264 case NL80211_IFTYPE_P2P_GO:
2267 case NL80211_IFTYPE_AP:
2268 wlvif->bss_type = BSS_TYPE_AP_BSS;
2271 wlvif->bss_type = MAX_BSS_TYPE;
/* roles/links start out unassigned */
2275 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2276 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2277 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2279 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2280 wlvif->bss_type == BSS_TYPE_IBSS) {
2281 /* init sta/ibss data */
2282 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2283 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2284 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2285 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2286 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2287 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2288 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2289 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
/* AP path: broadcast/global links and per-AC rate policies */
2292 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2293 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2294 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2295 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2296 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2297 wl12xx_allocate_rate_policy(wl,
2298 &wlvif->ap.ucast_rate_idx[i]);
2299 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2301 * TODO: check if basic_rate shouldn't be
2302 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2303 * instead (the same thing for STA above).
2305 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2306 /* TODO: this seems to be used only for STA, check it */
2307 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2310 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2311 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2312 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2315 * mac80211 configures some values globally, while we treat them
2316 * per-interface. thus, on init, we have to copy them from wl
2318 wlvif->band = wl->band;
2319 wlvif->channel = wl->channel;
2320 wlvif->power_level = wl->power_level;
2321 wlvif->channel_type = wl->channel_type;
2323 INIT_WORK(&wlvif->rx_streaming_enable_work,
2324 wl1271_rx_streaming_enable_work);
2325 INIT_WORK(&wlvif->rx_streaming_disable_work,
2326 wl1271_rx_streaming_disable_work);
2327 INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2328 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2329 wlcore_channel_switch_work);
2330 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2331 wlcore_connection_loss_work);
2332 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2333 wlcore_pending_auth_complete_work);
2334 INIT_LIST_HEAD(&wlvif->list);
2336 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2337 (unsigned long) wlvif);
/*
 * wl12xx_init_fw - boot the chip and firmware, retrying on failure.
 *
 * Attempts wakeup/boot/hw-init up to WL1271_BOOT_RETRIES times; on a failed
 * attempt it quiesces interrupts and deferred work (briefly dropping the
 * mutex — see the inline comment) and powers off before retrying.  On
 * success it publishes the FW version to wiphy, disables 5GHz channels when
 * 11a is unsupported, and moves the driver to WLCORE_STATE_ON.
 * NOTE(review): the retry-loop control lines and error gotos are missing
 * from this excerpt.
 */
2341 static int wl12xx_init_fw(struct wl1271 *wl)
2343 int retries = WL1271_BOOT_RETRIES;
2344 bool booted = false;
2345 struct wiphy *wiphy = wl->hw->wiphy;
2350 ret = wl12xx_chip_wakeup(wl, false);
2354 ret = wl->ops->boot(wl);
2358 ret = wl1271_hw_init(wl);
2366 mutex_unlock(&wl->mutex);
2367 /* Unlocking the mutex in the middle of handling is
2368 inherently unsafe. In this case we deem it safe to do,
2369 because we need to let any possibly pending IRQ out of
2370 the system (and while we are WLCORE_STATE_OFF the IRQ
2371 work function will not do anything.) Also, any other
2372 possible concurrent operations will fail due to the
2373 current state, hence the wl1271 struct should be safe. */
2374 wlcore_disable_interrupts(wl);
2375 wl1271_flush_deferred_work(wl);
2376 cancel_work_sync(&wl->netstack_work);
2377 mutex_lock(&wl->mutex);
2379 wl1271_power_off(wl);
2383 wl1271_error("firmware boot failed despite %d retries",
2384 WL1271_BOOT_RETRIES);
2388 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2390 /* update hw/fw version info in wiphy struct */
2391 wiphy->hw_version = wl->chip.id;
2392 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2393 sizeof(wiphy->fw_version));
2396 * Now we know if 11a is supported (info from the NVS), so disable
2397 * 11a channels if not supported
2399 if (!wl->enable_11a)
2400 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2402 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2403 wl->enable_11a ? "" : "not ");
2405 wl->state = WLCORE_STATE_ON;
2410 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2412 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2416 * Check whether a fw switch (i.e. moving from one loaded
2417 * fw to another) is needed. This function is also responsible
2418 * for updating wl->last_vif_count, so it must be called before
2419 * loading a non-plt fw (so the correct fw (single-role/multi-role)
/*
 * Decides between the single-role and multi-role firmware based on the vif
 * count.  NOTE(review): some return statements are missing from this
 * excerpt.
 */
2422 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2423 struct vif_counter_data vif_counter_data,
2426 enum wl12xx_fw_type current_fw = wl->fw_type;
2427 u8 vif_count = vif_counter_data.counter;
2429 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2432 /* increase the vif count if this is a new vif */
2433 if (add && !vif_counter_data.cur_vif_running)
2436 wl->last_vif_count = vif_count;
2438 /* no need for fw change if the device is OFF */
2439 if (wl->state == WLCORE_STATE_OFF)
2442 /* no need for fw change if a single fw is used */
2443 if (!wl->mr_fw_name)
/* switch to MR fw with >1 vifs, back to single-role with <=1 */
2446 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2448 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2455 * Enter "forced psm". Make sure the sta is in psm against the ap,
2456 * to make the fw switch a bit more disconnection-persistent.
/* Puts every STA vif into power-save before an intended FW switch/restart. */
2458 static void wl12xx_force_active_psm(struct wl1271 *wl)
2460 struct wl12xx_vif *wlvif;
2462 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2463 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
/* Accumulator for wlcore_hw_queue_iter(): which HW queue-base slots are in
 * use, and whether the vif being (re)added is already running in mac80211. */
2467 struct wlcore_hw_queue_iter_data {
2468 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2470 struct ieee80211_vif *vif;
2471 /* is the current vif among those iterated */
/*
 * wlcore_hw_queue_iter - active-interface iterator callback that marks each
 * vif's HW queue-base slot as taken, and flags when the vif of interest is
 * itself already active.
 */
2475 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2476 struct ieee80211_vif *vif)
2478 struct wlcore_hw_queue_iter_data *iter_data = data;
2480 if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2483 if (iter_data->cur_running || vif == iter_data->vif) {
2484 iter_data->cur_running = true;
/* queue bases are allocated in strides of NUM_TX_QUEUES */
2488 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
/*
 * wlcore_allocate_hw_queue_base - assign mac80211 HW queues to a vif.
 *
 * Reuses the pre-allocated base when the vif is already running in mac80211
 * (resume/recovery), otherwise picks the first free base slot.  Registers
 * the per-AC queues with mac80211 and, for AP vifs, a CAB (content-after-
 * beacon) queue in the reserved tail range.
 */
2491 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2492 struct wl12xx_vif *wlvif)
2494 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2495 struct wlcore_hw_queue_iter_data iter_data = {};
2498 iter_data.vif = vif;
2500 /* mark all bits taken by active interfaces */
2501 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2502 IEEE80211_IFACE_ITER_RESUME_ALL,
2503 wlcore_hw_queue_iter, &iter_data);
2505 /* the current vif is already running in mac80211 (resume/recovery) */
2506 if (iter_data.cur_running) {
2507 wlvif->hw_queue_base = vif->hw_queue[0];
2508 wl1271_debug(DEBUG_MAC80211,
2509 "using pre-allocated hw queue base %d",
2510 wlvif->hw_queue_base);
2512 /* interface type might have changed type */
2513 goto adjust_cab_queue;
2516 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2517 WLCORE_NUM_MAC_ADDRESSES);
2518 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2521 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2522 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2523 wlvif->hw_queue_base);
2525 for (i = 0; i < NUM_TX_QUEUES; i++) {
2526 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2527 /* register hw queues in mac80211 */
2528 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2532 /* the last places are reserved for cab queues per interface */
2533 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2534 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2535 wlvif->hw_queue_base / NUM_TX_QUEUES;
2537 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
/*
 * wl1271_op_add_interface - mac80211 add_interface callback.
 *
 * Initializes the per-vif data, allocates a FW role and HW queues, triggers
 * a FW switch (via intended recovery) when the vif count requires a
 * different firmware, and — because booting needs the MAC address — boots
 * the FW here when the device is still OFF.  Rejected while in PLT mode or
 * while the vif is still being removed by a racing recovery.
 * NOTE(review): several returns/gotos are missing from this excerpt.
 */
2542 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2543 struct ieee80211_vif *vif)
2545 struct wl1271 *wl = hw->priv;
2546 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2547 struct vif_counter_data vif_count;
2552 wl1271_error("Adding Interface not allowed while in PLT mode");
2556 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2557 IEEE80211_VIF_SUPPORTS_UAPSD |
2558 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2560 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2561 ieee80211_vif_type_p2p(vif), vif->addr);
2563 wl12xx_get_vif_count(hw, vif, &vif_count);
2565 mutex_lock(&wl->mutex);
2566 ret = wl1271_ps_elp_wakeup(wl);
2571 * in some very corner case HW recovery scenarios its possible to
2572 * get here before __wl1271_op_remove_interface is complete, so
2573 * opt out if that is the case.
2575 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2576 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2582 ret = wl12xx_init_vif_data(wl, vif);
2587 role_type = wl12xx_get_role_type(wl, wlvif);
2588 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2593 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
/* a different (single/multi-role) fw is needed: restart via recovery */
2597 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2598 wl12xx_force_active_psm(wl);
2599 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2600 mutex_unlock(&wl->mutex);
2601 wl1271_recovery_work(&wl->recovery_work);
2606 * TODO: after the nvs issue will be solved, move this block
2607 * to start(), and make sure here the driver is ON.
2609 if (wl->state == WLCORE_STATE_OFF) {
2611 * we still need this in order to configure the fw
2612 * while uploading the nvs
2614 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2616 ret = wl12xx_init_fw(wl);
2621 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2622 role_type, &wlvif->role_id);
2626 ret = wl1271_init_vif_specific(wl, vif);
2630 list_add(&wlvif->list, &wl->wlvif_list);
2631 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2633 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2638 wl1271_ps_elp_sleep(wl);
2640 mutex_unlock(&wl->mutex);
2645 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2646 struct ieee80211_vif *vif,
2647 bool reset_tx_queues)
2649 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2651 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2653 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2655 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2658 /* because of hardware recovery, we may get here twice */
2659 if (wl->state == WLCORE_STATE_OFF)
2662 wl1271_info("down");
2664 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2665 wl->scan_wlvif == wlvif) {
2667 * Rearm the tx watchdog just before idling scan. This
2668 * prevents just-finished scans from triggering the watchdog
2670 wl12xx_rearm_tx_watchdog_locked(wl);
2672 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2673 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2674 wl->scan_wlvif = NULL;
2675 wl->scan.req = NULL;
2676 ieee80211_scan_completed(wl->hw, true);
2679 if (wl->sched_vif == wlvif)
2680 wl->sched_vif = NULL;
2682 if (wl->roc_vif == vif) {
2684 ieee80211_remain_on_channel_expired(wl->hw);
2687 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2688 /* disable active roles */
2689 ret = wl1271_ps_elp_wakeup(wl);
2693 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2694 wlvif->bss_type == BSS_TYPE_IBSS) {
2695 if (wl12xx_dev_role_started(wlvif))
2696 wl12xx_stop_dev(wl, wlvif);
2699 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2703 wl1271_ps_elp_sleep(wl);
2706 wl12xx_tx_reset_wlvif(wl, wlvif);
2708 /* clear all hlids (except system_hlid) */
2709 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2711 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2712 wlvif->bss_type == BSS_TYPE_IBSS) {
2713 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2714 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2715 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2716 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2717 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2719 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2720 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2721 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2722 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2723 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2724 wl12xx_free_rate_policy(wl,
2725 &wlvif->ap.ucast_rate_idx[i]);
2726 wl1271_free_ap_keys(wl, wlvif);
2729 dev_kfree_skb(wlvif->probereq);
2730 wlvif->probereq = NULL;
2731 if (wl->last_wlvif == wlvif)
2732 wl->last_wlvif = NULL;
2733 list_del(&wlvif->list);
2734 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2735 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2736 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2744 * Last AP, have more stations. Configure sleep auth according to STA.
2745 * Don't do this on unintended recovery.
2747 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2748 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2751 if (wl->ap_count == 0 && is_ap) {
2752 /* mask ap events */
2753 wl->event_mask &= ~wl->ap_event_mask;
2754 wl1271_event_unmask(wl);
2757 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2758 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2759 /* Configure for power according to debugfs */
2760 if (sta_auth != WL1271_PSM_ILLEGAL)
2761 wl1271_acx_sleep_auth(wl, sta_auth);
2762 /* Configure for ELP power saving */
2764 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2768 mutex_unlock(&wl->mutex);
2770 del_timer_sync(&wlvif->rx_streaming_timer);
2771 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2772 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2773 cancel_work_sync(&wlvif->rc_update_work);
2774 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2775 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2776 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2778 mutex_lock(&wl->mutex);
2781 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2782 struct ieee80211_vif *vif)
2784 struct wl1271 *wl = hw->priv;
2785 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2786 struct wl12xx_vif *iter;
2787 struct vif_counter_data vif_count;
2789 wl12xx_get_vif_count(hw, vif, &vif_count);
2790 mutex_lock(&wl->mutex);
2792 if (wl->state == WLCORE_STATE_OFF ||
2793 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2797 * wl->vif can be null here if someone shuts down the interface
2798 * just when hardware recovery has been started.
2800 wl12xx_for_each_wlvif(wl, iter) {
2804 __wl1271_op_remove_interface(wl, vif, true);
2807 WARN_ON(iter != wlvif);
2808 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2809 wl12xx_force_active_psm(wl);
2810 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2811 wl12xx_queue_recovery_work(wl);
2814 mutex_unlock(&wl->mutex);
2817 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2818 struct ieee80211_vif *vif,
2819 enum nl80211_iftype new_type, bool p2p)
2821 struct wl1271 *wl = hw->priv;
2824 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2825 wl1271_op_remove_interface(hw, vif);
2827 vif->type = new_type;
2829 ret = wl1271_op_add_interface(hw, vif);
2831 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2835 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2838 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2841 * One of the side effects of the JOIN command is that is clears
2842 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2843 * to a WPA/WPA2 access point will therefore kill the data-path.
2844 * Currently the only valid scenario for JOIN during association
2845 * is on roaming, in which case we will also be given new keys.
2846 * Keep the below message for now, unless it starts bothering
2847 * users who really like to roam a lot :)
2849 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2850 wl1271_info("JOIN while associated.");
2852 /* clear encryption type */
2853 wlvif->encryption_type = KEY_NONE;
2856 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2858 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2860 * TODO: this is an ugly workaround for wl12xx fw
2861 * bug - we are not able to tx/rx after the first
2862 * start_sta, so make dummy start+stop calls,
2863 * and then call start_sta again.
2864 * this should be fixed in the fw.
2866 wl12xx_cmd_role_start_sta(wl, wlvif);
2867 wl12xx_cmd_role_stop_sta(wl, wlvif);
2870 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2876 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2880 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2884 wl1271_error("No SSID in IEs!");
2889 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2890 wl1271_error("SSID is too long!");
2894 wlvif->ssid_len = ssid_len;
2895 memcpy(wlvif->ssid, ptr+2, ssid_len);
2899 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2901 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2902 struct sk_buff *skb;
2905 /* we currently only support setting the ssid from the ap probe req */
2906 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2909 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2913 ieoffset = offsetof(struct ieee80211_mgmt,
2914 u.probe_req.variable);
2915 wl1271_ssid_set(wlvif, skb, ieoffset);
2921 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2922 struct ieee80211_bss_conf *bss_conf,
2928 wlvif->aid = bss_conf->aid;
2929 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2930 wlvif->beacon_int = bss_conf->beacon_int;
2931 wlvif->wmm_enabled = bss_conf->qos;
2933 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2936 * with wl1271, we don't need to update the
2937 * beacon_int and dtim_period, because the firmware
2938 * updates it by itself when the first beacon is
2939 * received after a join.
2941 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2946 * Get a template for hardware connection maintenance
2948 dev_kfree_skb(wlvif->probereq);
2949 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2952 ieoffset = offsetof(struct ieee80211_mgmt,
2953 u.probe_req.variable);
2954 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2956 /* enable the connection monitoring feature */
2957 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2962 * The join command disable the keep-alive mode, shut down its process,
2963 * and also clear the template config, so we need to reset it all after
2964 * the join. The acx_aid starts the keep-alive process, and the order
2965 * of the commands below is relevant.
2967 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2971 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2975 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2979 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2980 wlvif->sta.klv_template_id,
2981 ACX_KEEP_ALIVE_TPL_VALID);
2986 * The default fw psm configuration is AUTO, while mac80211 default
2987 * setting is off (ACTIVE), so sync the fw with the correct value.
2989 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2995 wl1271_tx_enabled_rates_get(wl,
2998 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3006 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3009 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3011 /* make sure we are connected (sta) joined */
3013 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3016 /* make sure we are joined (ibss) */
3018 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3022 /* use defaults when not associated */
3025 /* free probe-request template */
3026 dev_kfree_skb(wlvif->probereq);
3027 wlvif->probereq = NULL;
3029 /* disable connection monitor features */
3030 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3034 /* Disable the keep-alive feature */
3035 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3039 /* disable beacon filtering */
3040 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3045 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3046 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3048 wl12xx_cmd_stop_channel_switch(wl, wlvif);
3049 ieee80211_chswitch_done(vif, false);
3050 cancel_delayed_work(&wlvif->channel_switch_work);
3053 /* invalidate keep-alive template */
3054 wl1271_acx_keep_alive_config(wl, wlvif,
3055 wlvif->sta.klv_template_id,
3056 ACX_KEEP_ALIVE_TPL_INVALID);
3061 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3063 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3064 wlvif->rate_set = wlvif->basic_rate_set;
3067 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3070 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3072 if (idle == cur_idle)
3076 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3078 /* The current firmware only supports sched_scan in idle */
3079 if (wl->sched_vif == wlvif)
3080 wl->ops->sched_scan_stop(wl, wlvif);
3082 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3086 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3087 struct ieee80211_conf *conf, u32 changed)
3091 if (conf->power_level != wlvif->power_level) {
3092 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3096 wlvif->power_level = conf->power_level;
3102 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3104 struct wl1271 *wl = hw->priv;
3105 struct wl12xx_vif *wlvif;
3106 struct ieee80211_conf *conf = &hw->conf;
3109 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3111 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3113 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3116 mutex_lock(&wl->mutex);
3118 if (changed & IEEE80211_CONF_CHANGE_POWER)
3119 wl->power_level = conf->power_level;
3121 if (unlikely(wl->state != WLCORE_STATE_ON))
3124 ret = wl1271_ps_elp_wakeup(wl);
3128 /* configure each interface */
3129 wl12xx_for_each_wlvif(wl, wlvif) {
3130 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3136 wl1271_ps_elp_sleep(wl);
3139 mutex_unlock(&wl->mutex);
3144 struct wl1271_filter_params {
3147 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3150 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3151 struct netdev_hw_addr_list *mc_list)
3153 struct wl1271_filter_params *fp;
3154 struct netdev_hw_addr *ha;
3156 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3158 wl1271_error("Out of memory setting filters.");
3162 /* update multicast filtering parameters */
3163 fp->mc_list_length = 0;
3164 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3165 fp->enabled = false;
3168 netdev_hw_addr_list_for_each(ha, mc_list) {
3169 memcpy(fp->mc_list[fp->mc_list_length],
3170 ha->addr, ETH_ALEN);
3171 fp->mc_list_length++;
3175 return (u64)(unsigned long)fp;
3178 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
3181 FIF_BCN_PRBRESP_PROMISC | \
3185 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3186 unsigned int changed,
3187 unsigned int *total, u64 multicast)
3189 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3190 struct wl1271 *wl = hw->priv;
3191 struct wl12xx_vif *wlvif;
3195 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3196 " total %x", changed, *total);
3198 mutex_lock(&wl->mutex);
3200 *total &= WL1271_SUPPORTED_FILTERS;
3201 changed &= WL1271_SUPPORTED_FILTERS;
3203 if (unlikely(wl->state != WLCORE_STATE_ON))
3206 ret = wl1271_ps_elp_wakeup(wl);
3210 wl12xx_for_each_wlvif(wl, wlvif) {
3211 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3212 if (*total & FIF_ALLMULTI)
3213 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3217 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3220 fp->mc_list_length);
3227 * the fw doesn't provide an api to configure the filters. instead,
3228 * the filters configuration is based on the active roles / ROC
3233 wl1271_ps_elp_sleep(wl);
3236 mutex_unlock(&wl->mutex);
3240 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3241 u8 id, u8 key_type, u8 key_size,
3242 const u8 *key, u8 hlid, u32 tx_seq_32,
3245 struct wl1271_ap_key *ap_key;
3248 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3250 if (key_size > MAX_KEY_SIZE)
3254 * Find next free entry in ap_keys. Also check we are not replacing
3257 for (i = 0; i < MAX_NUM_KEYS; i++) {
3258 if (wlvif->ap.recorded_keys[i] == NULL)
3261 if (wlvif->ap.recorded_keys[i]->id == id) {
3262 wl1271_warning("trying to record key replacement");
3267 if (i == MAX_NUM_KEYS)
3270 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3275 ap_key->key_type = key_type;
3276 ap_key->key_size = key_size;
3277 memcpy(ap_key->key, key, key_size);
3278 ap_key->hlid = hlid;
3279 ap_key->tx_seq_32 = tx_seq_32;
3280 ap_key->tx_seq_16 = tx_seq_16;
3282 wlvif->ap.recorded_keys[i] = ap_key;
3286 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3290 for (i = 0; i < MAX_NUM_KEYS; i++) {
3291 kfree(wlvif->ap.recorded_keys[i]);
3292 wlvif->ap.recorded_keys[i] = NULL;
3296 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3299 struct wl1271_ap_key *key;
3300 bool wep_key_added = false;
3302 for (i = 0; i < MAX_NUM_KEYS; i++) {
3304 if (wlvif->ap.recorded_keys[i] == NULL)
3307 key = wlvif->ap.recorded_keys[i];
3309 if (hlid == WL12XX_INVALID_LINK_ID)
3310 hlid = wlvif->ap.bcast_hlid;
3312 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3313 key->id, key->key_type,
3314 key->key_size, key->key,
3315 hlid, key->tx_seq_32,
3320 if (key->key_type == KEY_WEP)
3321 wep_key_added = true;
3324 if (wep_key_added) {
3325 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3326 wlvif->ap.bcast_hlid);
3332 wl1271_free_ap_keys(wl, wlvif);
3336 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3337 u16 action, u8 id, u8 key_type,
3338 u8 key_size, const u8 *key, u32 tx_seq_32,
3339 u16 tx_seq_16, struct ieee80211_sta *sta)
3342 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3345 struct wl1271_station *wl_sta;
3349 wl_sta = (struct wl1271_station *)sta->drv_priv;
3350 hlid = wl_sta->hlid;
3352 hlid = wlvif->ap.bcast_hlid;
3355 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3357 * We do not support removing keys after AP shutdown.
3358 * Pretend we do to make mac80211 happy.
3360 if (action != KEY_ADD_OR_REPLACE)
3363 ret = wl1271_record_ap_key(wl, wlvif, id,
3365 key, hlid, tx_seq_32,
3368 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3369 id, key_type, key_size,
3370 key, hlid, tx_seq_32,
3378 static const u8 bcast_addr[ETH_ALEN] = {
3379 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3382 addr = sta ? sta->addr : bcast_addr;
3384 if (is_zero_ether_addr(addr)) {
3385 /* We dont support TX only encryption */
3389 /* The wl1271 does not allow to remove unicast keys - they
3390 will be cleared automatically on next CMD_JOIN. Ignore the
3391 request silently, as we dont want the mac80211 to emit
3392 an error message. */
3393 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3396 /* don't remove key if hlid was already deleted */
3397 if (action == KEY_REMOVE &&
3398 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3401 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3402 id, key_type, key_size,
3403 key, addr, tx_seq_32,
3413 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3414 struct ieee80211_vif *vif,
3415 struct ieee80211_sta *sta,
3416 struct ieee80211_key_conf *key_conf)
3418 struct wl1271 *wl = hw->priv;
3420 bool might_change_spare =
3421 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3422 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3424 if (might_change_spare) {
3426 * stop the queues and flush to ensure the next packets are
3427 * in sync with FW spare block accounting
3429 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3430 wl1271_tx_flush(wl);
3433 mutex_lock(&wl->mutex);
3435 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3437 goto out_wake_queues;
3440 ret = wl1271_ps_elp_wakeup(wl);
3442 goto out_wake_queues;
3444 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3446 wl1271_ps_elp_sleep(wl);
3449 if (might_change_spare)
3450 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3452 mutex_unlock(&wl->mutex);
3457 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3458 struct ieee80211_vif *vif,
3459 struct ieee80211_sta *sta,
3460 struct ieee80211_key_conf *key_conf)
3462 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3469 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3471 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3472 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3473 key_conf->cipher, key_conf->keyidx,
3474 key_conf->keylen, key_conf->flags);
3475 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3477 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3479 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3480 hlid = wl_sta->hlid;
3482 hlid = wlvif->ap.bcast_hlid;
3485 hlid = wlvif->sta.hlid;
3487 if (hlid != WL12XX_INVALID_LINK_ID) {
3488 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3489 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3490 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3493 switch (key_conf->cipher) {
3494 case WLAN_CIPHER_SUITE_WEP40:
3495 case WLAN_CIPHER_SUITE_WEP104:
3498 key_conf->hw_key_idx = key_conf->keyidx;
3500 case WLAN_CIPHER_SUITE_TKIP:
3501 key_type = KEY_TKIP;
3502 key_conf->hw_key_idx = key_conf->keyidx;
3504 case WLAN_CIPHER_SUITE_CCMP:
3506 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3508 case WL1271_CIPHER_SUITE_GEM:
3512 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3519 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3520 key_conf->keyidx, key_type,
3521 key_conf->keylen, key_conf->key,
3522 tx_seq_32, tx_seq_16, sta);
3524 wl1271_error("Could not add or replace key");
3529 * reconfiguring arp response if the unicast (or common)
3530 * encryption key type was changed
3532 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3533 (sta || key_type == KEY_WEP) &&
3534 wlvif->encryption_type != key_type) {
3535 wlvif->encryption_type = key_type;
3536 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3538 wl1271_warning("build arp rsp failed: %d", ret);
3545 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3546 key_conf->keyidx, key_type,
3547 key_conf->keylen, key_conf->key,
3550 wl1271_error("Could not remove key");
3556 wl1271_error("Unsupported key cmd 0x%x", cmd);
3562 EXPORT_SYMBOL_GPL(wlcore_set_key);
3564 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3565 struct ieee80211_vif *vif,
3568 struct wl1271 *wl = hw->priv;
3569 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3572 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3575 /* we don't handle unsetting of default key */
3579 mutex_lock(&wl->mutex);
3581 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3586 ret = wl1271_ps_elp_wakeup(wl);
3590 wlvif->default_key = key_idx;
3592 /* the default WEP key needs to be configured at least once */
3593 if (wlvif->encryption_type == KEY_WEP) {
3594 ret = wl12xx_cmd_set_default_wep_key(wl,
3602 wl1271_ps_elp_sleep(wl);
3605 mutex_unlock(&wl->mutex);
3608 void wlcore_regdomain_config(struct wl1271 *wl)
3612 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3615 mutex_lock(&wl->mutex);
3617 if (unlikely(wl->state != WLCORE_STATE_ON))
3620 ret = wl1271_ps_elp_wakeup(wl);
3624 ret = wlcore_cmd_regdomain_config_locked(wl);
3626 wl12xx_queue_recovery_work(wl);
3630 wl1271_ps_elp_sleep(wl);
3632 mutex_unlock(&wl->mutex);
3635 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3636 struct ieee80211_vif *vif,
3637 struct ieee80211_scan_request *hw_req)
3639 struct cfg80211_scan_request *req = &hw_req->req;
3640 struct wl1271 *wl = hw->priv;
3645 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3648 ssid = req->ssids[0].ssid;
3649 len = req->ssids[0].ssid_len;
3652 mutex_lock(&wl->mutex);
3654 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3656 * We cannot return -EBUSY here because cfg80211 will expect
3657 * a call to ieee80211_scan_completed if we do - in this case
3658 * there won't be any call.
3664 ret = wl1271_ps_elp_wakeup(wl);
3668 /* fail if there is any role in ROC */
3669 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3670 /* don't allow scanning right now */
3675 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3677 wl1271_ps_elp_sleep(wl);
3679 mutex_unlock(&wl->mutex);
3684 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3685 struct ieee80211_vif *vif)
3687 struct wl1271 *wl = hw->priv;
3688 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3691 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3693 mutex_lock(&wl->mutex);
3695 if (unlikely(wl->state != WLCORE_STATE_ON))
3698 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3701 ret = wl1271_ps_elp_wakeup(wl);
3705 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3706 ret = wl->ops->scan_stop(wl, wlvif);
3712 * Rearm the tx watchdog just before idling scan. This
3713 * prevents just-finished scans from triggering the watchdog
3715 wl12xx_rearm_tx_watchdog_locked(wl);
3717 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3718 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3719 wl->scan_wlvif = NULL;
3720 wl->scan.req = NULL;
3721 ieee80211_scan_completed(wl->hw, true);
3724 wl1271_ps_elp_sleep(wl);
3726 mutex_unlock(&wl->mutex);
3728 cancel_delayed_work_sync(&wl->scan_complete_work);
3731 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3732 struct ieee80211_vif *vif,
3733 struct cfg80211_sched_scan_request *req,
3734 struct ieee80211_scan_ies *ies)
3736 struct wl1271 *wl = hw->priv;
3737 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3740 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3742 mutex_lock(&wl->mutex);
3744 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3749 ret = wl1271_ps_elp_wakeup(wl);
3753 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3757 wl->sched_vif = wlvif;
3760 wl1271_ps_elp_sleep(wl);
3762 mutex_unlock(&wl->mutex);
3766 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3767 struct ieee80211_vif *vif)
3769 struct wl1271 *wl = hw->priv;
3770 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3773 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3775 mutex_lock(&wl->mutex);
3777 if (unlikely(wl->state != WLCORE_STATE_ON))
3780 ret = wl1271_ps_elp_wakeup(wl);
3784 wl->ops->sched_scan_stop(wl, wlvif);
3786 wl1271_ps_elp_sleep(wl);
3788 mutex_unlock(&wl->mutex);
3793 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3795 struct wl1271 *wl = hw->priv;
3798 mutex_lock(&wl->mutex);
3800 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3805 ret = wl1271_ps_elp_wakeup(wl);
3809 ret = wl1271_acx_frag_threshold(wl, value);
3811 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3813 wl1271_ps_elp_sleep(wl);
3816 mutex_unlock(&wl->mutex);
3821 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3823 struct wl1271 *wl = hw->priv;
3824 struct wl12xx_vif *wlvif;
3827 mutex_lock(&wl->mutex);
3829 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3834 ret = wl1271_ps_elp_wakeup(wl);
3838 wl12xx_for_each_wlvif(wl, wlvif) {
3839 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3841 wl1271_warning("set rts threshold failed: %d", ret);
3843 wl1271_ps_elp_sleep(wl);
3846 mutex_unlock(&wl->mutex);
3851 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3854 const u8 *next, *end = skb->data + skb->len;
3855 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3856 skb->len - ieoffset);
3861 memmove(ie, next, end - next);
3862 skb_trim(skb, skb->len - len);
3865 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3866 unsigned int oui, u8 oui_type,
3870 const u8 *next, *end = skb->data + skb->len;
3871 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3872 skb->data + ieoffset,
3873 skb->len - ieoffset);
3878 memmove(ie, next, end - next);
3879 skb_trim(skb, skb->len - len);
3882 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3883 struct ieee80211_vif *vif)
3885 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3886 struct sk_buff *skb;
3889 skb = ieee80211_proberesp_get(wl->hw, vif);
3893 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3894 CMD_TEMPL_AP_PROBE_RESPONSE,
3903 wl1271_debug(DEBUG_AP, "probe response updated");
3904 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3910 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3911 struct ieee80211_vif *vif,
3913 size_t probe_rsp_len,
3916 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3917 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3918 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3919 int ssid_ie_offset, ie_offset, templ_len;
3922 /* no need to change probe response if the SSID is set correctly */
3923 if (wlvif->ssid_len > 0)
3924 return wl1271_cmd_template_set(wl, wlvif->role_id,
3925 CMD_TEMPL_AP_PROBE_RESPONSE,
3930 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3931 wl1271_error("probe_rsp template too big");
3935 /* start searching from IE offset */
3936 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3938 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3939 probe_rsp_len - ie_offset);
3941 wl1271_error("No SSID in beacon!");
3945 ssid_ie_offset = ptr - probe_rsp_data;
3946 ptr += (ptr[1] + 2);
3948 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3950 /* insert SSID from bss_conf */
3951 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3952 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3953 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3954 bss_conf->ssid, bss_conf->ssid_len);
3955 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3957 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3958 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3959 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3961 return wl1271_cmd_template_set(wl, wlvif->role_id,
3962 CMD_TEMPL_AP_PROBE_RESPONSE,
3968 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3969 struct ieee80211_vif *vif,
3970 struct ieee80211_bss_conf *bss_conf,
3973 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3976 if (changed & BSS_CHANGED_ERP_SLOT) {
3977 if (bss_conf->use_short_slot)
3978 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3980 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3982 wl1271_warning("Set slot time failed %d", ret);
3987 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3988 if (bss_conf->use_short_preamble)
3989 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3991 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3994 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3995 if (bss_conf->use_cts_prot)
3996 ret = wl1271_acx_cts_protect(wl, wlvif,
3999 ret = wl1271_acx_cts_protect(wl, wlvif,
4000 CTSPROTECT_DISABLE);
4002 wl1271_warning("Set ctsprotect failed %d", ret);
4011 static int wlcore_set_beacon_template(struct wl1271 *wl,
4012 struct ieee80211_vif *vif,
4015 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4016 struct ieee80211_hdr *hdr;
4019 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4020 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4028 wl1271_debug(DEBUG_MASTER, "beacon updated");
4030 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4032 dev_kfree_skb(beacon);
4035 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4036 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4038 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4043 dev_kfree_skb(beacon);
4047 wlvif->wmm_enabled =
4048 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4049 WLAN_OUI_TYPE_MICROSOFT_WMM,
4050 beacon->data + ieoffset,
4051 beacon->len - ieoffset);
4054 * In case we already have a probe-resp beacon set explicitly
4055 * by usermode, don't use the beacon data.
4057 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4060 /* remove TIM ie from probe response */
4061 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4064 * remove p2p ie from probe response.
4065 * the fw reponds to probe requests that don't include
4066 * the p2p ie. probe requests with p2p ie will be passed,
4067 * and will be responded by the supplicant (the spec
4068 * forbids including the p2p ie when responding to probe
4069 * requests that didn't include it).
4071 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4072 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4074 hdr = (struct ieee80211_hdr *) beacon->data;
4075 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4076 IEEE80211_STYPE_PROBE_RESP);
4078 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4083 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4084 CMD_TEMPL_PROBE_RESPONSE,
4089 dev_kfree_skb(beacon);
4097 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4098 struct ieee80211_vif *vif,
4099 struct ieee80211_bss_conf *bss_conf,
4102 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4103 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4106 if (changed & BSS_CHANGED_BEACON_INT) {
4107 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4108 bss_conf->beacon_int);
4110 wlvif->beacon_int = bss_conf->beacon_int;
4113 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4114 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4116 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4119 if (changed & BSS_CHANGED_BEACON) {
4120 ret = wlcore_set_beacon_template(wl, vif, is_ap);
4124 if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4126 ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4133 wl1271_error("beacon info change failed: %d", ret);
4137 /* AP mode changes */
4138 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4139 struct ieee80211_vif *vif,
4140 struct ieee80211_bss_conf *bss_conf,
4143 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4146 if (changed & BSS_CHANGED_BASIC_RATES) {
4147 u32 rates = bss_conf->basic_rates;
4149 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4151 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4152 wlvif->basic_rate_set);
4154 ret = wl1271_init_ap_rates(wl, wlvif);
4156 wl1271_error("AP rate policy change failed %d", ret);
4160 ret = wl1271_ap_init_templates(wl, vif);
4164 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
4168 ret = wlcore_set_beacon_template(wl, vif, true);
4173 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4177 if (changed & BSS_CHANGED_BEACON_ENABLED) {
4178 if (bss_conf->enable_beacon) {
4179 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4180 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4184 ret = wl1271_ap_init_hwenc(wl, wlvif);
4188 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4189 wl1271_debug(DEBUG_AP, "started AP");
4192 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4194 * AP might be in ROC in case we have just
4195 * sent auth reply. handle it.
4197 if (test_bit(wlvif->role_id, wl->roc_map))
4198 wl12xx_croc(wl, wlvif->role_id);
4200 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4204 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4205 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4207 wl1271_debug(DEBUG_AP, "stopped AP");
4212 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4216 /* Handle HT information change */
4217 if ((changed & BSS_CHANGED_HT) &&
4218 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4219 ret = wl1271_acx_set_ht_information(wl, wlvif,
4220 bss_conf->ht_operation_mode);
4222 wl1271_warning("Set ht information failed %d", ret);
4231 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4232 struct ieee80211_bss_conf *bss_conf,
4238 wl1271_debug(DEBUG_MAC80211,
4239 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4240 bss_conf->bssid, bss_conf->aid,
4241 bss_conf->beacon_int,
4242 bss_conf->basic_rates, sta_rate_set);
4244 wlvif->beacon_int = bss_conf->beacon_int;
4245 rates = bss_conf->basic_rates;
4246 wlvif->basic_rate_set =
4247 wl1271_tx_enabled_rates_get(wl, rates,
4250 wl1271_tx_min_rate_get(wl,
4251 wlvif->basic_rate_set);
4255 wl1271_tx_enabled_rates_get(wl,
4259 /* we only support sched_scan while not connected */
4260 if (wl->sched_vif == wlvif)
4261 wl->ops->sched_scan_stop(wl, wlvif);
4263 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4267 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4271 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4275 wlcore_set_ssid(wl, wlvif);
4277 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4282 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4286 /* revert back to minimum rates for the current band */
4287 wl1271_set_band_rate(wl, wlvif);
4288 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4290 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4294 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4295 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4296 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4301 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4304 /* STA/IBSS mode changes */
/*
 * Apply mac80211 bss_info changes for STA/IBSS roles: beacon/IBSS state,
 * idle handling, CQM thresholds, HT peer capabilities, BSSID set/clear,
 * association/PS transitions, and the ARP filter template.
 * NOTE(review): this span was extracted with intermediate lines elided
 * (error checks, braces); keep that in mind when reading flow below.
 */
4305 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4306 struct ieee80211_vif *vif,
4307 struct ieee80211_bss_conf *bss_conf,
4310 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4311 bool do_join = false;
4312 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4313 bool ibss_joined = false;
4314 u32 sta_rate_set = 0;
4316 struct ieee80211_sta *sta;
4317 bool sta_exists = false;
4318 struct ieee80211_sta_ht_cap sta_ht_cap;
/* beacon-related changes are delegated to a common helper first */
4321 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
/* IBSS join/leave: track the joined flag; on leave also drop assoc state */
4327 if (changed & BSS_CHANGED_IBSS) {
4328 if (bss_conf->ibss_joined) {
4329 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4332 wlcore_unset_assoc(wl, wlvif);
4333 wl12xx_cmd_role_stop_sta(wl, wlvif);
4337 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4340 /* Need to update the SSID (for filtering etc) */
4341 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4344 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4345 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4346 bss_conf->enable_beacon ? "enabled" : "disabled");
4351 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4352 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
/* connection-quality monitoring: a zero threshold disables the trigger */
4354 if (changed & BSS_CHANGED_CQM) {
4355 bool enable = false;
4356 if (bss_conf->cqm_rssi_thold)
4358 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4359 bss_conf->cqm_rssi_thold,
4360 bss_conf->cqm_rssi_hyst);
4363 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
/* snapshot the AP's rates and HT caps under RCU before acting on them */
4366 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4367 BSS_CHANGED_ASSOC)) {
4369 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4371 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4373 /* save the supp_rates of the ap */
4374 sta_rate_set = sta->supp_rates[wlvif->band];
4375 if (sta->ht_cap.ht_supported)
4377 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4378 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4379 sta_ht_cap = sta->ht_cap;
/* a non-zero BSSID programs the firmware; an all-zero one clears it */
4386 if (changed & BSS_CHANGED_BSSID) {
4387 if (!is_zero_ether_addr(bss_conf->bssid)) {
4388 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4393 /* Need to update the BSSID (for filtering etc) */
4396 ret = wlcore_clear_bssid(wl, wlvif);
4402 if (changed & BSS_CHANGED_IBSS) {
4403 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4404 bss_conf->ibss_joined);
4406 if (bss_conf->ibss_joined) {
4407 u32 rates = bss_conf->basic_rates;
4408 wlvif->basic_rate_set =
4409 wl1271_tx_enabled_rates_get(wl, rates,
4412 wl1271_tx_min_rate_get(wl,
4413 wlvif->basic_rate_set);
4415 /* by default, use 11b + OFDM rates */
4416 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4417 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
/* beacon filtering only makes sense once a DTIM period is known */
4423 if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4424 /* enable beacon filtering */
4425 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4430 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4435 ret = wlcore_join(wl, wlvif);
4437 wl1271_warning("cmd join failed %d", ret);
4442 if (changed & BSS_CHANGED_ASSOC) {
4443 if (bss_conf->assoc) {
4444 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
/* re-send the authorized state if it was set before (re)association */
4449 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4450 wl12xx_set_authorized(wl, wlvif);
4452 wlcore_unset_assoc(wl, wlvif);
/* power-save transitions: only enter PS when associated and not in PS */
4456 if (changed & BSS_CHANGED_PS) {
4457 if ((bss_conf->ps) &&
4458 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4459 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4463 if (wl->conf.conn.forced_ps) {
4464 ps_mode = STATION_POWER_SAVE_MODE;
4465 ps_mode_str = "forced";
4467 ps_mode = STATION_AUTO_PS_MODE;
4468 ps_mode_str = "auto";
4471 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4473 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4475 wl1271_warning("enter %s ps failed %d",
4477 } else if (!bss_conf->ps &&
4478 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4479 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4481 ret = wl1271_ps_set_mode(wl, wlvif,
4482 STATION_ACTIVE_MODE);
4484 wl1271_warning("exit auto ps failed %d", ret);
4488 /* Handle new association with HT. Do this after join. */
4491 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4493 ret = wlcore_hw_set_peer_cap(wl,
4499 wl1271_warning("Set ht cap failed %d", ret);
4505 ret = wl1271_acx_set_ht_information(wl, wlvif,
4506 bss_conf->ht_operation_mode);
4508 wl1271_warning("Set ht information failed %d",
4515 /* Handle arp filtering. Done after join. */
4516 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4517 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4518 __be32 addr = bss_conf->arp_addr_list[0];
4519 wlvif->sta.qos = bss_conf->qos;
4520 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4522 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4523 wlvif->ip_addr = addr;
4525 * The template should have been configured only upon
4526 * association. however, it seems that the correct ip
4527 * isn't being set (when sending), so we have to
4528 * reconfigure the template upon every ip change.
4530 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4532 wl1271_warning("build arp rsp failed: %d", ret);
4536 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4537 (ACX_ARP_FILTER_ARP_FILTERING |
4538 ACX_ARP_FILTER_AUTO_ARP),
4542 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4553 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4554 struct ieee80211_vif *vif,
4555 struct ieee80211_bss_conf *bss_conf,
4558 struct wl1271 *wl = hw->priv;
4559 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4560 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4563 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4564 wlvif->role_id, (int)changed);
4567 * make sure to cancel pending disconnections if our association
4570 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4571 cancel_delayed_work_sync(&wlvif->connection_loss_work);
4573 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4574 !bss_conf->enable_beacon)
4575 wl1271_tx_flush(wl);
4577 mutex_lock(&wl->mutex);
4579 if (unlikely(wl->state != WLCORE_STATE_ON))
4582 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4585 ret = wl1271_ps_elp_wakeup(wl);
4589 if ((changed & BSS_CHANGED_TXPOWER) &&
4590 bss_conf->txpower != wlvif->power_level) {
4592 ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4596 wlvif->power_level = bss_conf->txpower;
4600 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4602 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4604 wl1271_ps_elp_sleep(wl);
4607 mutex_unlock(&wl->mutex);
4610 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4611 struct ieee80211_chanctx_conf *ctx)
4613 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4614 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4615 cfg80211_get_chandef_type(&ctx->def));
4619 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4620 struct ieee80211_chanctx_conf *ctx)
4622 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4623 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4624 cfg80211_get_chandef_type(&ctx->def));
4627 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4628 struct ieee80211_chanctx_conf *ctx,
4631 struct wl1271 *wl = hw->priv;
4632 struct wl12xx_vif *wlvif;
4634 int channel = ieee80211_frequency_to_channel(
4635 ctx->def.chan->center_freq);
4637 wl1271_debug(DEBUG_MAC80211,
4638 "mac80211 change chanctx %d (type %d) changed 0x%x",
4639 channel, cfg80211_get_chandef_type(&ctx->def), changed);
4641 mutex_lock(&wl->mutex);
4643 ret = wl1271_ps_elp_wakeup(wl);
4647 wl12xx_for_each_wlvif(wl, wlvif) {
4648 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4651 if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4657 /* start radar if needed */
4658 if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4659 wlvif->bss_type == BSS_TYPE_AP_BSS &&
4660 ctx->radar_enabled && !wlvif->radar_enabled &&
4661 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4662 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4663 wlcore_hw_set_cac(wl, wlvif, true);
4664 wlvif->radar_enabled = true;
4668 wl1271_ps_elp_sleep(wl);
4670 mutex_unlock(&wl->mutex);
4673 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4674 struct ieee80211_vif *vif,
4675 struct ieee80211_chanctx_conf *ctx)
4677 struct wl1271 *wl = hw->priv;
4678 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4679 int channel = ieee80211_frequency_to_channel(
4680 ctx->def.chan->center_freq);
4683 wl1271_debug(DEBUG_MAC80211,
4684 "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4685 wlvif->role_id, channel,
4686 cfg80211_get_chandef_type(&ctx->def),
4687 ctx->radar_enabled, ctx->def.chan->dfs_state);
4689 mutex_lock(&wl->mutex);
4691 if (unlikely(wl->state != WLCORE_STATE_ON))
4694 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4697 ret = wl1271_ps_elp_wakeup(wl);
4701 wlvif->band = ctx->def.chan->band;
4702 wlvif->channel = channel;
4703 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4705 /* update default rates according to the band */
4706 wl1271_set_band_rate(wl, wlvif);
4708 if (ctx->radar_enabled &&
4709 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4710 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4711 wlcore_hw_set_cac(wl, wlvif, true);
4712 wlvif->radar_enabled = true;
4715 wl1271_ps_elp_sleep(wl);
4717 mutex_unlock(&wl->mutex);
4722 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4723 struct ieee80211_vif *vif,
4724 struct ieee80211_chanctx_conf *ctx)
4726 struct wl1271 *wl = hw->priv;
4727 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4730 wl1271_debug(DEBUG_MAC80211,
4731 "mac80211 unassign chanctx (role %d) %d (type %d)",
4733 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4734 cfg80211_get_chandef_type(&ctx->def));
4736 wl1271_tx_flush(wl);
4738 mutex_lock(&wl->mutex);
4740 if (unlikely(wl->state != WLCORE_STATE_ON))
4743 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4746 ret = wl1271_ps_elp_wakeup(wl);
4750 if (wlvif->radar_enabled) {
4751 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4752 wlcore_hw_set_cac(wl, wlvif, false);
4753 wlvif->radar_enabled = false;
4756 wl1271_ps_elp_sleep(wl);
4758 mutex_unlock(&wl->mutex);
4761 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4762 struct wl12xx_vif *wlvif,
4763 struct ieee80211_chanctx_conf *new_ctx)
4765 int channel = ieee80211_frequency_to_channel(
4766 new_ctx->def.chan->center_freq);
4768 wl1271_debug(DEBUG_MAC80211,
4769 "switch vif (role %d) %d -> %d chan_type: %d",
4770 wlvif->role_id, wlvif->channel, channel,
4771 cfg80211_get_chandef_type(&new_ctx->def));
4773 if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4776 WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4778 if (wlvif->radar_enabled) {
4779 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4780 wlcore_hw_set_cac(wl, wlvif, false);
4781 wlvif->radar_enabled = false;
4784 wlvif->band = new_ctx->def.chan->band;
4785 wlvif->channel = channel;
4786 wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4788 /* start radar if needed */
4789 if (new_ctx->radar_enabled) {
4790 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4791 wlcore_hw_set_cac(wl, wlvif, true);
4792 wlvif->radar_enabled = true;
4799 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4800 struct ieee80211_vif_chanctx_switch *vifs,
4802 enum ieee80211_chanctx_switch_mode mode)
4804 struct wl1271 *wl = hw->priv;
4807 wl1271_debug(DEBUG_MAC80211,
4808 "mac80211 switch chanctx n_vifs %d mode %d",
4811 mutex_lock(&wl->mutex);
4813 ret = wl1271_ps_elp_wakeup(wl);
4817 for (i = 0; i < n_vifs; i++) {
4818 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4820 ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4825 wl1271_ps_elp_sleep(wl);
4827 mutex_unlock(&wl->mutex);
4832 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4833 struct ieee80211_vif *vif, u16 queue,
4834 const struct ieee80211_tx_queue_params *params)
4836 struct wl1271 *wl = hw->priv;
4837 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4841 mutex_lock(&wl->mutex);
4843 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4846 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4848 ps_scheme = CONF_PS_SCHEME_LEGACY;
4850 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4853 ret = wl1271_ps_elp_wakeup(wl);
4858 * the txop is confed in units of 32us by the mac80211,
4861 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4862 params->cw_min, params->cw_max,
4863 params->aifs, params->txop << 5);
4867 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4868 CONF_CHANNEL_TYPE_EDCF,
4869 wl1271_tx_get_queue(queue),
4870 ps_scheme, CONF_ACK_POLICY_LEGACY,
4874 wl1271_ps_elp_sleep(wl);
4877 mutex_unlock(&wl->mutex);
4882 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4883 struct ieee80211_vif *vif)
4886 struct wl1271 *wl = hw->priv;
4887 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4888 u64 mactime = ULLONG_MAX;
4891 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4893 mutex_lock(&wl->mutex);
4895 if (unlikely(wl->state != WLCORE_STATE_ON))
4898 ret = wl1271_ps_elp_wakeup(wl);
4902 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4907 wl1271_ps_elp_sleep(wl);
4910 mutex_unlock(&wl->mutex);
4914 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4915 struct survey_info *survey)
4917 struct ieee80211_conf *conf = &hw->conf;
4922 survey->channel = conf->chandef.chan;
4927 static int wl1271_allocate_sta(struct wl1271 *wl,
4928 struct wl12xx_vif *wlvif,
4929 struct ieee80211_sta *sta)
4931 struct wl1271_station *wl_sta;
4935 if (wl->active_sta_count >= wl->max_ap_stations) {
4936 wl1271_warning("could not allocate HLID - too much stations");
4940 wl_sta = (struct wl1271_station *)sta->drv_priv;
4941 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4943 wl1271_warning("could not allocate HLID - too many links");
4947 /* use the previous security seq, if this is a recovery/resume */
4948 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4950 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4951 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4952 wl->active_sta_count++;
4956 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4958 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4961 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4962 __clear_bit(hlid, &wl->ap_ps_map);
4963 __clear_bit(hlid, &wl->ap_fw_ps_map);
4966 * save the last used PN in the private part of iee80211_sta,
4967 * in case of recovery/suspend
4969 wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
4971 wl12xx_free_link(wl, wlvif, &hlid);
4972 wl->active_sta_count--;
4975 * rearm the tx watchdog when the last STA is freed - give the FW a
4976 * chance to return STA-buffered packets before complaining.
4978 if (wl->active_sta_count == 0)
4979 wl12xx_rearm_tx_watchdog_locked(wl);
4982 static int wl12xx_sta_add(struct wl1271 *wl,
4983 struct wl12xx_vif *wlvif,
4984 struct ieee80211_sta *sta)
4986 struct wl1271_station *wl_sta;
4990 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4992 ret = wl1271_allocate_sta(wl, wlvif, sta);
4996 wl_sta = (struct wl1271_station *)sta->drv_priv;
4997 hlid = wl_sta->hlid;
4999 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5001 wl1271_free_sta(wl, wlvif, hlid);
5006 static int wl12xx_sta_remove(struct wl1271 *wl,
5007 struct wl12xx_vif *wlvif,
5008 struct ieee80211_sta *sta)
5010 struct wl1271_station *wl_sta;
5013 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5015 wl_sta = (struct wl1271_station *)sta->drv_priv;
5017 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5020 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5024 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5028 static void wlcore_roc_if_possible(struct wl1271 *wl,
5029 struct wl12xx_vif *wlvif)
5031 if (find_first_bit(wl->roc_map,
5032 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5035 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5038 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5042 * when wl_sta is NULL, we treat this call as if coming from a
5043 * pending auth reply.
5044 * wl->mutex must be taken and the FW must be awake when the call
5047 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5048 struct wl1271_station *wl_sta, bool in_conn)
5051 if (WARN_ON(wl_sta && wl_sta->in_connection))
5054 if (!wlvif->ap_pending_auth_reply &&
5055 !wlvif->inconn_count)
5056 wlcore_roc_if_possible(wl, wlvif);
5059 wl_sta->in_connection = true;
5060 wlvif->inconn_count++;
5062 wlvif->ap_pending_auth_reply = true;
5065 if (wl_sta && !wl_sta->in_connection)
5068 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5071 if (WARN_ON(wl_sta && !wlvif->inconn_count))
5075 wl_sta->in_connection = false;
5076 wlvif->inconn_count--;
5078 wlvif->ap_pending_auth_reply = false;
5081 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5082 test_bit(wlvif->role_id, wl->roc_map))
5083 wl12xx_croc(wl, wlvif->role_id);
5087 static int wl12xx_update_sta_state(struct wl1271 *wl,
5088 struct wl12xx_vif *wlvif,
5089 struct ieee80211_sta *sta,
5090 enum ieee80211_sta_state old_state,
5091 enum ieee80211_sta_state new_state)
5093 struct wl1271_station *wl_sta;
5094 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5095 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5098 wl_sta = (struct wl1271_station *)sta->drv_priv;
5100 /* Add station (AP mode) */
5102 old_state == IEEE80211_STA_NOTEXIST &&
5103 new_state == IEEE80211_STA_NONE) {
5104 ret = wl12xx_sta_add(wl, wlvif, sta);
5108 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5111 /* Remove station (AP mode) */
5113 old_state == IEEE80211_STA_NONE &&
5114 new_state == IEEE80211_STA_NOTEXIST) {
5116 wl12xx_sta_remove(wl, wlvif, sta);
5118 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5121 /* Authorize station (AP mode) */
5123 new_state == IEEE80211_STA_AUTHORIZED) {
5124 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5128 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5133 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5136 /* Authorize station */
5138 new_state == IEEE80211_STA_AUTHORIZED) {
5139 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5140 ret = wl12xx_set_authorized(wl, wlvif);
5146 old_state == IEEE80211_STA_AUTHORIZED &&
5147 new_state == IEEE80211_STA_ASSOC) {
5148 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5149 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5152 /* save seq number on disassoc (suspend) */
5154 old_state == IEEE80211_STA_ASSOC &&
5155 new_state == IEEE80211_STA_AUTH) {
5156 wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5157 wlvif->total_freed_pkts = 0;
5160 /* restore seq number on assoc (resume) */
5162 old_state == IEEE80211_STA_AUTH &&
5163 new_state == IEEE80211_STA_ASSOC) {
5164 wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5167 /* clear ROCs on failure or authorization */
5169 (new_state == IEEE80211_STA_AUTHORIZED ||
5170 new_state == IEEE80211_STA_NOTEXIST)) {
5171 if (test_bit(wlvif->role_id, wl->roc_map))
5172 wl12xx_croc(wl, wlvif->role_id);
5176 old_state == IEEE80211_STA_NOTEXIST &&
5177 new_state == IEEE80211_STA_NONE) {
5178 if (find_first_bit(wl->roc_map,
5179 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5180 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5181 wl12xx_roc(wl, wlvif, wlvif->role_id,
5182 wlvif->band, wlvif->channel);
5188 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5189 struct ieee80211_vif *vif,
5190 struct ieee80211_sta *sta,
5191 enum ieee80211_sta_state old_state,
5192 enum ieee80211_sta_state new_state)
5194 struct wl1271 *wl = hw->priv;
5195 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5198 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5199 sta->aid, old_state, new_state);
5201 mutex_lock(&wl->mutex);
5203 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5208 ret = wl1271_ps_elp_wakeup(wl);
5212 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5214 wl1271_ps_elp_sleep(wl);
5216 mutex_unlock(&wl->mutex);
5217 if (new_state < old_state)
/*
 * mac80211 ampdu_action callback: manage RX block-ack sessions in the
 * firmware (per-link ba_bitmap and a global session count); TX A-MPDU
 * sessions are handled autonomously by the firmware, so the TX actions
 * are accepted without further work.
 * NOTE(review): this span was extracted with intermediate lines elided
 * (error checks, braces); keep that in mind when reading flow below.
 */
5222 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5223 struct ieee80211_vif *vif,
5224 enum ieee80211_ampdu_mlme_action action,
5225 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
5228 struct wl1271 *wl = hw->priv;
5229 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5231 u8 hlid, *ba_bitmap;
5233 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5236 /* sanity check - the fields in FW are only 8bits wide */
5237 if (WARN_ON(tid > 0xFF))
5240 mutex_lock(&wl->mutex);
5242 if (unlikely(wl->state != WLCORE_STATE_ON)) {
/* resolve the firmware link id for the session: own link in STA mode,
 * the peer's link in AP mode */
5247 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5248 hlid = wlvif->sta.hlid;
5249 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5250 struct wl1271_station *wl_sta;
5252 wl_sta = (struct wl1271_station *)sta->drv_priv;
5253 hlid = wl_sta->hlid;
5259 ba_bitmap = &wl->links[hlid].ba_bitmap;
5261 ret = wl1271_ps_elp_wakeup(wl);
5265 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5269 case IEEE80211_AMPDU_RX_START:
5270 if (!wlvif->ba_support || !wlvif->ba_allowed) {
5275 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5277 wl1271_error("exceeded max RX BA sessions");
/* refuse to double-start a session on a TID that already has one */
5281 if (*ba_bitmap & BIT(tid)) {
5283 wl1271_error("cannot enable RX BA session on active "
5288 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5291 *ba_bitmap |= BIT(tid);
5292 wl->ba_rx_session_count++;
5296 case IEEE80211_AMPDU_RX_STOP:
5297 if (!(*ba_bitmap & BIT(tid))) {
5299 * this happens on reconfig - so only output a debug
5300 * message for now, and don't fail the function.
5302 wl1271_debug(DEBUG_MAC80211,
5303 "no active RX BA session on tid: %d",
5309 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5312 *ba_bitmap &= ~BIT(tid);
5313 wl->ba_rx_session_count--;
/* TX BA initiator sessions are managed by the FW independently;
 * falling through here on purpose for all TX A-MPDU commands. */
5318 * The BA initiator session management in FW independently.
5319 * Falling break here on purpose for all TX APDU commands.
5321 case IEEE80211_AMPDU_TX_START:
5322 case IEEE80211_AMPDU_TX_STOP_CONT:
5323 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5324 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5325 case IEEE80211_AMPDU_TX_OPERATIONAL:
5330 wl1271_error("Incorrect ampdu action id=%x\n", action);
5334 wl1271_ps_elp_sleep(wl);
5337 mutex_unlock(&wl->mutex);
5342 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5343 struct ieee80211_vif *vif,
5344 const struct cfg80211_bitrate_mask *mask)
5346 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5347 struct wl1271 *wl = hw->priv;
5350 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5351 mask->control[NL80211_BAND_2GHZ].legacy,
5352 mask->control[NL80211_BAND_5GHZ].legacy);
5354 mutex_lock(&wl->mutex);
5356 for (i = 0; i < WLCORE_NUM_BANDS; i++)
5357 wlvif->bitrate_masks[i] =
5358 wl1271_tx_enabled_rates_get(wl,
5359 mask->control[i].legacy,
5362 if (unlikely(wl->state != WLCORE_STATE_ON))
5365 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5366 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5368 ret = wl1271_ps_elp_wakeup(wl);
5372 wl1271_set_band_rate(wl, wlvif);
5374 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5375 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5377 wl1271_ps_elp_sleep(wl);
5380 mutex_unlock(&wl->mutex);
5385 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5386 struct ieee80211_vif *vif,
5387 struct ieee80211_channel_switch *ch_switch)
5389 struct wl1271 *wl = hw->priv;
5390 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5393 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5395 wl1271_tx_flush(wl);
5397 mutex_lock(&wl->mutex);
5399 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5400 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5401 ieee80211_chswitch_done(vif, false);
5403 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5407 ret = wl1271_ps_elp_wakeup(wl);
5411 /* TODO: change mac80211 to pass vif as param */
5413 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5414 unsigned long delay_usec;
5416 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5420 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5422 /* indicate failure 5 seconds after channel switch time */
5423 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5425 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5426 usecs_to_jiffies(delay_usec) +
5427 msecs_to_jiffies(5000));
5431 wl1271_ps_elp_sleep(wl);
5434 mutex_unlock(&wl->mutex);
5437 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5438 struct wl12xx_vif *wlvif,
5441 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5442 struct sk_buff *beacon =
5443 ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5448 return cfg80211_find_ie(eid,
5449 beacon->data + ieoffset,
5450 beacon->len - ieoffset);
5453 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5457 const struct ieee80211_channel_sw_ie *ie_csa;
5459 ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5463 ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5464 *csa_count = ie_csa->count;
5469 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5470 struct ieee80211_vif *vif,
5471 struct cfg80211_chan_def *chandef)
5473 struct wl1271 *wl = hw->priv;
5474 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5475 struct ieee80211_channel_switch ch_switch = {
5477 .chandef = *chandef,
5481 wl1271_debug(DEBUG_MAC80211,
5482 "mac80211 channel switch beacon (role %d)",
5485 ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5487 wl1271_error("error getting beacon (for CSA counter)");
5491 mutex_lock(&wl->mutex);
5493 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5498 ret = wl1271_ps_elp_wakeup(wl);
5502 ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5506 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5509 wl1271_ps_elp_sleep(wl);
5511 mutex_unlock(&wl->mutex);
5514 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5515 u32 queues, bool drop)
5517 struct wl1271 *wl = hw->priv;
5519 wl1271_tx_flush(wl);
5522 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5523 struct ieee80211_vif *vif,
5524 struct ieee80211_channel *chan,
5526 enum ieee80211_roc_type type)
5528 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5529 struct wl1271 *wl = hw->priv;
5530 int channel, ret = 0;
5532 channel = ieee80211_frequency_to_channel(chan->center_freq);
5534 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5535 channel, wlvif->role_id);
5537 mutex_lock(&wl->mutex);
5539 if (unlikely(wl->state != WLCORE_STATE_ON))
5542 /* return EBUSY if we can't ROC right now */
5543 if (WARN_ON(wl->roc_vif ||
5544 find_first_bit(wl->roc_map,
5545 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
5550 ret = wl1271_ps_elp_wakeup(wl);
5554 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5559 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5560 msecs_to_jiffies(duration));
5562 wl1271_ps_elp_sleep(wl);
5564 mutex_unlock(&wl->mutex);
5568 static int __wlcore_roc_completed(struct wl1271 *wl)
5570 struct wl12xx_vif *wlvif;
5573 /* already completed */
5574 if (unlikely(!wl->roc_vif))
5577 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5579 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5582 ret = wl12xx_stop_dev(wl, wlvif);
5591 static int wlcore_roc_completed(struct wl1271 *wl)
5595 wl1271_debug(DEBUG_MAC80211, "roc complete");
5597 mutex_lock(&wl->mutex);
5599 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5604 ret = wl1271_ps_elp_wakeup(wl);
5608 ret = __wlcore_roc_completed(wl);
5610 wl1271_ps_elp_sleep(wl);
5612 mutex_unlock(&wl->mutex);
5617 static void wlcore_roc_complete_work(struct work_struct *work)
5619 struct delayed_work *dwork;
5623 dwork = container_of(work, struct delayed_work, work);
5624 wl = container_of(dwork, struct wl1271, roc_complete_work);
5626 ret = wlcore_roc_completed(wl);
5628 ieee80211_remain_on_channel_expired(wl->hw);
5631 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5633 struct wl1271 *wl = hw->priv;
5635 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5638 wl1271_tx_flush(wl);
5641 * we can't just flush_work here, because it might deadlock
5642 * (as we might get called from the same workqueue)
5644 cancel_delayed_work_sync(&wl->roc_complete_work);
5645 wlcore_roc_completed(wl);
5650 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5651 struct ieee80211_vif *vif,
5652 struct ieee80211_sta *sta,
5655 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5657 wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5659 if (!(changed & IEEE80211_RC_BW_CHANGED))
5662 /* this callback is atomic, so schedule a new work */
5663 wlvif->rc_update_bw = sta->bandwidth;
5664 ieee80211_queue_work(hw, &wlvif->rc_update_work);
5667 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5668 struct ieee80211_vif *vif,
5669 struct ieee80211_sta *sta,
5670 struct station_info *sinfo)
5672 struct wl1271 *wl = hw->priv;
5673 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5677 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5679 mutex_lock(&wl->mutex);
5681 if (unlikely(wl->state != WLCORE_STATE_ON))
5684 ret = wl1271_ps_elp_wakeup(wl);
5688 ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5692 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
5693 sinfo->signal = rssi_dbm;
5696 wl1271_ps_elp_sleep(wl);
5699 mutex_unlock(&wl->mutex);
5702 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5704 struct wl1271 *wl = hw->priv;
5707 mutex_lock(&wl->mutex);
5709 if (unlikely(wl->state != WLCORE_STATE_ON))
5712 /* packets are considered pending if in the TX queue or the FW */
5713 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5715 mutex_unlock(&wl->mutex);
5720 /* can't be const, mac80211 writes to this */
5721 static struct ieee80211_rate wl1271_rates[] = {
5723 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5724 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5726 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5727 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5728 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5730 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5731 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5732 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5734 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5735 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5736 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5738 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5739 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5741 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5742 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5744 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5745 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5747 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5748 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5750 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5751 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5753 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5754 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5756 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5757 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5759 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5760 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5763 /* can't be const, mac80211 writes to this */
5764 static struct ieee80211_channel wl1271_channels[] = {
5765 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5766 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5767 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5768 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5769 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5770 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5771 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5772 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5773 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5774 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5775 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5776 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5777 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5778 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5781 /* can't be const, mac80211 writes to this */
5782 static struct ieee80211_supported_band wl1271_band_2ghz = {
5783 .channels = wl1271_channels,
5784 .n_channels = ARRAY_SIZE(wl1271_channels),
5785 .bitrates = wl1271_rates,
5786 .n_bitrates = ARRAY_SIZE(wl1271_rates),
5789 /* 5 GHz data rates for WL1273 */
5790 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5792 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5793 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5795 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5796 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5798 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5799 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5801 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5802 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5804 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5805 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5807 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5808 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5810 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5811 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5813 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5814 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5817 /* 5 GHz band channels for WL1273 */
5818 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5819 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5820 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5821 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5822 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5823 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5824 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5825 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5826 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5827 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5828 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5829 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5830 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5831 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5832 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5833 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5834 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5835 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5836 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5837 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5838 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5839 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5840 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5841 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5842 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5843 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5844 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5845 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5846 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5847 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5848 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5849 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5852 static struct ieee80211_supported_band wl1271_band_5ghz = {
5853 .channels = wl1271_channels_5ghz,
5854 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5855 .bitrates = wl1271_rates_5ghz,
5856 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
5859 static const struct ieee80211_ops wl1271_ops = {
5860 .start = wl1271_op_start,
5861 .stop = wlcore_op_stop,
5862 .add_interface = wl1271_op_add_interface,
5863 .remove_interface = wl1271_op_remove_interface,
5864 .change_interface = wl12xx_op_change_interface,
5866 .suspend = wl1271_op_suspend,
5867 .resume = wl1271_op_resume,
5869 .config = wl1271_op_config,
5870 .prepare_multicast = wl1271_op_prepare_multicast,
5871 .configure_filter = wl1271_op_configure_filter,
5873 .set_key = wlcore_op_set_key,
5874 .hw_scan = wl1271_op_hw_scan,
5875 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
5876 .sched_scan_start = wl1271_op_sched_scan_start,
5877 .sched_scan_stop = wl1271_op_sched_scan_stop,
5878 .bss_info_changed = wl1271_op_bss_info_changed,
5879 .set_frag_threshold = wl1271_op_set_frag_threshold,
5880 .set_rts_threshold = wl1271_op_set_rts_threshold,
5881 .conf_tx = wl1271_op_conf_tx,
5882 .get_tsf = wl1271_op_get_tsf,
5883 .get_survey = wl1271_op_get_survey,
5884 .sta_state = wl12xx_op_sta_state,
5885 .ampdu_action = wl1271_op_ampdu_action,
5886 .tx_frames_pending = wl1271_tx_frames_pending,
5887 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5888 .set_default_unicast_key = wl1271_op_set_default_key_idx,
5889 .channel_switch = wl12xx_op_channel_switch,
5890 .channel_switch_beacon = wlcore_op_channel_switch_beacon,
5891 .flush = wlcore_op_flush,
5892 .remain_on_channel = wlcore_op_remain_on_channel,
5893 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5894 .add_chanctx = wlcore_op_add_chanctx,
5895 .remove_chanctx = wlcore_op_remove_chanctx,
5896 .change_chanctx = wlcore_op_change_chanctx,
5897 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5898 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5899 .switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
5900 .sta_rc_update = wlcore_op_sta_rc_update,
5901 .sta_statistics = wlcore_op_sta_statistics,
5902 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
5906 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5912 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5913 wl1271_error("Illegal RX rate from HW: %d", rate);
5917 idx = wl->band_rate_to_idx[band][rate];
5918 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5919 wl1271_error("Unsupported RX rate from HW: %d", rate);
5926 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5930 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5933 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5934 wl1271_warning("NIC part of the MAC address wraps around!");
5936 for (i = 0; i < wl->num_mac_addr; i++) {
5937 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5938 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5939 wl->addresses[i].addr[2] = (u8) oui;
5940 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5941 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5942 wl->addresses[i].addr[5] = (u8) nic;
5946 /* we may be one address short at the most */
5947 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5950 * turn on the LAA bit in the first address and use it as
5953 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5954 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5955 memcpy(&wl->addresses[idx], &wl->addresses[0],
5956 sizeof(wl->addresses[0]));
5958 wl->addresses[idx].addr[0] |= BIT(1);
5961 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5962 wl->hw->wiphy->addresses = wl->addresses;
5965 static int wl12xx_get_hw_info(struct wl1271 *wl)
5969 ret = wl12xx_set_power_on(wl);
5973 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5977 wl->fuse_oui_addr = 0;
5978 wl->fuse_nic_addr = 0;
5980 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5984 if (wl->ops->get_mac)
5985 ret = wl->ops->get_mac(wl);
5988 wl1271_power_off(wl);
5992 static int wl1271_register_hw(struct wl1271 *wl)
5995 u32 oui_addr = 0, nic_addr = 0;
5997 if (wl->mac80211_registered)
6000 if (wl->nvs_len >= 12) {
6001 /* NOTE: The wl->nvs->nvs element must be first, in
6002 * order to simplify the casting, we assume it is at
6003 * the beginning of the wl->nvs structure.
6005 u8 *nvs_ptr = (u8 *)wl->nvs;
6008 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6010 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6013 /* if the MAC address is zeroed in the NVS derive from fuse */
6014 if (oui_addr == 0 && nic_addr == 0) {
6015 oui_addr = wl->fuse_oui_addr;
6016 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6017 nic_addr = wl->fuse_nic_addr + 1;
6020 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6022 ret = ieee80211_register_hw(wl->hw);
6024 wl1271_error("unable to register mac80211 hw: %d", ret);
6028 wl->mac80211_registered = true;
6030 wl1271_debugfs_init(wl);
6032 wl1271_notice("loaded");
6038 static void wl1271_unregister_hw(struct wl1271 *wl)
6041 wl1271_plt_stop(wl);
6043 ieee80211_unregister_hw(wl->hw);
6044 wl->mac80211_registered = false;
6048 static int wl1271_init_ieee80211(struct wl1271 *wl)
6051 static const u32 cipher_suites[] = {
6052 WLAN_CIPHER_SUITE_WEP40,
6053 WLAN_CIPHER_SUITE_WEP104,
6054 WLAN_CIPHER_SUITE_TKIP,
6055 WLAN_CIPHER_SUITE_CCMP,
6056 WL1271_CIPHER_SUITE_GEM,
6059 /* The tx descriptor buffer */
6060 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6062 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6063 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6066 /* FIXME: find a proper value */
6067 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6069 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
6070 IEEE80211_HW_SUPPORTS_PS |
6071 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
6072 IEEE80211_HW_HAS_RATE_CONTROL |
6073 IEEE80211_HW_CONNECTION_MONITOR |
6074 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
6075 IEEE80211_HW_SPECTRUM_MGMT |
6076 IEEE80211_HW_AP_LINK_PS |
6077 IEEE80211_HW_AMPDU_AGGREGATION |
6078 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
6079 IEEE80211_HW_QUEUE_CONTROL |
6080 IEEE80211_HW_CHANCTX_STA_CSA;
6082 wl->hw->wiphy->cipher_suites = cipher_suites;
6083 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6085 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6086 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
6087 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
6088 wl->hw->wiphy->max_scan_ssids = 1;
6089 wl->hw->wiphy->max_sched_scan_ssids = 16;
6090 wl->hw->wiphy->max_match_sets = 16;
6092 * Maximum length of elements in scanning probe request templates
6093 * should be the maximum length possible for a template, without
6094 * the IEEE80211 header of the template
6096 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6097 sizeof(struct ieee80211_header);
6099 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6100 sizeof(struct ieee80211_header);
6102 wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6104 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6105 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6106 WIPHY_FLAG_SUPPORTS_SCHED_SCAN |
6107 WIPHY_FLAG_HAS_CHANNEL_SWITCH;
6109 /* make sure all our channels fit in the scanned_ch bitmask */
6110 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6111 ARRAY_SIZE(wl1271_channels_5ghz) >
6112 WL1271_MAX_CHANNELS);
6114 * clear channel flags from the previous usage
6115 * and restore max_power & max_antenna_gain values.
6117 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6118 wl1271_band_2ghz.channels[i].flags = 0;
6119 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6120 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6123 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6124 wl1271_band_5ghz.channels[i].flags = 0;
6125 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6126 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6130 * We keep local copies of the band structs because we need to
6131 * modify them on a per-device basis.
6133 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
6134 sizeof(wl1271_band_2ghz));
6135 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
6136 &wl->ht_cap[IEEE80211_BAND_2GHZ],
6137 sizeof(*wl->ht_cap));
6138 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
6139 sizeof(wl1271_band_5ghz));
6140 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
6141 &wl->ht_cap[IEEE80211_BAND_5GHZ],
6142 sizeof(*wl->ht_cap));
6144 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
6145 &wl->bands[IEEE80211_BAND_2GHZ];
6146 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
6147 &wl->bands[IEEE80211_BAND_5GHZ];
6150 * allow 4 queues per mac address we support +
6151 * 1 cab queue per mac + one global offchannel Tx queue
6153 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6155 /* the last queue is the offchannel queue */
6156 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6157 wl->hw->max_rates = 1;
6159 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6161 /* the FW answers probe-requests in AP-mode */
6162 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6163 wl->hw->wiphy->probe_resp_offload =
6164 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6165 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6166 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6168 /* allowed interface combinations */
6169 wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6170 wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6172 /* register vendor commands */
6173 wlcore_set_vendor_commands(wl->hw->wiphy);
6175 SET_IEEE80211_DEV(wl->hw, wl->dev);
6177 wl->hw->sta_data_size = sizeof(struct wl1271_station);
6178 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6180 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
/*
 * wlcore_alloc_hw - allocate the mac80211 hw object and wlcore state.
 *
 * Allocates an ieee80211_hw with a struct wl1271 as its private area,
 * then the lower-driver private area (priv_size), the TX aggregation
 * buffer (aggr_buf_size), one page for the FW log and the event
 * mailbox (mbox_size), and initializes queues, work items, locks and
 * default member values.  On failure the tail of the function unwinds
 * the allocations in reverse order and ERR_PTR(ret) is returned.
 *
 * NOTE(review): this copy of the function is missing a number of
 * lines (local declarations such as "wl", "i", "j", "ret", "order",
 * several NULL checks, the err_* labels and the success "return hw;"),
 * so it does not build as-is -- restore the missing lines from the
 * upstream wlcore driver before compiling.
 */
struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
	struct ieee80211_hw *hw;
	/* wl1271 lives in the hw private area */
	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
		wl1271_error("could not alloc ieee80211_hw");
	memset(wl, 0, sizeof(*wl));
	/* lower-driver (wl12xx/wl18xx) private data */
	wl->priv = kzalloc(priv_size, GFP_KERNEL);
		wl1271_error("could not alloc wl priv");
		goto err_priv_alloc;
	INIT_LIST_HEAD(&wl->wlvif_list);
	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
	 * we don't allocate any additional resource here, so that's fine.
	for (i = 0; i < NUM_TX_QUEUES; i++)
		for (j = 0; j < WLCORE_MAX_LINKS; j++)
			skb_queue_head_init(&wl->links[j].tx_queue[i]);
	skb_queue_head_init(&wl->deferred_rx_queue);
	skb_queue_head_init(&wl->deferred_tx_queue);
	/* deferred work items and watchdogs */
	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
	INIT_WORK(&wl->tx_work, wl1271_tx_work);
	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
	/* freezable so work is suspended across system sleep */
	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
	if (!wl->freezable_wq) {
	/* default member values before the chip is identified */
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->band = IEEE80211_BAND_2GHZ;
	wl->channel_type = NL80211_CHAN_NO_HT;
	wl->sg_enabled = true;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	wl->recovery_count = 0;
	wl->ap_fw_ps_map = 0;
	wl->system_hlid = WL12XX_SYSTEM_HLID;
	wl->active_sta_count = 0;
	wl->active_link_count = 0;
	init_waitqueue_head(&wl->fwlog_waitq);
	/* The system link is always allocated */
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
	for (i = 0; i < wl->num_tx_desc; i++)
		wl->tx_frames[i] = NULL;
	spin_lock_init(&wl->wl_lock);
	wl->state = WLCORE_STATE_OFF;
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	mutex_init(&wl->mutex);
	mutex_init(&wl->flush_mutex);
	init_completion(&wl->nvs_loading_complete);
	/* aggregation buffer is page-allocated */
	order = get_order(aggr_buf_size);
	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
	if (!wl->aggr_buf) {
	wl->aggr_buf_size = aggr_buf_size;
	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
	if (!wl->dummy_packet) {
	/* Allocate one page for the FW log */
	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
		goto err_dummy_packet;
	wl->mbox_size = mbox_size;
	/* GFP_DMA: the mailbox is DMA'd by the lower transport */
	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
	if (!wl->buffer_32) {
	/* error unwind: free in reverse order of allocation */
	free_page((unsigned long)wl->fwlog);
	dev_kfree_skb(wl->dummy_packet);
	free_pages((unsigned long)wl->aggr_buf, order);
	destroy_workqueue(wl->freezable_wq);
	wl1271_debugfs_exit(wl);
	ieee80211_free_hw(hw);
	return ERR_PTR(ret);
EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6337 int wlcore_free_hw(struct wl1271 *wl)
6339 /* Unblock any fwlog readers */
6340 mutex_lock(&wl->mutex);
6341 wl->fwlog_size = -1;
6342 wake_up_interruptible_all(&wl->fwlog_waitq);
6343 mutex_unlock(&wl->mutex);
6345 wlcore_sysfs_free(wl);
6347 kfree(wl->buffer_32);
6349 free_page((unsigned long)wl->fwlog);
6350 dev_kfree_skb(wl->dummy_packet);
6351 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6353 wl1271_debugfs_exit(wl);
6357 wl->fw_type = WL12XX_FW_TYPE_NONE;
6361 kfree(wl->raw_fw_status);
6362 kfree(wl->fw_status);
6363 kfree(wl->tx_res_if);
6364 destroy_workqueue(wl->freezable_wq);
6367 ieee80211_free_hw(wl->hw);
6371 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6374 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6375 .flags = WIPHY_WOWLAN_ANY,
6376 .n_patterns = WL1271_MAX_RX_FILTERS,
6377 .pattern_min_len = 1,
6378 .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6382 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6384 return IRQ_WAKE_THREAD;
/*
 * wlcore_nvs_cb - completion callback of the asynchronous NVS firmware
 * request started by wlcore_probe().
 *
 * Copies the NVS image (when the file was found), runs the lower
 * driver's setup hook, fetches and configures the IRQ from the
 * platform resources, reads the chip identity and finally registers
 * with mac80211 and sysfs.  On success wl->initialized is set; the
 * firmware is released and nvs_loading_complete is completed on every
 * path so wlcore_remove() never blocks forever.
 *
 * NOTE(review): many lines are missing from this copy -- the "int ret",
 * the NULL/err checks after each step, the "out*" unwind labels and
 * several braces -- restore them from the upstream driver before
 * building; the visible statements below only sketch the flow.
 */
static void wlcore_nvs_cb(const struct firmware *fw, void *context)
	struct wl1271 *wl = context;
	struct platform_device *pdev = wl->pdev;
	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
	struct resource *res;
	irq_handler_t hardirq_fn = NULL;
	/* keep a private copy of the NVS image; fw is released below */
	wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
		wl1271_error("Could not allocate nvs data");
	wl->nvs_len = fw->size;
		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
	/* chip-family specific setup (wl12xx/wl18xx ops) */
	ret = wl->ops->setup(wl);
	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
	/* adjust some runtime configuration parameters */
	wlcore_adjust_conf(wl);
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
		wl1271_error("Could not get IRQ resource");
	wl->irq = res->start;
	wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
	wl->if_ops = pdev_data->if_ops;
	/* edge-triggered IRQs need a hard-IRQ half to wake the thread */
	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
		hardirq_fn = wlcore_hardirq;
		wl->irq_flags |= IRQF_ONESHOT;
	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
				   wl->irq_flags, pdev->name, wl);
		wl1271_error("request_irq() failed: %d", ret);
	/* allow the WLAN IRQ to wake the system from suspend */
	ret = enable_irq_wake(wl->irq);
		wl->irq_wake_enabled = true;
		device_init_wakeup(wl->dev, 1);
		if (pdev_data->pwr_in_suspend)
			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
	/* IRQ stays disabled until the interface is brought up */
	disable_irq(wl->irq);
	ret = wl12xx_get_hw_info(wl);
		wl1271_error("couldn't get hw info");
	ret = wl->ops->identify_chip(wl);
	ret = wl1271_init_ieee80211(wl);
	ret = wl1271_register_hw(wl);
	ret = wlcore_sysfs_init(wl);
	wl->initialized = true;
	/* error unwind (labels missing in this copy) */
	wl1271_unregister_hw(wl);
	free_irq(wl->irq, wl);
	/* done with the firmware image; unblock wlcore_remove() */
	release_firmware(fw);
	complete_all(&wl->nvs_loading_complete);
6492 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6496 if (!wl->ops || !wl->ptable)
6499 wl->dev = &pdev->dev;
6501 platform_set_drvdata(pdev, wl);
6503 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6504 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6507 wl1271_error("request_firmware_nowait failed: %d", ret);
6508 complete_all(&wl->nvs_loading_complete);
6513 EXPORT_SYMBOL_GPL(wlcore_probe);
6515 int wlcore_remove(struct platform_device *pdev)
6517 struct wl1271 *wl = platform_get_drvdata(pdev);
6519 wait_for_completion(&wl->nvs_loading_complete);
6520 if (!wl->initialized)
6523 if (wl->irq_wake_enabled) {
6524 device_init_wakeup(wl->dev, 0);
6525 disable_irq_wake(wl->irq);
6527 wl1271_unregister_hw(wl);
6528 free_irq(wl->irq, wl);
6533 EXPORT_SYMBOL_GPL(wlcore_remove);
/* Runtime debug bitmask; exported so the chip-specific wlcore modules
 * can share it -- TODO confirm consumers outside this file. */
u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug_level, "wl12xx debugging level");

/* FW logger mode; perm 0 = load-time only, not visible in sysfs */
module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(fwlog,
		 "FW logger options: continuous, ondemand, dbgpins or disable");

/* -1 (the declared default) presumably means "use driver default";
 * parsing happens elsewhere -- verify in wlcore_adjust_conf() */
module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");

module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");

module_param(no_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL12XX_NVS_NAME);